filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129, ⌀) | variableargcount (float64, 0, ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
src/test/java/views/AbstractViewTest.java | package views;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.openqa.selenium.By;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.chrome.ChromeDriver;
import com.vaadin.flow.theme.AbstractTheme;
import com.vaadin.testbench.ScreenshotOnFailureRule;
import com.vaadin.testbench.TestBench;
import com.vaadin.testbench.parallel.ParallelTest;
/**
* Base class for TestBench IntegrationTests on chrome.
* <p>
* The tests use Chrome driver (see pom.xml for integration-tests profile) to
* run integration tests on a headless Chrome. If a property {@code test.use.hub}
* is set to true, {@code AbstractViewTest} will assume that the
* TestBench test is running in a CI environment. In order to keep this
* class light, it makes certain assumptions about the CI environment (such
* as available environment variables). It is not advisable to use this class
* as a base class for your own TestBench tests.
* <p>
* To learn more about TestBench, visit
* <a href="https://vaadin.com/docs/v10/testbench/testbench-overview.html">Vaadin TestBench</a>.
*/
public abstract class AbstractViewTest extends ParallelTest {
private static final int SERVER_PORT = 8080;
private final String route;
private final By rootSelector;
@Rule
public ScreenshotOnFailureRule rule = new ScreenshotOnFailureRule(this,
false);
public AbstractViewTest() {
this("", By.tagName("body"));
}
protected AbstractViewTest(String route, By rootSelector) {
this.route = route;
this.rootSelector = rootSelector;
}
@Before
public void setup() throws Exception {
if (isUsingHub()) {
super.setup();
} else {
setDriver(TestBench.createDriver(new ChromeDriver()));
}
getDriver().get(getURL(route));
}
/**
* Convenience method for getting the root element of the view based on
* the selector passed to the constructor.
*
* @return the root element
*/
protected WebElement getRootElement() {
return findElement(rootSelector);
}
/**
* Asserts that the given {@code element} is rendered using a theme
* identified by {@code themeClass}. If the theme is not found, JUnit
* assert will fail the test case.
*
* @param element web element to check for the theme
* @param themeClass theme class (such as {@code Lumo.class})
*/
protected void assertThemePresentOnElement(
WebElement element, Class<? extends AbstractTheme> themeClass) {
String themeName = themeClass.getSimpleName().toLowerCase();
Boolean hasStyle = (Boolean) executeScript("" +
"var styles = Array.from(arguments[0]._template.content" +
".querySelectorAll('style'))" +
".filter(style => style.textContent.indexOf('" +
themeName + "') > -1);" +
"return styles.length > 0;", element);
Assert.assertTrue("Element '" + element.getTagName() + "' should have" +
" had theme '" + themeClass.getSimpleName() + "'.",
hasStyle);
}
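// A hypothetical usage sketch (not part of the original class): a concrete view test
// could call the helper above after navigating to its route, e.g.
//   assertThemePresentOnElement(getRootElement(), Lumo.class);
// where Lumo is com.vaadin.flow.theme.lumo.Lumo and is assumed to be on the classpath.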
/**
* Property set to true when running on a test hub.
*/
private static final String USE_HUB_PROPERTY = "test.use.hub";
/**
* Returns deployment host name concatenated with route.
*
* @return URL to route
*/
private static String getURL(String route) {
return String.format("http://%s:%d/%s", getDeploymentHostname(),
SERVER_PORT, route);
}
/**
* Returns whether we are using a test hub. This means that the starter
* is running tests in Vaadin's CI environment, and uses TestBench to
* connect to the testing hub.
*
* @return whether we are using a test hub
*/
private static boolean isUsingHub() {
return Boolean.TRUE.toString().equals(
System.getProperty(USE_HUB_PROPERTY));
}
/**
* If running on CI, get the host name from environment variable HOSTNAME
*
* @return the host name
*/
private static String getDeploymentHostname() {
return isUsingHub() ? System.getenv("HOSTNAME") : "localhost";
}
}
| ["\"HOSTNAME\""] | [] | ["HOSTNAME"] | [] | ["HOSTNAME"] | java | 1 | 0 | |
cloudbucket/cloudBucket.go | package cloudbucket
import (
"context"
"io"
"net/http"
"net/url"
"os"
"cloud.google.com/go/storage"
"github.com/gin-gonic/gin"
// "google.golang.org/api/option"
)
var (
storageClient *storage.Client
)
// HandleFileUploadToBucket uploads a file to the bucket.
func HandleFileUploadToBucket(c *gin.Context) {
bucket := os.Getenv("BUCKET_NAME")
var err error
ctx := context.Background()
storageClient, err = storage.NewClient(ctx)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{
"message": err.Error(),
"error": true,
})
return
}
f, uploadedFile, err := c.Request.FormFile("file")
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{
"message": err.Error(),
"error": true,
})
return
}
defer f.Close()
sw := storageClient.Bucket(bucket).Object(uploadedFile.Filename).NewWriter(ctx)
if _, err := io.Copy(sw, f); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{
"message": err.Error(),
"error": true,
})
return
}
if err := sw.Close(); err != nil {
c.JSON(http.StatusInternalServerError, gin.H{
"message": err.Error(),
"error": true,
})
return
}
u, err := url.Parse("/" + bucket + "/" + sw.Attrs().Name)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{
"message": err.Error(),
"Error": true,
})
return
}
c.JSON(http.StatusOK, gin.H{
"message": "file uploaded successfully",
"pathname": u.EscapedPath(),
})
} | ["\"BUCKET_NAME\""] | [] | ["BUCKET_NAME"] | [] | ["BUCKET_NAME"] | go | 1 | 0 | |
integration/tests/test_agent.py | import requests
import os
import time
import pytest
import yaml
import uuid
URL = os.environ["URL"]
EMAIL = os.environ["EMAIL"]
PASSWORD = os.environ["PASSWORD"]
CLUSTER_NAME = "agent-integration-test-" + uuid.uuid4().hex[:5]
NAMESPACE = "agent-integration-test"
AGENT_IMAGE = os.environ["AGENT_IMAGE"]
TEST_CONSTRAINTS_NAMES = [
"Custom Policy - Using latest Image Tag",
"Custom Policy - Services are not using ports over 1024",
"Custom Policy - Missing Owner Label",
"Custom Policy - Containers running with PrivilegeEscalation"
]
AGENT_YAML_PATH = "/tmp/agent_resources.yaml"
def patch_agent_resources(yml):
resources = yaml.safe_load_all(yml)
output_resources = []
for resource in resources:
if resource.get("kind") == "Deployment":
containers = resource["spec"]["template"]["spec"]["containers"]
for container in containers:
if container.get("name") == "agent":
container["args"] = ["-test.coverprofile=coverage.txt", "DEVEL"] + container["args"]
container_envars = container.get("env", [])
container_envars.append({
"name": "CODECOV_URL",
"value": os.environ.get("CODECOV_URL")
})
container["env"] = container_envars
container["image"] = AGET_IMAGE
del container["securityContext"]
output_resources.append(resource)
return output_resources
def query_constraints_by_names(session, account_id, names):
body = {
"filters": {
"names": names,
"enabled": True,
}
}
resp = session.post(URL + f"/api/{account_id}/policies/v1/constraints/query", json=body)
assert resp.ok, "failed to query constraints"
return resp.json()["data"]
def query_templates_by_names(session, account_id, names):
body = {
"filters": {
"names": names
}
}
resp = session.post(URL + f"/api/{account_id}/policies/v1/templates/query", json=body)
assert resp.ok, "failed to query templates"
return resp.json()["data"]
def create_test_policy(session, account_id):
templates = query_templates_by_names(session, account_id, ["Metadata Missing Label And Value"])
body = templates[0]
data = {
"id": str(uuid.uuid4()),
"name": "Test Policy " + uuid.uuid4().hex[:5],
"category": body["category_id"],
"enabled": True,
"targets": {
"cluster": [],
"kind": [
"Deployment",
"ReplicaSet"
],
"namespace": [],
"label": {"test": "agent.integration.test"}
},
"parameters": [
{
"name": "label",
"type": "string",
"required": True,
"default": "test-label",
},
{
"name": "value",
"type": "string",
"required": True,
"default": "test",
},
{
"name": "exclude_namespace",
"type": "string",
"default": None,
"required": False
},
{
"name": "exclude_label_key",
"type": "string",
"default": None,
"required": False
},
{
"name": "exclude_label_value",
"type": "string",
"default": None,
"required": False
}
]
}
body.update(data)
resp = session.post(URL + f"/api/{account_id}/policies/v1/policies", json=body)
assert resp.ok, "Failed to create test policy"
resp = session.get(URL + f"/api/{account_id}/policies/v1/policies/{resp.json()['id']}")
assert resp.ok, "Failed to get created test policy"
return body
def update_test_policy(session, account_id, policy):
policy["parameters"] = [
{
"name": "label",
"type": "string",
"required": True,
"default": "test-label-2",
},
{
"name": "value",
"type": "string",
"required": True,
"default": "test",
},
{
"name": "exclude_namespace",
"type": "string",
"default": None,
"required": False
},
{
"name": "exclude_label_key",
"type": "string",
"default": None,
"required": False
},
{
"name": "exclude_label_value",
"type": "string",
"default": None,
"required": False
}
]
resp = session.put(URL + f"/api/{account_id}/policies/v1/policies/{policy['id']}", json=policy)
assert resp.ok, "Failed to update test policy"
def delete_test_policy(session, account_id, policy_id):
resp = session.delete(URL + f"/api/{account_id}/policies/v1/constraints/{policy_id}")
assert resp.ok, "Failed to delete test constraint"
def get_constraint_violation_count(session, account_id, cluster_id, constraint_id):
body = {
"filters": {
"cluster_id":[cluster_id],
"constraint_id":[constraint_id]
},
"limit": 100
}
resp = session.post(URL + f"/api/{account_id}/recommendations/v1/query", json=body)
assert resp.ok, "Failed to get cluster recommendations"
return resp.json()["count"]
class TestViolations:
@pytest.fixture
def prepare_env(self):
os.system(f"kubectl create namespace {NAMESPACE}")
exit_code = os.system(f"kubectl apply -f resources.yaml")
assert exit_code == 0, "Failed to setup testing environment"
yield
exit_code = os.system(f"kubectl delete -f resources.yaml")
assert exit_code == 0, "Failed to cleanup testing environment"
@pytest.fixture
def login(self, prepare_env):
session = requests.Session()
body = {"email": EMAIL, "password": PASSWORD}
resp = session.post(URL + "/api/accounts/v1/public/login", json=body)
assert resp.ok, "Login failed"
account_id = resp.json()["account_id"]
auth = resp.headers["authorization"]
session.headers = {"authorization": auth}
yield account_id, session
@pytest.fixture
def create_cluster(self, login):
account_id, session = login
test_constraints = query_constraints_by_names(session, account_id, TEST_CONSTRAINTS_NAMES)
test_policy = create_test_policy(session, account_id)
body = {"name": CLUSTER_NAME, "description": "agent integration test"}
resp = session.post(URL + f"/api/accounts/v1/{account_id}/clusters", json=body)
assert resp.ok, "Creating cluster failed"
cluster_id = resp.json()["id"]
resp = session.get(URL + f"/api/accounts/v1/{account_id}/clusters/{cluster_id}/url")
assert resp.ok, "Failed to get cluster connect url"
agent_yaml_url = resp.json()["url"]
response = requests.get(agent_yaml_url)
response.raise_for_status()
agent_yaml = patch_agent_resources(response.text)
with open(AGENT_YAML_PATH, "w") as f:
yaml.dump_all(agent_yaml, f)
exit_code = os.system(f"kubectl apply -f {AGENT_YAML_PATH}")
assert exit_code == 0, "Failed to run agent create deployment command"
yield account_id, cluster_id, session, test_policy, test_constraints
exit_code = os.system(f"kubectl delete -f {AGENT_YAML_PATH}")
assert exit_code == 0, "Failed to clean up agent deployment"
resp = session.delete(URL + f"/api/accounts/v1/{account_id}/clusters/{cluster_id}")
assert resp.ok, "Failed to delete cluster from console"
try:
delete_test_policy(session, account_id, test_policy["id"])
except:
pass
def test_agent_violations(self, create_cluster):
print("[+] Create cluster")
account_id, cluster_id, session, test_policy, test_constraints = create_cluster
time.sleep(180)
print("[+] Check violations")
for constraint in test_constraints:
violations_count = get_constraint_violation_count(session, account_id, cluster_id, constraint["id"])
assert violations_count == 1, "constraint: %s, expected 1 violation, but found %d" % (constraint["name"], violations_count)
print("[+] Apply resources fixes")
exit_code = os.system(f"kubectl apply -f fixed_resources.yaml")
assert exit_code == 0, "Failed to apply fixed resources environment"
time.sleep(180)
print("[+] Check violations")
for constraint in test_constraints:
violations_count = get_constraint_violation_count(session, account_id, cluster_id, constraint["id"])
assert violations_count == 0, "constraint: %s, expected 0 violations, but found %d" % (constraint["name"], violations_count)
print("[+] Update test policy")
update_test_policy(session, account_id, test_policy)
time.sleep(180)
print("[+] Check violations")
violations_count = get_constraint_violation_count(session, account_id, cluster_id, test_policy["id"])
assert violations_count == 1, f"expected 1 violations after updating test constraint, but found {violations_count}"
print("[+] Delete test policy")
delete_test_policy(session, account_id, test_policy["id"])
time.sleep(180)
print("[+] Check violations")
violations_count = get_constraint_violation_count(session, account_id, cluster_id, test_policy["id"])
assert violations_count == 0, f"expected 0 violations after deleting test constraint, but found {violations_count}"
| [] | [] | ["EMAIL", "PASSWORD", "URL", "CODECOV_URL", "AGENT_IMAGE"] | [] | ["EMAIL", "PASSWORD", "URL", "CODECOV_URL", "AGENT_IMAGE"] | python | 5 | 0 | |
adapters/uploader.py | import logging
import os
import subprocess
import datetime
from app import yadisk_dir, yadisk_token
def upload(local_path, project_name):
for file in os.listdir(local_path):
logging.info(u'Uploading ' + file)
src = os.path.join(local_path, file)
today = '{:%Y-%m-%d}'.format(datetime.date.today())
dest = os.path.join(yadisk_dir, project_name + '/', today, file).replace(os.path.sep, "/")
try:
my_env = os.environ.copy()
my_env["YDCMD_TOKEN"] = yadisk_token
proc = subprocess.Popen('ydcmd put %s %s --verbose' % (src, dest), shell=True, env=my_env)
outs, errs = proc.communicate()
if errs:
raise ConnectionError(errs)
except subprocess.CalledProcessError as e:
logging.error('ydcmd error %s %s' % (e.stdout, e.stderr))
| [] | [] | [] | [] | [] | python | 0 | 0 | |
data_related/data_augmentation/signal_augment.py | import os
import subprocess
import numpy as np
from tqdm import tqdm
from typing import Dict
MAX_FREQ = 7999
def to_str(v):
if isinstance(v, tuple):
s = " ".join(str(x) for x in v)
elif isinstance(v, float) or isinstance(v, int):
s = str(v)
else:
assert False
return s
def build_sox_distortions(audio_file, params):
param_str = " ".join([k + " " + to_str(v) for k, v in params.items()])
sox_params = "sox {} -p {} ".format(audio_file, param_str)
return sox_params
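# Minimal sketch of the command string produced above (hypothetical input, not from the original code):
#   build_sox_distortions("in.wav", {"tempo": 1.1, "pitch": 50})
#   -> "sox in.wav -p tempo 1.1 pitch 50 "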
def build_sox_noise(
audio_file,
amod_lowpass_cutoff=0.1,
lowpass_cutoff=MAX_FREQ,
highpass_cutoff=1,
noise_gain=-4,
):
"""
play original.wav synth whitenoise lowpass 0.1 synth whitenoise amod gain -n 0 lowpass 100 highpass 1
"""
sox_params = "sox {audio_file} -p synth whitenoise lowpass {amod_lowpass_cutoff} synth whitenoise amod gain -n {noise_gain} lowpass {lowpass_cutoff} highpass {highpass_cutoff}".format(
audio_file=audio_file,
amod_lowpass_cutoff=amod_lowpass_cutoff,
lowpass_cutoff=lowpass_cutoff,
highpass_cutoff=highpass_cutoff,
noise_gain=noise_gain,
)
return sox_params
def build_varying_amplitude_factor(audio_file, lowpass_cutoff=1, ac_gain=-9):
ac = "sox {} -p synth whitenoise lowpass {} gain -n {}".format(
audio_file, lowpass_cutoff, ac_gain
)
dc = "sox {} -p gain -90 dcshift 0.5".format(audio_file)
return "sox -m <({}) <({}) -p".format(ac, dc)
def multiply_signals(signal_a, signal_b):
return ("sox -T <({signal_a}) <({signal_b}) -p").format(
signal_a=signal_a, signal_b=signal_b,
)
def build_sox_interference(
interfere_file, interfere_signal, lowpass_cutoff=1, ac_gain=-6
):
factor = build_varying_amplitude_factor(interfere_file, lowpass_cutoff, ac_gain)
return multiply_signals(factor, interfere_signal)
def add_signals_trim_to_len(original, signals, augmented):
signals_to_add = " ".join(["<(%s)" % s for s in signals])
sox_cmd = "sox -m {signals} -b 16 {augmented} trim 0 $(soxi -D {original})".format(
signals=signals_to_add, original=original, augmented=augmented
)
return sox_cmd
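# Sketch of the resulting shell command (hypothetical file names): the individual sox pipelines are
# mixed with `sox -m` via process substitution and trimmed to the duration of the original file:
#   sox -m <(sig_cmd) <(noise_cmd) -b 16 augmented.wav trim 0 $(soxi -D original.wav)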
def build_random_bandpass(min_low=50, min_band_width=100, max_high=1000) -> Dict:
d = {}
max_high_cutoff = MAX_FREQ
if np.random.choice([True, False], p=[0.5, 0.5]):
lowpass = int(round(np.random.uniform(low=min_low, high=MAX_FREQ)))
d["lowpass"] = lowpass
max_high_cutoff = lowpass - min_band_width
if np.random.choice([True, False], p=[0.5, 0.5]):
highpass = int(
round(np.random.uniform(low=1, high=min(max_high, max_high_cutoff)))
)
d["highpass"] = highpass
return d
def augment_with_sox(original_file, audio_files, augmented_file):
interfere_file = np.random.choice(audio_files)
min_SNR = 20 # normal:20, less:30, evenless:40
min_SIR = 5 # normal:10, less:20, evenless:30
signal_gain = round(np.random.uniform(low=-10, high=0), 2)
signal_params = {
"tempo": round(np.random.triangular(left=0.7, mode=1.0, right=1.3), 2),
"pitch": int(
round(np.random.triangular(left=-200, mode=0, right=200))
), # normal 100, less: 50, evenless: 30
"reverb": (int(round(np.random.uniform(low=0, high=50))), 50, 100, 100, 0, 0,),
"gain -n": signal_gain,
}
signal_params.update(build_random_bandpass(1000, 1000, 100))
interfere_params = {
"tempo": round(np.random.uniform(low=0.6, high=1.4), 2),
"pitch": int(round(np.random.uniform(low=-500, high=500))),
"reverb": (int(round(np.random.uniform(low=0, high=100))), 50, 100, 100, 0, 0),
"gain -n": round(np.random.uniform(low=-50, high=signal_gain - min_SIR), 2),
}
interfere_params.update(build_random_bandpass(50, 100, 1000))
# params = {'signal_params':signal_params,'interfere_params':interfere_params,'noise_power':noise_power}
# pprint(params)
signal = build_sox_distortions(original_file, signal_params)
interfere_signal = build_sox_distortions(interfere_file, interfere_params)
noise_power = round(np.random.uniform(-60, signal_gain - min_SNR), 2)
lowpass = int(round(np.random.uniform(low=100, high=MAX_FREQ)))
highpass = int(round(np.random.uniform(low=1, high=lowpass)))
noise = build_sox_noise(
original_file, np.random.uniform(0.1, 2), lowpass, highpass, noise_power
)
interf = build_sox_interference(
interfere_file,
interfere_signal,
lowpass_cutoff=np.random.uniform(0.5, 2),
ac_gain=int(round(np.random.uniform(-9, -3))),
)
sox_cmd = add_signals_trim_to_len(
original_file, [signal, noise, interf], augmented_file
)
FNULL = open(os.devnull, "w")
subprocess.call(["bash", "-c", sox_cmd], stdout=FNULL, stderr=subprocess.STDOUT)
# subprocess.call(["bash", "-c", sox_cmd])
# output = subprocess.check_output(["bash", "-c", sox_cmd])
# if len(output)>0 and 'FAIL' in output:
# print(output)
# return 1 if len(output)>0 else 0
def augment_with_specific_params():
signal_gain = 0
signal_params = dict(tempo=1.0, pitch=0, reverb=0)
signal_params["gain -n"] = 0
signal = build_sox_distortions(original, signal_params)
interfere_signal = build_sox_distortions(
interfering, dict(gain=signal_gain - 10, tempo=0.8, pitch=100, reverb=50)
)
noise = build_sox_noise(
original, noise_gain=signal_gain - 20, lowpass_cutoff=6000, highpass_cutoff=10
)
interf = build_sox_interference(interfering, interfere_signal)
sox_cmd = add_signals_trim_to_len(original, [signal, noise, interf], augmented)
subprocess.call(["bash", "-c", sox_cmd])
if __name__ == "__main__":
import librosa
original = "../../original.wav"
augmented = "/tmp/augmented.wav"
interfering = "../../interference.wav"
# augment_with_specific_params()
for k in range(9):
augment_with_sox(original, [interfering], "/tmp/augmented_%d.wav" % k)
# assert False
# path = os.environ['HOME']+"/data/asr_data/SPANISH"
# audio_files = librosa.util.find_files(path)
#
# with open('spanish_train_manifest.csv') as f:
# audio_text_files = f.readlines()
# audio_files = [x.strip().split(",")[0] for x in audio_text_files]
#
# for k in tqdm(range(100000)):
# original = np.random.choice(audio_files)
# random_augmentation(original, audio_files, augmented)
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
main.go | //go:generate go install -v github.com/kevinburke/go-bindata/go-bindata
//go:generate go-bindata -prefix res/ -pkg assets -o assets/assets.go res/Firefox.lnk
//go:generate go install -v github.com/josephspurrier/goversioninfo/cmd/goversioninfo
//go:generate goversioninfo -icon=res/papp.ico -manifest=res/papp.manifest
package main
import (
"fmt"
"os"
"path"
"strings"
"text/template"
"github.com/Jeffail/gabs"
"github.com/pkg/errors"
"github.com/portapps/phyrox-portable/assets"
"github.com/portapps/portapps/v3"
"github.com/portapps/portapps/v3/pkg/log"
"github.com/portapps/portapps/v3/pkg/mutex"
"github.com/portapps/portapps/v3/pkg/shortcut"
"github.com/portapps/portapps/v3/pkg/utl"
"github.com/portapps/portapps/v3/pkg/win"
)
type config struct {
Profile string `yaml:"profile" mapstructure:"profile"`
MultipleInstances bool `yaml:"multiple_instances" mapstructure:"multiple_instances"`
Locale string `yaml:"locale" mapstructure:"locale"`
Cleanup bool `yaml:"cleanup" mapstructure:"cleanup"`
}
var (
app *portapps.App
cfg *config
)
const (
defaultLocale = "en-US"
)
func init() {
var err error
// Default config
cfg = &config{
Profile: "default",
MultipleInstances: false,
Locale: defaultLocale,
Cleanup: false,
}
// Init app
if app, err = portapps.NewWithCfg("phyrox-portable", "Phyrox", cfg); err != nil {
log.Fatal().Err(err).Msg("Cannot initialize application. See log file for more info.")
}
}
func main() {
utl.CreateFolder(app.DataPath)
profileFolder := utl.CreateFolder(app.DataPath, "profile", cfg.Profile)
app.Process = utl.PathJoin(app.AppPath, "firefox.exe")
app.Args = []string{
"--profile",
profileFolder,
}
// Set env vars
crashreporterFolder := utl.CreateFolder(app.DataPath, "crashreporter")
pluginsFolder := utl.CreateFolder(app.DataPath, "plugins")
os.Setenv("MOZ_CRASHREPORTER", "0")
os.Setenv("MOZ_CRASHREPORTER_DATA_DIRECTORY", crashreporterFolder)
os.Setenv("MOZ_CRASHREPORTER_DISABLE", "1")
os.Setenv("MOZ_CRASHREPORTER_NO_REPORT", "1")
os.Setenv("MOZ_DATA_REPORTING", "0")
os.Setenv("MOZ_MAINTENANCE_SERVICE", "0")
os.Setenv("MOZ_PLUGIN_PATH", pluginsFolder)
os.Setenv("MOZ_UPDATER", "0")
// Create and check mutex
mu, err := mutex.Create(app.ID)
defer mutex.Release(mu)
if err != nil {
if !cfg.MultipleInstances {
log.Error().Msg("You have to enable multiple instances in your configuration if you want to launch another instance")
if _, err = win.MsgBox(
fmt.Sprintf("%s portable", app.Name),
"Other instance detected. You have to enable multiple instances in your configuration if you want to launch another instance.",
win.MsgBoxBtnOk|win.MsgBoxIconError); err != nil {
log.Error().Err(err).Msg("Cannot create dialog box")
}
return
} else {
log.Warn().Msg("Another instance is already running")
}
}
// Cleanup on exit
if cfg.Cleanup {
defer func() {
utl.Cleanup([]string{
path.Join(os.Getenv("APPDATA"), "Mozilla", "Firefox"),
path.Join(os.Getenv("LOCALAPPDATA"), "Mozilla", "Firefox"),
path.Join(os.Getenv("USERPROFILE"), "AppData", "LocalLow", "Mozilla"),
})
}()
}
// Locale
locale, err := checkLocale()
if err != nil {
log.Error().Err(err).Msg("Cannot set locale")
}
// Multiple instances
if cfg.MultipleInstances {
log.Info().Msg("Multiple instances enabled")
app.Args = append(app.Args, "--no-remote")
}
// Policies
if err := createPolicies(); err != nil {
log.Fatal().Err(err).Msg("Cannot create policies")
}
// Autoconfig
prefFolder := utl.CreateFolder(app.AppPath, "defaults/pref")
autoconfig := utl.PathJoin(prefFolder, "autoconfig.js")
if err := utl.CreateFile(autoconfig, `//
pref("general.config.filename", "portapps.cfg");
pref("general.config.obscure_value", 0);`); err != nil {
log.Fatal().Err(err).Msg("Cannot write autoconfig.js")
}
// Mozilla cfg
mozillaCfgPath := utl.PathJoin(app.AppPath, "portapps.cfg")
mozillaCfgFile, err := os.Create(mozillaCfgPath)
if err != nil {
log.Fatal().Err(err).Msg("Cannot create portapps.cfg")
}
mozillaCfgData := struct {
Locale string
}{
locale,
}
mozillaCfgTpl := template.Must(template.New("mozillaCfg").Parse(`// Set locale
pref("intl.locale.requested", "{{ .Locale }}");
// Extensions scopes
lockPref("extensions.enabledScopes", 4);
lockPref("extensions.autoDisableScopes", 3);
// Don't show 'know your rights' on first run
pref("browser.rights.3.shown", true);
// Don't show WhatsNew on first run after every update
pref("browser.startup.homepage_override.mstone", "ignore");
`))
if err := mozillaCfgTpl.Execute(mozillaCfgFile, mozillaCfgData); err != nil {
log.Fatal().Err(err).Msg("Cannot write portapps.cfg")
}
// Fix extensions path
if err := updateAddonStartup(profileFolder); err != nil {
log.Error().Err(err).Msg("Cannot fix extensions path")
}
// Copy default shortcut
shortcutPath := path.Join(os.Getenv("APPDATA"), "Microsoft", "Windows", "Start Menu", "Programs", "Phyrox Portable.lnk")
defaultShortcut, err := assets.Asset("Firefox.lnk")
if err != nil {
log.Error().Err(err).Msg("Cannot load asset Firefox.lnk")
}
err = os.WriteFile(shortcutPath, defaultShortcut, 0644)
if err != nil {
log.Error().Err(err).Msg("Cannot write default shortcut")
}
// Update default shortcut
err = shortcut.Create(shortcut.Shortcut{
ShortcutPath: shortcutPath,
TargetPath: app.Process,
Arguments: shortcut.Property{Clear: true},
Description: shortcut.Property{Value: "Phyrox Portable by Portapps"},
IconLocation: shortcut.Property{Value: app.Process},
WorkingDirectory: shortcut.Property{Value: app.AppPath},
})
if err != nil {
log.Error().Err(err).Msg("Cannot create shortcut")
}
defer func() {
if err := os.Remove(shortcutPath); err != nil {
log.Error().Err(err).Msg("Cannot remove shortcut")
}
}()
defer app.Close()
app.Launch(os.Args[1:])
}
func createPolicies() error {
appFile := utl.PathJoin(utl.CreateFolder(app.AppPath, "distribution"), "policies.json")
dataFile := utl.PathJoin(app.DataPath, "policies.json")
defaultPolicies := struct {
Policies map[string]interface{} `json:"policies"`
}{
Policies: map[string]interface{}{
"DisableAppUpdate": true,
"DontCheckDefaultBrowser": true,
},
}
jsonPolicies, err := gabs.Consume(defaultPolicies)
if err != nil {
return errors.Wrap(err, "Cannot consume default policies")
}
log.Debug().Msgf("Default policies: %s", jsonPolicies.String())
if utl.Exists(dataFile) {
rawCustomPolicies, err := os.ReadFile(dataFile)
if err != nil {
return errors.Wrap(err, "Cannot read custom policies")
}
jsonPolicies, err = gabs.ParseJSON(rawCustomPolicies)
if err != nil {
return errors.Wrap(err, "Cannot consume custom policies")
}
log.Debug().Msgf("Custom policies: %s", jsonPolicies.String())
jsonPolicies.Set(true, "policies", "DisableAppUpdate")
jsonPolicies.Set(true, "policies", "DontCheckDefaultBrowser")
}
log.Debug().Msgf("Applied policies: %s", jsonPolicies.String())
err = os.WriteFile(appFile, []byte(jsonPolicies.StringIndent("", " ")), 0644)
if err != nil {
return errors.Wrap(err, "Cannot write policies")
}
return nil
}
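// For reference, a sketch of the policies.json written by createPolicies when no custom
// data file exists (these two keys are always forced to true, as in the code above):
//
//   {
//     "policies": {
//       "DisableAppUpdate": true,
//       "DontCheckDefaultBrowser": true
//     }
//   }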
func checkLocale() (string, error) {
extSourceFile := fmt.Sprintf("%s.xpi", cfg.Locale)
extDestFile := fmt.Sprintf("langpack-%[email protected]", cfg.Locale)
extsFolder := utl.CreateFolder(app.AppPath, "distribution", "extensions")
localeXpi := utl.PathJoin(app.AppPath, "langs", extSourceFile)
// If default locale skip (already embedded)
if cfg.Locale == defaultLocale {
return cfg.Locale, nil
}
// Check .xpi file exists
if !utl.Exists(localeXpi) {
return defaultLocale, fmt.Errorf("XPI file does not exist in %s", localeXpi)
}
// Copy .xpi
if err := utl.CopyFile(localeXpi, utl.PathJoin(extsFolder, extDestFile)); err != nil {
return defaultLocale, err
}
return cfg.Locale, nil
}
func updateAddonStartup(profileFolder string) error {
lz4File := path.Join(profileFolder, "addonStartup.json.lz4")
if !utl.Exists(lz4File) || app.Prev.RootPath == "" {
return nil
}
lz4Raw, err := mozLz4Decompress(lz4File)
if err != nil {
return err
}
prevPathLin := strings.Replace(utl.FormatUnixPath(app.Prev.RootPath), ` `, `%20`, -1)
currPathLin := strings.Replace(utl.FormatUnixPath(app.RootPath), ` `, `%20`, -1)
lz4Str := strings.Replace(string(lz4Raw), prevPathLin, currPathLin, -1)
prevPathWin := strings.Replace(strings.Replace(utl.FormatWindowsPath(app.Prev.RootPath), `\`, `\\`, -1), ` `, `%20`, -1)
currPathWin := strings.Replace(strings.Replace(utl.FormatWindowsPath(app.RootPath), `\`, `\\`, -1), ` `, `%20`, -1)
lz4Str = strings.Replace(lz4Str, prevPathWin, currPathWin, -1)
lz4Enc, err := mozLz4Compress([]byte(lz4Str))
if err != nil {
return err
}
return os.WriteFile(lz4File, lz4Enc, 0644)
}
| ["\"APPDATA\"", "\"LOCALAPPDATA\"", "\"USERPROFILE\"", "\"APPDATA\""] | [] | ["APPDATA", "USERPROFILE", "LOCALAPPDATA"] | [] | ["APPDATA", "USERPROFILE", "LOCALAPPDATA"] | go | 3 | 0 | |
examples/user/sendMagicLink/sendAMagicLink/main.go | package main
import (
"fmt"
"os"
"go.m3o.com/user"
)
// Login using email only - Passwordless
func main() {
userService := user.NewUserService(os.Getenv("M3O_API_TOKEN"))
rsp, err := userService.SendMagicLink(&user.SendMagicLinkRequest{
Address: "www.example.com",
Email: "[email protected]",
Endpoint: "verifytoken",
FromName: "Awesome Dot Com",
Subject: "MagicLink to access your account",
TextContent: `Hi there,
Click here to access your account $micro_verification_link`,
})
fmt.Println(rsp, err)
}
| ["\"M3O_API_TOKEN\""] | [] | ["M3O_API_TOKEN"] | [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
Code/log/run_for_test.py | # -*- coding:utf-8 -*-
import time
import argparse
import pickle
import sys, os
import random
import json
import torch
sys.path.append(os.getcwd().replace("src/dialogue_system/run",""))
from src.dialogue_system.agent import AgentRandom
from src.dialogue_system.agent import AgentDQN
from src.dialogue_system.agent import AgentRule
from src.dialogue_system.agent import AgentHRL
from src.dialogue_system.agent import AgentWithGoalJoint
from src.dialogue_system.agent import AgentWithGoal
from src.dialogue_system.agent.agent_with_goal_2 import AgentWithGoal as AgentWithGoal2
from src.dialogue_system.run.utils import verify_params
from src.dialogue_system.utils.utils import get_dir_list
from src.dialogue_system.utils.IOHandler import FileIO
from src.dialogue_system.run import RunningSteward
def boolean_string(s):
if s not in {'False', 'True'}:
raise ValueError('Not a valid boolean string')
if s.lower() == 'true':
return True
else:
return False
disease_number = 4
parser = argparse.ArgumentParser()
parser.add_argument("--disease_number", dest="disease_number", type=int,default=disease_number,help="the number of disease.")
# simulation configuration
parser.add_argument("--simulate_epoch_number", dest="simulate_epoch_number", type=int, default=1, help="The number of simulate epoch.")
parser.add_argument("--simulation_size", dest="simulation_size", type=int, default=100, help="The number of simulated sessions in each simulated epoch.")
parser.add_argument("--evaluate_session_number", dest="evaluate_session_number", type=int, default=1000, help="the size of each simulate epoch when evaluation.")
parser.add_argument("--experience_replay_pool_size", dest="experience_replay_pool_size", type=int, default=10000, help="the size of experience replay.")
parser.add_argument("--hidden_size_dqn", dest="hidden_size_dqn", type=int, default=100, help="the hidden_size of DQN.")
parser.add_argument("--warm_start", dest="warm_start",type=boolean_string, default=False, help="Filling the replay buffer with the experiences of rule-based agents. {True, False}")
parser.add_argument("--warm_start_epoch_number", dest="warm_start_epoch_number", type=int, default=30, help="the number of epoch of warm starting.")
parser.add_argument("--batch_size", dest="batch_size", type=int, default=30, help="the batch size when training.")
parser.add_argument("--log_dir", dest="log_dir", type=str, default="./../../../log/", help="directory where event file of training will be written, ending with /")
parser.add_argument("--epsilon", dest="epsilon", type=float, default=0.1, help="The greedy probability of DQN")
parser.add_argument("--gamma", dest="gamma", type=float, default=0.95, help="The discount factor of immediate reward in RL.")
parser.add_argument("--gamma_worker", dest="gamma_worker", type=float, default=0.95, help="The discount factor of immediate reward of the lower agent in HRL.")
parser.add_argument("--train_mode", dest="train_mode", type=boolean_string, default=False, help="Runing this code in training mode? [True, False]")
# Save model, performance and dialogue content ? And what is the path if yes?
parser.add_argument("--save_performance",dest="save_performance", type=boolean_string, default=False, help="save the performance? [True, False]")
parser.add_argument("--save_model", dest="save_model", type=boolean_string, default=False,help="Save model during training? [True, False]")
parser.add_argument("--saved_model", dest="saved_model", type=str, default="./../../model/DQN/checkpoint/0220173244_AgentWithGoal_T22_lr0.0001_RFS44_RFF-22_RFNCY-1_RFIRS-1_mls0_gamma0.95_gammaW0.95_epsilon0.1_awd0_crs0_hwg0_wc0_var0_sdai0_wfrs0.0_dtft1_dataReal_World_RID3_DQN/model_d4_agentAgentWithGoal_s0.993_r41.658_t6.799_wd0.0_e-822.pkl")
parser.add_argument("--save_dialogue", dest="save_dialogue", type=boolean_string, default=False, help="Save the dialogue? [True, False]")
parser.add_argument("--dialogue_file", dest="dialogue_file", type=str, default="./../../data/dialogue_output/dialogue_file.txt", help="the file that used to save dialogue content.")
parser.add_argument("--run_id", dest='run_id', type=int, default=0, help='the id of this running.')
# user configuration.
parser.add_argument("--allow_wrong_disease", dest="allow_wrong_disease", type=boolean_string, default=False, help="Allow the agent to inform wrong disease? 0220173244_AgentWithGoal_T22_lr0.0001_RFS44_RFF-22_RFNCY-1_RFIRS-1_mls0_gamma0.95_gammaW0.95_epsilon0.1_awd0_crs0_hwg0_wc0_var0_sdai0_wfrs0.0_dtft1_dataReal_World_RID3_DQN:Yes, 0:No")
# Learning rate for dqn.
parser.add_argument("--dqn_learning_rate", dest="dqn_learning_rate", type=float, default=0.0001, help="the learning rate of dqn.")
# the number condition of explicit symptoms and implicit symptoms in each user goal.
parser.add_argument("--explicit_number", dest="explicit_number", type=int, default=0, help="the number of explicit symptoms of used sample")
# parser.add_argument("--implicit_number", dest="implicit_number", type=int, default=1, help="the number of implicit symptoms of used sample")
parser.add_argument("--implicit_number", dest="implicit_number", type=int, default=0, help="the number of implicit symptoms of used sample")
# agent to use.
# parser.add_argument("--agent_id", dest="agent_id", type=str, default='AgentDQN', help="The agent to be used:[AgentRule, AgentDQN, AgentRandom, AgentHRL, AgentHRLGoal]")
parser.add_argument("--agent_id", dest="agent_id", type=str, default='AgentWithGoal3', help="The agent to be used:[AgentRule, AgentDQN, AgentRandom, AgentHRL, AgentHRLGoal]")
# goal set, slot set, action set.
max_turn = 22
parser.add_argument("--action_set", dest="action_set", type=str, default='./../../data/real_world/action_set.p',help='path and filename of the action set')
parser.add_argument("--slot_set", dest="slot_set", type=str, default='./../../data/real_world/slot_set.p',help='path and filename of the slots set')
parser.add_argument("--goal_set", dest="goal_set", type=str, default='./../../data/real_world/goal_set.p',help='path and filename of user goal')
parser.add_argument("--disease_symptom", dest="disease_symptom", type=str,default="./../../data/real_world/disease_symptom.p",help="path and filename of the disease_symptom file")
parser.add_argument("--max_turn", dest="max_turn", type=int, default=max_turn, help="the max turn in one episode.")
parser.add_argument("--input_size_dqn", dest="input_size_dqn", type=int, default=max_turn + 477, help="the input_size of DQN.")
# parser.add_argument("--input_size_dqn", dest="input_size_dqn", type=int, default=2438, help="the input_size of DQN.")
parser.add_argument("--reward_for_not_come_yet", dest="reward_for_not_come_yet", type=float,default=-1)
parser.add_argument("--reward_for_success", dest="reward_for_success", type=float,default=2*max_turn)
parser.add_argument("--reward_for_fail", dest="reward_for_fail", type=float,default=-1*max_turn)
parser.add_argument("--reward_for_inform_right_symptom", dest="reward_for_inform_right_symptom", type=float,default=-1)
parser.add_argument("--minus_left_slots", dest="minus_left_slots", type=boolean_string, default=False,help="Success reward minus the number of left slots as the final reward for a successful session.{True, False}")
parser.add_argument("--gpu", dest="gpu", type=str, default="0",help="The id of GPU on the running machine.")
parser.add_argument("--check_related_symptoms", dest="check_related_symptoms", type=boolean_string, default=False, help="Check the realted symptoms if the dialogue is success? True:Yes, False:No")
parser.add_argument("--dqn_type", dest="dqn_type", default="DQN", type=str, help="[DQN, DoubleDQN")
# noisy channel
parser.add_argument("--noisy_channel", dest="noisy_channel", type=boolean_string, default=False, help="noisy channel for user action?")
parser.add_argument("--error_prob", dest="error_prob", type=float, default=0.05, help="Error probability when applying noisy channel?")
# HRL with goal
parser.add_argument("--temperature", dest="temperature", type=float, default=1.0, help="the temperature in gumbel-softmax")
parser.add_argument("--hrl_with_goal", dest="hrl_with_goal", type=boolean_string, default=False, help="Using hierarchical RL with goal?")
parser.add_argument("--weight_correction", dest="weight_correction", type=boolean_string, default=False, help="weight corrention for the master agent in HRL? {True, False}")
parser.add_argument("--value_as_reward", dest="value_as_reward", type=boolean_string, default=False, help="The state value of lower agent is the reward for the higher agent? {True, False}")
parser.add_argument("--symptom_dist_as_input", dest="symptom_dist_as_input", type=boolean_string, default=False, help="The distribution over symptoms of each disease is taken as input to the lower agent? {True, False}")
parser.add_argument("--disease_tag_for_terminating", dest="disease_tag_for_terminating", type=boolean_string, default=True, help="using the disease tag for inform disease ? {True, False}")
parser.add_argument("--upper_bound_critic", dest="upper_bound_critic", type=float, default=0.97, help="The upper bound for terminating the current sub-task.")
parser.add_argument("--lower_bound_critic", dest="lower_bound_critic", type=float, default=1e-1, help="The upper bound for terminating the current sub-task.")
# reward shaping
parser.add_argument("--weight_for_reward_shaping", dest='weight_for_reward_shaping', type=float, default=0.0, help="weight for reward shaping. 0 means no reward shaping.")
parser.add_argument("--is_relational_dqn", dest='is_relational_dqn', type=boolean_string, default=False, help="Using relational DQN? {True, False}")
args = parser.parse_args()
parameter = vars(args)
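# Hypothetical invocation sketch (flag values for illustration only):
#   python run_for_test.py --agent_id AgentDQN --train_mode False --gpu 0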
def run(parameter):
"""
The entry function of this code.
Args:
parameter: the super-parameter
"""
print(json.dumps(parameter, indent=2))
time.sleep(2)
slot_set = pickle.load(file=open(parameter["slot_set"], "rb"))
action_set = pickle.load(file=open(parameter["action_set"], "rb"))
disease_symptom = pickle.load(file=open(parameter["disease_symptom"], "rb"))
steward = RunningSteward(parameter=parameter,checkpoint_path=parameter["checkpoint_path"])
print('action_set', action_set)
warm_start = parameter.get("warm_start")
warm_start_epoch_number = parameter.get("warm_start_epoch_number")
train_mode = parameter.get("train_mode")
agent_id = parameter.get("agent_id")
simulate_epoch_number = parameter.get("simulate_epoch_number")
# Warm start.
if warm_start == True and train_mode == True:
print("warm starting...")
agent = AgentRule(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
steward.dialogue_manager.set_agent(agent=agent)
steward.warm_start(epoch_number=warm_start_epoch_number)
# exit()
if agent_id.lower() == 'agentdqn':
agent = AgentDQN(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agentrandom':
agent = AgentRandom(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agentrule':
agent = AgentRule(action_set=action_set,slot_set=slot_set,disease_symptom=disease_symptom,parameter=parameter)
elif agent_id.lower() == 'agenthrl':
agent = AgentHRL(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoaljoint':
agent = AgentWithGoalJoint(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal':
agent = AgentWithGoal(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal2':
agent = AgentWithGoal2(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom, parameter=parameter)
elif agent_id.lower() == 'agentwithgoal3':
from src.dialogue_system.agent.agent_with_goal_3 import AgentWithGoal as AgentWithGoal3
agent = AgentWithGoal3(action_set=action_set, slot_set=slot_set, disease_symptom=disease_symptom,
parameter=parameter)
else:
raise ValueError('Agent id should be one of [AgentRule, AgentDQN, AgentRandom, AgentHRL, AgentWithGoal, AgentWithGoal2, AgentWithGoalJoint].')
steward.dialogue_manager.set_agent(agent=agent)
if train_mode is True: # Train
steward.simulate(epoch_number=simulate_epoch_number, train_mode=train_mode)
else: # test
for index in range(simulate_epoch_number):
res = steward.evaluate_model(dataset='test', index=index)
return res
def list_to_dict(model_name_list):
model_name_dict = {}
for name in model_name_list:
index = name.split('-')[-1].split('.pkl')[0]
model_name_dict[int(index)] = name
return model_name_dict
if __name__ == "__main__":
params = verify_params(parameter)
gpu_str = params["gpu"]
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_str  # e.g. '0,1,2'
torch.cuda.manual_seed(12345)
torch.manual_seed(12345)
checkpoint_path = '../../model/DQN/checkpoint'
agent_id = 'AgentDQN'
date_time_list = []
result_file = 'test_result.txt'
FileIO.writeToFile('\n\n' + '**'*30, result_file)
FileIO.writeToFile(agent_id, result_file)
FileIO.writeToFile('**'*30 + '\n' , result_file)
params['agent_id'] = agent_id
for date_time in date_time_list:
key_list = [agent_id, date_time]
run_info = get_dir_list(checkpoint_path, key_word_list=key_list)[0]
model_path = os.path.join(checkpoint_path, run_info)
model_name_list = get_dir_list(model_path, key_word_list=[agent_id, ".pkl"])
model_name_dict = list_to_dict(model_name_list)
# print(model_name_dict)
FileIO.writeToFile(run_info, result_file)
FileIO.writeToFile('**' * 30 + '\n', result_file)
params['run_info'] = run_info
for epoch_index in sorted(model_name_dict.keys()):
model_name = model_name_dict[epoch_index]
params['saved_model'] = os.path.join(model_path,model_name)
print(params['run_info'])
result = run(parameter=parameter)
print(result_file)
FileIO.writeToFile(str(epoch_index)+ " " + json.dumps(result), result_file)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cmd/server/wire_gen.go | // Code generated by Wire. DO NOT EDIT.
//go:generate go run github.com/google/wire/cmd/wire
//go:build !wireinject
// +build !wireinject
package main
import (
"context"
"github.com/thebartekbanach/imcaxy/pkg/cache"
"github.com/thebartekbanach/imcaxy/pkg/cache/repositories"
"github.com/thebartekbanach/imcaxy/pkg/cache/repositories/connections"
"github.com/thebartekbanach/imcaxy/pkg/filefetcher"
"github.com/thebartekbanach/imcaxy/pkg/hub"
"github.com/thebartekbanach/imcaxy/pkg/hub/storage"
"github.com/thebartekbanach/imcaxy/pkg/processor"
"github.com/thebartekbanach/imcaxy/pkg/processor/imaginary"
"github.com/thebartekbanach/imcaxy/pkg/proxy"
"log"
"net/url"
"os"
"strings"
"time"
)
// Injectors from wire.go:
func InitializeCache(ctx context.Context) cache.CacheService {
cacheDBConfig := InitializeMongoConnectionConfig()
cacheDBConnection := InitializeMongoConnection(ctx, cacheDBConfig)
cachedImagesRepository := cacherepositories.NewCachedImagesRepository(cacheDBConnection)
minioBlockStorageProductionConnectionConfig := InitializeMinioConnectionConfig()
minioBlockStorageConnection := InitializeMinioConnection(ctx, minioBlockStorageProductionConnectionConfig)
cachedImagesStorage := cacherepositories.NewCachedImagesStorage(minioBlockStorageConnection)
cacheService := cache.NewCacheService(cachedImagesRepository, cachedImagesStorage)
return cacheService
}
func InitializeInvalidator(ctx context.Context, cacheService cache.CacheService) cache.InvalidationService {
cacheDBConfig := InitializeMongoConnectionConfig()
cacheDBConnection := InitializeMongoConnection(ctx, cacheDBConfig)
invalidationsRepository := cacherepositories.NewInvalidationsRepository(cacheDBConnection)
invalidationService := cache.NewInvalidationService(invalidationsRepository, cacheService)
return invalidationService
}
func InitializeProxy(ctx context.Context, cache2 cache.CacheService) proxy.ProxyService {
processor := InitializeImaginaryProcessingService()
proxyServiceConfig := InitializeProxyConfig(processor)
storageAdapter := datahubstorage.NewStorage()
dataHub := InitializeDataHub(ctx, storageAdapter)
fetcher := filefetcher.NewDataHubFetcher()
proxyService := proxy.NewProxyService(proxyServiceConfig, cache2, dataHub, fetcher)
return proxyService
}
// wire.go:
func InitializeMongoConnectionConfig() dbconnections.CacheDBConfig {
config := dbconnections.CacheDBConfig{
ConnectionString: os.Getenv("IMCAXY_MONGO_CONNECTION_STRING"),
}
if config.ConnectionString == "" {
log.Panic("IMCAXY_MONGO_CONNECTION_STRING is required environment variable")
}
parsedConnectionString, err := url.Parse(config.ConnectionString)
if err != nil {
log.Panicf("Error ocurred when parsing IMCAXY_MONGO_CONNECTION_STRING: %s", err)
}
if parsedConnectionString.User == nil {
log.Panicf("IMCAXY_MONGO_CONNECTION_STRING must contain credentials")
}
return config
}
func InitializeMongoConnection(ctx context.Context, mongoConfig dbconnections.CacheDBConfig) dbconnections.CacheDBConnection {
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
cacheDbConnection, err := dbconnections.NewCacheDBProductionConnection(ctx, mongoConfig)
if err != nil {
log.Panicf("Error ocurred when initializing MongoDB connection: %s", err)
}
return cacheDbConnection
}
func InitializeMinioConnectionConfig() dbconnections.MinioBlockStorageProductionConnectionConfig {
config := dbconnections.MinioBlockStorageProductionConnectionConfig{
Endpoint: os.Getenv("IMCAXY_MINIO_ENDPOINT"),
AccessKey: os.Getenv("IMCAXY_MINIO_ACCESS_KEY"),
SecretKey: os.Getenv("IMCAXY_MINIO_SECRET_KEY"),
Location: os.Getenv("IMCAXY_MINIO_LOCATION"),
Bucket: os.Getenv("IMCAXY_MINIO_BUCKET"),
UseSSL: os.Getenv("IMCAXY_MINIO_SSL") == "true",
}
if config.Endpoint == "" {
log.Panic("IMCAXY_MINIO_ENDPOINT is required environment variable")
}
if _, err := url.Parse(config.Endpoint); err != nil {
log.Panicf("Error ocurred when parsing IMCAXY_MINIO_ENDPOINT: %s", err)
}
if config.AccessKey == "" {
log.Panic("IMCAXY_MINIO_ACCESS_KEY is required environment variable")
}
if config.SecretKey == "" {
log.Panic("IMCAXY_MINIO_SECRET_KEY is required environment variable")
}
if config.Location == "" {
config.Location = "us-east-1"
}
if config.Bucket == "" {
log.Panic("IMCAXY_MINIO_BUCKET is required environment variable")
}
return config
}
func InitializeMinioConnection(ctx context.Context, minioConfig dbconnections.MinioBlockStorageProductionConnectionConfig) dbconnections.MinioBlockStorageConnection {
ctx, cancel := context.WithTimeout(ctx, time.Minute)
defer cancel()
minioBlockStorageConnection, err := dbconnections.NewMinioBlockStorageProductionConnection(ctx, minioConfig)
if err != nil {
log.Panicf("Error ocurred when initializing Minio connection: %s", err)
}
return &minioBlockStorageConnection
}
func InitializeImaginaryProcessingService() imaginaryprocessor.Processor {
config := imaginaryprocessor.Config{
ImaginaryServiceURL: os.Getenv("IMCAXY_IMAGINARY_SERVICE_URL"),
}
if config.ImaginaryServiceURL == "" {
log.Panic("IMCAXY_IMAGINARY_SERVICE_URL is required environment variable")
}
if _, err := url.Parse(config.ImaginaryServiceURL); err != nil {
log.Panicf("Error ocurred when parsing IMCAXY_IMAGINARY_SERVICE_URL: %s", err)
}
return imaginaryprocessor.NewProcessor(config)
}
func InitializeDataHub(ctx context.Context, storage datahubstorage.StorageAdapter) hub.DataHub {
dataHub := hub.NewDataHub(storage)
dataHub.StartMonitors(ctx)
return dataHub
}
func InitializeProxyConfig(imaginaryProcessingService imaginaryprocessor.Processor) proxy.ProxyServiceConfig {
config := proxy.ProxyServiceConfig{
Processors: map[string]processor.ProcessingService{
"imaginary": &imaginaryProcessingService,
},
AllowedDomains: strings.Split(os.Getenv("IMCAXY_ALLOWED_DOMAINS"), ","),
AllowedOrigins: strings.Split(os.Getenv("IMCAXY_ALLOWED_ORIGINS"), ","),
}
if len(config.AllowedDomains) == 0 || config.AllowedDomains[0] == "" && len(config.AllowedDomains) == 1 {
config.AllowedDomains = []string{"*"}
}
if len(config.AllowedOrigins) == 0 || config.AllowedOrigins[0] == "" && len(config.AllowedOrigins) == 1 {
config.AllowedOrigins = []string{"*"}
}
return config
}
| ["\"IMCAXY_MONGO_CONNECTION_STRING\"", "\"IMCAXY_MINIO_ENDPOINT\"", "\"IMCAXY_MINIO_ACCESS_KEY\"", "\"IMCAXY_MINIO_SECRET_KEY\"", "\"IMCAXY_MINIO_LOCATION\"", "\"IMCAXY_MINIO_BUCKET\"", "\"IMCAXY_MINIO_SSL\"", "\"IMCAXY_IMAGINARY_SERVICE_URL\"", "\"IMCAXY_ALLOWED_DOMAINS\"", "\"IMCAXY_ALLOWED_ORIGINS\""] | [] | ["IMCAXY_MINIO_LOCATION", "IMCAXY_MINIO_SSL", "IMCAXY_MINIO_ACCESS_KEY", "IMCAXY_MINIO_BUCKET", "IMCAXY_ALLOWED_DOMAINS", "IMCAXY_MINIO_ENDPOINT", "IMCAXY_ALLOWED_ORIGINS", "IMCAXY_MONGO_CONNECTION_STRING", "IMCAXY_IMAGINARY_SERVICE_URL", "IMCAXY_MINIO_SECRET_KEY"] | [] | ["IMCAXY_MINIO_LOCATION", "IMCAXY_MINIO_SSL", "IMCAXY_MINIO_ACCESS_KEY", "IMCAXY_MINIO_BUCKET", "IMCAXY_ALLOWED_DOMAINS", "IMCAXY_MINIO_ENDPOINT", "IMCAXY_ALLOWED_ORIGINS", "IMCAXY_MONGO_CONNECTION_STRING", "IMCAXY_IMAGINARY_SERVICE_URL", "IMCAXY_MINIO_SECRET_KEY"] | go | 10 | 0 | |
cmd/integration.go | package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"strconv"
"time"
"github.com/google/go-github/github"
"github.com/google/uuid"
"github.com/dollarshaveclub/acyl/pkg/ghapp"
"github.com/dollarshaveclub/acyl/pkg/config"
"github.com/dollarshaveclub/acyl/pkg/ghclient"
"github.com/dollarshaveclub/acyl/pkg/ghevent"
"github.com/dollarshaveclub/acyl/pkg/locker"
"github.com/dollarshaveclub/acyl/pkg/models"
"github.com/dollarshaveclub/acyl/pkg/namegen"
nitroenv "github.com/dollarshaveclub/acyl/pkg/nitro/env"
"github.com/dollarshaveclub/acyl/pkg/nitro/images"
"github.com/dollarshaveclub/acyl/pkg/nitro/meta"
"github.com/dollarshaveclub/acyl/pkg/nitro/metahelm"
"github.com/dollarshaveclub/acyl/pkg/nitro/metrics"
"github.com/dollarshaveclub/acyl/pkg/nitro/notifier"
"github.com/dollarshaveclub/acyl/pkg/persistence"
"github.com/dollarshaveclub/acyl/pkg/spawner"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
"gopkg.in/src-d/go-billy.v4/osfs"
)
type integrationConfig struct {
dataFile, webhookFile, githubToken string
appHookSecret string
PrivateKeyPEM string
appIDstr string
}
var integrationcfg integrationConfig
// integrationCmd represents the integration command
var integrationCmd = &cobra.Command{
Use: "integration",
Short: "Run a set of integration tests",
Long: `Intended to be executed as a Kubernetes Job. Runs creation, update and deletion tests using fake implementations of the database, notifier and Furan.
Uses a mocked GitHub webhook payload. The referenced repository must exist, as well as acyl.yml and dependencies. All referenced images and tags must exist.
Must be run under a k8s service account with the ClusterAdmin role.`,
Run: integration,
}
func init() {
integrationCmd.Flags().StringVar(&integrationcfg.dataFile, "data-file", "testdata/integration/data.json", "path to JSON data file")
integrationCmd.Flags().StringVar(&integrationcfg.webhookFile, "webhook-file", "testdata/integration/webhook.json", "path to JSON webhook file")
integrationCmd.Flags().StringVar(&integrationcfg.githubToken, "github-token", os.Getenv("GITHUB_TOKEN"), "GitHub access token")
integrationCmd.Flags().StringVar(&integrationcfg.appIDstr, "github-app-id", os.Getenv("GITHUB_APP_ID"), "GitHub App ID")
integrationCmd.Flags().StringVar(&integrationcfg.PrivateKeyPEM, "github-app-private-key", os.Getenv("GITHUB_APP_PRIVATE_KEY"), "GitHub App private key")
integrationCmd.Flags().StringVar(&integrationcfg.appHookSecret, "github-app-hook-secret", os.Getenv("GITHUB_APP_HOOK_SECRET"), "GitHub App webhook secret")
RootCmd.AddCommand(integrationCmd)
}
func integration(cmd *cobra.Command, args []string) {
setupServerLogger()
dl, err := loadData()
if err != nil {
clierr("error loading data: %v", err)
}
wm, err := loadWebhooks()
if err != nil {
clierr("error loading webhook: %v", err)
}
nmgr, rc, err := setupNitro(dl, true)
if err != nil {
clierr("error setting up Nitro: %v", err)
}
eh := setupEventHandler(rc, dl)
ctx, cf := context.WithTimeout(context.Background(), 30*time.Minute)
defer cf()
g, ctx := errgroup.WithContext(ctx)
// static github token
g.Go(func() error {
if err := createIntegrationTest(ctx, pullRequestEventToGitHubEvent(wm["create"]), eh, nmgr); err != nil {
return errors.Wrap(err, "error performing static token create integration test")
}
if err := updateIntegrationTest(ctx, pullRequestEventToGitHubEvent(wm["update"]), eh, nmgr); err != nil {
return errors.Wrap(err, "error performing static token update integration test")
}
if err := deleteIntegrationTest(ctx, pullRequestEventToGitHubEvent(wm["delete"]), eh, nmgr); err != nil {
return errors.Wrap(err, "error performing static token delete integration test")
}
return nil
})
// github app
g.Go(func() error {
//use new datastore and dependencies so this can run in parallel with static token tests
dl2, err := loadData()
if err != nil {
return errors.Wrap(err, "error loading data")
}
nmgr2, _, err := setupNitro(dl2, false)
if err != nil {
return errors.Wrap(err, "error setting up app Nitro")
}
if integrationcfg.PrivateKeyPEM == "" {
return errors.New("empty private key")
}
appid, err := strconv.Atoi(integrationcfg.appIDstr)
if err != nil || appid < 1 {
return errors.Wrap(err, "invalid app id")
}
// this is only used to create valid payload signatures
ge := ghevent.NewGitHubEventWebhook(nil, integrationcfg.appHookSecret, "", dl2)
prh := func(ctx context.Context, action string, rrd models.RepoRevisionData) error {
switch action {
case "opened":
_, err := nmgr2.Create(ctx, rrd)
return errors.Wrap(err, "error running github app create test")
case "synchronize":
_, err := nmgr2.Update(ctx, rrd)
return errors.Wrap(err, "error running github app update test")
case "closed":
err := nmgr2.Destroy(ctx, rrd, models.DestroyApiRequest)
return errors.Wrap(err, "error running github app destroy test")
default:
return fmt.Errorf("unexpected action: %v", action)
}
}
gha, err := ghapp.NewGitHubApp([]byte(integrationcfg.PrivateKeyPEM), uint(appid), integrationcfg.appHookSecret, []string{"opened", "closed", "synchronize"}, prh, dl2)
if err != nil {
return errors.Wrap(err, "error creating GitHub app")
}
runevent := func(event github.PullRequestEvent) error {
payload, err := json.Marshal(event)
if err != nil {
return errors.Wrapf(err, "error marshaling %v webhook", event.Action)
}
r := httptest.NewRequest("POST", "/ghapp/webhook", bytes.NewBuffer(payload))
r.Header.Add("X-GitHub-Delivery", uuid.Must(uuid.NewRandom()).String())
r.Header.Add("X-GitHub-Event", "pull_request")
r.Header.Add("Content-Type", "application/json")
r.Header.Add("X-Hub-Signature", ge.GenerateSignatureString(payload))
w := httptest.NewRecorder()
gha.Handler().ServeHTTP(w, r)
if w.Result().StatusCode != http.StatusAccepted {
out, _ := ioutil.ReadAll(w.Result().Body)
return fmt.Errorf("error in %v response: %v: %v", event.Action, w.Result().StatusCode, string(out))
}
return nil
}
if err := runevent(wm["create"]); err != nil {
return err
}
if err := runevent(wm["update"]); err != nil {
return err
}
if err := runevent(wm["delete"]); err != nil {
return err
}
return nil
})
if err := g.Wait(); err != nil {
clierr("error running tests: %v", err)
}
// allow the async namespace delete to finish
time.Sleep(5 * time.Second)
logger.Printf("integration tests successful")
}
// pullRequestEventToGitHubEvent marshals to JSON, then unmarshals into GitHubEvent
func pullRequestEventToGitHubEvent(pre github.PullRequestEvent) *ghevent.GitHubEvent {
out := &ghevent.GitHubEvent{}
j, err := json.Marshal(&pre)
if err != nil {
fmt.Printf("error marshalling PullRequestEvent: %v", err)
return out
}
if err := json.Unmarshal(j, out); err != nil {
fmt.Printf("error unmarshalling into GitHubEvent: %v", err)
}
return out
}
func createIntegrationTest(ctx context.Context, e *ghevent.GitHubEvent, eh *ghevent.GitHubEventWebhook, nmgr spawner.EnvironmentSpawner) error {
d, err := json.Marshal(e)
if err != nil {
return errors.Wrap(err, "error marshaling event")
}
wh, err := eh.New(d, uuid.Must(uuid.NewRandom()), eh.GenerateSignatureString(d))
action := wh.Action
rdd := wh.RRD
if err != nil || rdd == nil {
return errors.Wrap(err, "error processing event")
}
if action != ghevent.CreateNew {
return fmt.Errorf("unexpected event action (wanted CreateNew): %v", action.String())
}
name, err := nmgr.Create(ctx, *rdd)
if err != nil {
return errors.Wrap(err, "error creating environment")
}
logger.Printf("environment created: %v", name)
return nil
}
func updateIntegrationTest(ctx context.Context, e *ghevent.GitHubEvent, eh *ghevent.GitHubEventWebhook, nmgr spawner.EnvironmentSpawner) error {
d, err := json.Marshal(e)
if err != nil {
return errors.Wrap(err, "error marshaling event")
}
wh, err := eh.New(d, uuid.Must(uuid.NewRandom()), eh.GenerateSignatureString(d))
action := wh.Action
rdd := wh.RRD
if err != nil || rdd == nil {
return errors.Wrap(err, "error processing event")
}
if action != ghevent.Update {
return fmt.Errorf("unexpected event action (wanted Update): %v", action.String())
}
name, err := nmgr.Update(ctx, *rdd)
if err != nil {
return errors.Wrap(err, "error updating environment")
}
logger.Printf("environment updated: %v", name)
return nil
}
func deleteIntegrationTest(ctx context.Context, e *ghevent.GitHubEvent, eh *ghevent.GitHubEventWebhook, nmgr spawner.EnvironmentSpawner) error {
d, err := json.Marshal(e)
if err != nil {
return errors.Wrap(err, "error marshaling event")
}
wh, err := eh.New(d, uuid.Must(uuid.NewRandom()), eh.GenerateSignatureString(d))
action := wh.Action
rdd := wh.RRD
if err != nil || rdd == nil {
return errors.Wrap(err, "error processing event")
}
if action != ghevent.Destroy {
return fmt.Errorf("unexpected event action (wanted Destroy): %v", action.String())
}
err = nmgr.Destroy(ctx, *rdd, models.DestroyApiRequest)
if err != nil {
return errors.Wrap(err, "error destroying environment")
}
logger.Printf("environment destroyed")
return nil
}
func setupEventHandler(rc ghclient.RepoClient, dl persistence.DataLayer) *ghevent.GitHubEventWebhook {
return ghevent.NewGitHubEventWebhook(rc, "foobar", "acyl.yml", dl)
}
func setupNitro(dl persistence.DataLayer, useGHToken bool) (spawner.EnvironmentSpawner, ghclient.RepoClient, error) {
ghtkn := "invalid"
if useGHToken {
ghtkn = integrationcfg.githubToken
}
rc := ghclient.NewGitHubClient(ghtkn)
ng := &namegen.FakeNameGenerator{Unique: true}
mc := &metrics.FakeCollector{}
plf, err := locker.NewFakePreemptiveLockerFactory([]locker.LockProviderOption{
locker.WithLockTimeout(1 * time.Second),
})
if err != nil {
return nil, nil, errors.Wrap(err, "unable to create new fake preemptive locker factory")
}
nf := func(lf func(string, ...interface{}), notifications models.Notifications, user string) notifier.Router {
sb := &notifier.SlackBackend{
Username: "john.doe",
API: &notifier.FakeSlackAPIClient{},
}
return &notifier.MultiRouter{Backends: []notifier.Backend{sb}}
}
fs := osfs.New("")
mg := &meta.DataGetter{RC: rc, FS: fs}
ib := &images.FakeImageBuilder{BatchCompletedFunc: func(envname, repo string) (bool, error) { return true, nil }}
ci, err := metahelm.NewChartInstaller(ib, dl, fs, mc, map[string]string{}, []string{}, map[string]config.K8sSecret{}, metahelm.TillerConfig{}, k8sClientConfig.JWTPath, false)
if err != nil {
return nil, nil, errors.Wrap(err, "error getting metahelm chart installer")
}
return &nitroenv.Manager{
NF: nf,
DL: dl,
RC: rc,
MC: mc,
NG: ng,
FS: fs,
MG: mg,
CI: ci,
PLF: plf,
}, rc, nil
}
type testData struct {
QAEnvironments []models.QAEnvironment `json:"qa_environments"`
K8sEnvironments []models.KubernetesEnvironment `json:"kubernetes_environments"`
HelmReleases []models.HelmRelease `json:"helm_releases"`
}
func loadData() (persistence.DataLayer, error) {
d, err := ioutil.ReadFile(integrationcfg.dataFile)
if err != nil {
return nil, errors.Wrap(err, "error opening data file")
}
td := testData{}
if err := json.Unmarshal(d, &td); err != nil {
return nil, errors.Wrap(err, "error unmarshaling data file")
}
return persistence.NewPopulatedFakeDataLayer(td.QAEnvironments, td.K8sEnvironments, td.HelmReleases), nil
}
type testWebhooks struct {
Create github.PullRequestEvent `json:"create"`
Update github.PullRequestEvent `json:"update"`
Delete github.PullRequestEvent `json:"delete"`
}
func loadWebhooks() (map[string]github.PullRequestEvent, error) {
d, err := ioutil.ReadFile(integrationcfg.webhookFile)
if err != nil {
return nil, errors.Wrap(err, "error opening webhook file")
}
twh := testWebhooks{}
if err := json.Unmarshal(d, &twh); err != nil {
return nil, errors.Wrap(err, "error unmarshaling webhook file")
}
out := make(map[string]github.PullRequestEvent, 3)
out["create"] = twh.Create
out["update"] = twh.Update
out["delete"] = twh.Delete
return out, nil
}
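// Illustrative sketch (not part of the original source): the files read by loadData and
// loadWebhooks above are assumed to be JSON documents keyed by the struct tags of testData
// and testWebhooks, roughly:
//
//   data file:    {"qa_environments": [...], "kubernetes_environments": [...], "helm_releases": [...]}
//   webhook file: {"create": { ...PullRequestEvent... }, "update": { ... }, "delete": { ... }}
//
// Based on the prh handler above, the create/update/delete events are assumed to carry the
// actions "opened", "synchronize" and "closed" respectively; the exact payload fields are
// whatever go-github's PullRequestEvent expects.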
| [
"\"GITHUB_TOKEN\"",
"\"GITHUB_APP_ID\"",
"\"GITHUB_APP_PRIVATE_KEY\"",
"\"GITHUB_APP_HOOK_SECRET\""
]
| []
| [
"GITHUB_APP_PRIVATE_KEY",
"GITHUB_APP_HOOK_SECRET",
"GITHUB_TOKEN",
"GITHUB_APP_ID"
]
| [] | ["GITHUB_APP_PRIVATE_KEY", "GITHUB_APP_HOOK_SECRET", "GITHUB_TOKEN", "GITHUB_APP_ID"] | go | 4 | 0 | |
snippets/assets.py | # These code snippets are used in Azure Media Services documentation.
# DO NOT EDIT
#<AssetImports>
from dotenv import load_dotenv
from azure.identity import DefaultAzureCredential
from azure.mgmt.media import AzureMediaServices
from azure.mgmt.media.models import (Asset)
import os
#</AssetImports>
#<EnvironmentVariables>
#Get environment variables
load_dotenv()
subscriptionId = os.getenv("SUBSCRIPTIONID")
accountName=os.getenv("ACCOUNTNAME")
resourceGroupName=os.getenv("RESOURCEGROUP")
clientId = os.getenv("AZURE_CLIENT_ID")
storageAccountName=os.getenv("STORAGEACCOUNTNAME")
#</EnvironmentVariables>
#<CreateAMSClient>
# Create the Media Services client and authenticate using the DefaultAzureCredential
default_credential = DefaultAzureCredential()
client = AzureMediaServices(default_credential, subscriptionId)
#</CreateAMSClient>
#<CreateAsset>
#Create an Asset object
#From SDK
# Asset(*, alternate_id: Optional[str] = None, description: Optional[str] = None, container: Optional[str] = None,
# storage_account_name: Optional[str] = None, **kwargs)
assetName = "MyAsset"
assetObj = Asset(alternate_id="myAlternateId",description="My description")
#From SDK
#create_or_update(resource_group_name: str, account_name: str, asset_name: str, parameters: "_models.Asset", **kwargs: Any) -> _models.Asset
def createAsset(resource_group_name, account_name, asset_name, asset):
    thisAsset = client.assets.create_or_update(resource_group_name, account_name, asset_name, asset)
createAsset(resourceGroupName,accountName,assetName,assetObj)
#</CreateAsset>
#<GetAsset>
# Set the name of the asset for which you want to get properties.
assetName = "MyAsset"
# From SDK
# get(resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any) -> _models.Asset
def getAsset(resource_group_name,account_name,asset_name):
results = client.assets.get(resource_group_name,account_name,asset_name)
#You can get any of the properties of an asset. Here we are printing the asset name.
print(results.name)
getAsset(resourceGroupName,accountName,assetName)
#</GetAsset>
#<GetAssetEncryptionKey>
# Set the name of the asset for which you want to get the asset encryption key.
assetName = "MyAsset"
# From SDK
# get_encryption_key(resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any)
# -> _models.StorageEncryptedAssetDecryptionData
# If an encryption key doesn't exist yet, the results will tell you.
def getAssetEncKey(resource_group_name,account_name,asset_name):
results = client.assets.get_encryption_key(resource_group_name,account_name,asset_name)
print(results)
getAssetEncKey(resourceGroupName,accountName,assetName)
#</GetAssetEncryptionKey>
#<ListAssets>
# From SDK
# list(resource_group_name: str, account_name: str,
# filter: Optional[str] = None, top: Optional[int] = None, orderby: Optional[str] = None, **kwargs: Any)
# -> Iterable['_models.AssetCollection']
def listAssets(resource_group_name, account_name):
results=client.assets.list(resource_group_name,account_name)
# Results is a collection so you can iterate over it to get an attribute of the asset
for result in results:
print(result.name)
listAssets(resourceGroupName,accountName)
#</ListAssets>
#<ListAssetsContainerSAS>
# TO DO
#list_container_sas(resource_group_name,account_name,asset_name, parameters: "_models.ListContainerSasInput", **kwargs: Any) -> _models.AssetContainerSas
#</ListAssetsContainerSAS>
#<ListAssetStreamingLocators>
# Set the name of the asset for which you want a streaming locator list
assetName = "MyAsset"
# From SDK
# list_streaming_locators(resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any) -> _models.ListStreamingLocatorsResponse
def listStreamingLocators(resource_group_name, account_name, asset_name):
results=client.assets.list_streaming_locators(resource_group_name,account_name,asset_name)
streamingLocators = results.streaming_locators
# streaminglocators is a list so you can iterate over it to get an attribute of the streaming locator
# If no streaming locators have been created for the asset, then the results will return nothing.
for locator in streamingLocators:
print(locator.name)
listStreamingLocators(resourceGroupName,accountName,assetName)
#</ListAssetStreamingLocators>
#<UpdateAsset>
# From SDK
# update(resource_group_name: str, account_name: str, asset_name: str, parameters: "_models.Asset", **kwargs: Any) -> _models.Asset
# Set the name of the asset that you want to update
assetName = "MyAsset"
#Create an asset object for updating
assetObj1 = Asset(description="My new description.")
def updateAsset(resource_group_name, account_name, asset_name, parameters):
    client.assets.update(resource_group_name, account_name, asset_name, parameters)
updateAsset(resourceGroupName,accountName,assetName,assetObj1)
#</UpdateAsset>
#<DeleteAsset>
# Set the name of the asset that you want to delete
assetName = "MyAsset"
# From SDK
# delete(resource_group_name: str, account_name: str, asset_name: str, **kwargs: Any) -> None
def deleteAsset(resource_group,account_name,asset_name):
client.assets.delete(resource_group,account_name,asset_name)
deleteAsset(resourceGroupName,accountName,assetName)
#</DeleteAsset>
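# Illustrative sketch (not part of the original snippet): load_dotenv() above reads a .env
# file; based on the os.getenv calls in this file it is assumed to look roughly like:
#
#   SUBSCRIPTIONID=00000000-0000-0000-0000-000000000000
#   ACCOUNTNAME=myamsaccount
#   RESOURCEGROUP=myresourcegroup
#   AZURE_CLIENT_ID=00000000-0000-0000-0000-000000000000
#   STORAGEACCOUNTNAME=mystorageaccount
#
# The values are placeholders; DefaultAzureCredential may also require additional variables
# (for example a client secret or tenant id) depending on the authentication method used.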
| []
| []
| [
"RESOURCEGROUP",
"AZURE_CLIENT_ID",
"ACCOUNTNAME",
"STORAGEACCOUNTNAME",
"SUBSCRIPTIONID"
]
| [] | ["RESOURCEGROUP", "AZURE_CLIENT_ID", "ACCOUNTNAME", "STORAGEACCOUNTNAME", "SUBSCRIPTIONID"] | python | 5 | 0 | |
datadogext/datadogext.go | // package datadogext provides an abstraction of the datadog golang api. Environment variables provided by github
// actions are loaded, producing an event, which is then sent to Datadog.
package datadogext
import (
"os"
"strconv"
"strings"
"time"
"github.com/levenlabs/golib/timeutil"
log "github.com/sirupsen/logrus"
"github.com/zorkian/go-datadog-api"
)
const (
estring string = "error"
warning string = "warning"
info string = "info"
success string = "success"
Github string = "GITHUB"
count string = "count"
one float64 = 1
)
// NewDatadogClient loads the environment variables required for configuration and returns an instantiated Datadog API client.
func NewDatadogClient() *datadog.Client {
ddAPIKey := os.Getenv("INPUT_DD_API_KEY")
if ddAPIKey == "" {
log.Fatalf("input DD_API_KEY must be set")
}
ddAppKey := os.Getenv("INPUT_DD_APP_KEY")
if ddAppKey == "" {
log.Fatalf("input DD_APP_KEY must be set")
}
return datadog.NewClient(ddAPIKey, ddAppKey)
}
type datadogEvent struct {
client *datadog.Client
event *datadog.Event
eventMetric *float64
eventMetricName *string
}
// GetSource returns the SourceType of the event
func (dde datadogEvent) GetSource() *string {
return dde.event.SourceType
}
// GetTime returns the time of the event in UnixTime
func (dde datadogEvent) GetTime() *int {
return dde.event.Time
}
// GetTitle returns the Title of the event
func (dde datadogEvent) GetTitle() *string {
return dde.event.Title
}
// GetTags returns the Tags on the event
func (dde datadogEvent) GetTags() []string {
return dde.event.Tags
}
// GetStatus returns the status of the event
func (dde datadogEvent) GetStatus() *string {
return dde.event.AlertType
}
func (dde *datadogEvent) setSource(source string) {
dde.event.SourceType = &source
}
func (dde *datadogEvent) setTitle(title string) {
if title == "" {
log.Fatalf("input EVENT_TITLE must be set")
}
dde.event.Title = &title
}
func (dde *datadogEvent) setTimeToNow() {
loc, _ := time.LoadLocation("UTC")
now := time.Now().In(loc)
unixNow := int(now.Unix())
dde.event.Time = &unixNow
}
func (dde *datadogEvent) setTagList(tags string) {
dde.event.Tags = strings.Split(tags, ",")
}
func (dde *datadogEvent) setStatus(status string) {
status = strings.ToLower(status)
switch status {
case estring, warning, info, success: //valid
dde.event.AlertType = &status
}
}
func (dde *datadogEvent) setEventMetric(status string) {
if status == "" {
return
}
metric, err := strconv.ParseFloat(status, 64)
if err != nil {
log.Fatalf("unable to convert `%s` to int", status)
}
dde.eventMetric = &metric
}
func (dde *datadogEvent) setEventMetricName(name string) {
if name != "" {
dde.eventMetricName = &name
}
}
// NewDatadogEvent retrieves inputs from the environment and returns a constructed event.
func NewDatadogEvent() *datadogEvent {
client := NewDatadogClient()
event := &datadogEvent{client, &datadog.Event{}, nil, nil}
event.setSource(Github)
event.setTimeToNow()
event.setTitle(os.Getenv("INPUT_EVENT_TITLE"))
event.setTagList(os.Getenv("INPUT_EVENT_TAGS"))
event.setStatus(os.Getenv("INPUT_EVENT_STATUS"))
event.setEventMetric(os.Getenv("INPUT_EVENT_METRIC"))
event.setEventMetricName(os.Getenv("INPUT_EVENT_METRIC_NAME"))
log.Debugf("New Event Generated and Configured: `%+v`", event)
return event
}
// Post calls the Datadog api, creating the event.
func (dde datadogEvent) Post() (err error) {
_, err = dde.client.PostEvent(dde.event)
if err != nil {
return err
}
log.Info("Event posted successfully.")
countType := count
int64Time := int64(*dde.event.Time)
convertedTime := timeutil.TimestampFromInt64(int64Time).Float64()
if dde.eventMetric == nil {
return nil
}
sOne := one
statusMetricName := strings.Join([]string{*dde.eventMetricName, *dde.event.AlertType}, ".")
err = dde.client.PostMetrics(
[]datadog.Metric{
{
Metric: dde.eventMetricName,
Tags: dde.event.Tags,
Type: &countType,
Points: []datadog.DataPoint{
{
&convertedTime,
dde.eventMetric,
},
},
},
{
Metric: &statusMetricName,
Tags: dde.event.Tags,
Type: &countType,
Points: []datadog.DataPoint{
{
&convertedTime,
&sOne,
},
},
},
},
)
if err != nil {
return err
}
log.Info("Metric posted successfully.")
return nil
}
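// Illustrative usage sketch (not part of the original file): the action entrypoint is assumed
// to export the INPUT_* variables read above and then simply build and post the event:
//
//   os.Setenv("INPUT_EVENT_TITLE", "deploy finished")
//   os.Setenv("INPUT_EVENT_TAGS", "repo:example,env:prod")
//   os.Setenv("INPUT_EVENT_STATUS", "success")
//   if err := NewDatadogEvent().Post(); err != nil {
//       log.Fatalf("unable to post event: %v", err)
//   }
//
// The INPUT_DD_API_KEY and INPUT_DD_APP_KEY inputs must also be present, otherwise
// NewDatadogClient exits via log.Fatalf.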
| [
"\"INPUT_DD_API_KEY\"",
"\"INPUT_DD_APP_KEY\"",
"\"INPUT_EVENT_TITLE\"",
"\"INPUT_EVENT_TAGS\"",
"\"INPUT_EVENT_STATUS\"",
"\"INPUT_EVENT_METRIC\"",
"\"INPUT_EVENT_METRIC_NAME\""
]
| []
| [
"INPUT_DD_API_KEY",
"INPUT_EVENT_METRIC",
"INPUT_EVENT_METRIC_NAME",
"INPUT_DD_APP_KEY",
"INPUT_EVENT_STATUS",
"INPUT_EVENT_TITLE",
"INPUT_EVENT_TAGS"
]
| [] | ["INPUT_DD_API_KEY", "INPUT_EVENT_METRIC", "INPUT_EVENT_METRIC_NAME", "INPUT_DD_APP_KEY", "INPUT_EVENT_STATUS", "INPUT_EVENT_TITLE", "INPUT_EVENT_TAGS"] | go | 7 | 0 | |
lambda/server.py | #!/usr/bin/python
import traceback, json, sys, socket, os
import rethinkdb
import tornado.ioloop
import tornado.web
import tornado.httpserver
import tornado.netutil
HOST_PATH = '/host'
SOCK_PATH = '%s/ol.sock' % HOST_PATH
STDOUT_PATH = '%s/stdout' % HOST_PATH
STDERR_PATH = '%s/stderr' % HOST_PATH
PROCESSES_DEFAULT = 10
initialized = False
config = None
db_conn = None
# run once per process
def init():
global initialized, config, db_conn, lambda_func
if initialized:
return
sys.stdout = open(STDOUT_PATH, 'w')
sys.stderr = open(STDERR_PATH, 'w')
config = json.loads(os.environ['ol.config'])
if config.get('db', None) == 'rethinkdb':
host = config.get('rethinkdb.host', 'localhost')
port = config.get('rethinkdb.port', 28015)
print 'Connect to %s:%d' % (host, port)
db_conn = rethinkdb.connect(host, port)
sys.path.append('/handler')
import lambda_func # assume submitted .py file is /handler/lambda_func.py
initialized = True
class SockFileHandler(tornado.web.RequestHandler):
def post(self):
try:
init()
data = self.request.body
try :
event = json.loads(data)
except:
self.set_status(400)
self.write('bad POST data: "%s"'%str(data))
return
self.write(json.dumps(lambda_func.handler(db_conn, event)))
except Exception:
self.set_status(500) # internal error
self.write(traceback.format_exc())
tornado_app = tornado.web.Application([
(r".*", SockFileHandler),
])
# listen on sock file with Tornado
def lambda_server():
server = tornado.httpserver.HTTPServer(tornado_app)
socket = tornado.netutil.bind_unix_socket(SOCK_PATH)
server.add_socket(socket)
tornado.ioloop.IOLoop.instance().start()
server.start(PROCESSES_DEFAULT)
if __name__ == '__main__':
lambda_server()
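# Illustrative sketch (not part of the original file): once the server is listening on
# SOCK_PATH, an event can be posted over the unix socket, e.g. (path as seen inside the
# container):
#
#   curl --unix-socket /host/ol.sock -X POST -d '{"name": "world"}' http://localhost/
#
# The JSON body is decoded and passed to lambda_func.handler(db_conn, event), and the
# handler's return value is serialized back as the JSON response. The example payload is an
# assumption; the real schema depends on the deployed handler.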
| []
| []
| [
"ol.config"
]
| [] | ["ol.config"] | python | 1 | 0 | |
starter/asgi.py | """
ASGI config for starter project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'starter.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
go/examples/test/delete_configuration_test.go | package test
import (
"fmt"
"os"
"testing"
"github.com/avinetworks/sdk/go/clients"
"github.com/avinetworks/sdk/go/session"
)
func TestDeleteConfigurations(t *testing.T) {
aviClient, err := clients.NewAviClient(os.Getenv("controller"), "admin",
session.SetPassword(os.Getenv("password")),
session.SetTenant("avinetworks"),
session.SetVersion(os.Getenv("version")),
session.SetInsecure)
if err != nil {
fmt.Println("Couldn't create session: ", err)
t.Fail()
}
cv, err := aviClient.AviSession.GetControllerVersion()
fmt.Printf("Avi Controller Version: %v:%v\n", cv, err)
// Delete Virtualservice
vsRes := aviClient.VirtualService.DeleteByName("Test-vs")
fmt.Printf("\n Virtualservice deleted successfully : %+v", vsRes)
// Delete Pool
poolRes := aviClient.Pool.DeleteByName("Test-pool")
fmt.Printf("Pool Deleted Successfully, : %+v", poolRes)
// Create session for webapp tenant
aviClient1, err := clients.NewAviClient(os.Getenv("controller"), "admin",
session.SetPassword(os.Getenv("password")),
session.SetTenant("admin"),
session.SetVersion(os.Getenv("version")),
session.SetInsecure)
if err != nil {
fmt.Println("Couldn't create session: ", err)
t.Fail()
}
// Delete persistence profile
appProfRes := aviClient1.ApplicationPersistenceProfile.DeleteByName("Test-Persistece-Profile")
fmt.Printf("\n Application persistence profile deleted successfully: %+v", appProfRes)
// Delete healthmonitor
sslProfRes := aviClient1.SSLProfile.DeleteByName("Test-Ssl-Profile")
fmt.Printf("\n Ssl profile deleted successfully: %+v", sslProfRes)
// Delete healthmonitor
res := aviClient1.HealthMonitor.DeleteByName("Test-Healthmonitor")
fmt.Printf("\n Healthmonitor deleted successfully: %+v", res)
// Delete tenant
tenantRes := aviClient1.Tenant.DeleteByName("avinetworks")
fmt.Printf("Tenant avinetworks deleted successfully %+v", tenantRes)
// Delete cloud
cloudRes := aviClient1.Cloud.DeleteByName("Test-vcenter-cloud")
fmt.Printf("\n Cloud deleted successfully : %-v", cloudRes)
}
| [
"\"controller\"",
"\"password\"",
"\"version\"",
"\"controller\"",
"\"password\"",
"\"version\""
]
| []
| [
"password",
"version",
"controller"
]
| [] | ["password", "version", "controller"] | go | 3 | 0 | |
eLarning_LMS/eLarning_LMS/wsgi.py | """
WSGI config for eLarning_LMS project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eLarning_LMS.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.go | package main
import (
"database/sql"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"text/template"
"github.com/zwergon/pwallet/utils"
_ "github.com/go-sql-driver/mysql"
)
//RegistrationInfo to store data for web identification
type RegistrationInfo struct {
Id int
Comment string
Company string
Detail string
Login string
Passwd string
}
var tmpl = template.Must(template.ParseGlob("form/*"))
//IndexHandler serves requests that list all stored registration entries
type IndexHandler struct {
db *sql.DB
}
//ServeHTTP reads every row from the registrationinfo table and writes the result as JSON
func (h *IndexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
selDB, err := h.db.Query("SELECT id,company,comment FROM registrationinfo ORDER BY company ASC")
if err != nil {
panic(err.Error())
}
rInfo := RegistrationInfo{}
res := []RegistrationInfo{}
for selDB.Next() {
var id int
var company string
var comment sql.NullString
err = selDB.Scan(&id, &company, &comment)
if err != nil {
panic(err.Error())
}
rInfo.Id = id
if comment.Valid {
rInfo.Comment = comment.String
} else {
rInfo.Comment = ""
}
rInfo.Company = company
res = append(res, rInfo)
}
json.NewEncoder(w).Encode(res)
}
type ShowHandler struct {
db *sql.DB
}
//ServeHTTP returns one registrationinfo entry, selected by the "id" request header, as JSON
func (s *ShowHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
nID := r.Header.Get("id")
selDB, err := s.db.Query("SELECT id,company,login,passwd FROM registrationinfo WHERE id=?", nID)
if err != nil {
panic(err.Error())
}
rInfo := RegistrationInfo{}
for selDB.Next() {
var id int
var company, login, passwd string
err = selDB.Scan(&id, &company, &login, &passwd)
if err != nil {
panic(err.Error())
}
rInfo.Id = id
rInfo.Company = company
rInfo.Login = login
rInfo.Passwd = passwd
}
json.NewEncoder(w).Encode(rInfo)
}
/*
//New create New
func New(w http.ResponseWriter, r *http.Request) {
tmpl.ExecuteTemplate(w, "New", nil)
}
//Edit edit a registrationinfo
func Edit(w http.ResponseWriter, r *http.Request) {
db := dbConn()
nId := r.URL.Query().Get("id")
selDB, err := db.Query("SELECT * FROM Employee WHERE id=?", nId)
if err != nil {
panic(err.Error())
}
rInfo := RegistrationInfo{}
for selDB.Next() {
var id int
var comment, company, detail, login, passwd string
err = selDB.Scan(&id, &comment, &company, &detail, &login, &passwd)
if err != nil {
panic(err.Error())
}
rInfo.Id = id
rInfo.Comment = comment
rInfo.Company = company
rInfo.Detail = detail
rInfo.Login = login
rInfo.Passwd = passwd
}
tmpl.ExecuteTemplate(w, "Edit", rInfo)
defer db.Close()
}
//Insert insert a new element in database
func Insert(w http.ResponseWriter, r *http.Request) {
db := dbConn()
if r.Method == "POST" {
name := r.FormValue("name")
city := r.FormValue("city")
insForm, err := db.Prepare("INSERT INTO Employee(name, city) VALUES(?,?)")
if err != nil {
panic(err.Error())
}
insForm.Exec(name, city)
log.Println("INSERT: Name: " + name + " | City: " + city)
}
defer db.Close()
http.Redirect(w, r, "/", 301)
}
//Update update an element in database
func Update(w http.ResponseWriter, r *http.Request) {
db := dbConn()
if r.Method == "POST" {
name := r.FormValue("name")
city := r.FormValue("city")
id := r.FormValue("uid")
insForm, err := db.Prepare("UPDATE Employee SET name=?, city=? WHERE id=?")
if err != nil {
panic(err.Error())
}
insForm.Exec(name, city, id)
log.Println("UPDATE: Name: " + name + " | City: " + city)
}
defer db.Close()
http.Redirect(w, r, "/", 301)
}
//Delete remove an element in database
func Delete(w http.ResponseWriter, r *http.Request) {
db := dbConn()
rInfo := r.URL.Query().Get("id")
delForm, err := db.Prepare("DELETE FROM Employee WHERE id=?")
if err != nil {
panic(err.Error())
}
delForm.Exec(rInfo)
log.Println("DELETE")
defer db.Close()
http.Redirect(w, r, "/", 301)
}
*/
func main() {
database := "local"
if len(os.Args) > 1 {
database = os.Args[1]
}
host := os.Getenv("IP")
port := os.Getenv("PORT")
adress := fmt.Sprintf("%s:%s", host, port)
dbInfo := utils.NewDB(database)
db := dbInfo.DbConn()
log.Println("Server started on: http://" + adress)
idxHandler := IndexHandler{db: db}
http.Handle("/idx", &idxHandler)
ShowHandler := ShowHandler{db: db}
http.Handle("/show", &ShowHandler)
//http.HandleFunc("/new", New)
//http.HandleFunc("/edit", Edit)
//http.HandleFunc("/insert", Insert)
//http.HandleFunc("/update", Update)
//http.HandleFunc("/delete", Delete)
if err := http.ListenAndServe(adress, nil); err != nil {
log.Fatal(err)
}
log.Println("Server stopped!")
defer db.Close()
}
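// Illustrative sketch (not part of the original file): with IP and PORT exported, the two
// registered routes can be exercised as follows (note that /show reads the record id from an
// "id" request header rather than from the URL):
//
//   curl http://$IP:$PORT/idx
//   curl -H "id: 3" http://$IP:$PORT/show
//
// The id value 3 is a placeholder for an id returned by /idx.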
| [
"\"IP\"",
"\"PORT\""
]
| []
| [
"PORT",
"IP"
]
| [] | ["PORT", "IP"] | go | 2 | 0 | |
discrete_controller.py | from scipy.linalg import lstsq
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
import numpy as np
import torch
from utils import criteria, estimate, estimate_batch, gramian, toeplitz
from adjoint import Evaluation
class DiscreteController:
def __init__(self, A, B, T, gamma, sigma, columns=None, x=None, X_data=None, optimality=''):
super().__init__()
self.T = T
self.A = A
self.B = B
self.d, self.m = B.shape
self.x = torch.zeros(self.d) if x is None else x
self.columns = columns if columns is not None else torch.ones(self.d, dtype=torch.bool)
self.X_data = X_data if X_data is not None else torch.zeros(1, self.columns.sum())
self.gamma = gamma
self.sigma = sigma
self.U = torch.randn(self.T, self.m, requires_grad=True)
# self.U = torch.ones(self.T, self.m, requires_grad=True)
self.criterion = criteria.get(optimality)
self.gramian = gramian(A, T)
self.covariates_matrix = toeplitz(A, T)
def forward(self, x, stochastic=True):
U = self.gamma * np.sqrt(self.T) * self.U / torch.norm(self.U)
return self.integration(x, self.A, U, stochastic), U
# return self.integration(x, self.covariates_matrix, U, stochastic), U
def integration_(self, x, covariates, U, stochastic):
batch_size = x.shape[0]
X = x.unsqueeze(1).expand(-1, self.T+1, -1).clone()
control_input = ([email protected]).view(self.d*self.T)
control_X = (covariates@control_input).view(self.T, self.d)
X[:, 1:] += control_X.unsqueeze(0).expand(batch_size, -1, -1)
if stochastic:
W = self.sigma * torch.randn(self.T*self.d, batch_size)
noise_X = (self.covariates_matrix@W).reshape(batch_size, self.T, self.d)
X[:, 1:] += noise_X
return X
def integration(self, x, A, U, stochastic):
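# Rolls the linear system forward for T steps, x_{t+1} = A x_t + B u_t (+ sigma * w_t with
# standard Gaussian w_t when stochastic=True), and returns the trajectory X of shape
# (batch_size, T+1, d); note that X[:, 0, :] is left as zeros rather than set to x.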
batch_size = x.shape[0]
X = torch.zeros(batch_size, self.T+1, self.d)
for t in range(self.T):
u = U[t, :]
x = (A @ x.T).T + self.B@u
if stochastic:
noise = self.sigma * torch.randn_like(x)
x += noise
X[:, t+1, :] = x
# print(f'played mean energy {(U**2).sum()/self.T}')
return X
def play(self, x, A, U):
# print(f'played mean energy {(U**2).sum() / self.T}')
energy_constraint = (torch.sum(U**2) / self.T <= (self.gamma**2)*1.1)
assert energy_constraint, f'energy constraint not met : mean energy {torch.sum(U**2) / self.T}'
covariates = toeplitz(A, self.T)
# return self.integration(x, covariates, U, stochastic=True), U
return self.integration(x, A, U, stochastic=True), U
def play_control(self, x, A):
U = self.gamma * np.sqrt(self.T) * self.U / torch.norm(self.U)
return self.play(x, A, U)
def play_random(self, x, A):
U = torch.randn(self.T, self.m)
U_normalized = self.gamma * np.sqrt(self.T) * U / torch.norm(U)
return self.play(x, A, U_normalized)
def plan(self, n_steps, batch_size, stochastic=True, learning_rate=0.1, test=None):
if not stochastic:
return self.plan_certainty(n_steps, batch_size, learning_rate, test)
optimizer = torch.optim.Adam([self.U], lr=learning_rate)
loss_values = []
error_values = []
for step_index in range(n_steps):
if test is not None:
# and int(100*step_index/n_steps)%10 == 0:
test_loss, error = self.test(test, batch_size)
# test_loss, error = self.test_batch(batch_size)
loss_values.append(test_loss)
error_values.append(error.item())
x = self.x.unsqueeze(0).expand(batch_size, self.d)
X, U = self.forward(x, stochastic)
X_data = self.X_data.unsqueeze(0).expand(batch_size, -1, -1)
# print(f'{X_data.shape}, {X.shape}')
X_total = torch.cat((X_data, X[:, :, self.columns]), dim=1)
S = torch.linalg.svdvals(X_total[:, :-1])
# print(S)
# print(S.min())
loss = self.criterion(S, self.T)
# print(f'loss {loss}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.U.data = self.gamma *np.sqrt(self.T) * self.U / torch.norm(self.U)
return loss_values, error_values
def plan_certainty(self, n_steps, batch_size, learning_rate=0.1, test=None):
optimizer = torch.optim.Adam([self.U], lr=learning_rate)
loss_values = []
error_values = []
for step_index in range(n_steps):
if test is not None:
# and int(100*step_index/n_steps)%10 == 0:
test_loss, error = self.test(test, batch_size)
# test_loss, error = self.test_batch(batch_size)
loss_values.append(test_loss)
error_values.append(error.item())
x = torch.zeros(1, self.d)
X, U = self.forward(x, False)
X = X.squeeze()
M = X.T @ X
M += (self.sigma**2) * self.gramian
S = torch.linalg.eigvals(M).unsqueeze(0)
S, _ = torch.sort(torch.real(S), descending=True)
# print(S.min())
loss = self.criterion(S, self.T)
# print(f'loss {loss}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.U.data = self.gamma *np.sqrt(self.T) * self.U / torch.norm(self.U)
return loss_values, error_values
def plan_adjoint(self, n_steps, batch_size, stochastic, learning_rate=0.1, test=False):
optimizer = torch.optim.Adam([self.U], lr=learning_rate)
loss_values = []
error_values = []
for step_index in range(n_steps):
if test:
test_loss, error = self.test_batch(batch_size)
# print(f'test loss {test_loss.item()}')
loss_values.append(test_loss.item())
error_values.append(error.item())
U = self.gamma * np.sqrt(self.T) * self.U / torch.norm(self.U)
loss = Evaluation.apply(self.A, self.B, self.U, self.T, self.sigma)
# print(f'training loss {loss.item()}')
optimizer.zero_grad()
loss.backward()
optimizer.step()
self.U.data = self.gamma * np.sqrt(self.T) * self.U / torch.norm(self.U)
return loss_values, error_values
def test(self, test_type, batch_size):
with torch.no_grad():
x = torch.zeros(1, self.d)
X, U = self.play_control(x, self.A)
# X, U = self.forward(x, False)
S = torch.linalg.svdvals(X[:, :-1])
if test_type == 'criterion':
test_loss = self.criterion(S, self.T)
elif test_type == 'sv':
test_loss = [S[0, -1], S[0, 0]]
elif test_type == 'partial':
test_loss = torch.linalg.norm(X[:, -1, :2])
X_tilde = X.squeeze()[:-1, :2]
X_bar = X.squeeze()[:-1, 2:]
A_bar = self.A[2:, 2:]
A_tilde = self.A[2:, :2]
Y = (X.squeeze()[1:, :] - [email protected])[:, 2:] - X_bar@A_bar.T
solution, _, _, _ = lstsq(X_tilde, Y)
estimation = solution.T
# print(f'estimation {estimation}')
# print(f'A_tilde {A_tilde}')
error = np.linalg.norm(estimation - A_tilde.numpy())
return test_loss, error
# M = X.permute(0, 2, 1) @ X.permute(0, 1, 2)
# test_loss = - torch.log(torch.det(M)).mean()
A_hat = estimate(X.squeeze(), U)
error = torch.linalg.norm(A_hat - self.A)
energy = torch.sum(U**2)/ self.T
# print(f'X.shape {X.shape}, energy {energy}, A = {self.A}, A_hat = {A_hat}')
# print(f'error {error}')
return test_loss, error
def test_batch(self, batch_size):
with torch.no_grad():
x = torch.zeros(batch_size, self.d)
X, U = self.play_control(x, self.A)
energy_constraint = (torch.sum(U**2) / self.T <= (self.gamma**2)*1.1)
assert energy_constraint, f'energy constraint not met : mean energy {torch.sum(U**2) / self.T}'
# X, U = self.forward(x, True)
A_hat = estimate_batch(X, U.unsqueeze(0))
error = torch.linalg.norm(A_hat - self.A, dim=(1,2)).mean()
# print(f'test error {error}')
S = torch.linalg.svdvals(X[:, :-1, :])
# test_loss = self.criterion(S, self.T)
test_loss = S[:, -1].mean()
# M = X.permute(0, 2, 1) @ X.permute(0, 1, 2)
# test_loss = - torch.log(torch.det(M)).mean()
return test_loss, error
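# Illustrative usage sketch (not part of the original file). The argument values are
# placeholders, and the optimality string must be a key of utils.criteria (not shown here):
#
#   A = torch.randn(4, 4) / 4
#   B = torch.randn(4, 2)
#   controller = DiscreteController(A, B, T=100, gamma=1.0, sigma=0.1, optimality='<criterion name>')
#   losses, errors = controller.plan(n_steps=200, batch_size=16, test='criterion')
#   X, U = controller.play_control(torch.zeros(1, 4), A)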
| []
| []
| [
"KMP_DUPLICATE_LIB_OK"
]
| [] | ["KMP_DUPLICATE_LIB_OK"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dialog.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
catkin_tools/notifications/impl.py | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This modules provides a portable, failsafe notification function"""
import os
import platform
import subprocess
from catkin_tools.utils import which
this_dir = os.path.dirname(__file__)
def _notify_osx(title, msg):
app_path = os.path.join(this_dir, 'resources', 'osx', 'catkin build.app')
open_exec = which('open')
if open_exec is None:
return
command = [open_exec, app_path, '--args', title, msg]
terminal = os.environ.get('TERM_PROGRAM', '')
if terminal == "Apple_Terminal":
command += ["-activate", "com.apple.Terminal"]
elif terminal == "iTerm.app":
command += ["-activate", "com.googlecode.iterm2"]
subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def _notify_linux(title, msg):
icon_path = os.path.join(this_dir, 'resources', 'linux', 'catkin_icon.png')
notify_send_exec = which('notify-send')
if notify_send_exec is None:
return
subprocess.Popen([notify_send_exec, '-i', icon_path, '-t', '2000', '--hint', 'int:transient:1', title, msg],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def notify(title, msg):
if platform.system() == 'Darwin':
return _notify_osx(title, msg)
if platform.system() == 'Linux':
return _notify_linux(title, msg)
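# Illustrative usage sketch (not part of the original file): callers pass a title and message,
# and the call is effectively a no-op on unsupported platforms or when the helper binaries
# ("open" / "notify-send") are missing:
#
#   notify('catkin build', 'Finished building 12 packages')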
| []
| []
| [
"TERM_PROGRAM"
]
| [] | ["TERM_PROGRAM"] | python | 1 | 0 | |
stock_analysis/scripts/initializedb.py | """Initialize database."""
import os
import sys
import transaction
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models.meta import Base
from ..models import (
get_engine,
get_session_factory,
get_tm_session,
)
from ..models import User
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
settings['sqlalchemy.url'] = os.environ.get('DATABASE_URL', '')
engine = get_engine(settings)
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
session_factory = get_session_factory(engine)
with transaction.manager:
dbsession = get_tm_session(session_factory, transaction.manager)
model = User(username='chelseadole', password='potato')
dbsession.add(model)
| []
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | python | 1 | 0 | |
apiserver/apiserver/api/tests.py | import django, sys, os
os.environ['DJANGO_SETTINGS_MODULE'] = 'apiserver.settings'
django.setup()
from django.test import TestCase
import datetime
from dateutil import relativedelta
from rest_framework.exceptions import ValidationError
from apiserver.api import utils, utils_paypal, models
testing_member, _ = models.Member.objects.get_or_create(
first_name='unittest',
preferred_name='unittest',
last_name='tester',
)
class TestMonthsSpanned(TestCase):
def test_num_months_spanned_one_month(self):
date2 = datetime.date(2020, 1, 10)
date1 = datetime.date(2020, 2, 10)
spanned = utils.num_months_spanned(date1, date2)
self.assertEqual(spanned, 1)
def test_num_months_spanned_one_week(self):
date1 = datetime.date(2020, 2, 5)
date2 = datetime.date(2020, 1, 28)
spanned = utils.num_months_spanned(date1, date2)
self.assertEqual(spanned, 1)
def test_num_months_spanned_two_days(self):
date1 = datetime.date(2020, 2, 1)
date2 = datetime.date(2020, 1, 31)
spanned = utils.num_months_spanned(date1, date2)
self.assertEqual(spanned, 1)
def test_num_months_spanned_two_years(self):
date1 = datetime.date(2022, 1, 18)
date2 = datetime.date(2020, 1, 18)
spanned = utils.num_months_spanned(date1, date2)
self.assertEqual(spanned, 24)
def test_num_months_spanned_same_month(self):
date1 = datetime.date(2020, 1, 31)
date2 = datetime.date(2020, 1, 1)
spanned = utils.num_months_spanned(date1, date2)
self.assertEqual(spanned, 0)
class TestMonthsDifference(TestCase):
def test_num_months_difference_one_month(self):
date2 = datetime.date(2020, 1, 10)
date1 = datetime.date(2020, 2, 10)
difference = utils.num_months_difference(date1, date2)
self.assertEqual(difference, 1)
def test_num_months_difference_one_week(self):
date1 = datetime.date(2020, 2, 5)
date2 = datetime.date(2020, 1, 28)
difference = utils.num_months_difference(date1, date2)
self.assertEqual(difference, 0)
def test_num_months_difference_two_days(self):
date1 = datetime.date(2020, 2, 1)
date2 = datetime.date(2020, 1, 31)
difference = utils.num_months_difference(date1, date2)
self.assertEqual(difference, 0)
def test_num_months_difference_two_years(self):
date1 = datetime.date(2022, 1, 18)
date2 = datetime.date(2020, 1, 18)
difference = utils.num_months_difference(date1, date2)
self.assertEqual(difference, 24)
def test_num_months_difference_same_month(self):
date1 = datetime.date(2020, 1, 31)
date2 = datetime.date(2020, 1, 1)
difference = utils.num_months_difference(date1, date2)
self.assertEqual(difference, 0)
class TestAddMonths(TestCase):
def test_add_months_one_month(self):
date = datetime.date(2020, 1, 18)
num_months = 1
new_date = utils.add_months(date, num_months)
self.assertEqual(new_date, datetime.date(2020, 2, 18))
def test_add_months_february(self):
date = datetime.date(2020, 1, 31)
num_months = 1
new_date = utils.add_months(date, num_months)
self.assertEqual(new_date, datetime.date(2020, 2, 29))
def test_add_months_february_leap(self):
date = datetime.date(2020, 2, 29)
num_months = 12
new_date = utils.add_months(date, num_months)
self.assertEqual(new_date, datetime.date(2021, 2, 28))
def test_add_months_hundred_years(self):
date = datetime.date(2020, 1, 31)
num_months = 1200
new_date = utils.add_months(date, num_months)
self.assertEqual(new_date, datetime.date(2120, 1, 31))
class TestCalcStatus(TestCase):
def test_calc_member_status_14_days(self):
expire_date = datetime.date.today() + datetime.timedelta(days=14)
status, former = utils.calc_member_status(expire_date)
self.assertEqual(status, 'Current')
self.assertEqual(former, False)
def test_calc_member_status_90_days(self):
expire_date = datetime.date.today() + datetime.timedelta(days=90)
status, former = utils.calc_member_status(expire_date)
self.assertEqual(status, 'Prepaid')
self.assertEqual(former, False)
def test_calc_member_status_tomorrow(self):
expire_date = datetime.date.today() + datetime.timedelta(days=1)
status, former = utils.calc_member_status(expire_date)
self.assertEqual(status, 'Current')
self.assertEqual(former, False)
def test_calc_member_status_today(self):
expire_date = datetime.date.today()
status, former = utils.calc_member_status(expire_date)
self.assertEqual(status, 'Due')
self.assertEqual(former, False)
def test_calc_member_status_yesterday(self):
expire_date = datetime.date.today() - datetime.timedelta(days=1)
status, former = utils.calc_member_status(expire_date)
self.assertEqual(status, 'Due')
self.assertEqual(former, False)
def test_calc_member_status_85_days_ago(self):
expire_date = datetime.date.today() - datetime.timedelta(days=85)
status, former = utils.calc_member_status(expire_date)
self.assertEqual(status, 'Overdue')
self.assertEqual(former, False)
def test_calc_member_status_95_days_ago(self):
expire_date = datetime.date.today() - datetime.timedelta(days=95)
status, former = utils.calc_member_status(expire_date)
self.assertEqual(status, 'Overdue')
self.assertEqual(former, True)
class TestFakeMonths(TestCase):
def test_fake_missing_membership_months_one_month(self):
testing_member.current_start_date = datetime.date(2018, 6, 6)
testing_member.expire_date = datetime.date(2018, 7, 6)
tx, count = utils.fake_missing_membership_months(testing_member)
self.assertEqual(count, 1)
def test_fake_missing_membership_months_one_and_half_month(self):
testing_member.current_start_date = datetime.date(2018, 6, 1)
testing_member.expire_date = datetime.date(2018, 7, 15)
tx, count = utils.fake_missing_membership_months(testing_member)
self.assertEqual(count, 1)
def test_fake_missing_membership_months_one_year(self):
testing_member.current_start_date = datetime.date(2018, 6, 6)
testing_member.expire_date = datetime.date(2019, 6, 6)
tx, count = utils.fake_missing_membership_months(testing_member)
self.assertEqual(count, 12)
def test_fake_missing_membership_months_same_month(self):
testing_member.current_start_date = datetime.date(2018, 6, 6)
testing_member.expire_date = datetime.date(2018, 6, 16)
tx, count = utils.fake_missing_membership_months(testing_member)
self.assertEqual(count, 0)
class TestTallyMembership(TestCase):
def get_member_clear_transactions(self):
member = testing_member
member.paused_date = None
member.expire_date = None
return member
def test_tally_membership_months_prepaid(self):
member = self.get_member_clear_transactions()
test_num_months = 8
start_date = datetime.date.today() - relativedelta.relativedelta(months=6, days=14)
end_date = start_date + relativedelta.relativedelta(months=test_num_months)
member.current_start_date = start_date
member.save()
for i in range(test_num_months):
models.Transaction.objects.create(
amount=0,
member_id=member.id,
number_of_membership_months=1,
)
result = utils.tally_membership_months(member)
self.assertEqual(member.expire_date, end_date)
self.assertEqual(member.status, 'Prepaid')
def test_tally_membership_months_current(self):
member = self.get_member_clear_transactions()
test_num_months = 7
start_date = datetime.date.today() - relativedelta.relativedelta(months=6, days=14)
end_date = start_date + relativedelta.relativedelta(months=test_num_months)
member.current_start_date = start_date
member.save()
for i in range(test_num_months):
models.Transaction.objects.create(
amount=0,
member_id=member.id,
number_of_membership_months=1,
)
result = utils.tally_membership_months(member)
self.assertEqual(member.expire_date, end_date)
self.assertEqual(member.status, 'Current')
def test_tally_membership_months_due(self):
member = self.get_member_clear_transactions()
test_num_months = 6
start_date = datetime.date.today() - relativedelta.relativedelta(months=6, days=14)
end_date = start_date + relativedelta.relativedelta(months=test_num_months)
member.current_start_date = start_date
member.save()
for i in range(test_num_months):
models.Transaction.objects.create(
amount=0,
member_id=member.id,
number_of_membership_months=1,
)
result = utils.tally_membership_months(member)
self.assertEqual(member.expire_date, end_date)
self.assertEqual(member.status, 'Due')
def test_tally_membership_months_overdue(self):
member = self.get_member_clear_transactions()
test_num_months = 5
start_date = datetime.date.today() - relativedelta.relativedelta(months=6, days=14)
end_date = start_date + relativedelta.relativedelta(months=test_num_months)
member.current_start_date = start_date
member.save()
for i in range(test_num_months):
models.Transaction.objects.create(
amount=0,
member_id=member.id,
number_of_membership_months=1,
)
result = utils.tally_membership_months(member)
self.assertEqual(member.expire_date, end_date)
self.assertEqual(member.status, 'Overdue')
def test_tally_membership_months_overdue_pause(self):
member = self.get_member_clear_transactions()
test_num_months = 1
start_date = datetime.date.today() - relativedelta.relativedelta(months=6, days=14)
end_date = start_date + relativedelta.relativedelta(months=test_num_months)
member.current_start_date = start_date
member.save()
for i in range(test_num_months):
models.Transaction.objects.create(
amount=0,
member_id=member.id,
number_of_membership_months=1,
)
result = utils.tally_membership_months(member)
self.assertEqual(member.expire_date, end_date)
self.assertEqual(member.paused_date, end_date)
self.assertEqual(member.status, 'Overdue')
def test_tally_membership_months_dont_run(self):
member = self.get_member_clear_transactions()
start_date = datetime.date.today()
member.current_start_date = start_date
member.paused_date = start_date
member.save()
result = utils.tally_membership_months(member)
self.assertEqual(result, False)
class TestParsePayPalDate(TestCase):
def test_parse(self):
string = '20:12:59 Jan 13, 2009 PST'
result = utils_paypal.parse_paypal_date(string)
self.assertEqual(str(result), '2009-01-14 04:12:59+00:00')
def test_parse_dst(self):
string = '20:12:59 Jul 13, 2009 PDT'
result = utils_paypal.parse_paypal_date(string)
self.assertEqual(str(result), '2009-07-14 03:12:59+00:00')
def test_parse_bad_tz(self):
string = '20:12:59 Jul 13, 2009 QOT'
self.assertRaises(ValidationError, utils_paypal.parse_paypal_date, string)
def test_parse_bad_string(self):
string = 'ave satanas'
self.assertRaises(ValidationError, utils_paypal.parse_paypal_date, string)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
cmd/sslCheck.go | package cmd
import (
"io/ioutil"
"os"
"github.com/GannettDigital/go-newrelic-plugin/sslCheck"
status "github.com/GannettDigital/goStateModule"
"github.com/spf13/cobra"
)
func init() {
RootCmd.AddCommand(sslCheckCmd)
}
var sslCheckCmd = &cobra.Command{
Use: "sslCheck",
Short: "Records events based on host certificate expirations",
Run: func(cmd *cobra.Command, args []string) {
rootCaFile := os.Getenv("SSLCHECK_ROOT_CAS")
var rootCAPem []byte
var err error
if rootCaFile != "" {
rootCAPem, err = ioutil.ReadFile(rootCaFile)
if err != nil {
log.Fatalf("Error Reading Ca File: %v\n", err)
}
}
hosts, err := sslCheck.ProcessHosts(os.Getenv("SSLCHECK_HOSTS"))
if err != nil {
log.Fatalf("Error Processing Hosts: %v\n", err)
}
var config = sslCheck.Config{
Hosts: hosts,
}
err = sslCheck.ValidateConfig(config)
if err != nil {
log.Fatalf("invalid config: %v\n", err)
}
sslCheck.Run(log, config, rootCAPem, prettyPrint, status.GetInfo().Version)
},
}
| [
"\"SSLCHECK_ROOT_CAS\"",
"\"SSLCHECK_HOSTS\""
]
| []
| [
"SSLCHECK_ROOT_CAS",
"SSLCHECK_HOSTS"
]
| [] | ["SSLCHECK_ROOT_CAS", "SSLCHECK_HOSTS"] | go | 2 | 0 | |
pkg/app/app.go | package app
import (
"bufio"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"github.com/aybabtme/humanlog"
"github.com/Jeffthedoor/generics/slices"
"github.com/Jeffthedoor/lazygit/pkg/commands"
"github.com/Jeffthedoor/lazygit/pkg/commands/git_config"
"github.com/Jeffthedoor/lazygit/pkg/commands/oscommands"
"github.com/Jeffthedoor/lazygit/pkg/common"
"github.com/Jeffthedoor/lazygit/pkg/config"
"github.com/Jeffthedoor/lazygit/pkg/env"
"github.com/Jeffthedoor/lazygit/pkg/gui"
"github.com/Jeffthedoor/lazygit/pkg/i18n"
"github.com/Jeffthedoor/lazygit/pkg/updates"
"github.com/sirupsen/logrus"
)
// App struct
type App struct {
*common.Common
closers []io.Closer
Config config.AppConfigurer
OSCommand *oscommands.OSCommand
Gui *gui.Gui
Updater *updates.Updater // may only need this on the Gui
ClientContext string
}
type errorMapping struct {
originalError string
newError string
}
func newProductionLogger() *logrus.Logger {
log := logrus.New()
log.Out = ioutil.Discard
log.SetLevel(logrus.ErrorLevel)
return log
}
func getLogLevel() logrus.Level {
strLevel := os.Getenv("LOG_LEVEL")
level, err := logrus.ParseLevel(strLevel)
if err != nil {
return logrus.DebugLevel
}
return level
}
func newDevelopmentLogger() *logrus.Logger {
logger := logrus.New()
logger.SetLevel(getLogLevel())
logPath, err := config.LogPath()
if err != nil {
log.Fatal(err)
}
file, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o666)
if err != nil {
log.Fatalf("Unable to log to log file: %v", err)
}
logger.SetOutput(file)
return logger
}
func newLogger(config config.AppConfigurer) *logrus.Entry {
var log *logrus.Logger
if config.GetDebug() || os.Getenv("DEBUG") == "TRUE" {
log = newDevelopmentLogger()
} else {
log = newProductionLogger()
}
// highly recommended: tail -f development.log | humanlog
// https://github.com/aybabtme/humanlog
log.Formatter = &logrus.JSONFormatter{}
return log.WithFields(logrus.Fields{
"debug": config.GetDebug(),
"version": config.GetVersion(),
"commit": config.GetCommit(),
"buildDate": config.GetBuildDate(),
})
}
// NewApp bootstrap a new application
func NewApp(config config.AppConfigurer) (*App, error) {
userConfig := config.GetUserConfig()
app := &App{
closers: []io.Closer{},
Config: config,
}
var err error
log := newLogger(config)
tr, err := i18n.NewTranslationSetFromConfig(log, userConfig.Gui.Language)
if err != nil {
return app, err
}
app.Common = &common.Common{
Log: log,
Tr: tr,
UserConfig: userConfig,
Debug: config.GetDebug(),
}
// if we are being called in 'demon' mode, we can just return here
app.ClientContext = os.Getenv("LAZYGIT_CLIENT_COMMAND")
if app.ClientContext != "" {
return app, nil
}
app.OSCommand = oscommands.NewOSCommand(app.Common, oscommands.GetPlatform(), oscommands.NewNullGuiIO(log))
app.Updater, err = updates.NewUpdater(app.Common, config, app.OSCommand)
if err != nil {
return app, err
}
dirName, err := os.Getwd()
if err != nil {
return app, err
}
showRecentRepos, err := app.setupRepo()
if err != nil {
return app, err
}
gitConfig := git_config.NewStdCachedGitConfig(app.Log)
app.Gui, err = gui.NewGui(app.Common, config, gitConfig, app.Updater, showRecentRepos, dirName)
if err != nil {
return app, err
}
return app, nil
}
func (app *App) validateGitVersion() error {
output, err := app.OSCommand.Cmd.New("git --version").RunWithOutput()
// if we get an error anywhere here we'll show the same status
minVersionError := errors.New(app.Tr.MinGitVersionError)
if err != nil {
return minVersionError
}
if isGitVersionValid(output) {
return nil
}
return minVersionError
}
func isGitVersionValid(versionStr string) bool {
// output should be something like: 'git version 2.23.0 (blah)'
re := regexp.MustCompile(`[^\d]+([\d\.]+)`)
matches := re.FindStringSubmatch(versionStr)
if len(matches) == 0 {
return false
}
gitVersion := matches[1]
majorVersion, err := strconv.Atoi(gitVersion[0:1])
if err != nil {
return false
}
if majorVersion < 2 {
return false
}
return true
}
func (app *App) setupRepo() (bool, error) {
if err := app.validateGitVersion(); err != nil {
return false, err
}
if env.GetGitDirEnv() != "" {
// we've been given the git dir directly. We'll verify this dir when initializing our Git object
return false, nil
}
// if we are not in a git repo, we ask if we want to `git init`
if err := commands.VerifyInGitRepo(app.OSCommand); err != nil {
cwd, err := os.Getwd()
if err != nil {
return false, err
}
info, _ := os.Stat(filepath.Join(cwd, ".git"))
if info != nil && info.IsDir() {
return false, err // Current directory appears to be a git repository.
}
shouldInitRepo := true
notARepository := app.UserConfig.NotARepository
if notARepository == "prompt" {
// Offer to initialize a new repository in current directory.
fmt.Print(app.Tr.CreateRepo)
response, _ := bufio.NewReader(os.Stdin).ReadString('\n')
if strings.Trim(response, " \n") != "y" {
shouldInitRepo = false
}
} else if notARepository == "skip" {
shouldInitRepo = false
}
if !shouldInitRepo {
// check if we have a recent repo we can open
recentRepos := app.Config.GetAppState().RecentRepos
if len(recentRepos) > 0 {
var err error
// try opening each repo in turn, in case any have been deleted
for _, repoDir := range recentRepos {
if err = os.Chdir(repoDir); err == nil {
return true, nil
}
}
return false, err
}
os.Exit(1)
}
if err := app.OSCommand.Cmd.New("git init").Run(); err != nil {
return false, err
}
}
return false, nil
}
func (app *App) Run(filterPath string) error {
if app.ClientContext == "INTERACTIVE_REBASE" {
return app.Rebase()
}
if app.ClientContext == "EXIT_IMMEDIATELY" {
os.Exit(0)
}
err := app.Gui.RunAndHandleError(filterPath)
return err
}
func gitDir() string {
dir := env.GetGitDirEnv()
if dir == "" {
return ".git"
}
return dir
}
// Rebase contains logic for when we've been run in demon mode, meaning we've
// given lazygit as a command for git to call e.g. to edit a file
func (app *App) Rebase() error {
app.Log.Info("Lazygit invoked as interactive rebase demon")
app.Log.Info("args: ", os.Args)
if strings.HasSuffix(os.Args[1], "git-rebase-todo") {
if err := ioutil.WriteFile(os.Args[1], []byte(os.Getenv("LAZYGIT_REBASE_TODO")), 0o644); err != nil {
return err
}
} else if strings.HasSuffix(os.Args[1], filepath.Join(gitDir(), "COMMIT_EDITMSG")) { // TODO: test
// if we are rebasing and squashing, we'll see a COMMIT_EDITMSG
// but in this case we don't need to edit it, so we'll just return
} else {
app.Log.Info("Lazygit demon did not match on any use cases")
}
return nil
}
// Close closes any resources
func (app *App) Close() error {
return slices.TryForEach(app.closers, func(closer io.Closer) error {
return closer.Close()
})
}
// KnownError takes an error and tells us whether it's an error that we know about where we can print a nicely formatted version of it rather than panicking with a stack trace
func (app *App) KnownError(err error) (string, bool) {
errorMessage := err.Error()
knownErrorMessages := []string{app.Tr.MinGitVersionError}
if slices.Contains(knownErrorMessages, errorMessage) {
return errorMessage, true
}
mappings := []errorMapping{
{
originalError: "fatal: not a git repository",
newError: app.Tr.NotARepository,
},
}
if mapping, ok := slices.Find(mappings, func(mapping errorMapping) bool {
return strings.Contains(errorMessage, mapping.originalError)
}); ok {
return mapping.newError, true
}
return "", false
}
func TailLogs() {
logFilePath, err := config.LogPath()
if err != nil {
log.Fatal(err)
}
fmt.Printf("Tailing log file %s\n\n", logFilePath)
opts := humanlog.DefaultOptions
opts.Truncates = false
_, err = os.Stat(logFilePath)
if err != nil {
if os.IsNotExist(err) {
log.Fatal("Log file does not exist. Run `lazygit --debug` first to create the log file")
}
log.Fatal(err)
}
TailLogsForPlatform(logFilePath, opts)
}
| [
"\"LOG_LEVEL\"",
"\"DEBUG\"",
"\"LAZYGIT_CLIENT_COMMAND\"",
"\"LAZYGIT_REBASE_TODO\""
]
| []
| [
"LAZYGIT_REBASE_TODO",
"LAZYGIT_CLIENT_COMMAND",
"LOG_LEVEL",
"DEBUG"
]
| [] | ["LAZYGIT_REBASE_TODO", "LAZYGIT_CLIENT_COMMAND", "LOG_LEVEL", "DEBUG"] | go | 4 | 0 | |
mesonbuild/dependencies/boost.py | # Copyright 2013-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for miscellaneous external dependencies.
import glob
import os
from .. import mlog
from .. import mesonlib
from ..environment import detect_cpu_family
from .base import (DependencyException, ExternalDependency)
# On windows 3 directory layouts are supported:
# * The default layout (versioned) installed:
# - $BOOST_ROOT/include/boost-x_x/boost/*.hpp
# - $BOOST_ROOT/lib/*.lib
# * The non-default layout (system) installed:
# - $BOOST_ROOT/include/boost/*.hpp
# - $BOOST_ROOT/lib/*.lib
# * The pre-built binaries from sf.net:
# - $BOOST_ROOT/boost/*.hpp
# - $BOOST_ROOT/lib<arch>-<compiler>/*.lib where arch=32/64 and compiler=msvc-14.1
#
# Note that we should also try to support:
# mingw-w64 / Windows : libboost_<module>-mt.a (location = <prefix>/mingw64/lib/)
# libboost_<module>-mt.dll.a
#
# Library names supported:
# - libboost_<module>-<compiler>-mt-gd-x_x.lib (static)
# - boost_<module>-<compiler>-mt-gd-x_x.lib|.dll (shared)
# - libboost_<module>.lib (static)
# - boost_<module>.lib|.dll (shared)
# where compiler is vc141 for example.
#
# NOTE: -gd means runtime and build time debugging is on
# -mt means threading=multi
#
# The `modules` argument accepts library names. This is because every module that
# has libraries to link against also has multiple options regarding how to
# link. See for example:
# * http://www.boost.org/doc/libs/1_65_1/libs/test/doc/html/boost_test/usage_variants.html
# * http://www.boost.org/doc/libs/1_65_1/doc/html/stacktrace/configuration_and_build.html
# * http://www.boost.org/doc/libs/1_65_1/libs/math/doc/html/math_toolkit/main_tr1.html
# **On Unix**, official packaged versions of boost libraries follow the following schemes:
#
# Linux / Debian: libboost_<module>.so -> libboost_<module>.so.1.66.0
# Linux / Red Hat: libboost_<module>.so -> libboost_<module>.so.1.66.0
# Linux / OpenSuse: libboost_<module>.so -> libboost_<module>.so.1.66.0
# Win / Cygwin: libboost_<module>.dll.a (location = /usr/lib)
# libboost_<module>.a
# cygboost_<module>_1_64.dll (location = /usr/bin)
# Mac / homebrew: libboost_<module>.dylib + libboost_<module>-mt.dylib (location = /usr/local/lib)
# Mac / macports: libboost_<module>.dylib + libboost_<module>-mt.dylib (location = /opt/local/lib)
#
# It's not clear that any other abi tags (e.g. -gd) are used in official packages.
#
# On Linux systems, boost libs have multithreading support enabled, but without the -mt tag.
#
# Boost documentation recommends using complex abi tags like "-lboost_regex-gcc34-mt-d-1_36".
# (See http://www.boost.org/doc/libs/1_66_0/more/getting_started/unix-variants.html#library-naming)
# However, it's not clear that any Unix distribution follows this scheme.
# Furthermore, the boost documentation for unix above uses examples from windows like
# "libboost_regex-vc71-mt-d-x86-1_34.lib", so apparently the abi tags may be more aimed at windows.
#
# Probably we should use the linker search path to decide which libraries to use. This will
# make it possible to find the macports boost libraries without setting BOOST_ROOT, and will
# also mean that it would be possible to use user-installed boost libraries when official
# packages are installed.
#
# We therefore use the following strategy:
# 1. Look for libraries using compiler.find_library( )
# 1.1 On Linux, just look for boost_<module>
# 1.2 On other systems (e.g. Mac) look for boost_<module>-mt if multithreading.
# 1.3 Otherwise look for boost_<module>
# 2. Fall back to previous approach
# 2.1. Search particular directories.
# 2.2. Find boost libraries with unknown suffixes using file-name globbing.
# TODO: Unix: Don't assume we know where the boost dir is, rely on -Idir and -Ldir being set.
# TODO: Allow user to specify suffix in BOOST_SUFFIX, or add specific options like BOOST_DEBUG for 'd' for debug.
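# A minimal sketch of the library naming scheme described above (illustrative
# only; the concrete tag values here are assumptions, and the real detection
# below derives them from the compiler, threading and buildtype settings):
def _example_decorated_boost_name():
    # e.g. boost_regex built with MSVC 14.1, threading=multi, debug, Boost 1.66
    module, compiler, threading, debug, version = 'regex', '-vc141', '-mt', '-gd', '-1_66'
    # shared:    boost_regex-vc141-mt-gd-1_66.lib / .dll
    # static: libboost_regex-vc141-mt-gd-1_66.lib
    return 'boost_' + module + compiler + threading + debug + version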
class BoostDependency(ExternalDependency):
def __init__(self, environment, kwargs):
super().__init__('boost', environment, 'cpp', kwargs)
self.need_static_link = ['boost_exception', 'boost_test_exec_monitor']
# FIXME: is this the right way to find the build type?
self.is_debug = environment.cmd_line_options.buildtype.startswith('debug')
threading = kwargs.get("threading", "multi")
self.is_multithreading = threading == "multi"
self.requested_modules = self.get_requested(kwargs)
self.boost_root = None
self.boost_roots = []
self.incdir = None
self.libdir = None
if 'BOOST_ROOT' in os.environ:
self.boost_root = os.environ['BOOST_ROOT']
self.boost_roots = [self.boost_root]
if not os.path.isabs(self.boost_root):
raise DependencyException('BOOST_ROOT must be an absolute path.')
if 'BOOST_INCLUDEDIR' in os.environ:
self.incdir = os.environ['BOOST_INCLUDEDIR']
if 'BOOST_LIBRARYDIR' in os.environ:
self.libdir = os.environ['BOOST_LIBRARYDIR']
if self.boost_root is None:
if mesonlib.for_windows(self.want_cross, self.env):
self.boost_roots = self.detect_win_roots()
else:
self.boost_roots = self.detect_nix_roots()
if self.incdir is None:
if mesonlib.for_windows(self.want_cross, self.env):
self.incdir = self.detect_win_incdir()
else:
self.incdir = self.detect_nix_incdir()
if self.check_invalid_modules():
self.log_fail()
return
mlog.debug('Boost library root dir is', mlog.bold(self.boost_root))
mlog.debug('Boost include directory is', mlog.bold(self.incdir))
# 1. check if we can find BOOST headers.
self.detect_headers_and_version()
# 2. check if we can find BOOST libraries.
if self.is_found:
self.detect_lib_modules()
mlog.debug('Boost library directory is', mlog.bold(self.libdir))
# 3. Report success or failure
if self.is_found:
self.log_success()
else:
self.log_fail()
def check_invalid_modules(self):
invalid_modules = [c for c in self.requested_modules if 'boost_' + c not in BOOST_LIBS]
# previous versions of meson allowed include dirs as modules
remove = []
for m in invalid_modules:
if m in BOOST_DIRS:
mlog.warning('Requested boost library', mlog.bold(m), 'that doesn\'t exist. '
'This will be an error in the future')
remove.append(m)
self.requested_modules = [x for x in self.requested_modules if x not in remove]
invalid_modules = [x for x in invalid_modules if x not in remove]
if invalid_modules:
mlog.log(mlog.red('ERROR:'), 'Invalid Boost modules: ' + ', '.join(invalid_modules))
return True
else:
return False
def log_fail(self):
module_str = ', '.join(self.requested_modules)
mlog.log("Dependency Boost (%s) found:" % module_str, mlog.red('NO'))
def log_success(self):
module_str = ', '.join(self.requested_modules)
if self.boost_root:
info = self.version + ', ' + self.boost_root
else:
info = self.version
mlog.log('Dependency Boost (%s) found:' % module_str, mlog.green('YES'), info)
def detect_nix_roots(self):
return [os.path.abspath(os.path.join(x, '..'))
for x in self.compiler.get_default_include_dirs()]
def detect_win_roots(self):
res = []
# Where boost documentation says it should be
globtext = 'C:\\Program Files\\boost\\boost_*'
files = glob.glob(globtext)
res.extend(files)
# Where boost built from source actually installs it
if os.path.isdir('C:\\Boost'):
res.append('C:\\Boost')
# Where boost prebuilt binaries are
globtext = 'C:\\local\\boost_*'
files = glob.glob(globtext)
res.extend(files)
return res
def detect_nix_incdir(self):
if self.boost_root:
return os.path.join(self.boost_root, 'include')
return None
# FIXME: Should pick a version that matches the requested version
# Returns the folder that contains the boost folder.
def detect_win_incdir(self):
for root in self.boost_roots:
globtext = os.path.join(root, 'include', 'boost-*')
incdirs = glob.glob(globtext)
if len(incdirs) > 0:
return incdirs[0]
incboostdir = os.path.join(root, 'include', 'boost')
if os.path.isdir(incboostdir):
return os.path.join(root, 'include')
incboostdir = os.path.join(root, 'boost')
if os.path.isdir(incboostdir):
return root
return None
def get_compile_args(self):
args = []
include_dir = self.incdir
# Use "-isystem" when including boost headers instead of "-I"
# to avoid compiler warnings/failures when "-Werror" is used
# Careful not to use "-isystem" on default include dirs as it
# breaks some of the headers for certain gcc versions
# For example, doing g++ -isystem /usr/include on a simple
# "int main()" source results in the error:
# "/usr/include/c++/6.3.1/cstdlib:75:25: fatal error: stdlib.h: No such file or directory"
# See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70129
# and http://stackoverflow.com/questions/37218953/isystem-on-a-system-include-directory-causes-errors
# for more details
if include_dir and include_dir not in self.compiler.get_default_include_dirs():
args.append("".join(self.compiler.get_include_args(include_dir, True)))
return args
def get_requested(self, kwargs):
candidates = mesonlib.extract_as_list(kwargs, 'modules')
for c in candidates:
if not isinstance(c, str):
raise DependencyException('Boost module argument is not a string.')
return candidates
def detect_headers_and_version(self):
try:
version = self.compiler.get_define('BOOST_LIB_VERSION', '#include <boost/version.hpp>', self.env, self.get_compile_args(), [])
except mesonlib.EnvironmentException:
return
except TypeError:
return
# Remove quotes
version = version[1:-1]
# Fix version string
self.version = version.replace('_', '.')
self.is_found = True
def detect_lib_modules(self):
self.lib_modules = {}
# 1. Try to find modules using compiler.find_library( )
if self.find_libraries_with_abi_tags(self.abi_tags()):
pass
# 2. Fall back to the old method
else:
if mesonlib.for_windows(self.want_cross, self.env):
self.detect_lib_modules_win()
else:
self.detect_lib_modules_nix()
# 3. Check if we can find the modules
for m in self.requested_modules:
if 'boost_' + m not in self.lib_modules:
mlog.debug('Requested Boost library {!r} not found'.format(m))
self.is_found = False
def modname_from_filename(self, filename):
modname = os.path.basename(filename)
modname = modname.split('.', 1)[0]
modname = modname.split('-', 1)[0]
if modname.startswith('libboost'):
modname = modname[3:]
return modname
def compiler_tag(self):
tag = None
compiler = self.env.detect_cpp_compiler(self.want_cross)
if mesonlib.for_windows(self.want_cross, self.env):
if compiler.get_id() == 'msvc':
comp_ts_version = compiler.get_toolset_version()
compiler_ts = comp_ts_version.split('.')
# FIXME - what about other compilers?
tag = '-vc{}{}'.format(compiler_ts[0], compiler_ts[1])
else:
tag = ''
return tag
def threading_tag(self):
if not self.is_multithreading:
return ''
if mesonlib.for_darwin(self.want_cross, self.env):
# - Mac: requires -mt for multithreading, so should not fall back to non-mt libraries.
return '-mt'
elif mesonlib.for_windows(self.want_cross, self.env):
# - Windows: requires -mt for multithreading, so should not fall back to non-mt libraries.
return '-mt'
else:
# - Linux: leaves off -mt but libraries are multithreading-aware.
# - Cygwin: leaves off -mt but libraries are multithreading-aware.
return ''
def version_tag(self):
return '-' + self.version.replace('.', '_')
def debug_tag(self):
return '-gd' if self.is_debug else ''
def versioned_abi_tag(self):
return self.compiler_tag() + self.threading_tag() + self.debug_tag() + self.version_tag()
# FIXME - how to handle different distributions, e.g. for Mac? Currently we handle homebrew and macports, but not fink.
def abi_tags(self):
if mesonlib.for_windows(self.want_cross, self.env):
return [self.versioned_abi_tag(), self.threading_tag()]
else:
return [self.threading_tag()]
def sourceforge_dir(self):
if self.env.detect_cpp_compiler(self.want_cross).get_id() != 'msvc':
return None
comp_ts_version = self.env.detect_cpp_compiler(self.want_cross).get_toolset_version()
arch = detect_cpu_family(self.env.coredata.compilers)
if arch == 'x86':
return 'lib32-msvc-{}'.format(comp_ts_version)
elif arch == 'x86_64':
return 'lib64-msvc-{}'.format(comp_ts_version)
else:
# Does anyone do Boost cross-compiling to other archs on Windows?
return None
def find_libraries_with_abi_tag(self, tag):
# All modules should have the same tag
self.lib_modules = {}
all_found = True
for module in self.requested_modules:
libname = 'boost_' + module + tag
args = self.compiler.find_library(libname, self.env, self.extra_lib_dirs())
if args is None:
mlog.debug("Couldn\'t find library '{}' for boost module '{}' (ABI tag = '{}')".format(libname, module, tag))
all_found = False
else:
mlog.debug('Link args for boost module "{}" are {}'.format(module, args))
self.lib_modules['boost_' + module] = args
return all_found
def find_libraries_with_abi_tags(self, tags):
for tag in tags:
if self.find_libraries_with_abi_tag(tag):
return True
return False
def detect_lib_modules_win(self):
if not self.libdir:
# The libdirs in the distributed binaries (from sf)
lib_sf = self.sourceforge_dir()
if self.boost_root:
roots = [self.boost_root]
else:
roots = self.boost_roots
for root in roots:
# The default libdir when building
libdir = os.path.join(root, 'lib')
if os.path.isdir(libdir):
self.libdir = libdir
break
if lib_sf:
full_path = os.path.join(root, lib_sf)
if os.path.isdir(full_path):
self.libdir = full_path
break
if not self.libdir:
return
for name in self.need_static_link:
# FIXME - why are we only looking for *.lib? Mingw provides *.dll.a and *.a
libname = 'lib' + name + self.versioned_abi_tag() + '.lib'
if os.path.isfile(os.path.join(self.libdir, libname)):
self.lib_modules[self.modname_from_filename(libname)] = [libname]
else:
libname = "lib{}.lib".format(name)
if os.path.isfile(os.path.join(self.libdir, libname)):
self.lib_modules[name[3:]] = [libname]
# globber1 applies to a layout=system installation
# globber2 applies to a layout=versioned installation
globber1 = 'libboost_*' if self.static else 'boost_*'
globber2 = globber1 + self.versioned_abi_tag()
# FIXME - why are we only looking for *.lib? Mingw provides *.dll.a and *.a
globber2_matches = glob.glob(os.path.join(self.libdir, globber2 + '.lib'))
for entry in globber2_matches:
fname = os.path.basename(entry)
self.lib_modules[self.modname_from_filename(fname)] = [fname]
if len(globber2_matches) == 0:
# FIXME - why are we only looking for *.lib? Mingw provides *.dll.a and *.a
for entry in glob.glob(os.path.join(self.libdir, globber1 + '.lib')):
if self.static:
fname = os.path.basename(entry)
self.lib_modules[self.modname_from_filename(fname)] = [fname]
def detect_lib_modules_nix(self):
if self.static:
libsuffix = 'a'
elif mesonlib.for_darwin(self.want_cross, self.env):
libsuffix = 'dylib'
else:
libsuffix = 'so'
globber = 'libboost_*.{}'.format(libsuffix)
if self.libdir:
libdirs = [self.libdir]
elif self.boost_root is None:
libdirs = mesonlib.get_library_dirs()
else:
libdirs = [os.path.join(self.boost_root, 'lib')]
for libdir in libdirs:
for name in self.need_static_link:
libname = 'lib{}.a'.format(name)
if os.path.isfile(os.path.join(libdir, libname)):
self.lib_modules[name] = [libname]
for entry in glob.glob(os.path.join(libdir, globber)):
# I'm not 100% sure what to do here. Some distros
# have modules such as thread only as -mt versions.
# On debian all packages are built threading=multi
# but not suffixed with -mt.
# FIXME: implement detect_lib_modules_{debian, redhat, ...}
# FIXME: this wouldn't work with -mt-gd either. -BDR
if self.is_multithreading and mesonlib.is_debianlike():
pass
elif self.is_multithreading and entry.endswith('-mt.{}'.format(libsuffix)):
pass
elif not entry.endswith('-mt.{}'.format(libsuffix)):
pass
else:
continue
modname = self.modname_from_filename(entry)
if modname not in self.lib_modules:
self.lib_modules[modname] = [entry]
def extra_lib_dirs(self):
if self.libdir:
return [self.libdir]
elif self.boost_root:
return [os.path.join(self.boost_root, 'lib')]
return []
def get_link_args(self):
args = []
for dir in self.extra_lib_dirs():
args += self.compiler.get_linker_search_args(dir)
for lib in self.requested_modules:
args += self.lib_modules['boost_' + lib]
return args
def get_sources(self):
return []
def need_threads(self):
return 'thread' in self.requested_modules
# Generated with boost_names.py
BOOST_LIBS = [
'boost_atomic',
'boost_chrono',
'boost_chrono',
'boost_container',
'boost_context',
'boost_coroutine',
'boost_date_time',
'boost_exception',
'boost_fiber',
'boost_filesystem',
'boost_graph',
'boost_iostreams',
'boost_locale',
'boost_log',
'boost_log_setup',
'boost_math_tr1',
'boost_math_tr1f',
'boost_math_tr1l',
'boost_math_c99',
'boost_math_c99f',
'boost_math_c99l',
'boost_math_tr1',
'boost_math_tr1f',
'boost_math_tr1l',
'boost_math_c99',
'boost_math_c99f',
'boost_math_c99l',
'boost_math_tr1',
'boost_math_tr1f',
'boost_math_tr1l',
'boost_math_c99',
'boost_math_c99f',
'boost_math_c99l',
'boost_math_tr1',
'boost_math_tr1f',
'boost_math_tr1l',
'boost_math_c99',
'boost_math_c99f',
'boost_math_c99l',
'boost_math_tr1',
'boost_math_tr1f',
'boost_math_tr1l',
'boost_math_c99',
'boost_math_c99f',
'boost_math_c99l',
'boost_math_tr1',
'boost_math_tr1f',
'boost_math_tr1l',
'boost_math_c99',
'boost_math_c99f',
'boost_math_c99l',
'boost_mpi',
'boost_program_options',
'boost_python',
'boost_python3',
'boost_numpy',
'boost_numpy3',
'boost_random',
'boost_regex',
'boost_serialization',
'boost_wserialization',
'boost_signals',
'boost_stacktrace_noop',
'boost_stacktrace_backtrace',
'boost_stacktrace_addr2line',
'boost_stacktrace_basic',
'boost_stacktrace_windbg',
'boost_stacktrace_windbg_cached',
'boost_system',
'boost_prg_exec_monitor',
'boost_test_exec_monitor',
'boost_unit_test_framework',
'boost_thread',
'boost_timer',
'boost_type_erasure',
'boost_wave'
]
BOOST_DIRS = [
'lambda',
'optional',
'convert',
'system',
'uuid',
'archive',
'align',
'timer',
'chrono',
'gil',
'logic',
'signals',
'predef',
'tr1',
'multi_index',
'property_map',
'multi_array',
'context',
'random',
'endian',
'circular_buffer',
'proto',
'assign',
'format',
'math',
'phoenix',
'graph',
'locale',
'mpl',
'pool',
'unordered',
'core',
'exception',
'ptr_container',
'flyweight',
'range',
'typeof',
'thread',
'move',
'spirit',
'dll',
'compute',
'serialization',
'ratio',
'msm',
'config',
'metaparse',
'coroutine2',
'qvm',
'program_options',
'concept',
'detail',
'hana',
'concept_check',
'compatibility',
'variant',
'type_erasure',
'mpi',
'test',
'fusion',
'log',
'sort',
'local_function',
'units',
'functional',
'preprocessor',
'integer',
'container',
'polygon',
'interprocess',
'numeric',
'iterator',
'wave',
'lexical_cast',
'multiprecision',
'utility',
'tti',
'asio',
'dynamic_bitset',
'algorithm',
'xpressive',
'bimap',
'signals2',
'type_traits',
'regex',
'statechart',
'parameter',
'icl',
'python',
'lockfree',
'intrusive',
'io',
'pending',
'geometry',
'tuple',
'iostreams',
'heap',
'atomic',
'filesystem',
'smart_ptr',
'function',
'fiber',
'type_index',
'accumulators',
'function_types',
'coroutine',
'vmd',
'date_time',
'property_tree',
'bind'
]
| []
| []
| [
"BOOST_LIBRARYDIR",
"BOOST_ROOT",
"BOOST_INCLUDEDIR"
]
| [] | ["BOOST_LIBRARYDIR", "BOOST_ROOT", "BOOST_INCLUDEDIR"] | python | 3 | 0 | |
code/go/0chain.net/smartcontract/storagesc/blobber_main.go | // +build !integration_tests
package storagesc
import (
"fmt"
cstate "0chain.net/chaincore/chain/state"
"0chain.net/chaincore/transaction"
)
// insert new blobber, filling its stake pool
func (sc *StorageSmartContract) insertBlobber(t *transaction.Transaction,
conf *scConfig, blobber *StorageNode, all *StorageNodes,
balances cstate.StateContextI) (err error) {
blobber.LastHealthCheck = t.CreationDate // set to now
// the stake pool can be created by a related validator
var sp *stakePool
sp, err = sc.getOrCreateStakePool(conf, blobber.ID,
&blobber.StakePoolSettings, balances)
if err != nil {
return
}
if err = sp.save(sc.ID, t.ClientID, balances); err != nil {
return fmt.Errorf("saving stake pool: %v", err)
}
all.Nodes.add(blobber) // add to all
// statistic
sc.statIncr(statAddBlobber)
sc.statIncr(statNumberOfBlobbers)
return
}
| []
| []
| []
| [] | [] | go | null | null | null |
test/unit/common/middleware/test_versioned_writes.py | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import os
import time
import mock
import unittest
from swift.common import swob, utils, registry
from swift.common.middleware import versioned_writes, copy
from swift.common.swob import Request
from test.unit.common.middleware import helpers
class FakeCache(object):
def __init__(self, val):
if 'status' not in val:
val['status'] = 200
self.val = val
def get(self, *args):
return self.val
def local_tz(func):
'''
Decorator to change the timezone when running a test.
This uses the Eastern Time Zone definition from the time module's docs.
Note that the timezone affects things like time.time() and time.mktime().
'''
@functools.wraps(func)
def wrapper(*args, **kwargs):
tz = os.environ.get('TZ', '')
try:
os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'
time.tzset()
return func(*args, **kwargs)
finally:
os.environ['TZ'] = tz
time.tzset()
return wrapper
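# A minimal usage sketch (an assumption, mirroring the decorator's intent): any
# test whose expectations depend on the local timezone can be pinned to the
# Eastern definition above, e.g.
#
#     @local_tz
#     def test_epoch_offset(self):
#         # local 1970-01-01 00:00:00 is 5 hours past the epoch under EST+05
#         self.assertEqual(time.mktime(time.gmtime(0)), 5 * 3600)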
class VersionedWritesBaseTestCase(unittest.TestCase):
def setUp(self):
self.app = helpers.FakeSwift()
conf = {'allow_versioned_writes': 'true'}
self.vw = versioned_writes.legacy.VersionedWritesMiddleware(
self.app, conf)
def tearDown(self):
self.assertEqual(self.app.unclosed_requests, {})
self.assertEqual(self.app.unread_requests, {})
def call_app(self, req, app=None):
if app is None:
app = self.app
self.authorized = []
def authorize(req):
self.authorized.append(req)
if 'swift.authorize' not in req.environ:
req.environ['swift.authorize'] = authorize
req.headers.setdefault("User-Agent", "Marula Kruger")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = h
body_iter = app(req.environ, start_response)
with utils.closing_if_possible(body_iter):
body = b''.join(body_iter)
return status[0], headers[0], body
def call_vw(self, req):
return self.call_app(req, app=self.vw)
def assertRequestEqual(self, req, other):
self.assertEqual(req.method, other.method)
self.assertEqual(req.path, other.path)
class VersionedWritesTestCase(VersionedWritesBaseTestCase):
def test_put_container(self):
self.app.register('PUT', '/v1/a/c', swob.HTTPOk, {}, 'passed')
req = Request.blank('/v1/a/c',
headers={'X-Versions-Location': 'ver_cont'},
environ={'REQUEST_METHOD': 'PUT'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
# check for sysmeta header
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c', path)
self.assertIn('x-container-sysmeta-versions-location', req_headers)
self.assertEqual(req.headers['x-container-sysmeta-versions-location'],
'ver_cont')
self.assertIn('x-container-sysmeta-versions-mode', req_headers)
self.assertEqual(req.headers['x-container-sysmeta-versions-mode'],
'stack')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_put_container_history_header(self):
self.app.register('PUT', '/v1/a/c', swob.HTTPOk, {}, 'passed')
req = Request.blank('/v1/a/c',
headers={'X-History-Location': 'ver_cont'},
environ={'REQUEST_METHOD': 'PUT'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
# check for sysmeta header
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/c', path)
self.assertIn('x-container-sysmeta-versions-location', req_headers)
self.assertEqual('ver_cont',
req_headers['x-container-sysmeta-versions-location'])
self.assertIn('x-container-sysmeta-versions-mode', req_headers)
self.assertEqual('history',
req_headers['x-container-sysmeta-versions-mode'])
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_put_container_both_headers(self):
req = Request.blank('/v1/a/c',
headers={'X-Versions-Location': 'ver_cont',
'X-History-Location': 'ver_cont'},
environ={'REQUEST_METHOD': 'PUT'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '400 Bad Request')
self.assertFalse(self.app.calls)
def test_container_allow_versioned_writes_false(self):
self.vw.conf = {'allow_versioned_writes': 'false'}
# PUT/POST container must fail as 412 when allow_versioned_writes
# set to false
for method in ('PUT', 'POST'):
for header in ('X-Versions-Location', 'X-History-Location'):
req = Request.blank('/v1/a/c',
headers={header: 'ver_cont'},
environ={'REQUEST_METHOD': method})
status, headers, body = self.call_vw(req)
self.assertEqual(status, "412 Precondition Failed",
'Got %s instead of 412 when %sing '
'with %s header' % (status, method, header))
# GET performs as normal
self.app.register('GET', '/v1/a/c', swob.HTTPOk, {}, 'passed')
for method in ('GET', 'HEAD'):
req = Request.blank('/v1/a/c',
headers={'X-Versions-Location': 'ver_cont'},
environ={'REQUEST_METHOD': method})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
def _test_removal(self, headers):
self.app.register('POST', '/v1/a/c', swob.HTTPNoContent, {}, 'passed')
req = Request.blank('/v1/a/c',
headers=headers,
environ={'REQUEST_METHOD': 'POST'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '204 No Content')
# check for sysmeta header
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('POST', method)
self.assertEqual('/v1/a/c', path)
for header in ['x-container-sysmeta-versions-location',
'x-container-sysmeta-versions-mode',
'x-versions-location']:
self.assertIn(header, req_headers)
self.assertEqual('', req_headers[header])
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_remove_headers(self):
self._test_removal({'X-Remove-Versions-Location': 'x'})
self._test_removal({'X-Remove-History-Location': 'x'})
def test_empty_versions_location(self):
self._test_removal({'X-Versions-Location': ''})
self._test_removal({'X-History-Location': ''})
def test_remove_add_versions_precedence(self):
self.app.register(
'POST', '/v1/a/c', swob.HTTPOk,
{'x-container-sysmeta-versions-location': 'ver_cont'},
'passed')
req = Request.blank('/v1/a/c',
headers={'X-Remove-Versions-Location': 'x',
'X-Versions-Location': 'ver_cont'},
environ={'REQUEST_METHOD': 'POST'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-Versions-Location', 'ver_cont'), headers)
# check for sysmeta header
calls = self.app.calls_with_headers
method, path, req_headers = calls[0]
self.assertEqual('POST', method)
self.assertEqual('/v1/a/c', path)
self.assertIn('x-container-sysmeta-versions-location', req_headers)
self.assertNotIn('x-remove-versions-location', req_headers)
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def _test_blank_add_versions_precedence(self, blank_header, add_header):
self.app.register(
'POST', '/v1/a/c', swob.HTTPOk,
{'x-container-sysmeta-versions-location': 'ver_cont'},
'passed')
req = Request.blank('/v1/a/c',
headers={blank_header: '',
add_header: 'ver_cont'},
environ={'REQUEST_METHOD': 'POST'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
# check for sysmeta header
calls = self.app.calls_with_headers
method, path, req_headers = calls[-1]
self.assertEqual('POST', method)
self.assertEqual('/v1/a/c', path)
self.assertIn('x-container-sysmeta-versions-location', req_headers)
self.assertEqual('ver_cont',
req_headers['x-container-sysmeta-versions-location'])
self.assertIn('x-container-sysmeta-versions-mode', req_headers)
self.assertEqual('history' if add_header == 'X-History-Location'
else 'stack',
req_headers['x-container-sysmeta-versions-mode'])
self.assertNotIn('x-remove-versions-location', req_headers)
self.assertIn('x-versions-location', req_headers)
self.assertEqual('', req_headers['x-versions-location'])
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_blank_add_versions_precedence(self):
self._test_blank_add_versions_precedence(
'X-Versions-Location', 'X-History-Location')
self._test_blank_add_versions_precedence(
'X-History-Location', 'X-Versions-Location')
def test_get_container(self):
self.app.register(
'GET', '/v1/a/c', swob.HTTPOk,
{'x-container-sysmeta-versions-location': 'ver_cont',
'x-container-sysmeta-versions-mode': 'stack'}, None)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-Versions-Location', 'ver_cont'), headers)
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_head_container(self):
self.app.register(
'HEAD', '/v1/a/c', swob.HTTPOk,
{'x-container-sysmeta-versions-location': 'other_ver_cont',
'x-container-sysmeta-versions-mode': 'history'}, None)
req = Request.blank(
'/v1/a/c',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertIn(('X-History-Location', 'other_ver_cont'), headers)
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_get_head(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk, {}, None)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'GET'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.app.register('HEAD', '/v1/a/c/o', swob.HTTPOk, {}, None)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'HEAD'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_put_object_no_versioning(self):
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
cache = FakeCache({})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
def test_put_first_object_success(self):
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPNotFound, {}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100',
'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 2)
# Versioned writes middleware now calls auth on the incoming request
# before we try the GET and then at the proxy, so there are 2
# authorize calls for the same request.
self.assertRequestEqual(req, self.authorized[0])
self.assertRequestEqual(req, self.authorized[1])
self.assertEqual(2, self.app.call_count)
self.assertEqual(['VW', None], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
def test_put_versioned_object_including_url_encoded_name_success(self):
self.app.register(
'PUT', '/v1/a/c/%ff', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/a/c/%ff', swob.HTTPNotFound, {}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/%25ff',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100',
'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 2)
# Versioned writes middleware now calls auth on the incoming request
# before we try the GET and then at the proxy, so there are 2
# authorize calls for the same request.
self.assertRequestEqual(req, self.authorized[0])
self.assertRequestEqual(req, self.authorized[1])
self.assertEqual(2, self.app.call_count)
self.assertEqual(['VW', None], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
def test_put_object_no_versioning_with_container_config_true(self):
# allow_versioned_writes is false, so expect no GET to occur
self.vw.conf = {'allow_versioned_writes': 'false'}
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
called_method = [method for (method, path, hdrs) in self.app._calls]
self.assertNotIn('GET', called_method)
def test_put_request_is_dlo_manifest_with_container_config_true(self):
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:01:00 GMT'}, 'old version')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000000060.00000', swob.HTTPCreated,
{}, '')
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/c/o',
headers={'X-Object-Manifest': 'req/manifest'},
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 2)
self.assertRequestEqual(req, self.authorized[0])
self.assertRequestEqual(req, self.authorized[1])
self.assertEqual(3, self.app.call_count)
self.assertEqual([
('GET', '/v1/a/c/o?symlink=get'),
('PUT', '/v1/a/ver_cont/001o/0000000060.00000'),
('PUT', '/v1/a/c/o'),
], self.app.calls)
self.assertIn('x-object-manifest',
self.app.calls_with_headers[2].headers)
def test_put_version_is_dlo_manifest_with_container_config_true(self):
self.app.register('GET', '/v1/a/c/o', swob.HTTPOk,
{'X-Object-Manifest': 'resp/manifest',
'last-modified': 'Thu, 1 Jan 1970 01:00:00 GMT'},
'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000003600.00000', swob.HTTPCreated,
{}, '')
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '201 Created')
# The middleware now auths the request before the initial GET, the
# same GET that gets the X-Object-Manifest back. So a second auth is
# now done.
self.assertEqual(len(self.authorized), 2)
self.assertRequestEqual(req, self.authorized[0])
self.assertRequestEqual(req, self.authorized[1])
self.assertEqual(3, self.app.call_count)
self.assertEqual([
('GET', '/v1/a/c/o?symlink=get'),
('PUT', '/v1/a/ver_cont/001o/0000003600.00000'),
('PUT', '/v1/a/c/o'),
], self.app.calls)
self.assertIn('x-object-manifest',
self.app.calls_with_headers[1].headers)
def test_delete_object_no_versioning_with_container_config_true(self):
# allow_versioned_writes is false, so expect no GET on the versioning
# container and no version GET/PUT (just delete the object as normal)
self.vw.conf = {'allow_versioned_writes': 'false'}
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}, 'passed')
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '204 No Content')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
called_method = \
[method for (method, path, rheaders) in self.app._calls]
self.assertNotIn('PUT', called_method)
self.assertNotIn('GET', called_method)
self.assertEqual(1, self.app.call_count)
def test_new_version_success(self):
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, 'passed')
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000000001.00000', swob.HTTPCreated,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100',
'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '201 Created')
# authorized twice now because versioned_writes now makes a check on
# PUT
self.assertEqual(len(self.authorized), 2)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(['VW', 'VW', None], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
def test_new_version_get_errors(self):
# GET on source fails, expect client error response,
# no PUT should happen
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPBadRequest, {}, None)
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(1, self.app.call_count)
# GET on source fails, expect server error response
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPBadGateway, {}, None)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(2, self.app.call_count)
def test_new_version_put_errors(self):
# PUT of version fails, expect client error response
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000000001.00000',
swob.HTTPUnauthorized, {}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '412 Precondition Failed')
self.assertEqual(2, self.app.call_count)
# PUT of version fails, expect server error response
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000000001.00000', swob.HTTPBadGateway,
{}, None)
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '503 Service Unavailable')
self.assertEqual(4, self.app.call_count)
@local_tz
def test_new_version_sysmeta_precedence(self):
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:00 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000000000.00000', swob.HTTPOk,
{}, None)
# fill cache with two different values for versions location
# new middleware should use sysmeta first
cache = FakeCache({'versions': 'old_ver_cont',
'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'CONTENT_LENGTH': '100'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
# authorized twice now because versioned_writes now makes a check on
# PUT
self.assertEqual(len(self.authorized), 2)
self.assertRequestEqual(req, self.authorized[0])
# check that sysmeta header was used
calls = self.app.calls_with_headers
method, path, req_headers = calls[1]
self.assertEqual('PUT', method)
self.assertEqual('/v1/a/ver_cont/001o/0000000000.00000', path)
def test_delete_no_versions_container_success(self):
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', helpers.normalize_path(
'/v1/a/ver_cont?prefix=001o/&marker=&reverse=on'),
swob.HTTPNotFound, {}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(2, self.app.call_count)
self.assertEqual(['VW', None], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('DELETE', '/v1/a/c/o'),
])
def test_delete_first_object_success(self):
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&marker=&reverse=on',
swob.HTTPOk, {}, '[]')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('DELETE', '/v1/a/c/o'),
])
def test_delete_latest_version_no_marker_success(self):
self.app.register(
'GET',
'/v1/a/ver_cont?prefix=001o/&marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "text/plain"}, '
'{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"}]')
self.app.register(
'GET', '/v1/a/ver_cont/001o/2', swob.HTTPCreated,
{'content-length': '3'}, None)
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
headers={'X-If-Delete-At': 1},
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(4, self.app.call_count)
self.assertEqual(['VW', 'VW', 'VW', 'VW'], self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
# check that X-If-Delete-At was removed from DELETE request
req_headers = self.app.headers[-1]
self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers])
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('GET', '/v1/a/ver_cont/001o/2?symlink=get'),
('PUT', '/v1/a/c/o'),
('DELETE', '/v1/a/ver_cont/001o/2'),
])
def test_delete_latest_version_restores_marker_success(self):
self.app.register(
'GET',
'/v1/a/ver_cont?prefix=001o/&marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "x", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "application/x-deleted;swift_versions_deleted=1"'
'}, {"hash": "y", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"'
'}]')
self.app.register(
'HEAD', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {})
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
headers={'X-If-Delete-At': 1},
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '204 No Content')
self.assertEqual(len(self.authorized), 2)
self.assertRequestEqual(req, self.authorized[0])
self.assertRequestEqual(req, self.authorized[1])
calls = self.app.calls_with_headers
self.assertEqual(['GET', 'HEAD', 'DELETE'],
[c.method for c in calls])
self.assertIn('X-Newest', calls[1].headers)
self.assertEqual('True', calls[1].headers['X-Newest'])
method, path, req_headers = calls.pop()
self.assertTrue(path.startswith('/v1/a/c/o'))
# Since we're deleting the original, this *should* still be present:
self.assertEqual('1', req_headers.get('X-If-Delete-At'))
def test_delete_latest_version_is_marker_success(self):
# Test popping a delete marker off the stack. So, there's data in the
# versions container, topped by a delete marker, and there's nothing
# in the base versioned container.
self.app.register(
'GET',
helpers.normalize_path(
'/v1/a/ver_cont?prefix=001o/&marker=&reverse=on'),
swob.HTTPOk, {},
'[{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "application/x-deleted;swift_versions_deleted=1"'
'},{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"'
'}]')
self.app.register(
'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, 'passed')
self.app.register(
'GET', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk, {}, 'passed')
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk, {}, 'passed')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
headers={'X-If-Delete-At': 1},
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('HEAD', '/v1/a/c/o'),
('GET', '/v1/a/ver_cont/001o/1?symlink=get'),
('PUT', '/v1/a/c/o'),
('DELETE', '/v1/a/ver_cont/001o/1'),
('DELETE', '/v1/a/ver_cont/001o/2'),
])
self.assertIn('X-Newest', self.app.headers[1])
self.assertEqual('True', self.app.headers[1]['X-Newest'])
self.assertIn('X-Newest', self.app.headers[2])
self.assertEqual('True', self.app.headers[2]['X-Newest'])
# check that X-If-Delete-At was removed from DELETE request
for req_headers in self.app.headers[-2:]:
self.assertNotIn('x-if-delete-at',
[h.lower() for h in req_headers])
def test_delete_latest_version_doubled_up_markers_success(self):
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/'
'&marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "x", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/3", '
'"content_type": "application/x-deleted;swift_versions_deleted=1"'
'}, {"hash": "y", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "application/x-deleted;swift_versions_deleted=1"'
'}, {"hash": "y", '
'"last_modified": "2014-11-20T14:23:02.206740", '
'"bytes": 30, '
'"name": "001o/1", '
'"content_type": "text/plain"'
'}]')
self.app.register(
'HEAD', '/v1/a/c/o', swob.HTTPNotFound, {}, 'passed')
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/3', swob.HTTPOk, {}, 'passed')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
headers={'X-If-Delete-At': 1},
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
# check that X-If-Delete-At was removed from DELETE request
calls = self.app.calls_with_headers
self.assertEqual(['GET', 'HEAD', 'DELETE'],
[c.method for c in calls])
method, path, req_headers = calls.pop()
self.assertTrue(path.startswith('/v1/a/ver_cont/001o/3'))
self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers])
@mock.patch('swift.common.middleware.versioned_writes.legacy.time.time',
return_value=1234)
def test_history_delete_marker_no_object_success(self, mock_time):
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPNotFound,
{}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0000001234.00000', swob.HTTPCreated,
{}, 'passed')
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPNotFound, {}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont',
'versions-mode': 'history'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '404 Not Found')
self.assertEqual(len(self.authorized), 2)
req.environ['REQUEST_METHOD'] = 'PUT'
self.assertRequestEqual(req, self.authorized[0])
calls = self.app.calls_with_headers
self.assertEqual(['GET', 'PUT', 'DELETE'], [c.method for c in calls])
self.assertEqual('application/x-deleted;swift_versions_deleted=1',
calls[1].headers.get('Content-Type'))
@mock.patch('swift.common.middleware.versioned_writes.legacy.time.time',
return_value=123456789.54321)
def test_history_delete_marker_over_object_success(self, mock_time):
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
{'last-modified': 'Wed, 19 Nov 2014 18:19:02 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/1416421142.00000', swob.HTTPCreated,
{}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/001o/0123456789.54321', swob.HTTPCreated,
{}, 'passed')
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPNoContent, {}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont',
'versions-mode': 'history'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '204 No Content')
self.assertEqual(b'', body)
self.assertEqual(len(self.authorized), 2)
req.environ['REQUEST_METHOD'] = 'PUT'
self.assertRequestEqual(req, self.authorized[0])
calls = self.app.calls_with_headers
self.assertEqual(['GET', 'PUT', 'PUT', 'DELETE'],
[c.method for c in calls])
self.assertEqual('/v1/a/ver_cont/001o/1416421142.00000',
calls[1].path)
self.assertEqual('application/x-deleted;swift_versions_deleted=1',
calls[2].headers.get('Content-Type'))
def test_delete_single_version_success(self):
# check that if the first listing page has just a single item then
# it is not erroneously inferred to be a non-reversed listing
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET',
'/v1/a/ver_cont?prefix=001o/&marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"}]')
self.app.register(
'GET', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
{'content-length': '3'}, None)
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('GET', '/v1/a/ver_cont/001o/1?symlink=get'),
('PUT', '/v1/a/c/o'),
('DELETE', '/v1/a/ver_cont/001o/1'),
])
def test_DELETE_on_expired_versioned_object(self):
self.app.register(
'GET',
'/v1/a/ver_cont?prefix=001o/&marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "text/plain"}, '
'{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"}]')
# expired object
self.app.register(
'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound,
{}, None)
self.app.register(
'GET', '/v1/a/ver_cont/001o/1', swob.HTTPCreated,
{'content-length': '3'}, None)
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPOk, {}, None)
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(5, self.app.call_count)
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('GET', helpers.normalize_path(
'/v1/a/ver_cont/001o/2?symlink=get')),
('GET', helpers.normalize_path(
'/v1/a/ver_cont/001o/1?symlink=get')),
('PUT', helpers.normalize_path('/v1/a/c/o')),
('DELETE', helpers.normalize_path('/v1/a/ver_cont/001o/1')),
])
def test_denied_DELETE_of_versioned_object(self):
authorize_call = []
self.app.register(
'GET',
'/v1/a/ver_cont?prefix=001o/&marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "text/plain"}, '
'{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"}]')
def fake_authorize(req):
# the container GET is pre-auth'd so here we deny the object DELETE
authorize_call.append(req)
return swob.HTTPForbidden()
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'swift.authorize': fake_authorize,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '403 Forbidden')
self.assertEqual(len(authorize_call), 1)
self.assertRequestEqual(req, authorize_call[0])
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
])
def test_denied_PUT_of_versioned_object(self):
authorize_call = []
self.app.register(
'GET', '/v1/a/c/o', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
def fake_authorize(req):
# we should deny the object PUT
authorize_call.append(req)
return swob.HTTPForbidden()
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'PUT', 'swift.cache': cache,
'swift.authorize': fake_authorize,
'CONTENT_LENGTH': '0'})
# Save off a copy, as the middleware may modify the original
expected_req = Request(req.environ.copy())
status, headers, body = self.call_vw(req)
self.assertEqual(status, '403 Forbidden')
self.assertEqual(len(authorize_call), 1)
self.assertRequestEqual(expected_req, authorize_call[0])
self.assertEqual(self.app.calls, [])
class VersionedWritesOldContainersTestCase(VersionedWritesBaseTestCase):
def test_delete_latest_version_success(self):
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"}, '
'{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "text/plain"}]')
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/'
'&marker=001o/2',
swob.HTTPNotFound, {}, None)
self.app.register(
'GET', '/v1/a/ver_cont/001o/2', swob.HTTPCreated,
{'content-length': '3'}, None)
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPOk,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
headers={'X-If-Delete-At': 1},
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0', 'swift.trans_id': 'fake_trans_id'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(5, self.app.call_count)
self.assertEqual(['VW', 'VW', 'VW', 'VW', 'VW'],
self.app.swift_sources)
self.assertEqual({'fake_trans_id'}, set(self.app.txn_ids))
# check that X-If-Delete-At was removed from DELETE request
req_headers = self.app.headers[-1]
self.assertNotIn('x-if-delete-at', [h.lower() for h in req_headers])
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/2')),
('GET', '/v1/a/ver_cont/001o/2?symlink=get'),
('PUT', '/v1/a/c/o'),
('DELETE', '/v1/a/ver_cont/001o/2'),
])
def test_DELETE_on_expired_versioned_object(self):
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"}, '
'{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "text/plain"}]')
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/'
'&marker=001o/2',
swob.HTTPNotFound, {}, None)
# expired object
self.app.register(
'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound,
{}, None)
self.app.register(
'GET', '/v1/a/ver_cont/001o/1', swob.HTTPCreated,
{'content-length': '3'}, None)
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPOk, {}, None)
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '200 OK')
self.assertEqual(len(self.authorized), 1)
self.assertRequestEqual(req, self.authorized[0])
self.assertEqual(6, self.app.call_count)
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/2')),
('GET', '/v1/a/ver_cont/001o/2?symlink=get'),
('GET', '/v1/a/ver_cont/001o/1?symlink=get'),
('PUT', '/v1/a/c/o'),
('DELETE', '/v1/a/ver_cont/001o/1'),
])
def test_denied_DELETE_of_versioned_object(self):
authorize_call = []
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=&reverse=on',
swob.HTTPOk, {},
'[{"hash": "x", '
'"last_modified": "2014-11-21T14:14:27.409100", '
'"bytes": 3, '
'"name": "001o/1", '
'"content_type": "text/plain"}, '
'{"hash": "y", '
'"last_modified": "2014-11-21T14:23:02.206740", '
'"bytes": 3, '
'"name": "001o/2", '
'"content_type": "text/plain"}]')
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/'
'&marker=001o/2',
swob.HTTPNotFound, {}, None)
self.app.register(
'DELETE', '/v1/a/c/o', swob.HTTPForbidden,
{}, None)
def fake_authorize(req):
authorize_call.append(req)
return swob.HTTPForbidden()
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'swift.authorize': fake_authorize,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '403 Forbidden')
self.assertEqual(len(authorize_call), 1)
self.assertRequestEqual(req, authorize_call[0])
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/2')),
])
def test_partially_upgraded_cluster(self):
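        # Simulates a partially upgraded cluster: the first listing is reversed,
        # but once a container server returns an unreversed page the middleware
        # falls back to forward listings bounded by end_marker (see the expected
        # calls asserted at the end of this test).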
old_versions = [
{'hash': 'etag%d' % x,
'last_modified': "2014-11-21T14:14:%02d.409100" % x,
'bytes': 3,
'name': '001o/%d' % x,
'content_type': 'text/plain'}
for x in range(5)]
# first container server can reverse
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=&reverse=on',
swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[2:]))))
# but all objects are already gone
self.app.register(
'GET', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound,
{}, None)
self.app.register(
'GET', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound,
{}, None)
self.app.register(
'GET', '/v1/a/ver_cont/001o/2', swob.HTTPNotFound,
{}, None)
# second container server can't reverse
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=001o/2&reverse=on',
swob.HTTPOk, {}, json.dumps(old_versions[3:]))
# subsequent requests shouldn't reverse
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=&end_marker=001o/2',
swob.HTTPOk, {}, json.dumps(old_versions[:1]))
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=001o/0&end_marker=001o/2',
swob.HTTPOk, {}, json.dumps(old_versions[1:2]))
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=001o/1&end_marker=001o/2',
swob.HTTPOk, {}, '[]')
self.app.register(
'GET', '/v1/a/ver_cont/001o/1', swob.HTTPOk,
{'content-length': '3'}, None)
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/1', swob.HTTPNoContent,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '204 No Content')
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('GET', '/v1/a/ver_cont/001o/4?symlink=get'),
('GET', '/v1/a/ver_cont/001o/3?symlink=get'),
('GET', '/v1/a/ver_cont/001o/2?symlink=get'),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/2&reverse=on')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&end_marker=001o/2')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/0&end_marker=001o/2')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/1&end_marker=001o/2')),
('GET', '/v1/a/ver_cont/001o/1?symlink=get'),
('PUT', '/v1/a/c/o'),
('DELETE', '/v1/a/ver_cont/001o/1'),
])
def test_partially_upgraded_cluster_single_result_on_second_page(self):
old_versions = [
{'hash': 'etag%d' % x,
'last_modified': "2014-11-21T14:14:%02d.409100" % x,
'bytes': 3,
'name': '001o/%d' % x,
'content_type': 'text/plain'}
for x in range(5)]
# first container server can reverse
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=&reverse=on',
swob.HTTPOk, {}, json.dumps(list(reversed(old_versions[-2:]))))
# but both objects are already gone
self.app.register(
'GET', '/v1/a/ver_cont/001o/4', swob.HTTPNotFound,
{}, None)
self.app.register(
'GET', '/v1/a/ver_cont/001o/3', swob.HTTPNotFound,
{}, None)
# second container server can't reverse
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=001o/3&reverse=on',
swob.HTTPOk, {}, json.dumps(old_versions[4:]))
# subsequent requests shouldn't reverse
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=&end_marker=001o/3',
swob.HTTPOk, {}, json.dumps(old_versions[:2]))
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=001o/1&end_marker=001o/3',
swob.HTTPOk, {}, json.dumps(old_versions[2:3]))
self.app.register(
'GET', '/v1/a/ver_cont?prefix=001o/&'
'marker=001o/2&end_marker=001o/3',
swob.HTTPOk, {}, '[]')
self.app.register(
'GET', '/v1/a/ver_cont/001o/2', swob.HTTPOk,
{'content-length': '3'}, None)
self.app.register(
'PUT', '/v1/a/c/o', swob.HTTPCreated, {}, None)
self.app.register(
'DELETE', '/v1/a/ver_cont/001o/2', swob.HTTPNoContent,
{}, None)
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/c/o',
environ={'REQUEST_METHOD': 'DELETE', 'swift.cache': cache,
'CONTENT_LENGTH': '0'})
status, headers, body = self.call_vw(req)
self.assertEqual(status, '204 No Content')
prefix_listing_prefix = '/v1/a/ver_cont?prefix=001o/&'
self.assertEqual(self.app.calls, [
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&reverse=on')),
('GET', '/v1/a/ver_cont/001o/4?symlink=get'),
('GET', '/v1/a/ver_cont/001o/3?symlink=get'),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/3&reverse=on')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=&end_marker=001o/3')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/1&end_marker=001o/3')),
('GET', helpers.normalize_path(
prefix_listing_prefix + 'marker=001o/2&end_marker=001o/3')),
('GET', '/v1/a/ver_cont/001o/2?symlink=get'),
('PUT', '/v1/a/c/o'),
('DELETE', '/v1/a/ver_cont/001o/2'),
])
class VersionedWritesCopyingTestCase(VersionedWritesBaseTestCase):
# verify interaction of copy and versioned_writes middlewares
def setUp(self):
self.app = helpers.FakeSwift()
conf = {'allow_versioned_writes': 'true'}
self.vw = versioned_writes.filter_factory(conf)(self.app)
self.filter = copy.filter_factory({})(self.vw)
def call_filter(self, req, **kwargs):
return self.call_app(req, app=self.filter, **kwargs)
def test_copy_first_version(self):
# no existing object to move to the versions container
self.app.register(
'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPNotFound, {}, None)
self.app.register(
'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/src_cont/src_obj',
environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
'CONTENT_LENGTH': '100'},
headers={'Destination': 'tgt_cont/tgt_obj'})
status, headers, body = self.call_filter(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 3)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path)
# At the moment we are calling authorize on the incoming request in
# the middleware before we do the PUT (and the source GET) and again
# on the incoming request when it gets to the proxy. So the 2nd and
# 3rd auths look the same.
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path)
self.assertEqual('PUT', self.authorized[2].method)
self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[2].path)
# note the GET on tgt_cont/tgt_obj is pre-authed
self.assertEqual(3, self.app.call_count, self.app.calls)
def test_copy_new_version(self):
# existing object should be moved to versions container
self.app.register(
'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/a/tgt_cont/tgt_obj', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/a/ver_cont/007tgt_obj/0000000001.00000', swob.HTTPOk,
{}, None)
self.app.register(
'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/a/src_cont/src_obj',
environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
'CONTENT_LENGTH': '100'},
headers={'Destination': 'tgt_cont/tgt_obj'})
status, headers, body = self.call_filter(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 3)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path)
self.assertEqual(4, self.app.call_count)
def test_copy_new_version_different_account(self):
self.app.register(
'GET', '/v1/src_a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
self.app.register(
'GET', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPOk,
{'last-modified': 'Thu, 1 Jan 1970 00:00:01 GMT'}, 'passed')
self.app.register(
'PUT', '/v1/tgt_a/ver_cont/007tgt_obj/0000000001.00000',
swob.HTTPOk, {}, None)
self.app.register(
'PUT', '/v1/tgt_a/tgt_cont/tgt_obj', swob.HTTPCreated, {},
'passed')
cache = FakeCache({'sysmeta': {'versions-location': 'ver_cont'}})
req = Request.blank(
'/v1/src_a/src_cont/src_obj',
environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache,
'CONTENT_LENGTH': '100'},
headers={'Destination': 'tgt_cont/tgt_obj',
'Destination-Account': 'tgt_a'})
status, headers, body = self.call_filter(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 3)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/src_a/src_cont/src_obj', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/tgt_a/tgt_cont/tgt_obj', self.authorized[1].path)
self.assertEqual(4, self.app.call_count)
def test_copy_object_no_versioning_with_container_config_true(self):
        # set allow_versioned_writes to false and expect no extra
        # COPY call (just copy the object as normal)
self.vw.conf = {'allow_versioned_writes': 'false'}
self.app.register(
'GET', '/v1/a/src_cont/src_obj', swob.HTTPOk, {}, 'passed')
self.app.register(
'PUT', '/v1/a/tgt_cont/tgt_obj', swob.HTTPCreated, {}, 'passed')
cache = FakeCache({'versions': 'ver_cont'})
req = Request.blank(
'/v1/a/src_cont/src_obj',
environ={'REQUEST_METHOD': 'COPY', 'swift.cache': cache},
headers={'Destination': '/tgt_cont/tgt_obj'})
status, headers, body = self.call_filter(req)
self.assertEqual(status, '201 Created')
self.assertEqual(len(self.authorized), 2)
self.assertEqual('GET', self.authorized[0].method)
self.assertEqual('/v1/a/src_cont/src_obj', self.authorized[0].path)
self.assertEqual('PUT', self.authorized[1].method)
self.assertEqual('/v1/a/tgt_cont/tgt_obj', self.authorized[1].path)
self.assertEqual(2, self.app.call_count)
class TestSwiftInfo(unittest.TestCase):
def setUp(self):
registry._swift_info = {}
registry._swift_admin_info = {}
def test_registered_defaults(self):
versioned_writes.filter_factory({})('have to pass in an app')
swift_info = registry.get_swift_info()
        # by default, versioned_writes is not in swift_info
self.assertNotIn('versioned_writes', swift_info)
def test_registered_explicitly_set(self):
versioned_writes.filter_factory(
{'allow_versioned_writes': 'true'})('have to pass in an app')
swift_info = registry.get_swift_info()
self.assertIn('versioned_writes', swift_info)
self.assertEqual(
swift_info['versioned_writes'].get('allowed_flags'),
('x-versions-location', 'x-history-location'))
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"TZ"
]
| [] | ["TZ"] | python | 1 | 0 | |
src/mbf_externals/__init__.py | from .externals import (
ExternalAlgorithm,
ExternalAlgorithmStore,
change_global_store,
get_global_store,
)
from .fastq import FASTQC
from .prebuild import PrebuildManager, change_global_manager, get_global_manager
from . import aligners
from . import util
from pathlib import Path
import os
__version__ = "0.1"
def create_defaults():
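    # Store and prebuild locations come from MBF_EXTERNAL_PREBUILD_PATH (per-host
    # layout) when set, else from the active virtualenv; otherwise the global
    # store and manager are set to None.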
if "MBF_EXTERNAL_PREBUILD_PATH" in os.environ:
hostname = os.environ["MBF_EXTERNAL_HOSTNAME"]
if not (Path(os.environ["MBF_EXTERNAL_PREBUILD_PATH"]) / hostname).exists():
raise ValueError(
"%s did not exist - must be created manually"
% (Path(os.environ["MBF_EXTERNAL_PREBUILD_PATH"]) / hostname)
)
store_base = (
Path(os.environ["MBF_EXTERNAL_PREBUILD_PATH"]) / hostname / "mbf_store"
)
prebuild_path = Path(os.environ["MBF_EXTERNAL_PREBUILD_PATH"])
elif "VIRTUAL_ENV" in os.environ:
import socket
store_base = Path(os.environ["VIRTUAL_ENV"]) / "mbf_store"
prebuild_path = (Path(".") / "prebuilt").absolute()
prebuild_path.mkdir(exist_ok=True)
hostname = socket.gethostname()
else:
# print("No defaults for mbf_externals possible")
change_global_store(None)
change_global_manager(None)
return
zipped = store_base / "zip"
unpacked = store_base / "unpack"
store_base.mkdir(exist_ok=True)
zipped.mkdir(exist_ok=True)
unpacked.mkdir(exist_ok=True)
change_global_store(ExternalAlgorithmStore(zipped, unpacked))
change_global_manager(PrebuildManager(prebuild_path, hostname))
create_defaults()
__all__ = [
    "ExternalAlgorithm",
    "ExternalAlgorithmStore",
    "FASTQC",
    "change_global_store",
    "get_global_store",
    "get_global_manager",
    "PrebuildManager",
    "aligners",
    "create_defaults",
    "util",
    "__version__",
]
| []
| []
| [
"MBF_EXTERNAL_HOSTNAME",
"MBF_EXTERNAL_PREBUILD_PATH",
"VIRTUAL_ENV"
]
| [] | ["MBF_EXTERNAL_HOSTNAME", "MBF_EXTERNAL_PREBUILD_PATH", "VIRTUAL_ENV"] | python | 3 | 0 | |
snippets/list_test.py | import sys
import time
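# Compare three ways of building a list of n integers: preallocate-and-assign,
# repeated append, and list(range(n)); the first two also print the list size.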
n = 10**7
a = [None] * n
s = time.time()
for i in range(n):
a[i] = i
print(sys.getsizeof(a))
e = time.time()
print(e-s)
b = []
s = time.time()
for i in range(n):
b.append(i)
print(sys.getsizeof(b))
e = time.time()
print(e-s)
s = time.time()
c = list(range(n))
e = time.time()
print(e-s)
| []
| []
| []
| [] | [] | python | null | null | null |
api/v1alpha1/zz_generated.deepcopy.go | // +build !ignore_autogenerated
/*
Copyright 2020 Red Hat Community of Practice.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MustGather) DeepCopyInto(out *MustGather) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MustGather.
func (in *MustGather) DeepCopy() *MustGather {
if in == nil {
return nil
}
out := new(MustGather)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MustGather) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MustGatherList) DeepCopyInto(out *MustGatherList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MustGather, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MustGatherList.
func (in *MustGatherList) DeepCopy() *MustGatherList {
if in == nil {
return nil
}
out := new(MustGatherList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MustGatherList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MustGatherSpec) DeepCopyInto(out *MustGatherSpec) {
*out = *in
out.CaseManagementAccountSecretRef = in.CaseManagementAccountSecretRef
out.ServiceAccountRef = in.ServiceAccountRef
if in.MustGatherImages != nil {
in, out := &in.MustGatherImages, &out.MustGatherImages
*out = make([]string, len(*in))
copy(*out, *in)
}
out.ProxyConfig = in.ProxyConfig
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MustGatherSpec.
func (in *MustGatherSpec) DeepCopy() *MustGatherSpec {
if in == nil {
return nil
}
out := new(MustGatherSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MustGatherStatus) DeepCopyInto(out *MustGatherStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.Condition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MustGatherStatus.
func (in *MustGatherStatus) DeepCopy() *MustGatherStatus {
if in == nil {
return nil
}
out := new(MustGatherStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ProxySpec) DeepCopyInto(out *ProxySpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxySpec.
func (in *ProxySpec) DeepCopy() *ProxySpec {
if in == nil {
return nil
}
out := new(ProxySpec)
in.DeepCopyInto(out)
return out
}
| []
| []
| []
| [] | [] | go | null | null | null |
recipes/storage.py | # Copyright 2022 Aleksandr Soloshenko
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from django.conf import settings
from storages.backends.azure_storage import AzureStorage
# Create your models here.
class PhotoStorage(AzureStorage):
account_name = os.getenv("AZURE_ACCOUNT_NAME")
account_key = os.getenv("AZURE_ACCOUNT_KEY")
azure_container = os.getenv("AZURE_CONTAINER")
expiration_secs = None
photoStorage = PhotoStorage()
| []
| []
| [
"AZURE_CONTAINER",
"AZURE_ACCOUNT_KEY",
"AZURE_ACCOUNT_NAME"
]
| [] | ["AZURE_CONTAINER", "AZURE_ACCOUNT_KEY", "AZURE_ACCOUNT_NAME"] | python | 3 | 0 | |
urlfinder/wsgi.py | """
WSGI config for urlfinder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'urlfinder.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
danniesMovies/asgi.py | """
ASGI config for danniesMovies project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'danniesMovies.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/cmd/create.go | /*
* Copyright © 2019 – 2021 Red Hat Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/briandowns/spinner"
"github.com/containers/toolbox/pkg/podman"
"github.com/containers/toolbox/pkg/shell"
"github.com/containers/toolbox/pkg/utils"
"github.com/godbus/dbus/v5"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"golang.org/x/crypto/ssh/terminal"
)
const (
alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
num = `0123456789`
alphanum = alpha + num
)
var (
createFlags struct {
container string
distro string
image string
release string
}
createToolboxShMounts = []struct {
containerPath string
source string
}{
{"/etc/profile.d/toolbox.sh", "/etc/profile.d/toolbox.sh"},
{"/etc/profile.d/toolbox.sh", "/usr/share/profile.d/toolbox.sh"},
}
)
var createCmd = &cobra.Command{
Use: "create",
Short: "Create a new toolbox container",
RunE: create,
}
func init() {
flags := createCmd.Flags()
flags.StringVarP(&createFlags.container,
"container",
"c",
"",
"Assign a different name to the toolbox container")
flags.StringVarP(&createFlags.distro,
"distro",
"d",
"",
"Create a toolbox container for a different operating system distribution than the host")
flags.StringVarP(&createFlags.image,
"image",
"i",
"",
"Change the name of the base image used to create the toolbox container")
flags.StringVarP(&createFlags.release,
"release",
"r",
"",
"Create a toolbox container for a different operating system release than the host")
createCmd.SetHelpFunc(createHelp)
rootCmd.AddCommand(createCmd)
}
func create(cmd *cobra.Command, args []string) error {
if utils.IsInsideContainer() {
if !utils.IsInsideToolboxContainer() {
return errors.New("this is not a toolbox container")
}
if _, err := utils.ForwardToHost(); err != nil {
return err
}
return nil
}
if cmd.Flag("distro").Changed && cmd.Flag("image").Changed {
return errors.New("options --distro and --image cannot be used together")
}
if cmd.Flag("image").Changed && cmd.Flag("release").Changed {
return errors.New("options --image and --release cannot be used together")
}
var container string
var containerArg string
if len(args) != 0 {
container = args[0]
containerArg = "CONTAINER"
} else if createFlags.container != "" {
container = createFlags.container
containerArg = "--container"
}
if container != "" {
if !utils.IsContainerNameValid(container) {
var builder strings.Builder
fmt.Fprintf(&builder, "invalid argument for '%s'\n", containerArg)
fmt.Fprintf(&builder, "Container names must match '%s'\n", utils.ContainerNameRegexp)
fmt.Fprintf(&builder, "Run '%s --help' for usage.", executableBase)
errMsg := builder.String()
return errors.New(errMsg)
}
}
var release string
if createFlags.release != "" {
var err error
release, err = utils.ParseRelease(createFlags.distro, createFlags.release)
if err != nil {
err := utils.CreateErrorInvalidRelease(executableBase)
return err
}
}
container, image, release, err := utils.ResolveContainerAndImageNames(container,
createFlags.distro,
createFlags.image,
release)
if err != nil {
return err
}
if err := createContainer(container, image, release, true); err != nil {
return err
}
return nil
}
func createContainer(container, image, release string, showCommandToEnter bool) error {
if container == "" {
panic("container not specified")
}
if image == "" {
panic("image not specified")
}
if release == "" {
panic("release not specified")
}
enterCommand := getEnterCommand(container, release)
logrus.Debugf("Checking if container %s already exists", container)
if exists, _ := podman.ContainerExists(container); exists {
var builder strings.Builder
fmt.Fprintf(&builder, "container %s already exists\n", container)
fmt.Fprintf(&builder, "Enter with: %s\n", enterCommand)
fmt.Fprintf(&builder, "Run '%s --help' for usage.", executableBase)
errMsg := builder.String()
return errors.New(errMsg)
}
pulled, err := pullImage(image, release)
if err != nil {
return err
}
if !pulled {
return nil
}
imageFull, err := getFullyQualifiedImageFromRepoTags(image)
if err != nil {
return err
}
s := spinner.New(spinner.CharSets[9], 500*time.Millisecond)
stdoutFd := os.Stdout.Fd()
stdoutFdInt := int(stdoutFd)
if logLevel := logrus.GetLevel(); logLevel < logrus.DebugLevel && terminal.IsTerminal(stdoutFdInt) {
s.Prefix = fmt.Sprintf("Creating container %s: ", container)
s.Writer = os.Stdout
s.Start()
defer s.Stop()
}
toolboxPath := os.Getenv("TOOLBOX_PATH")
toolboxPathEnvArg := "TOOLBOX_PATH=" + toolboxPath
toolboxPathMountArg := toolboxPath + ":/usr/bin/toolbox:ro"
var runtimeDirectory string
var xdgRuntimeDirEnv []string
if currentUser.Uid == "0" {
runtimeDirectory, err = utils.GetRuntimeDirectory(currentUser)
if err != nil {
return err
}
} else {
xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR")
xdgRuntimeDirEnvArg := "XDG_RUNTIME_DIR=" + xdgRuntimeDir
xdgRuntimeDirEnv = []string{"--env", xdgRuntimeDirEnvArg}
runtimeDirectory = xdgRuntimeDir
}
runtimeDirectoryMountArg := runtimeDirectory + ":" + runtimeDirectory
logrus.Debug("Checking if 'podman create' supports '--mount type=devpts'")
var devPtsMount []string
if podman.CheckVersion("2.1.0") {
logrus.Debug("'podman create' supports '--mount type=devpts'")
devPtsMount = []string{"--mount", "type=devpts,destination=/dev/pts"}
}
logrus.Debug("Checking if 'podman create' supports '--ulimit host'")
var ulimitHost []string
if podman.CheckVersion("1.5.0") {
logrus.Debug("'podman create' supports '--ulimit host'")
ulimitHost = []string{"--ulimit", "host"}
}
var usernsArg string
if currentUser.Uid == "0" {
usernsArg = "host"
} else {
usernsArg = "keep-id"
}
dbusSystemSocket, err := getDBusSystemSocket()
if err != nil {
return err
}
dbusSystemSocketMountArg := dbusSystemSocket + ":" + dbusSystemSocket
homeDirEvaled, err := filepath.EvalSymlinks(currentUser.HomeDir)
if err != nil {
return fmt.Errorf("failed to canonicalize %s", currentUser.HomeDir)
}
logrus.Debugf("%s canonicalized to %s", currentUser.HomeDir, homeDirEvaled)
homeDirMountArg := homeDirEvaled + ":" + homeDirEvaled + ":rslave"
bootMountFlags := "rw"
isBootReadWrite, err := isPathReadWrite("/boot")
if err != nil {
return err
}
if !isBootReadWrite {
bootMountFlags = "ro"
}
bootMountArg := "/boot:/run/host/boot:" + bootMountFlags + ",rslave"
usrMountFlags := "ro"
isUsrReadWrite, err := isPathReadWrite("/usr")
if err != nil {
return err
}
if isUsrReadWrite {
usrMountFlags = "rw"
}
usrMountArg := "/usr:/run/host/usr:" + usrMountFlags + ",rslave"
var avahiSocketMount []string
avahiSocket, err := getServiceSocket("Avahi", "avahi-daemon.socket")
if err != nil {
logrus.Debug(err)
}
if avahiSocket != "" {
avahiSocketMountArg := avahiSocket + ":" + avahiSocket
avahiSocketMount = []string{"--volume", avahiSocketMountArg}
}
var kcmSocketMount []string
kcmSocket, err := getServiceSocket("KCM", "sssd-kcm.socket")
if err != nil {
logrus.Debug(err)
}
if kcmSocket != "" {
kcmSocketMountArg := kcmSocket + ":" + kcmSocket
kcmSocketMount = []string{"--volume", kcmSocketMountArg}
}
var mediaLink []string
var mediaMount []string
if utils.PathExists("/media") {
logrus.Debug("Checking if /media is a symbolic link to /run/media")
mediaPath, _ := filepath.EvalSymlinks("/media")
if mediaPath == "/run/media" {
logrus.Debug("/media is a symbolic link to /run/media")
mediaLink = []string{"--media-link"}
} else {
mediaMount = []string{"--volume", "/media:/media:rslave"}
}
}
var mntLink []string
var mntMount []string
if utils.PathExists("/mnt") {
logrus.Debug("Checking if /mnt is a symbolic link to /var/mnt")
mntPath, _ := filepath.EvalSymlinks("/mnt")
if mntPath == "/var/mnt" {
logrus.Debug("/mnt is a symbolic link to /var/mnt")
mntLink = []string{"--mnt-link"}
} else {
mntMount = []string{"--volume", "/mnt:/mnt:rslave"}
}
}
var runMediaMount []string
if utils.PathExists("/run/media") {
runMediaMount = []string{"--volume", "/run/media:/run/media:rslave"}
}
logrus.Debug("Looking for toolbox.sh")
var toolboxShMount []string
for _, mount := range createToolboxShMounts {
if utils.PathExists(mount.source) {
logrus.Debugf("Found %s", mount.source)
toolboxShMountArg := mount.source + ":" + mount.containerPath + ":ro"
toolboxShMount = []string{"--volume", toolboxShMountArg}
break
}
}
logrus.Debug("Checking if /home is a symbolic link to /var/home")
var slashHomeLink []string
slashHomeEvaled, _ := filepath.EvalSymlinks("/home")
if slashHomeEvaled == "/var/home" {
logrus.Debug("/home is a symbolic link to /var/home")
slashHomeLink = []string{"--home-link"}
}
logLevelString := podman.LogLevel.String()
userShell := os.Getenv("SHELL")
if userShell == "" {
return errors.New("failed to get the current user's default shell")
}
entryPoint := []string{
"toolbox", "--log-level", "debug",
"init-container",
"--gid", currentUser.Gid,
"--home", currentUser.HomeDir,
"--shell", userShell,
"--uid", currentUser.Uid,
"--user", currentUser.Username,
"--monitor-host",
}
entryPoint = append(entryPoint, slashHomeLink...)
entryPoint = append(entryPoint, mediaLink...)
entryPoint = append(entryPoint, mntLink...)
createArgs := []string{
"--log-level", logLevelString,
"create",
"--dns", "none",
"--env", toolboxPathEnvArg,
}
createArgs = append(createArgs, xdgRuntimeDirEnv...)
createArgs = append(createArgs, []string{
"--hostname", "toolbox",
"--ipc", "host",
"--label", "com.github.containers.toolbox=true",
"--label", "com.github.debarshiray.toolbox=true",
}...)
createArgs = append(createArgs, devPtsMount...)
createArgs = append(createArgs, []string{
"--name", container,
"--network", "host",
"--no-hosts",
"--pid", "host",
"--privileged",
"--security-opt", "label=disable",
}...)
createArgs = append(createArgs, ulimitHost...)
createArgs = append(createArgs, []string{
"--userns", usernsArg,
"--user", "root:root",
"--volume", bootMountArg,
"--volume", "/etc:/run/host/etc",
"--volume", "/dev:/dev:rslave",
"--volume", "/run:/run/host/run:rslave",
"--volume", "/tmp:/run/host/tmp:rslave",
"--volume", "/var:/run/host/var:rslave",
"--volume", dbusSystemSocketMountArg,
"--volume", homeDirMountArg,
"--volume", toolboxPathMountArg,
"--volume", usrMountArg,
"--volume", runtimeDirectoryMountArg,
}...)
createArgs = append(createArgs, avahiSocketMount...)
createArgs = append(createArgs, kcmSocketMount...)
createArgs = append(createArgs, mediaMount...)
createArgs = append(createArgs, mntMount...)
createArgs = append(createArgs, runMediaMount...)
createArgs = append(createArgs, toolboxShMount...)
createArgs = append(createArgs, []string{
imageFull,
}...)
createArgs = append(createArgs, entryPoint...)
logrus.Debugf("Creating container %s:", container)
logrus.Debug("podman")
for _, arg := range createArgs {
logrus.Debugf("%s", arg)
}
if err := shell.Run("podman", nil, nil, nil, createArgs...); err != nil {
return fmt.Errorf("failed to create container %s", container)
}
if showCommandToEnter {
fmt.Printf("Created container: %s\n", container)
fmt.Printf("Enter with: %s\n", enterCommand)
}
return nil
}
func createHelp(cmd *cobra.Command, args []string) {
if utils.IsInsideContainer() {
if !utils.IsInsideToolboxContainer() {
fmt.Fprintf(os.Stderr, "Error: this is not a toolbox container\n")
return
}
if _, err := utils.ForwardToHost(); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
return
}
return
}
if err := utils.ShowManual("toolbox-create"); err != nil {
fmt.Fprintf(os.Stderr, "Error: %s\n", err)
return
}
}
func getDBusSystemSocket() (string, error) {
logrus.Debug("Resolving path to the D-Bus system socket")
address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS")
if address == "" {
address = "unix:path=/var/run/dbus/system_bus_socket"
}
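	// e.g. "unix:path=/var/run/dbus/system_bus_socket" splits on "=" below,
	// yielding the socket path "/var/run/dbus/system_bus_socket".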
addressSplit := strings.Split(address, "=")
if len(addressSplit) != 2 {
return "", errors.New("failed to get the path to the D-Bus system socket")
}
path := addressSplit[1]
pathEvaled, err := filepath.EvalSymlinks(path)
if err != nil {
return "", fmt.Errorf("failed to resolve the path to the D-Bus system socket: %w", err)
}
return pathEvaled, nil
}
func getEnterCommand(container, release string) string {
var enterCommand string
containerNamePrefixDefaultWithRelease := utils.ContainerNamePrefixDefault + "-" + release
switch container {
case utils.ContainerNameDefault:
enterCommand = fmt.Sprintf("%s enter", executableBase)
case containerNamePrefixDefaultWithRelease:
enterCommand = fmt.Sprintf("%s enter --release %s", executableBase, release)
default:
enterCommand = fmt.Sprintf("%s enter %s", executableBase, container)
}
return enterCommand
}
func getFullyQualifiedImageFromRepoTags(image string) (string, error) {
logrus.Debugf("Resolving fully qualified name for image %s from RepoTags", image)
var imageFull string
if utils.ImageReferenceHasDomain(image) {
imageFull = image
} else {
info, err := podman.Inspect("image", image)
if err != nil {
return "", fmt.Errorf("failed to inspect image %s", image)
}
if info["RepoTags"] == nil {
return "", fmt.Errorf("missing RepoTag for image %s", image)
}
repoTags := info["RepoTags"].([]interface{})
if len(repoTags) == 0 {
return "", fmt.Errorf("empty RepoTag for image %s", image)
}
for _, repoTag := range repoTags {
repoTagString := repoTag.(string)
tag := utils.ImageReferenceGetTag(repoTagString)
if tag != "latest" {
imageFull = repoTagString
break
}
}
if imageFull == "" {
imageFull = repoTags[0].(string)
}
}
logrus.Debugf("Resolved image %s to %s", image, imageFull)
return imageFull, nil
}
func getServiceSocket(serviceName string, unitName string) (string, error) {
logrus.Debugf("Resolving path to the %s socket", serviceName)
connection, err := dbus.SystemBus()
if err != nil {
return "", fmt.Errorf("failed to connect to the D-Bus system instance: %w", err)
}
unitNameEscaped := systemdPathBusEscape(unitName)
unitPath := dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + unitNameEscaped)
unit := connection.Object("org.freedesktop.systemd1", unitPath)
call := unit.Call("org.freedesktop.DBus.Properties.GetAll", 0, "")
var result map[string]dbus.Variant
err = call.Store(&result)
if err != nil {
return "", fmt.Errorf("failed to get the properties of %s: %w", unitName, err)
}
listenVariant, listenFound := result["Listen"]
if !listenFound {
return "", fmt.Errorf("failed to find the Listen property of %s: %w", unitName, err)
}
listenVariantSignature := listenVariant.Signature().String()
if listenVariantSignature != "aav" {
return "", errors.New("unknown reply from org.freedesktop.DBus.Properties.GetAll")
}
listenValue := listenVariant.Value()
sockets := listenValue.([][]interface{})
for _, socket := range sockets {
if socket[0] == "Stream" {
path := socket[1].(string)
if !strings.HasPrefix(path, "/") {
continue
}
pathEvaled, err := filepath.EvalSymlinks(path)
if err != nil {
continue
}
return pathEvaled, nil
}
}
return "", fmt.Errorf("failed to find a SOCK_STREAM socket for %s", unitName)
}
func isPathReadWrite(path string) (bool, error) {
logrus.Debugf("Checking if %s is mounted read-only or read-write", path)
mountPoint, err := utils.GetMountPoint(path)
if err != nil {
return false, fmt.Errorf("failed to get the mount-point of %s: %s", path, err)
}
logrus.Debugf("Mount-point of %s is %s", path, mountPoint)
mountFlags, err := utils.GetMountOptions(mountPoint)
if err != nil {
return false, fmt.Errorf("failed to get the mount options of %s: %s", mountPoint, err)
}
logrus.Debugf("Mount flags of %s on the host are %s", path, mountFlags)
if !strings.Contains(mountFlags, "ro") {
return true, nil
}
return false, nil
}
func pullImage(image, release string) (bool, error) {
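	// Lookup order: an existing image ID, then "localhost/<image>", then the fully
	// qualified name (resolved from the known distros when no domain was given);
	// only if none of these exist locally is the image pulled, after an optional
	// confirmation prompt.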
if _, err := utils.ImageReferenceCanBeID(image); err == nil {
logrus.Debugf("Looking for image %s", image)
if _, err := podman.ImageExists(image); err == nil {
return true, nil
}
}
hasDomain := utils.ImageReferenceHasDomain(image)
if !hasDomain {
imageLocal := "localhost/" + image
logrus.Debugf("Looking for image %s", imageLocal)
if _, err := podman.ImageExists(imageLocal); err == nil {
return true, nil
}
}
var imageFull string
if hasDomain {
imageFull = image
} else {
var err error
imageFull, err = utils.GetFullyQualifiedImageFromDistros(image, release)
if err != nil {
return false, fmt.Errorf("image %s not found in local storage and known registries", image)
}
}
logrus.Debugf("Looking for image %s", imageFull)
if _, err := podman.ImageExists(imageFull); err == nil {
return true, nil
}
domain := utils.ImageReferenceGetDomain(imageFull)
if domain == "" {
panicMsg := fmt.Sprintf("failed to get domain from %s", imageFull)
panic(panicMsg)
}
promptForDownload := true
var shouldPullImage bool
if rootFlags.assumeYes || domain == "localhost" {
promptForDownload = false
shouldPullImage = true
}
if promptForDownload {
fmt.Println("Image required to create toolbox container.")
prompt := fmt.Sprintf("Download %s (500MB)? [y/N]:", imageFull)
shouldPullImage = utils.AskForConfirmation(prompt)
}
if !shouldPullImage {
return false, nil
}
logrus.Debugf("Pulling image %s", imageFull)
stdoutFd := os.Stdout.Fd()
stdoutFdInt := int(stdoutFd)
if logLevel := logrus.GetLevel(); logLevel < logrus.DebugLevel && terminal.IsTerminal(stdoutFdInt) {
s := spinner.New(spinner.CharSets[9], 500*time.Millisecond)
s.Prefix = fmt.Sprintf("Pulling %s: ", imageFull)
s.Writer = os.Stdout
s.Start()
defer s.Stop()
}
if err := podman.Pull(imageFull); err != nil {
return false, fmt.Errorf("failed to pull image %s", imageFull)
}
return true, nil
}
// systemdNeedsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
func systemdNeedsEscape(i int, b byte) bool {
// Escape everything that is not a-z-A-Z-0-9
// Also escape 0-9 if it's the first character
return strings.IndexByte(alphanum, b) == -1 ||
(i == 0 && strings.IndexByte(num, b) != -1)
}
// systemdPathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
// rules that systemd uses for serializing special characters.
func systemdPathBusEscape(path string) string {
// Special case the empty string
if len(path) == 0 {
return "_"
}
n := []byte{}
for i := 0; i < len(path); i++ {
c := path[i]
if systemdNeedsEscape(i, c) {
e := fmt.Sprintf("_%x", c)
n = append(n, []byte(e)...)
} else {
n = append(n, c)
}
}
return string(n)
}
| [
"\"TOOLBOX_PATH\"",
"\"XDG_RUNTIME_DIR\"",
"\"SHELL\"",
"\"DBUS_SYSTEM_BUS_ADDRESS\""
]
| []
| [
"SHELL",
"XDG_RUNTIME_DIR",
"DBUS_SYSTEM_BUS_ADDRESS",
"TOOLBOX_PATH"
]
| [] | ["SHELL", "XDG_RUNTIME_DIR", "DBUS_SYSTEM_BUS_ADDRESS", "TOOLBOX_PATH"] | go | 4 | 0 | |
tools/githubhelper/githubhelper.go | /*
Copyright 2018 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// githubhelper.go interacts with GitHub, providing useful data for a Prow job.
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"strconv"
"github.com/google/go-github/v27/github"
"knative.dev/test-infra/pkg/ghutil"
)
var (
// Info about the current PR
repoOwner = os.Getenv("REPO_OWNER")
repoName = os.Getenv("REPO_NAME")
pullNumber = atoi(os.Getenv("PULL_NUMBER"), "pull number")
// Shared useful variables
ctx = context.Background()
verbose = false
client *ghutil.GithubClient
)
// authenticate creates client with given token if it's provided and exists,
// otherwise it falls back to use an anonymous client
func authenticate(githubTokenPath *string) {
var err error
client, err = ghutil.NewGithubClient(*githubTokenPath)
if err != nil {
infof("Error creating client with token %q: %v", *githubTokenPath, err)
infof("Proceeding with unauthenticated client")
client = &ghutil.GithubClient{Client: github.NewClient(nil)}
}
}
// atoi is a convenience function to convert a string to integer, failing in case of error.
func atoi(str, valueName string) int {
value, err := strconv.Atoi(str)
if err != nil {
log.Fatalf("Unexpected non number '%s' for %s: %v", str, valueName, err)
}
return value
}
// infof is a convenience wrapper around log.Printf, and does nothing unless --verbose is passed.
func infof(template string, args ...interface{}) {
if verbose {
log.Printf(template, args...)
}
}
// listChangedFiles simply lists the files changed by the current PR.
func listChangedFiles() {
infof("Listing changed files for PR %d in repository %s/%s", pullNumber, repoOwner, repoName)
files, err := client.ListFiles(repoOwner, repoName, pullNumber)
if err != nil {
log.Fatalf("Error listing files: %v", err)
}
for _, file := range files {
fmt.Println(*file.Filename)
}
}
func main() {
githubTokenPath := flag.String("github-token", os.Getenv("GITHUB_BOT_TOKEN"), "Github token file path for authenticating with Github")
listChangedFilesFlag := flag.Bool("list-changed-files", false, "List the files changed by the current pull request")
verboseFlag := flag.Bool("verbose", false, "Whether to dump extra info on output or not; intended for debugging")
flag.Parse()
verbose = *verboseFlag
authenticate(githubTokenPath)
if *listChangedFilesFlag {
listChangedFiles()
}
}
| [
"\"REPO_OWNER\"",
"\"REPO_NAME\"",
"\"PULL_NUMBER\"",
"\"GITHUB_BOT_TOKEN\""
]
| []
| [
"PULL_NUMBER",
"REPO_NAME",
"GITHUB_BOT_TOKEN",
"REPO_OWNER"
]
| [] | ["PULL_NUMBER", "REPO_NAME", "GITHUB_BOT_TOKEN", "REPO_OWNER"] | go | 4 | 0 | |
src/test/util_test.go | /*
* Copyright 2020 Huawei Technologies Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test
import (
"github.com/stretchr/testify/assert"
"mep-agent/src/util"
"os"
"testing"
)
func TestClearByteArray(t *testing.T) {
data1 := []byte{'a', 'b', 'c'}
util.ClearByteArray(data1)
data2 := []byte{0, 0, 0}
assert.Equal(t, data2, data1)
util.ClearByteArray(nil)
}
func TestReadTokenFromEnvironment1(t *testing.T) {
os.Setenv("AK", "ZXhhbXBsZUFL")
os.Setenv("SK", "ZXhhbXBsZVNL")
err := util.ReadTokenFromEnvironment()
assert.EqualValues(t, 0, len(os.Getenv("AK")))
assert.EqualValues(t, 0, len(os.Getenv("SK")))
assert.NoError(t, err, "No error is expected")
}
func TestReadTokenFromEnvironment2(t *testing.T) {
os.Setenv("AK", "ZXhhbXBsZUFL")
err := util.ReadTokenFromEnvironment()
Expected := "ak and sk keys should be set in env variable"
assert.EqualError(t, err, Expected)
}
func TestGetAppInstanceIdDecodeFailed(t *testing.T) {
os.Setenv("APPINSTID", "b1fe5b4d-76a7-4a52-b60f-932fde7c8d57")
_, err := util.GetAppInstanceID()
assert.Equal(t, err, nil)
}
func TestGetAppInstanceIdNotSet(t *testing.T) {
_, err := util.GetAppInstanceID()
Expected := "app instance id should be set in env variable"
assert.EqualError(t, err, Expected)
}
| [
"\"AK\"",
"\"SK\""
]
| []
| [
"SK",
"AK"
]
| [] | ["SK", "AK"] | go | 2 | 0 | |
sis_scraper/main.py | from dotenv import load_dotenv
import os
import requests
from bs4 import BeautifulSoup
import json
import re
load_dotenv()
def addConflicts(data):
for department in data:
for course in department["courses"]:
for section in course["sections"]:
section["conflicts"] = getConflict(
data, section["timeslots"], section["subj"] + str(section["crse"])
)
def getConflict(data, check_timeslots, course_code):
conflicts = {}
for department in data:
for course in department["courses"]:
for section in course["sections"]:
for timeslot in section["timeslots"]:
for day in timeslot["days"]:
                        # Don't conflict with other sections of the same course (or with self)
if course_code == section["subj"] + str(section["crse"]):
continue
# If this course does not have a timeslot just skip it
if timeslot["timeStart"] == -1 or timeslot["timeEnd"] == -1:
continue
for check_timeslot in check_timeslots:
# If this course does not have a timeslot just skip it
if (
check_timeslot["timeStart"] == -1
or check_timeslot["timeEnd"] == -1
):
continue
# If not happening on the same day skip it
if day not in check_timeslot["days"]:
continue
                            # If the dates don't overlap skip it
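                            # Two half-open intervals [a1, a2) and [b1, b2) overlap
                            # exactly when max(a1, b1) < min(a2, b2); the same test
                            # is applied to the start/end times below.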
if not max(
check_timeslot["dateStart"], timeslot["dateStart"]
) < min(check_timeslot["dateEnd"], timeslot["dateEnd"]):
continue
# There is a conflict
if max(
check_timeslot["timeStart"], timeslot["timeStart"]
) < min(check_timeslot["timeEnd"], timeslot["timeEnd"]):
# JSON does not support hashtables without a value so the value
# is always set to true even though just by being in the conflicts
# hash table is enough to know it conflicts
conflicts[section["crn"]] = True
return conflicts
# We decided not to use this but I left it just in case
# def reformatJson(data):
# departments_copy = data
# reformat = {}
# for department in departments_copy:
# reformat[department['code']] = department
# course_copy = department['courses']
# reformat[department['code']]['courses'] = {}
# for course in course_copy:
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"] = course
# sections_copy = course['sections']
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"]['sections'] = {}
# for section in sections_copy:
# reformat[department['code']]['courses'][f"{course['subj']}-{course['crse']}"]['sections'][section['crn']] = section
#
#
# return reformat
#
def getContent(element):
return " ".join(
element.encode_contents().decode().strip().replace("&", "&").split()
)
def getContentFromChild(element, childType):
if len(element.findAll(childType)) > 0:
element = element.findAll(childType)[0]
return getContent(element)
def cleanOutAbbr(text):
text = re.sub("<abbr.*?>", "", text)
text = re.sub("<\/abbr>", "", text)
text = re.sub(
"\s?\([pP]\)", "", text
) # Remove primary instructor indicator (maybe we can use this data somewhere later but for now it is removed)
text = re.sub("\w+\.\s+", "", text)
return text
def timeToMilitary(time, useStartTime):
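    # e.g. "10:00 am-11:50 am" -> 1000 (start) or 1150 (end); "2:00 pm-3:50 pm"
    # -> 1400 or 1550; "TBA" -> -1. Noon ("12:xx pm") gets no 1200 offset.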
if "TBA" in time:
return -1
if useStartTime:
time = time.split("-")[0]
else:
time = time.split("-")[1]
offset = 0
if "pm" in time and "12:" not in time:
offset = 1200
return int("".join(time.strip().split(":"))[:4]) + offset
def toTitle(text):
text = text.title()
regex = r"\b[iI]+\b"
matches = re.finditer(regex, text)
for matchNum, match in enumerate(matches, start=1):
text = (
text[: match.start()]
+ text[match.start() : match.end()].upper()
+ text[match.end() :]
)
text = text.replace("'S", "'s")
return text
payload = f'sid={os.getenv("RIN")}&PIN={os.getenv("PASSWORD")}'
headers = {"Content-Type": "application/x-www-form-urlencoded"}
with requests.Session() as s:
s.get(url="https://sis.rpi.edu/rss/twbkwbis.P_WWWLogin")
response = s.request(
"POST",
"https://sis.rpi.edu/rss/twbkwbis.P_ValLogin",
headers=headers,
data=payload,
)
if b"Welcome" not in response.text.encode("utf8"):
print("Failed to log into sis")
exit(1)
url = "https://sis.rpi.edu/rss/bwskfcls.P_GetCrse_Advanced"
payload = f'rsts=dummy&crn=dummy&term_in={os.getenv("CURRENT_TERM")}&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy&sel_subj=ADMN&sel_subj=USAF&sel_subj=ARCH&sel_subj=ARTS&sel_subj=ASTR&sel_subj=BCBP&sel_subj=BIOL&sel_subj=BMED&sel_subj=CHME&sel_subj=CHEM&sel_subj=CIVL&sel_subj=COGS&sel_subj=COMM&sel_subj=CSCI&sel_subj=ENGR&sel_subj=ERTH&sel_subj=ECON&sel_subj=ECSE&sel_subj=ESCI&sel_subj=ENVE&sel_subj=GSAS&sel_subj=ISYE&sel_subj=ITWS&sel_subj=IENV&sel_subj=IHSS&sel_subj=ISCI&sel_subj=LANG&sel_subj=LGHT&sel_subj=LITR&sel_subj=MGMT&sel_subj=MTLE&sel_subj=MATP&sel_subj=MATH&sel_subj=MANE&sel_subj=USAR&sel_subj=USNA&sel_subj=PHIL&sel_subj=PHYS&sel_subj=PSYC&sel_subj=STSH&sel_subj=STSS&sel_subj=WRIT&sel_crse=&sel_title=&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_ptrm=%25&begin_hh=0&begin_mi=0&begin_ap=a&end_hh=0&end_mi=0&end_ap=a&SUB_BTN=Section+Search&path=1'
# This payload is for testing. It will only return CSCI classes and will therefore be a bit faster
# payload = f'rsts=dummy&crn=dummy&term_in={os.getenv("CURRENT_TERM")}&sel_subj=dummy&sel_day=dummy&sel_schd=dummy&sel_insm=dummy&sel_camp=dummy&sel_levl=dummy&sel_sess=dummy&sel_instr=dummy&sel_ptrm=dummy&sel_attr=dummy&sel_subj=CSCI&sel_subj=LGHT&sel_crse=&sel_title=&sel_from_cred=&sel_to_cred=&sel_camp=%25&sel_ptrm=%25&begin_hh=0&begin_mi=0&begin_ap=a&end_hh=0&end_mi=0&end_ap=a&SUB_BTN=Section+Search&path=1'
headers = {}
response = s.request("POST", url, headers=headers, data=payload)
data = []
# print(response.text.encode('utf8'))
soup = BeautifulSoup(response.text.encode("utf8"), "html.parser")
table = soup.findAll("table", {"class": "datadisplaytable"})[0]
rows = table.findAll("tr")
current_department = None
current_code = None
current_courses = None
last_subject = None
last_course_code = None
for row in rows:
th = row.findAll("th")
if len(th) != 0:
if "ddtitle" in th[0].attrs["class"]:
# if(current_department):
data.append(
{"name": toTitle(getContent(th[0])), "code": "", "courses": []}
)
else:
td = row.findAll("td")
if "TBA" not in getContent(td[8]):
timeslot_data = {
"days": list(getContent(td[8])),
"timeStart": timeToMilitary(
getContentFromChild(td[9], "abbr"), True
),
"timeEnd": timeToMilitary(
getContentFromChild(td[9], "abbr"), False
),
"instructor": ", ".join(
[x.strip() for x in cleanOutAbbr(getContent(td[19])).split(",")]
),
"dateStart": getContentFromChild(td[20], "abbr").split("-")[0],
"dateEnd": getContentFromChild(td[20], "abbr").split("-")[1],
"location": getContentFromChild(td[21], "abbr"),
}
else:
timeslot_data = {
"dateEnd": "",
"dateStart": "",
"days": [],
"instructor": "",
"location": "",
"timeEnd": -1,
"timeStart": -1,
}
if len(getContent(td[1])) == 0:
data[-1]["courses"][-1]["sections"][-1]["timeslots"].append(
timeslot_data
)
continue
credit_min = float(getContent(td[6]).split("-")[0])
credit_max = credit_min
if len(getContent(td[6]).split("-")) > 1:
credit_max = float(getContent(td[6]).split("-")[1])
section_data = {
# "select":getContentFromChild(td[0], 'abbr'),
"crn": int(getContentFromChild(td[1], "a")),
"subj": getContent(td[2]),
"crse": int(getContent(td[3])),
"sec": getContent(td[4]),
# "cmp":getContent(td[5]),
"credMin": credit_min,
"credMax": credit_max,
"title": toTitle(getContent(td[7])),
# "cap": int(getContent(td[10])),
# "act":int(getContent(td[11])),
# "rem": int(getContent(td[12])),
# "wlCap":int(getContent(td[13])),
# "wlAct":int(getContent(td[14])),
# "wlRem":int(getContent(td[15])),
# "xlCap":getContent(td[16]),
# "xlAct":getContent(td[17]),
# "xlRem":getContent(td[18]),
"attribute": getContent(td[22]) if 22 < len(td) else "",
"timeslots": [timeslot_data],
}
if (
section_data["subj"] == last_subject
and section_data["crse"] == last_course_code
):
data[-1]["courses"][-1]["sections"].append(section_data)
continue
last_subject = getContent(td[2])
last_course_code = int(getContent(td[3]))
data[-1]["courses"].append(
{
"title": toTitle(getContent(td[7])),
"subj": getContent(td[2]),
"crse": int(getContent(td[3])),
"id": getContent(td[2]) + "-" + getContent(td[3]),
"sections": [section_data],
}
)
if len(getContent(td[2])) > 0:
data[-1]["code"] = getContent(td[2])
# This is for the old conflict method that has a list for each class that it conflicts with
# addConflicts(data)
# data = reformatJson(data)
# print(json.dumps(data,sort_keys=False,indent=2))
with open(f"courses.json", "w") as outfile: # -{os.getenv("CURRENT_TERM")}
json.dump(data, outfile, sort_keys=False, indent=2)
# Generate binary conflict output: for every section, its CRN plus a 9*64-bit
# bitmask covering Mon-Sat, 7:00-23:00 in 10-minute slots
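# For example, a section meeting Mondays 10:00-10:50 sets the five 10-minute
# slots starting at bit day_offsets['M'] + (10 - 7) * 6, i.e. bits 18-22.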
day_offsets = {
"M": 0 * 16 * 6,
"T": 1 * 16 * 6,
"W": 2 * 16 * 6,
"R": 3 * 16 * 6,
"F": 4 * 16 * 6,
"S": 5 * 16 * 6,
}
conflicts = {}
crn_to_courses = {}
for dept in data:
for course in dept["courses"]:
for section in course["sections"]:
crn_to_courses[section["crn"]] = course["id"]
conflict = [0] * (64 * 9)
for time in section["timeslots"]:
for day in time["days"]:
for hour in range(700, 2300, 100):
for minute in range(0, 60, 10):
if (
time["timeStart"] <= hour + minute
and time["timeEnd"] > hour + minute
):
minute_idx = int(minute / 10)
hour_idx = int(hour / 100) - 7 # we start at 7am
conflict[
day_offsets[day] + hour_idx * 6 + minute_idx
] = 1
conflicts[section["crn"]] = "".join(str(e) for e in conflict)
with open("mod.rs", "w") as f: # -{os.getenv("CURRENT_TERM")}
f.write(
"""\
//This file was automatically generated. Please do not modify it directly
use ::phf::{phf_map, Map};
pub static CRN_TIMES: Map<u32, [u64; 9]> = phf_map! {
"""
)
for crn, conflict in conflicts.items():
rust_array = f"\t{crn}u32 => ["
for i in range(0, 9 * 64, 64):
if i != 0:
rust_array += ", "
rust_array += str(int(conflict[i : i + 64], 2))
rust_array += "],\n"
f.write(rust_array)
f.write(
"""
};
pub static CRN_COURSES: Map<u32, &'static str> = phf_map! {
"""
)
for crn, course in crn_to_courses.items():
f.write(f'\t{crn}u32 => "{course}",\n')
f.write("};")
| []
| []
| [
"RIN",
"PASSWORD",
"CURRENT_TERM"
]
| [] | ["RIN", "PASSWORD", "CURRENT_TERM"] | python | 3 | 0 | |
Packs/TrendMicroDDA/Integrations/TrendMicroDDA/TrendMicroDDA.py | import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import hashlib
from datetime import datetime
import uuid
import json
import requests
import re
import platform
import os.path
import copy
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
if not demisto.params().get("proxy", True):
# Use pop() so a missing proxy variable does not raise a KeyError at import time.
os.environ.pop("HTTP_PROXY", None)
os.environ.pop("HTTPS_PROXY", None)
os.environ.pop("http_proxy", None)
os.environ.pop("https_proxy", None)
# HELPER FUNCTIONS #
def load_host_url():
''' loads the host url from the configuration or strips the server url to get valid host url '''
host = demisto.params()['ip_address']
if host:
# strip https://www. of the server address //disable-secrets-detection
url = re.compile(r"https?://(www\.)?")
host = url.sub('', demisto.params()['server']).strip().strip('/')
# strip :{port} of the server address
host = host.split(':')[0]
return host
def hash_file(filename):
'''Calculate the SHA1 of a file'''
h = hashlib.sha1() # nosec
with open(filename, 'rb') as f:
chunk = ''
while True:
chunk = f.read(1024)
if not chunk:
break
h.update(chunk)
return h.hexdigest()
def hash_url(url):
'''Calculate the SHA1 of a URL'''
h = hashlib.sha1() # nosec
h.update(url)
return h.hexdigest()
def get_epoch_time():
'''Get the epoch time (for the X-DTAS-Time header value.'''
epoch_time = str(int(time.time()))
return epoch_time
def get_epoch_from_datetime(dt):
'''Calculate epoch time from a datetime object'''
epoch_format = str(int(time.mktime(dt.timetuple())))
return epoch_format
def calculate_checksum(api_key, headers, body=''):
''' Generates a Checksum for the api call '''
temp = api_key
if 'X-DTAS-ChecksumCalculatingOrder' in headers:
x_dtas_checksum_calculating_order_list = headers['X-DTAS-ChecksumCalculatingOrder'].split(",")
for key in x_dtas_checksum_calculating_order_list:
temp += headers[key]
else:
for key, value in headers.iteritems():
if ('X-DTAS-' in key and 'X-DTAS-Checksum' not in key and 'X-DTAS-ChecksumCalculatingOrder' not in key):
temp += value
temp += body
return hashlib.sha1(temp) # nosec
def http_request(uri, method, headers, body={}, params={}, files={}):
''' Makes an API call to the server URL with the supplied uri, method, headers, body and params '''
url = '%s/%s' % (SERVER_URL, uri)
if method not in ['put', 'post']:
body = json.dumps(body)
res = requests.request(
method,
url,
headers=headers,
data=body,
verify=USE_SSL,
params=params,
files=files
)
if (res.status_code != 102 and (res.status_code < 200 or res.status_code >= 300)):
raise Exception('Got status code ' + str(res.status_code) + ' with body '
+ res.content + ' with headers ' + str(res.headers))
return res
def file_uploaded_to_incident(file, file_sha1):
''' Converts an uploaded file to a Demisto incident '''
incident = {} # type: Dict[str, Any]
incident["name"] = "Incident: %s " % (file_sha1)
incident["occurred"] = str(CURRENT_TIME)
incident["rawJSON"] = "TODO"
labels = [] # type: list
incident["labels"] = labels
return incident
def binary_to_boolean_str(binary):
if (binary == '0'):
return 'False'
else:
return 'True'
def binary_to_boolean(binary):
if (binary == '0'):
return False
else:
return True
# GLOBAL VARIABLES #
API_KEY = demisto.params()['apiKey']
PROTOCOL_VERSION = demisto.params()['protocol_version']
SERVER_URL = demisto.params()['server'][:-1] if demisto.params()['server'].endswith('/') else demisto.params()['server']
USE_SSL = not demisto.params().get('insecure', True)
UUID = str(uuid.uuid4())
HOST = load_host_url()
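# Base X-DTAS-* headers shared by every API call; each request deep-copies these and adds its own checksum.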
DEFAULT_HEADERS = {
'X-DTAS-ProtocolVersion': PROTOCOL_VERSION,
'X-DTAS-ClientUUID': UUID,
'X-DTAS-Time': get_epoch_time(),
'X-DTAS-Challenge': str(uuid.uuid4()),
'X-DTAS-ProductName': 'TDA',
'X-DTAS-ClientHostname': platform.node(),
'X-DTAS-SourceID': '1',
'X-DTAS-SourceName': 'DemistoIntegration',
}
if HOST:
DEFAULT_HEADERS['Host'] = HOST
# for fetch incident
CURRENT_TIME = datetime.utcnow()
# COMMAND FUNCTIONS #
def register():
headers_register = copy.deepcopy(DEFAULT_HEADERS)
tmp_checksum = calculate_checksum(API_KEY, headers_register)
headers_register['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
http_request(
'web_service/sample_upload/register',
'get',
headers_register
)
def unregister():
headers_unregister = copy.deepcopy(DEFAULT_HEADERS)
tmp_checksum = calculate_checksum(API_KEY, headers_unregister)
headers_unregister['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
http_request(
'web_service/sample_upload/unregister',
'get',
headers_unregister
)
def test():
headers_test = copy.deepcopy(DEFAULT_HEADERS)
tmp_checksum = calculate_checksum(API_KEY, headers_test)
headers_test['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
http_request(
'web_service/sample_upload/test_connection',
'get',
headers_test
)
demisto.results('ok')
def prettify_simple_upload_sample_file(sha1):
pretty_sample = {
'SHA1': sha1.upper()
}
return pretty_sample
def simple_upload_sample_file(sample_file):
'''Upload a file to Deep Discovery Analyzer for analysis'''
with open(demisto.getFilePath(sample_file)['path'], 'rb') as f:
headers_simple_upload_sample_file = {
'X-DTAS-ProtocolVersion': PROTOCOL_VERSION,
'X-DTAS-ClientUUID': UUID,
'X-DTAS-SourceID': '1',
'X-DTAS-SourceName': 'DemistoIntegration',
'X-DTAS-SHA1': hash_file(demisto.getFilePath(sample_file)['path']),
'X-DTAS-Time': get_epoch_time(),
'X-DTAS-SampleType': '0', # 0 for file, 1 for URL
'X-DTAS-Challenge': str(uuid.uuid4()),
'X-DTAS-ChecksumCalculatingOrder': "X-DTAS-ProtocolVersion,X-DTAS-ClientUUID,X-DTAS-SourceID,X-DTAS-SourceName," \
+ "X-DTAS-SHA1,X-DTAS-Time,X-DTAS-SampleType,X-DTAS-Challenge",
}
tmp_checksum = calculate_checksum(API_KEY, headers_simple_upload_sample_file)
headers_simple_upload_sample_file['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
cmd_url = 'web_service/sample_upload/simple_upload_sample'
res = http_request(
cmd_url,
'post',
headers_simple_upload_sample_file,
files={'uploadsample': f}
)
pretty_res = prettify_simple_upload_sample_file(headers_simple_upload_sample_file['X-DTAS-SHA1'])
return res, pretty_res
def simple_upload_sample_file_command():
sample_file = demisto.args().get('entryID')
res, pretty_res = simple_upload_sample_file(sample_file)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': 'File was uploaded to Trend Micro DDA successfully',
'EntryContext': {
'TrendMicroDDA.Submission(val.SHA1 && val.SHA1==obj.SHA1)': pretty_res
}
})
def prettify_simple_upload_sample_url(url, sha1):
pretty_sample = {
'SHA1': sha1.upper(),
'URL': url
}
return pretty_sample
def simple_upload_sample_url(sample_url):
'''Upload a URL to Analyzer for analysis'''
headers_simple_upload_sample_url = {
'X-DTAS-ProtocolVersion': PROTOCOL_VERSION,
'X-DTAS-ClientUUID': UUID,
'X-DTAS-SourceID': '1',
'X-DTAS-SourceName': 'DemistoIntegration',
'X-DTAS-SHA1': hash_url(sample_url),
'X-DTAS-Time': get_epoch_time(),
'X-DTAS-SampleType': '1', # 0 for file, 1 for URL
'X-DTAS-Challenge': str(uuid.uuid4()),
'X-DTAS-ChecksumCalculatingOrder': "X-DTAS-ProtocolVersion,X-DTAS-ClientUUID,X-DTAS-SourceID,X-DTAS-SourceName," \
+ "X-DTAS-SHA1,X-DTAS-Time,X-DTAS-SampleType,X-DTAS-Challenge",
}
tmp_checksum = calculate_checksum(API_KEY, headers_simple_upload_sample_url)
headers_simple_upload_sample_url['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
cmd_url = 'web_service/sample_upload/simple_upload_sample'
res = http_request(
cmd_url,
'post',
headers_simple_upload_sample_url,
files={'uploadsample': sample_url}
)
pretty_res = prettify_simple_upload_sample_url(sample_url, headers_simple_upload_sample_url['X-DTAS-SHA1'])
return res, pretty_res
def simple_upload_sample_url_command():
sample_url = demisto.args().get('url')
res, pretty_res = simple_upload_sample_url(sample_url)
demisto.results({
'Type': entryTypes['note'],
'Contents': str(res.headers),
'ContentsFormat': formats['text'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('URL was uploaded to Trend Micro DDA successfully', pretty_res),
'EntryContext': {
'TrendMicroDDA.Submission(val.SHA1 && val.SHA1==obj.SHA1)': pretty_res
}
})
def get_sample(sha1, archive_type, archive_encrypted, archive_name):
'''Issue a request to retrieve an archive of the sample given its SHA1 hash'''
if not (re.match(r'\b[0-9a-fA-F]{40}\b', sha1)):
return_error('Provided SHA1: {} is invalid.'.format(sha1))
headers_get_sample = copy.deepcopy(DEFAULT_HEADERS)
headers_get_sample['X-DTAS-SHA1'] = sha1 # SHA1 of the file/URL to download
headers_get_sample['X-DTAS-ArchiveType'] = archive_type
headers_get_sample['X-DTAS-ArchiveEncrypted'] = archive_encrypted
tmp_checksum = calculate_checksum(API_KEY, headers_get_sample)
headers_get_sample['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
cmd_url = 'web_service/sample_upload/get_sample'
res = http_request(
cmd_url,
'get',
headers_get_sample
)
file = fileResult(archive_name, res.content)
return res, file
def get_sample_command():
sha1 = demisto.args()['sha1']
archive_type = demisto.args()['type']
archive_encrypted = demisto.args()['encrypted']
archive_name = demisto.args()['archive_name'] if 'archive_name' in demisto.args() else sha1
archive_name += '.{}'.format(archive_type)
res, file = get_sample(sha1, archive_type, archive_encrypted, archive_name)
return demisto.results(file)
def get_sample_list(interval_start, interval_end, interval_type):
try:
interval_start_dt = datetime.strptime(interval_start, "%Y-%m-%d %H:%M:%S")
interval_end_dt = datetime.strptime(interval_end, "%Y-%m-%d %H:%M:%S")
except BaseException:
return_error('Given interval times are not in the required format, which is: YYYY-MM-DD HH:MM:SS, '
+ 'e.g. 2008-11-22 19:53:42')
headers_get_sample_list = copy.deepcopy(DEFAULT_HEADERS)
headers_get_sample_list['X-DTAS-IntervalStartingPoint'] = get_epoch_from_datetime(interval_start_dt)
headers_get_sample_list['X-DTAS-IntervalEndPoint'] = get_epoch_from_datetime(interval_end_dt)
headers_get_sample_list['X-DTAS-IntervalType'] = interval_type
tmp_checksum = calculate_checksum(API_KEY, headers_get_sample_list)
headers_get_sample_list['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
cmd_url = 'web_service/sample_upload/get_sample_list'
res = http_request(
cmd_url,
'get',
headers_get_sample_list
)
return res # returns a list of SHA1 of the samples
def get_sample_list_command():
'''Issue a request to get a semi-colon separated values list of submissions within the given time interval'''
interval_start = demisto.args()['interval_start']
interval_end = demisto.args()['interval_end']
interval_type = demisto.args()['interval_type']
result = get_sample_list(interval_start, interval_end, interval_type)
if result.text:
sha1_list = result.text.split(';')
hr = '### Trend Micro DDA submissions SHA1\n'
for sha1 in sha1_list:
hr += '- {}\n'.format(sha1)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': result.text,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': hr
})
else:
demisto.results('No results found.')
def build_report(res, threshold, status, verbose):
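'''Parse the XML analysis report into war room context entries, a human-readable summary and the report filename.'''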
report_json = json.loads(xml2json(res.text.encode('utf-8')))
reports = report_json['REPORTS']
# true if list, false if dict
reports_type_is_list = isinstance(reports['FILE_ANALYZE_REPORT'], list)
hr = {} # type: Dict[str, Union[str, Dict[str, str]]]
if isinstance(reports, dict):
image_type_dict = reports.get('IMAGE_TYPE', {})
if isinstance(image_type_dict, dict):
image_type_dict = image_type_dict.get('TYPE', {})
if isinstance(image_type_dict, dict):
image_type = image_type_dict.get('#text', 'Unknown')
else:
image_type = 'Unknown'
else:
image_type = 'Unknown'
else:
image_type = 'Unknown'
hr_headers = {
'Risk Level': reports['OVERALL_RISK_LEVEL'],
'Image Type': image_type,
'Sum of Files Analyzed': (len(reports['FILE_ANALYZE_REPORT'])) if reports_type_is_list else '1',
}
context = {} # type: Dict[str, Any]
dbot_score = 0
context['DBotScore'] = {
'Vendor': 'Trend Micro DDA',
'Score': dbot_score, # check that------------------ TODO --------------------
'Type': 'hash',
'Indicator': reports['FILE_ANALYZE_REPORT']['FileSHA1'] if not reports_type_is_list
else reports['FILE_ANALYZE_REPORT'][0]['FileSHA1']
}
# if type is list, the submission was divided to sub-files and the first file_analyze_report is of the main submission
# context['DBotScore.Indicator'] = reports['FILE_ANALYZE_REPORT']['FileSHA1']
# if not reports_type_is_list else reports['FILE_ANALYZE_REPORT'][0]['FileSHA1']
if not reports_type_is_list: # if the submission doesn't have sub-files
file_analyze_report = reports['FILE_ANALYZE_REPORT']
hr['File Name'] = file_analyze_report['OrigFileName']
hr['Malware Source IP'] = file_analyze_report['MalwareSourceIP']
hr['Malware Source Host'] = file_analyze_report['MalwareSourceHost']
hr['Total Dropped Files'] = file_analyze_report['DroppedFiles']['@Total']
hr['Deny List'] = binary_to_boolean_str(file_analyze_report['IsDenylisted'])
hr['White List'] = binary_to_boolean_str(file_analyze_report['IsWhitelisted'])
if '#text' in file_analyze_report['VirusName']: # the submission has a detection
hr['Detection Name'] = file_analyze_report['VirusName']['#text']
# set the filename
filename = hr['Detection Name'] if ('Detection Name' in hr) else file_analyze_report['FileSHA1']
if filename and '.' not in filename:
filename = str(filename) + ".txt"
# add data regarding the submission to the context
context['TrendMicroDDA.Submission(val.SHA1 && val.SHA1==obj.SHA1)'] = {
'Status': status,
'RiskLevel': reports['OVERALL_RISK_LEVEL'],
'SHA1': file_analyze_report['FileSHA1'],
'SHA256': file_analyze_report['FileSHA256'],
'MD5': file_analyze_report['FileMD5'],
'VirusDetected': binary_to_boolean(file_analyze_report['VirusDetected']),
}
if file_analyze_report['TrueFileType'] == 'URL':
# add the URL address
context[outputPaths['url']] = {
'Data': file_analyze_report['OrigFileName']
}
else:
context[outputPaths['file']] = {
'MD5': file_analyze_report['FileMD5'],
'SHA1': file_analyze_report['FileSHA1'],
'SHA256': file_analyze_report['FileSHA256'],
'Size': file_analyze_report['FileSize'],
'Name': file_analyze_report['OrigFileName'],
}
# add data regarding the submission to the context if file is malicious
if (reports['OVERALL_RISK_LEVEL'] >= threshold):
if file_analyze_report['TrueFileType'] == 'URL':
context[outputPaths['url']].update({
'Malicious': {
'Vendor': 'Trend Micro DDA',
'Description': 'RiskLevel: ' + reports['OVERALL_RISK_LEVEL']
}
})
else:
context[outputPaths['file']].update({
'Malicious': {
'Vendor': 'Trend Micro DDA',
'Description': 'RiskLevel: ' + reports['OVERALL_RISK_LEVEL']
}
})
# extracting IP and Domains from the report
if file_analyze_report['MalwareSourceIP']:
context['IP.Address(val.Address && val.Address == obj.Address)'] = file_analyze_report['MalwareSourceIP']
if file_analyze_report['MalwareSourceHost']:
context['Domain.Name(val.Name && val.Name == obj.Name)'] = file_analyze_report['MalwareSourceHost']
if verbose == 'true':
dropped_files = file_analyze_report['DroppedFiles']
if 'FileItem' in dropped_files:
if 'DownloadURL' in dropped_files['FileItem']:
context['URL.Data(val.Data && val.Data == obj.Data)'] = dropped_files['FileItem']['DownloadURL']
hr['Download URL'] = dropped_files['FileItem']['DownloadURL']
context['TrendMicroDDA.Submission'].update({
'DownloadURL': dropped_files['FileItem']['DownloadURL']
})
else: # if the submission have sub-files
main_file_analyze_report = reports['FILE_ANALYZE_REPORT'][0]
# add data to the war room
hr = copy.deepcopy(reports['FILE_ANALYZE_REPORT'])
for item in hr:
item['File Name'] = item['OrigFileName'] # type: ignore
item['Detection Name'] = item['VirusName']['#text'] if '#text' in item['VirusName'] else None # type: ignore
item['Malware Source IP'] = item['MalwareSourceIP'] # type: ignore
item['Malware Source Host'] = item['MalwareSourceHost'] # type: ignore
if verbose == 'true':
item['Download URL'] = item['DroppedFiles'].get('FileItem') # type: ignore
item['Deny List'] = binary_to_boolean_str(item['IsDenylisted']) if item['IsDenylisted'] else None # type: ignore
item['White List'] = binary_to_boolean_str(item['IsWhitelisted']) if item['IsWhitelisted'] else None # type: ignore
# set the filename
filename = main_file_analyze_report['OrigFileName']
if filename and '.' not in filename:
filename = str(filename) + ".txt"
# This section was commented out because it used an undefined variable download_url_list.
# Need to check again if it should be moving to GA.
# if verbose == 'true':
# hr['Download URL'] = download_url_list
# context['URL.Data(val.Data && val.Data == obj.Data)'] = download_url_list
# context['TrendMicroDDA.Submission'].update({
# 'DownloadURL': download_url_list
# })
# add data regarding the submission to the context
file_analyzed_list = []
for file_analyzed in reports['FILE_ANALYZE_REPORT'][1:]: # iterate over all the subfiles excluding the main file
file_analyzed_dict = {
'SHA1': file_analyzed['FileSHA1'],
'SHA256': file_analyzed['FileSHA256'],
'MD5': file_analyzed['FileMD5'],
'Name': file_analyzed['VirusName']['#text'] if '#text' in file_analyzed['VirusName'] else '',
'VirusDetected': binary_to_boolean(file_analyzed['VirusDetected']),
}
if file_analyzed['TrueFileType'] == 'URL':
# add the URL address
context[outputPaths['url']] = {
'Data': file_analyzed['OrigFileName']
}
else:
context[outputPaths['file']] = {
'MD5': file_analyzed['FileMD5'],
'SHA1': file_analyzed['FileSHA1'],
'SHA256': file_analyzed['FileSHA256'],
'Size': file_analyzed['FileSize'],
'Name': file_analyzed['VirusName']['#text'] if '#text' in file_analyzed['VirusName'] else '',
# add score of some sort from virusdetected? ask michal.------------------ TODO --------------------
}
file_analyzed_list.append(file_analyzed_dict)
context['TrendMicroDDA.Submission(val.SHA1 && val.SHA1==obj.SHA1)'] = {
'Status': status,
'RiskLevel': reports['OVERALL_RISK_LEVEL'],
'SHA1': main_file_analyze_report['FileSHA1'],
'SHA256': main_file_analyze_report['FileSHA256'],
'MD5': main_file_analyze_report['FileMD5'],
'VirusDetected': binary_to_boolean(main_file_analyze_report['VirusDetected']),
'FileAnalyzed': file_analyzed_list,
}
if main_file_analyze_report['TrueFileType'] == 'URL':
context['URL(val.Data && val.Data==obj.Data)'] = {
'Data': main_file_analyze_report['OrigFileName'],
}
else:
context['File(val.SHA1 && val.SHA1==obj.SHA1)'] = {
'MD5': main_file_analyze_report['FileMD5'],
'SHA1': main_file_analyze_report['FileSHA1'],
'SHA256': main_file_analyze_report['FileSHA256'],
'Size': main_file_analyze_report['FileSize'],
'Name': main_file_analyze_report['VirusName']['#text'] if '#text' in main_file_analyze_report['VirusName']
else '',
}
# add data regarding the submission to the context if it is malicious
if (reports['OVERALL_RISK_LEVEL'] >= threshold):
context['DBotScore.Score'] = 3
if (main_file_analyze_report['TrueFileType'] == 'URL'):
context[outputPaths['url']] = {
'Malicious': {
'Vendor': 'Trend Micro DDA',
'Description': 'RiskLevel: ' + reports['OVERALL_RISK_LEVEL']
}
}
else:
context[outputPaths['file']] = {
'Malicious': {
'Vendor': 'Trend Micro DDA',
'Description': 'RiskLevel: ' + reports['OVERALL_RISK_LEVEL']
}
}
# extracting IP and Domains from the report
if main_file_analyze_report['MalwareSourceIP']:
context['IP.Address(val.Address && val.Address == obj.Address)'] = main_file_analyze_report['MalwareSourceIP']
if main_file_analyze_report['MalwareSourceHost']:
context['Domain.Name(val.Name && val.Name == obj.Name)'] = main_file_analyze_report['MalwareSourceHost']
return context, hr, hr_headers, filename
def get_report(sha1):
'''Issue a request to retrieve XML report for a given SHA1'''
if not (re.match(r'\b[0-9a-fA-F]{40}\b', sha1)):
return_error('Provided SHA1: {} is invalid.'.format(sha1))
headers_get_report = copy.deepcopy(DEFAULT_HEADERS)
headers_get_report['X-DTAS-SHA1'] = sha1 # SHA1 of the file/URL to download
headers_get_report['X-DTAS-Time'] = get_epoch_time()
tmp_checksum = calculate_checksum(API_KEY, headers_get_report)
headers_get_report['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
cmd_url = 'web_service/sample_upload/get_report'
res = http_request(
cmd_url,
'get',
headers_get_report
)
return res
def get_report_command():
sha1 = demisto.args()['sha1']
threshold = demisto.args()['threshold']
verbose = demisto.args()['verbose']
res = get_report(sha1)
if res.status_code == 102:
ec = {
'Status': 'Analyzing',
'SHA1': sha1
}
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': res.text,
'HumanReadable': 'Submission analysis has not finished yet.',
'EntryContext': {
'TrendMicroDDA.Submission(val.SHA1 && val.SHA1==obj.SHA1)': ec
}
})
else:
status = 'Completed'
context, hr, hr_headers, filename = build_report(res, threshold, status, verbose)
markdown_table_headers = ['File Name', 'Detection Name', 'Malware Source IP', 'Malware Source Host']
if verbose == 'true':
markdown_table_headers.append('Download URL')
markdown_table_headers.extend(('Deny List', 'White List'))
tmp_file = fileResult(filename, res.text)
demisto.results({ # add context and the Report File to the war room
'Type': entryTypes['file'],
'FileID': tmp_file.get('FileID'),
'Contents': '',
'ContentsFormat': formats['text'],
'File': tmp_file.get('File'),
'EntryContext': context,
})
demisto.results({ # add table to the war room
'Type': entryTypes['note'],
'Contents': res.text,
'ContentsFormat': formats['text'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable':
'## Submission Report from TrendMicroDDA\n'
+ '### Risk Level: {}, Sum of Files Analyzed: {}, Image Type: {}\n'.format(
hr_headers['Risk Level'], hr_headers['Sum of Files Analyzed'], hr_headers['Image Type'])
+ tableToMarkdown('Report Summary', hr, headers=markdown_table_headers),
})
def build_brief_report(res, sha1, threshold):
report_json = json.loads(xml2json(res.text))
brief_report_json = report_json.get('REPORT', {}).get('BRIEF_REPORT', {})
hr = {
'SHA1': sha1,
'Risk Level': brief_report_json.get('RiskLevel'),
'Status': brief_report_json.get('STATUS'),
}
return hr
def get_brief_report(sha1):
if not (re.match(r'\b[0-9a-fA-F]{40}\b', sha1)):
return_error('Provided SHA1 is invalid.')
headers_get_brief_report = {
'Content-Type': 'text/plain',
'X-DTAS-ProtocolVersion': PROTOCOL_VERSION,
'X-DTAS-ClientUUID': UUID,
'X-DTAS-Time': get_epoch_time(),
'X-DTAS-Challenge': str(uuid.uuid4()),
'X-DTAS-ChecksumCalculatingOrder': "X-DTAS-ProtocolVersion,X-DTAS-ClientUUID,X-DTAS-Time,X-DTAS-Challenge",
}
sha1_list = argToList(sha1)
data = ';'.join(sha1_list)
tmp_checksum = calculate_checksum(API_KEY, headers_get_brief_report, data)
headers_get_brief_report['X-DTAS-Checksum'] = tmp_checksum.hexdigest()
cmd_url = 'web_service/sample_upload/get_brief_report'
res = http_request(
cmd_url,
'put',
headers=headers_get_brief_report,
body=data
)
return res
def get_brief_report_command():
'''Issue a request to retrieve the brief XML report for a given SHA1'''
sha1 = demisto.args()['sha1']
threshold = demisto.args()['threshold']
res = get_brief_report(sha1)
hr = build_brief_report(res, sha1, threshold)
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['text'],
'Contents': res.text,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Sample Brief Report from TrendMicroDDA', hr, removeNull=True),
# 'EntryContext': {
# 'TrendMicroDDA.Submission(val.SHA1 && val.SHA1==obj.SHA1)': context
# }
})
def check_status(sha1_list):
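'''Validate the given SHA1 values and poll the report endpoint to see whether each submission has finished analysis.'''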
for sha1 in sha1_list:
if not (re.match(r'\b[0-9a-fA-F]{40}\b', sha1)):
return_error('Provided SHA1: {} is invalid.'.format(sha1))
manyRes = []
manyEC = []
for sha1 in sha1_list:
res = get_report(sha1)
manyRes.append(res.text)
if res.status_code == 102:
manyEC.append({
'Status': 'Analyzing',
'SHA1': sha1
})
else:
manyEC.append({
'Status': 'Completed',
'SHA1': sha1
})
return manyRes, manyEC
def check_status_command():
sha1_list = argToList(demisto.args()['sha1'])
manyRes, manyEC = check_status(sha1_list)
demisto.results({
'Type': entryTypes['note'],
'Contents': manyRes,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Status of the submissions in TrendMicroDDA', manyEC),
'EntryContext': {
'TrendMicroDDA.Submission(val.SHA1 && val.SHA1==obj.SHA1)': manyEC
}
})
# EXECUTION
LOG('command is %s' % (demisto.command(), ))
try:
register()
if demisto.command() == 'test-module':
test()
elif demisto.command() == 'trendmicro-dda-upload-file':
simple_upload_sample_file_command()
elif demisto.command() == 'trendmicro-dda-upload-url':
simple_upload_sample_url_command()
elif demisto.command() == 'trendmicro-dda-get-sample':
get_sample_command()
elif demisto.command() == 'trendmicro-dda-check-status':
check_status_command()
elif demisto.command() == 'trendmicro-dda-get-brief-report':
get_brief_report_command()
elif demisto.command() == 'trendmicro-dda-get-report': # add !file !url command? ask anar
get_report_command()
elif demisto.command() == 'trendmicro-dda-get-openioc':
# get_openioc_report_command()
return_error("Deprecated command")
elif demisto.command() == 'trendmicro-dda-get-sample-list':
get_sample_list_command()
except Exception as e:
LOG(str(e))
LOG.print_log()
raise
finally:
unregister()
| []
| []
| [
"HTTP_PROXY",
"HTTPS_PROXY",
"http_proxy",
"https_proxy"
]
| [] | ["HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "https_proxy"] | python | 4 | 0 | |
examples/sccl_init.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
def show():
if 'MSCCL_CONFIG' in os.environ:
print()
print(f"MSCCL_CONFIG = {os.environ['MSCCL_CONFIG']}")
print(f"Contents of {os.environ['MSCCL_CONFIG']}:")
with open(os.environ['MSCCL_CONFIG']) as f:
print(f.read())
print()
print('=== Trigger a builtin synthesis plan ===')
import msccl
msccl.init('ndv4', 9, (msccl.Collective.alltoall, '1GB'))
show()
print('=== Register additional plans from a library ===')
import msccl_presynth
msccl.init('ndv2', 3,
(msccl.Collective.alltoall, '1GB'),
(msccl.Collective.allgather, (128, '1KB')))
show()
print('=== Register custom plans ===')
from msccl.autosynth.registry import register_synthesis_plan
@register_synthesis_plan(msccl.Collective.alltoall, 'ndv9000', lambda m: m == 1, ('1MB', None))
def alltoall_9000(machines):
return """<algo name="a2andv9000" nchunksperloop="2" nchannels="1" inplace="0" ngpus="2" proto="Simple">
...
</algo>"""
msccl.init('ndv9000', 1, (msccl.Collective.alltoall, '2MB'))
show()
print('=== Overlapping size ranges ===')
register_synthesis_plan(msccl.Collective.alltoall, 'ndv9000', lambda m: m == 1, (0, '1KB'), protocol='LL')(alltoall_9000)
register_synthesis_plan(msccl.Collective.alltoall, 'ndv9000', lambda m: m == 1, ('1KB', '1MB'), protocol='LL128')(alltoall_9000)
msccl.init('ndv9000', 1, (msccl.Collective.alltoall, ('2KB', None)))
show()
# TODO: Update the following programs to use the new syntax
# print('=== MSCCLang program ===')
# from msccl.autosynth.registry import register_msccl_program
# from msccl.topologies import line
# from msccl.language import *
# @register_msccl_program(line(2), 'allgather', 'two_gpus', machines= lambda m: m == 1)
# def trivial_allgather(prog, nodes):
# chunk(Buffer.input, 0, 0).send(0, Buffer.output, 0).send(1)
# chunk(Buffer.input, 1, 0).send(1, Buffer.output, 1).send(0)
# msccl.init('two_gpus', 1, (msccl.Collective.allgather, (0, None)))
# show()
# print('=== MSCCLang program example ====')
# from msccl.topologies import fully_connected
# from msccl.programs.allreduce_a100_ring import allreduce_ring
# @register_msccl_program(fully_connected(8), 'allreduce', 'ndv4', chunk_factor=8, inplace=True,
# instances=4, protocol='LL128', threadblock_policy=ThreadblockPolicy.manual, machines=lambda x: x == 1)
# def ndv4_ring_allreduce(prog, nodes):
# allreduce_ring(size=8, channels=8)
# msccl.init('ndv4', 1, (msccl.Collective.allreduce, (0, None)))
# show() | []
| []
| [
"MSCCL_CONFIG"
]
| [] | ["MSCCL_CONFIG"] | python | 1 | 0 | |
main/wsgi.py | """
WSGI config for bandcamp app.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings")
application = Cling(get_wsgi_application()) # pylint: disable=invalid-name
| []
| []
| []
| [] | [] | python | 0 | 0 | |
python/pyarmnn/src/pyarmnn/_version.py | # Copyright © 2020 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
version_info = (25, 0, 0)
__dev_version_env = os.getenv("PYARMNN_DEV_VER", "")
if __dev_version_env:
__dev_version = "dev0"
try:
__dev_version = "dev{}".format(int(__dev_version_env))
except ValueError:
__dev_version = str(__dev_version_env)
version_info = (*version_info, __dev_version)
__version__ = '.'.join(str(c) for c in version_info)
__arm_ml_version__ = '{}.{}.{}'.format(version_info[0], version_info[1], version_info[2])
def check_armnn_version(installed_armnn_version: str, expected_armnn_version: str = __arm_ml_version__):
"""Compares expected Arm NN version and Arm NN version used to build the package.
Args:
installed_armnn_version (str): Arm NN version used to generate the package (e.g. 25.0.0)
expected_armnn_version (str): Expected Arm NN version
Returns:
None
"""
expected = expected_armnn_version.split('.', 2)
installed = installed_armnn_version.split('.', 2)
# only compare major and minor versions, not patch
assert (expected[0] == installed[0]) and (expected[1] == installed[1]), \
"Expected ArmNN version is {} but installed ArmNN version is {}".format(expected_armnn_version, installed_armnn_version)
| []
| []
| [
"PYARMNN_DEV_VER"
]
| [] | ["PYARMNN_DEV_VER"] | python | 1 | 0 | |
Chapter 9/wildlife/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wildlife.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
slack/main.go | package main
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"time"
)
var (
message = flag.String("message", "", "The message to send")
isError = flag.Bool("error", false, "Message is an error")
isAlert = flag.Bool("alert", false, "Alert @here when message is sent")
field = flag.String("field", "", "Slack attachment field")
fields = flag.String("fields", "", "Slack attachment fields")
// optional
webhook = flag.String("webhook", "", "Slack Webhook")
)
// Webhook is the Slack webhook to use
var (
Webhook = os.Getenv("SLACK_WEBHOOK")
)
func main() {
flag.Parse()
log.SetPrefix("[Slack] ")
if Webhook == "" {
Webhook = *webhook
}
if Webhook == "" {
log.Fatal("missing SLACK_WEBHOOK in environment, please see slack README")
}
if *field != "" && *fields != "" {
log.Fatal("both 'field' and 'fields' can't be specified, use 'fields' for multiple fields")
}
if *message == "" {
log.Fatal("missing --message, must be specified")
}
if *field != "" || *fields != "" {
if err := loadFields(); err != nil {
log.Fatalf("unable to load fields: %v", err)
}
}
if err := slack(*message); err != nil {
log.Fatalf("failed to send message: %v", err)
}
}
// Field defines the structure of a Slack attachment field
type Field struct {
Title string `json:"title"`
Value string `json:"value"`
Short bool `json:"short"`
}
type attachment struct {
Color string `json:"color"`
Fields []Field `json:"fields"`
}
// Alert defines a slack notification/alert
type Alert struct {
Text string `json:"text"`
Err error `json:"-"`
Attachments []attachment `json:"attachments"`
}
var theFields []Field
func loadFields() error {
// TODO: implement fields parsing
return errors.New("TODO: --field(s) support pending")
}
// trim trims a message to a specific length
func trim(message string, length int) string {
results := strings.Split(message, "\n")
if len(results) <= length {
return message
}
trimmed := results[len(results)-(length-1):]
results = append([]string{"... [Results TRIMMED for display] ..."}, trimmed...)
return strings.Join(results, "\n")
}
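// slack posts the message to the configured webhook, colouring the attachment and adding an error field or @here mention based on the flags.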
func slack(message string) error {
color := "good"
if *isError {
color = "danger"
theFields = append(theFields, Field{
Title: "Error",
Value: "```" + trim(message, 10) + "```",
Short: false,
})
}
if *isAlert {
message += " _cc_ <!here>"
}
sa := Alert{
Text: message,
Attachments: []attachment{
{
Color: color,
Fields: theFields,
},
},
}
jsonBody, err := json.Marshal(sa)
if err != nil {
return err
}
cl := &http.Client{Timeout: time.Second * 20}
req, err := http.NewRequest("POST", Webhook, bytes.NewBuffer(jsonBody))
if err != nil {
return fmt.Errorf("unable to use specified webhook %v", err)
}
// Slack incoming webhooks expect a JSON body; set the content type explicitly.
req.Header.Set("Content-Type", "application/json")
resp, err := cl.Do(req)
if err != nil {
return fmt.Errorf("failed to send request to slack: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
return nil
}
dat, err := ioutil.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("unable to read slack response: %v", err)
}
return fmt.Errorf("slack responded with invalid response [%s] %s", resp.Status, dat)
}
| [
"\"SLACK_WEBHOOK\""
]
| []
| [
"SLACK_WEBHOOK"
]
| [] | ["SLACK_WEBHOOK"] | go | 1 | 0 | |
HuberyBlog/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HuberyBlog.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
GBB.ConversationalKM.Python/SelectIntent/__init__.py | import logging
import azure.functions as func
import json
import os
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
# Connect to Azure Table Storage
table_service = TableService(connection_string= os.environ['AzureWebJobsStorage'])
if not table_service.exists('intents'): table_service.create_table('intents')
req_body = req.get_json()
if req_body:
# Create row to be saved on Azure Table Storage
print(req_body.get('ConversationId'))
data = req_body
data["PartitionKey"] = req_body.get('ConversationId')
data["RowKey"] = req_body.get('MessageId')
# Save row on Azure Table Storage
table_service.insert_or_replace_entity('intents', data)
return func.HttpResponse(f"Row {req_body.get('MessageId')} for {req_body.get('ConversationId')} added")
else:
return func.HttpResponse(
"Please pass valid request body",
status_code=400
) | []
| []
| [
"AzureWebJobsStorage"
]
| [] | ["AzureWebJobsStorage"] | python | 1 | 0 | |
github/github.go | package github
import (
"encoding/json"
"fmt"
"log"
"os"
"github.com/vsoch/codestats/utils"
)
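// githubGetRequest issues a GET with the GitHub v3 Accept header, adding token authentication when GITHUB_TOKEN is set.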
func githubGetRequest(url string) string {
headers := make(map[string]string)
token := os.Getenv("GITHUB_TOKEN")
headers["Accept"] = "application/vnd.github.v3+json"
if token != "" {
headers["Authorization"] = fmt.Sprintf("token %s", token)
}
return utils.GetRequest(url, headers)
}
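// GetReleases fetches the release list for a repository given as "owner/repo".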
func GetReleases(name string) Releases {
response := githubGetRequest("https://api.github.com/repos/" + name + "/releases")
// The response gets parsed into a spack package
releases := Releases{}
err := json.Unmarshal([]byte(response), &releases)
if err != nil {
log.Fatalf("Issue unmarshalling releases data structure: %v\n", err)
}
return releases
}
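// GetOrgRepos lists the repositories that belong to the given GitHub organization.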
func GetOrgRepos(orgName string) Repos {
response := githubGetRequest("https://api.github.com/orgs/" + orgName + "/repos")
// The response gets parsed into a spack package
repos := Repos{}
err := json.Unmarshal([]byte(response), &repos)
if err != nil {
log.Fatalf("Issue unmarshalling repositories data structure: %v\n", err)
}
return repos
}
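// GetRepo fetches the metadata for a single repository given as "owner/repo".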
func GetRepo(repoName string) Repository {
response := githubGetRequest("https://api.github.com/repos/" + repoName)
// The response gets parsed into a spack package
repo := Repository{}
err := json.Unmarshal([]byte(response), &repo)
if err != nil {
log.Fatalf("Issue unmarshalling repository data structure: %v\n", err)
}
return repo
}
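// GetCommits fetches the commit list for a repository, passing the requested branch via the Sha header.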
func GetCommits(name string, branch string) Commits {
url := "https://api.github.com/repos/" + name + "/commits"
headers := make(map[string]string)
headers["Accept"] = "application/vnd.github.v3+json"
headers["Sha"] = branch
token := os.Getenv("GITHUB_TOKEN")
if token != "" {
headers["Authorization"] = fmt.Sprintf("token %s", token)
}
response := utils.GetRequest(url, headers)
commits := Commits{}
err := json.Unmarshal([]byte(response), &commits)
if err != nil {
log.Fatalf("Issue unmarshalling commits data structure: %v\n", err)
}
return commits
}
| [
"\"GITHUB_TOKEN\"",
"\"GITHUB_TOKEN\""
]
| []
| [
"GITHUB_TOKEN"
]
| [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
webhooks-extension/pkg/endpoints/webhook.go | /*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package endpoints
import (
"encoding/json"
"errors"
"fmt"
restful "github.com/emicklei/go-restful"
routesv1 "github.com/openshift/api/route/v1"
logging "github.com/tektoncd/experimental/webhooks-extension/pkg/logging"
pipelinesv1alpha1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
v1alpha1 "github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1"
corev1 "k8s.io/api/core/v1"
"k8s.io/api/extensions/v1beta1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
)
var (
modifyingEventListenerLock sync.Mutex
actions = pipelinesv1alpha1.Param{Name: "Wext-Incoming-Actions", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "opened,reopened,synchronize"}}
)
const (
eventListenerName = "tekton-webhooks-eventlistener"
routeName = "el-" + eventListenerName
)
/*
Creation of the eventlistener, called when no eventlistener exists at
the point of webhook creation.
*/
func (r Resource) createEventListener(webhook webhook, namespace, monitorTriggerName string) (*v1alpha1.EventListener, error) {
hookParams, monitorParams := r.getParams(webhook)
pushTrigger := r.newTrigger(webhook.Name+"-"+webhook.Namespace+"-push-event",
webhook.Pipeline+"-push-binding",
webhook.Pipeline+"-template",
webhook.GitRepositoryURL,
"push",
webhook.AccessTokenRef,
hookParams)
pullRequestTrigger := r.newTrigger(webhook.Name+"-"+webhook.Namespace+"-pullrequest-event",
webhook.Pipeline+"-pullrequest-binding",
webhook.Pipeline+"-template",
webhook.GitRepositoryURL,
"pull_request",
webhook.AccessTokenRef,
hookParams)
pullRequestTrigger.Interceptor.Header = append(pullRequestTrigger.Interceptor.Header, actions)
monitorTrigger := r.newTrigger(monitorTriggerName,
webhook.PullTask+"-binding",
webhook.PullTask+"-template",
webhook.GitRepositoryURL,
"pull_request",
webhook.AccessTokenRef,
monitorParams)
monitorTrigger.Interceptor.Header = append(monitorTrigger.Interceptor.Header, actions)
triggers := []v1alpha1.EventListenerTrigger{pushTrigger, pullRequestTrigger, monitorTrigger}
eventListener := v1alpha1.EventListener{
ObjectMeta: metav1.ObjectMeta{
Name: eventListenerName,
Namespace: namespace,
},
Spec: v1alpha1.EventListenerSpec{
ServiceAccountName: "tekton-webhooks-extension-eventlistener",
Triggers: triggers,
},
}
return r.TriggersClient.TektonV1alpha1().EventListeners(namespace).Create(&eventListener)
}
/*
Update of the eventlistener, called when adding additional webhooks as we
run with a single eventlistener.
*/
func (r Resource) updateEventListener(eventListener *v1alpha1.EventListener, webhook webhook, monitorTriggerName string) (*v1alpha1.EventListener, error) {
hookParams, monitorParams := r.getParams(webhook)
newPushTrigger := r.newTrigger(webhook.Name+"-"+webhook.Namespace+"-push-event",
webhook.Pipeline+"-push-binding",
webhook.Pipeline+"-template",
webhook.GitRepositoryURL,
"push",
webhook.AccessTokenRef,
hookParams)
newPullRequestTrigger := r.newTrigger(webhook.Name+"-"+webhook.Namespace+"-pullrequest-event",
webhook.Pipeline+"-pullrequest-binding",
webhook.Pipeline+"-template",
webhook.GitRepositoryURL,
"pull_request",
webhook.AccessTokenRef,
hookParams)
newPullRequestTrigger.Interceptor.Header = append(newPullRequestTrigger.Interceptor.Header, actions)
eventListener.Spec.Triggers = append(eventListener.Spec.Triggers, newPushTrigger)
eventListener.Spec.Triggers = append(eventListener.Spec.Triggers, newPullRequestTrigger)
existingMonitorFound := false
for _, trigger := range eventListener.Spec.Triggers {
if trigger.Name == monitorTriggerName {
existingMonitorFound = true
break
}
}
if !existingMonitorFound {
newMonitor := r.newTrigger(monitorTriggerName,
webhook.PullTask+"-binding",
webhook.PullTask+"-template",
webhook.GitRepositoryURL,
"pull_request",
webhook.AccessTokenRef,
monitorParams)
newMonitor.Interceptor.Header = append(newMonitor.Interceptor.Header, actions)
eventListener.Spec.Triggers = append(eventListener.Spec.Triggers, newMonitor)
}
return r.TriggersClient.TektonV1alpha1().EventListeners(eventListener.GetNamespace()).Update(eventListener)
}
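// newTrigger builds an EventListenerTrigger whose interceptor forwards events to the webhooks-extension validator service, carrying the repository URL, event type and secret name as headers.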
func (r Resource) newTrigger(name, bindingName, templateName, repoURL, event, secretName string, params []pipelinesv1alpha1.Param) v1alpha1.EventListenerTrigger {
return v1alpha1.EventListenerTrigger{
Name: name,
Binding: v1alpha1.EventListenerBinding{
Name: bindingName,
APIVersion: "v1alpha1",
},
Params: params,
Template: v1alpha1.EventListenerTemplate{
Name: templateName,
APIVersion: "v1alpha1",
},
Interceptor: &v1alpha1.EventInterceptor{
Header: []pipelinesv1alpha1.Param{
{Name: "Wext-Trigger-Name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: name}},
{Name: "Wext-Repository-Url", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: repoURL}},
{Name: "Wext-Incoming-Event", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: event}},
{Name: "Wext-Secret-Name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: secretName}}},
ObjectRef: &corev1.ObjectReference{
APIVersion: "v1",
Kind: "Service",
Name: "tekton-webhooks-extension-validator",
Namespace: r.Defaults.Namespace,
},
},
}
}
/*
Processing of the inputs into the required structure for
the eventlistener.
*/
func (r Resource) getParams(webhook webhook) (webhookParams, monitorParams []pipelinesv1alpha1.Param) {
saName := webhook.ServiceAccount
requestedReleaseName := webhook.ReleaseName
if saName == "" {
saName = "default"
}
server, org, repo, err := getGitValues(webhook.GitRepositoryURL)
if err != nil {
logging.Log.Errorf("error returned from getGitValues: %s", err)
}
server = strings.TrimPrefix(server, "https://")
server = strings.TrimPrefix(server, "http://")
releaseName := ""
if requestedReleaseName != "" {
logging.Log.Infof("Release name based on input: %s", requestedReleaseName)
releaseName = requestedReleaseName
} else {
releaseName = repo
logging.Log.Infof("Release name based on repository name: %s", releaseName)
}
hookParams := []pipelinesv1alpha1.Param{
{Name: "webhooks-tekton-release-name", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: releaseName}},
{Name: "webhooks-tekton-target-namespace", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.Namespace}},
{Name: "webhooks-tekton-service-account", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.ServiceAccount}},
{Name: "webhooks-tekton-git-server", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: server}},
{Name: "webhooks-tekton-git-org", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: org}},
{Name: "webhooks-tekton-git-repo", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: repo}},
{Name: "webhooks-tekton-pull-task", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.PullTask}}}
if webhook.DockerRegistry != "" {
hookParams = append(hookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-docker-registry", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.DockerRegistry}})
}
if webhook.HelmSecret != "" {
hookParams = append(hookParams, pipelinesv1alpha1.Param{Name: "webhooks-tekton-helm-secret", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.HelmSecret}})
}
onSuccessComment := webhook.OnSuccessComment
if onSuccessComment == "" {
onSuccessComment = "Success"
}
onFailureComment := webhook.OnFailureComment
if onFailureComment == "" {
onFailureComment = "Failed"
}
onTimeoutComment := webhook.OnTimeoutComment
if onTimeoutComment == "" {
onTimeoutComment = "Unknown"
}
prMonitorParams := []pipelinesv1alpha1.Param{
{Name: "commentsuccess", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: onSuccessComment}},
{Name: "commentfailure", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: onFailureComment}},
{Name: "commenttimeout", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: onTimeoutComment}},
{Name: "gitsecretname", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: webhook.AccessTokenRef}},
{Name: "gitsecretkeyname", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: "accessToken"}},
{Name: "dashboardurl", Value: pipelinesv1alpha1.ArrayOrString{Type: pipelinesv1alpha1.ParamTypeString, StringVal: r.getDashboardURL(r.Defaults.Namespace)}},
}
return hookParams, prMonitorParams
}
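// getDashboardURL finds the dashboard service in the install namespace and queries its endpoints API for an externally reachable URL, falling back to localhost when lookup fails.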
func (r Resource) getDashboardURL(installNs string) string {
type element struct {
Type string `json:"type"`
URL string `json:"url"`
}
toReturn := "http://localhost:9097/"
labelLookup := "app=tekton-dashboard"
if "openshift" == os.Getenv("PLATFORM") {
labelLookup = "app=tekton-dashboard-internal"
}
services, err := r.K8sClient.CoreV1().Services(installNs).List(metav1.ListOptions{LabelSelector: labelLookup})
if err != nil {
logging.Log.Errorf("could not find the dashboard's service - error: %s", err.Error())
return toReturn
}
if len(services.Items) == 0 {
logging.Log.Error("could not find the dashboard's service")
return toReturn
}
name := services.Items[0].GetName()
proto := services.Items[0].Spec.Ports[0].Name
port := services.Items[0].Spec.Ports[0].Port
url := fmt.Sprintf("%s://%s:%d/v1/namespaces/%s/endpoints", proto, name, port, installNs)
logging.Log.Debugf("using url: %s", url)
resp, err := http.DefaultClient.Get(url)
if err != nil {
logging.Log.Errorf("error occurred when hitting the endpoints REST endpoint: %s", err.Error())
return url
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
logging.Log.Errorf("return code was not 200 when hitting the endpoints REST endpoint, code returned was: %d", resp.StatusCode)
return url
}
bodyJSON := []element{}
if err := json.NewDecoder(resp.Body).Decode(&bodyJSON); err != nil || len(bodyJSON) == 0 {
logging.Log.Errorf("unable to decode the endpoints response or response was empty: %v", err)
return url
}
return bodyJSON[0].URL
}
/*
Processes a git URL into component parts, all of which are lowercased
to try and avoid problems matching strings.
*/
func getGitValues(url string) (gitServer, gitOwner, gitRepo string, err error) {
repoURL := ""
prefix := ""
if url != "" {
url = strings.ToLower(url)
if strings.Contains(url, "https://") {
repoURL = strings.TrimPrefix(url, "https://")
prefix = "https://"
} else {
repoURL = strings.TrimPrefix(url, "http://")
prefix = "http://"
}
}
// example at this point: github.com/tektoncd/pipeline
numSlashes := strings.Count(repoURL, "/")
if numSlashes < 2 {
return "", "", "", errors.New("URL didn't contain an owner and repository")
}
repoURL = strings.TrimSuffix(repoURL, "/")
gitServer = prefix + repoURL[0:strings.Index(repoURL, "/")]
gitOwner = repoURL[strings.Index(repoURL, "/")+1 : strings.LastIndex(repoURL, "/")]
//need to cut off the .git
if strings.HasSuffix(url, ".git") {
gitRepo = repoURL[strings.LastIndex(repoURL, "/")+1 : len(repoURL)-4]
} else {
gitRepo = repoURL[strings.LastIndex(repoURL, "/")+1:]
}
return gitServer, gitOwner, gitRepo, nil
}
// Creates a webhook for a given repository and populates (creating if doesn't yet exist) an eventlistener
func (r Resource) createWebhook(request *restful.Request, response *restful.Response) {
modifyingEventListenerLock.Lock()
defer modifyingEventListenerLock.Unlock()
logging.Log.Infof("Webhook creation request received with request: %+v.", request)
installNs := r.Defaults.Namespace
webhook := webhook{}
if err := request.ReadEntity(&webhook); err != nil {
logging.Log.Errorf("error trying to read request entity as webhook: %s.", err)
RespondError(response, err, http.StatusBadRequest)
return
}
// Sanitize GitRepositoryURL
webhook.GitRepositoryURL = strings.TrimSuffix(webhook.GitRepositoryURL, ".git")
if webhook.PullTask == "" {
webhook.PullTask = "monitor-task"
}
if webhook.Name != "" {
if len(webhook.Name) > 57 {
tooLongMessage := fmt.Sprintf("requested release name (%s) must be less than 58 characters", webhook.Name)
err := errors.New(tooLongMessage)
logging.Log.Errorf("error: %s", err.Error())
RespondError(response, err, http.StatusBadRequest)
return
}
}
dockerRegDefault := r.Defaults.DockerRegistry
// remove prefixes if any
webhook.DockerRegistry = strings.TrimPrefix(webhook.DockerRegistry, "https://")
webhook.DockerRegistry = strings.TrimPrefix(webhook.DockerRegistry, "http://")
if webhook.DockerRegistry == "" && dockerRegDefault != "" {
webhook.DockerRegistry = dockerRegDefault
}
logging.Log.Debugf("Docker registry location is: %s", webhook.DockerRegistry)
namespace := webhook.Namespace
if namespace == "" {
err := errors.New("a namespace for creating a webhook is required, but none was given")
logging.Log.Errorf("error: %s.", err.Error())
RespondError(response, err, http.StatusBadRequest)
return
}
if !strings.HasPrefix(webhook.GitRepositoryURL, "http") {
err := errors.New("the supplied GitRepositoryURL does not specify the protocol http:// or https://")
logging.Log.Errorf("error: %s", err.Error())
RespondError(response, err, http.StatusBadRequest)
return
}
pieces := strings.Split(webhook.GitRepositoryURL, "/")
if len(pieces) < 4 {
logging.Log.Errorf("error creating webhook: GitRepositoryURL format error (%+v).", webhook.GitRepositoryURL)
RespondError(response, errors.New("GitRepositoryURL format error"), http.StatusBadRequest)
return
}
hooks, err := r.getHooksForRepo(webhook.GitRepositoryURL)
if len(hooks) > 0 {
for _, hook := range hooks {
if hook.Name == webhook.Name && hook.Namespace == webhook.Namespace {
logging.Log.Errorf("error creating webhook: A webhook already exists for GitRepositoryURL %+v with the Name %s and Namespace %s.", webhook.GitRepositoryURL, webhook.Name, webhook.Namespace)
RespondError(response, errors.New("Webhook already exists for the specified Git repository with the same name, targeting the same namespace"), http.StatusBadRequest)
return
}
if hook.Pipeline == webhook.Pipeline && hook.Namespace == webhook.Namespace {
logging.Log.Errorf("error creating webhook: A webhook already exists for GitRepositoryURL %+v, running pipeline %s in namespace %s.", webhook.GitRepositoryURL, webhook.Pipeline, webhook.Namespace)
RespondError(response, errors.New("Webhook already exists for the specified Git repository, running the same pipeline in the same namespace"), http.StatusBadRequest)
return
}
if hook.PullTask != webhook.PullTask {
msg := fmt.Sprintf("PullTask mismatch. Webhooks on a repository must use the same PullTask existing webhooks use %s not %s.", hook.PullTask, webhook.PullTask)
logging.Log.Errorf("error creating webhook: " + msg)
RespondError(response, errors.New(msg), http.StatusBadRequest)
return
}
}
}
_, templateErr := r.TriggersClient.TektonV1alpha1().TriggerTemplates(installNs).Get(webhook.Pipeline+"-template", metav1.GetOptions{})
_, pushErr := r.TriggersClient.TektonV1alpha1().TriggerBindings(installNs).Get(webhook.Pipeline+"-push-binding", metav1.GetOptions{})
_, pullrequestErr := r.TriggersClient.TektonV1alpha1().TriggerBindings(installNs).Get(webhook.Pipeline+"-pullrequest-binding", metav1.GetOptions{})
if templateErr != nil || pushErr != nil || pullrequestErr != nil {
msg := fmt.Sprintf("Could not find the required trigger template or trigger bindings in namespace: %s. Expected to find: %s, %s and %s", installNs, webhook.Pipeline+"-template", webhook.Pipeline+"-push-binding", webhook.Pipeline+"-pullrequest-binding")
logging.Log.Errorf("%s", msg)
logging.Log.Errorf("template error: `%s`, pushbinding error: `%s`, pullrequest error: `%s`", templateErr, pushErr, pullrequestErr)
RespondError(response, errors.New(msg), http.StatusBadRequest)
return
}
eventListener, err := r.TriggersClient.TektonV1alpha1().EventListeners(installNs).Get(eventListenerName, metav1.GetOptions{})
if err != nil && !k8serrors.IsNotFound(err) {
msg := fmt.Sprintf("unable to create webhook due to error listing Tekton eventlistener: %s", err)
logging.Log.Errorf("%s", msg)
RespondError(response, errors.New(msg), http.StatusInternalServerError)
return
}
gitServer, gitOwner, gitRepo, err := getGitValues(webhook.GitRepositoryURL)
if err != nil {
logging.Log.Errorf("error parsing git repository URL %s in getGitValues(): %s", webhook.GitRepositoryURL, err)
RespondError(response, errors.New("error parsing GitRepositoryURL, check pod logs for more details"), http.StatusInternalServerError)
return
}
sanitisedURL := gitServer + "/" + gitOwner + "/" + gitRepo
// Single monitor trigger for all triggers on a repo - thus name to use for monitor is
monitorTriggerName := strings.TrimPrefix(gitServer+"/"+gitOwner+"/"+gitRepo, "http://")
monitorTriggerName = strings.TrimPrefix(monitorTriggerName, "https://")
if eventListener != nil && eventListener.GetName() != "" {
_, err := r.updateEventListener(eventListener, webhook, monitorTriggerName)
if err != nil {
msg := fmt.Sprintf("error creating webhook due to error updating eventlistener: %s", err)
logging.Log.Errorf("%s", msg)
RespondError(response, errors.New(msg), http.StatusInternalServerError)
return
}
} else {
logging.Log.Info("No existing eventlistener found, creating a new one...")
_, err := r.createEventListener(webhook, installNs, monitorTriggerName)
if err != nil {
msg := fmt.Sprintf("error creating webhook due to error creating eventlistener. Error was: %s", err)
logging.Log.Errorf("%s", msg)
RespondError(response, errors.New(msg), http.StatusInternalServerError)
return
}
_, varexists := os.LookupEnv("PLATFORM")
if !varexists {
err = r.createDeleteIngress("create", installNs)
if err != nil {
msg := fmt.Sprintf("error creating webhook due to error creating ingress. Error was: %s", err)
logging.Log.Errorf("%s", msg)
logging.Log.Debugf("Deleting eventlistener as failed creating Ingress")
err2 := r.TriggersClient.TektonV1alpha1().EventListeners(installNs).Delete(eventListenerName, &metav1.DeleteOptions{})
if err2 != nil {
updatedMsg := fmt.Sprintf("error creating webhook due to error creating ingress. Also failed to cleanup and delete eventlistener. Errors were: %s and %s", err, err2)
RespondError(response, errors.New(updatedMsg), http.StatusInternalServerError)
return
}
RespondError(response, errors.New(msg), http.StatusInternalServerError)
return
} else {
logging.Log.Debug("ingress creation succeeded")
}
} else {
if err := r.createOpenshiftRoute(routeName); err != nil {
logging.Log.Debug("Failed to create Route, deleting EventListener...")
err2 := r.TriggersClient.TektonV1alpha1().EventListeners(installNs).Delete(eventListenerName, &metav1.DeleteOptions{})
if err2 != nil {
updatedMsg := fmt.Sprintf("Error creating webhook due to error creating route. Also failed to cleanup and delete eventlistener. Errors were: %s and %s", err, err2)
RespondError(response, errors.New(updatedMsg), http.StatusInternalServerError)
return
}
RespondError(response, err, http.StatusInternalServerError)
return
}
}
}
if len(hooks) == 0 {
// Give the eventlistener a chance to be up and running or webhook ping
// will get a 503 and might confuse people (although resend will work)
for i := 0; i < 30; i = i + 1 {
a, _ := r.K8sClient.Apps().Deployments(installNs).Get(routeName, metav1.GetOptions{})
replicas := a.Status.ReadyReplicas
if replicas > 0 {
break
}
time.Sleep(1 * time.Second)
}
// Create webhook
err = r.AddWebhook(webhook, gitOwner, gitRepo)
if err != nil {
err2 := r.deleteFromEventListener(webhook.Name+"-"+webhook.Namespace, installNs, monitorTriggerName, webhook.GitRepositoryURL)
if err2 != nil {
updatedMsg := fmt.Sprintf("error creating webhook. Also failed to cleanup and delete entry from eventlistener. Errors were: %s and %s", err, err2)
RespondError(response, errors.New(updatedMsg), http.StatusInternalServerError)
return
}
RespondError(response, err, http.StatusInternalServerError)
return
}
logging.Log.Debug("webhook creation succeeded")
} else {
logging.Log.Debugf("webhook already exists for repository %s - not creating new hook in GitHub", sanitisedURL)
}
response.WriteHeader(http.StatusCreated)
}
func (r Resource) createDeleteIngress(mode, installNS string) error {
if mode == "create" {
// Unlike webhook creation, the ingress does not need a protocol specified
callback := strings.TrimPrefix(r.Defaults.CallbackURL, "http://")
callback = strings.TrimPrefix(callback, "https://")
ingress := &v1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "el-" + eventListenerName,
Namespace: installNS,
},
Spec: v1beta1.IngressSpec{
Rules: []v1beta1.IngressRule{
{
Host: callback,
IngressRuleValue: v1beta1.IngressRuleValue{
HTTP: &v1beta1.HTTPIngressRuleValue{
Paths: []v1beta1.HTTPIngressPath{
{
Backend: v1beta1.IngressBackend{
ServiceName: "el-" + eventListenerName,
ServicePort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8080,
},
},
},
},
},
},
},
},
},
}
ingress, err := r.K8sClient.ExtensionsV1beta1().Ingresses(installNS).Create(ingress)
if err != nil {
return err
}
logging.Log.Debug("Ingress has been created")
return nil
} else if mode == "delete" {
err := r.K8sClient.ExtensionsV1beta1().Ingresses(installNS).Delete("el-"+eventListenerName, &metav1.DeleteOptions{})
if err != nil {
return err
}
logging.Log.Debug("Ingress has been deleted")
return nil
} else {
logging.Log.Debug("Wrong mode")
return errors.New("Wrong mode for createDeleteIngress")
}
}
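// exampleIngressLifecycle is a hedged usage sketch added for illustration (not part
// of the original extension): the "create" and "delete" modes of createDeleteIngress
// are expected to be paired with the EventListener's lifecycle when not on OpenShift.
func exampleIngressLifecycle(r Resource, installNS string) error {
	// Expose the eventlistener service while webhooks exist...
	if err := r.createDeleteIngress("create", installNS); err != nil {
		return err
	}
	// ...and tear the ingress down again once the last webhook is removed.
	return r.createDeleteIngress("delete", installNS)
}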
// deleteWebhook removes the webhook's triggers from the EventListener and, when no other pipelines use the repository, removes the webhook itself
func (r Resource) deleteWebhook(request *restful.Request, response *restful.Response) {
modifyingEventListenerLock.Lock()
defer modifyingEventListenerLock.Unlock()
logging.Log.Debug("In deleteWebhook")
name := request.PathParameter("name")
repo := request.QueryParameter("repository")
namespace := request.QueryParameter("namespace")
deletePipelineRuns := request.QueryParameter("deletepipelineruns")
var toDeletePipelineRuns = false
var err error
if deletePipelineRuns != "" {
toDeletePipelineRuns, err = strconv.ParseBool(deletePipelineRuns)
if err != nil {
theError := errors.New("bad request information provided, cannot handle deletepipelineruns query (should be set to true or not provided)")
logging.Log.Error(theError)
RespondError(response, theError, http.StatusInternalServerError)
return
}
}
if namespace == "" || repo == "" {
theError := errors.New("bad request information provided, a namespace and a repository must be specified as query parameters")
logging.Log.Error(theError)
RespondError(response, theError, http.StatusBadRequest)
return
}
logging.Log.Debugf("in deleteWebhook, name: %s, repo: %s, delete pipeline runs: %s", name, repo, deletePipelineRuns)
webhooks, err := r.getHooksForRepo(repo)
if err != nil {
RespondError(response, err, http.StatusNotFound)
return
}
logging.Log.Debugf("Found %d webhooks/pipelines registered against repo %s", len(webhooks), repo)
if len(webhooks) < 1 {
err := fmt.Errorf("no webhook found for repo %s", repo)
logging.Log.Error(err)
RespondError(response, err, http.StatusBadRequest)
return
}
	gitServer, gitOwner, gitRepo, err := getGitValues(repo)
	if err != nil {
		logging.Log.Errorf("error parsing git repository URL %s in getGitValues(): %s", repo, err)
		RespondError(response, errors.New("error parsing repository URL, check pod logs for more details"), http.StatusInternalServerError)
		return
	}
	// A single monitor trigger serves all triggers on a repo, so the monitor name is derived from the repository URL with the protocol stripped
monitorTriggerName := strings.TrimPrefix(gitServer+"/"+gitOwner+"/"+gitRepo, "http://")
monitorTriggerName = strings.TrimPrefix(monitorTriggerName, "https://")
found := false
for _, hook := range webhooks {
if hook.Name == name && hook.Namespace == namespace {
found = true
if len(webhooks) == 1 {
logging.Log.Debug("No other pipelines triggered by this GitHub webhook, deleting webhook")
// Delete webhook
err := r.RemoveWebhook(hook, gitOwner, gitRepo)
if err != nil {
RespondError(response, err, http.StatusInternalServerError)
return
}
logging.Log.Debug("Webhook deletion succeeded")
}
if toDeletePipelineRuns {
r.deletePipelineRuns(repo, namespace, hook.Pipeline)
}
eventListenerEntryPrefix := name + "-" + namespace
err = r.deleteFromEventListener(eventListenerEntryPrefix, r.Defaults.Namespace, monitorTriggerName, repo)
if err != nil {
logging.Log.Error(err)
theError := errors.New("error deleting webhook from eventlistener.")
RespondError(response, theError, http.StatusInternalServerError)
return
}
			response.WriteHeader(http.StatusNoContent)
}
}
if !found {
err := fmt.Errorf("no webhook found for repo %s with name %s associated with namespace %s", repo, name, namespace)
logging.Log.Error(err)
RespondError(response, err, http.StatusNotFound)
return
}
}
func (r Resource) deleteFromEventListener(name, installNS, monitorTriggerName, repoOnParams string) error {
logging.Log.Debugf("Deleting triggers for %s from the eventlistener", name)
el, err := r.TriggersClient.TektonV1alpha1().EventListeners(installNS).Get(eventListenerName, metav1.GetOptions{})
if err != nil {
return err
}
toRemove := []string{name + "-push-event", name + "-pullrequest-event"}
newTriggers := []v1alpha1.EventListenerTrigger{}
currentTriggers := el.Spec.Triggers
monitorTrigger := v1alpha1.EventListenerTrigger{}
triggersOnRepo := 0
triggersDeleted := 0
for _, t := range currentTriggers {
if t.Name == monitorTriggerName {
monitorTrigger = t
} else {
interceptorParams := t.Interceptor.Header
for _, p := range interceptorParams {
if p.Name == "Wext-Repository-Url" && p.Value.StringVal == repoOnParams {
triggersOnRepo++
}
}
found := false
for _, triggerName := range toRemove {
if triggerName == t.Name {
triggersDeleted++
found = true
break
}
}
if !found {
newTriggers = append(newTriggers, t)
}
}
}
if triggersOnRepo > triggersDeleted {
newTriggers = append(newTriggers, monitorTrigger)
}
if len(newTriggers) == 0 {
err = r.TriggersClient.TektonV1alpha1().EventListeners(installNS).Delete(el.GetName(), &metav1.DeleteOptions{})
if err != nil {
return err
}
_, varExists := os.LookupEnv("PLATFORM")
if !varExists {
err = r.createDeleteIngress("delete", installNS)
if err != nil {
logging.Log.Errorf("error deleting ingress: %s", err)
return err
} else {
logging.Log.Debug("Ingress deleted")
return nil
}
} else {
if err := r.deleteOpenshiftRoute(routeName); err != nil {
msg := fmt.Sprintf("error deleting webhook due to error deleting route. Error was: %s", err)
logging.Log.Errorf("%s", msg)
return err
}
logging.Log.Debug("route deletion succeeded")
}
} else {
el.Spec.Triggers = newTriggers
_, err = r.TriggersClient.TektonV1alpha1().EventListeners(installNS).Update(el)
if err != nil {
logging.Log.Errorf("error updating eventlistener: %s", err)
return err
}
}
return err
}
func (r Resource) getAllWebhooks(request *restful.Request, response *restful.Response) {
logging.Log.Debugf("Get all webhooks")
webhooks, err := r.getWebhooksFromEventListener()
if err != nil {
logging.Log.Errorf("error trying to get webhooks: %s.", err.Error())
RespondError(response, err, http.StatusInternalServerError)
return
}
response.WriteEntity(webhooks)
}
func (r Resource) getHooksForRepo(gitURL string) ([]webhook, error) {
hooksForRepo := []webhook{}
allHooks, err := r.getWebhooksFromEventListener()
if err != nil {
return nil, err
}
for _, hook := range allHooks {
if hook.GitRepositoryURL == gitURL {
hooksForRepo = append(hooksForRepo, hook)
}
}
return hooksForRepo, nil
}
func (r Resource) getWebhooksFromEventListener() ([]webhook, error) {
logging.Log.Debugf("Getting webhooks from eventlistener")
el, err := r.TriggersClient.TektonV1alpha1().EventListeners(r.Defaults.Namespace).Get(eventListenerName, metav1.GetOptions{})
if err != nil {
if strings.Contains(err.Error(), "not found") {
return []webhook{}, nil
}
return nil, err
}
hooks := []webhook{}
var hook webhook
for _, trigger := range el.Spec.Triggers {
checkHook := false
if strings.HasSuffix(trigger.Name, "-push-event") {
hook = getHookFromTrigger(trigger, "-push-event")
checkHook = true
} else if strings.HasSuffix(trigger.Name, "-pullrequest-event") {
hook = getHookFromTrigger(trigger, "-pullrequest-event")
checkHook = true
}
if checkHook && !containedInArray(hooks, hook) {
hooks = append(hooks, hook)
}
}
return hooks, nil
}
func getHookFromTrigger(t v1alpha1.EventListenerTrigger, suffix string) webhook {
var releaseName, namespace, serviceaccount, pulltask, dockerreg, helmsecret, repo, gitSecret string
for _, param := range t.Params {
switch param.Name {
case "webhooks-tekton-release-name":
releaseName = param.Value.StringVal
case "webhooks-tekton-target-namespace":
namespace = param.Value.StringVal
case "webhooks-tekton-service-account":
serviceaccount = param.Value.StringVal
case "webhooks-tekton-pull-task":
pulltask = param.Value.StringVal
case "webhooks-tekton-docker-registry":
dockerreg = param.Value.StringVal
case "webhooks-tekton-helm-secret":
helmsecret = param.Value.StringVal
}
}
for _, header := range t.Interceptor.Header {
switch header.Name {
case "Wext-Repository-Url":
repo = header.Value.StringVal
case "Wext-Secret-Name":
gitSecret = header.Value.StringVal
}
}
triggerAsHook := webhook{
Name: strings.TrimSuffix(t.Name, "-"+namespace+suffix),
Namespace: namespace,
Pipeline: strings.TrimSuffix(t.Template.Name, "-template"),
GitRepositoryURL: repo,
HelmSecret: helmsecret,
PullTask: pulltask,
DockerRegistry: dockerreg,
ServiceAccount: serviceaccount,
ReleaseName: releaseName,
AccessTokenRef: gitSecret,
}
return triggerAsHook
}
func containedInArray(array []webhook, hook webhook) bool {
for _, item := range array {
if item == hook {
return true
}
}
return false
}
func (r Resource) deletePipelineRuns(gitRepoURL, namespace, pipeline string) error {
logging.Log.Debugf("Looking for PipelineRuns in namespace %s with repository URL %s for pipeline %s", namespace, gitRepoURL, pipeline)
allPipelineRuns, err := r.TektonClient.TektonV1alpha1().PipelineRuns(namespace).List(metav1.ListOptions{})
if err != nil {
logging.Log.Errorf("Unable to retrieve PipelineRuns in the namespace %s! Error: %s", namespace, err.Error())
return err
}
found := false
for _, pipelineRun := range allPipelineRuns.Items {
if pipelineRun.Spec.PipelineRef.Name == pipeline {
labels := pipelineRun.GetLabels()
serverURL := labels["gitServer"]
orgName := labels["gitOrg"]
repoName := labels["gitRepo"]
foundRepoURL := fmt.Sprintf("https://%s/%s/%s", serverURL, orgName, repoName)
gitRepoURL = strings.ToLower(strings.TrimSuffix(gitRepoURL, ".git"))
foundRepoURL = strings.ToLower(strings.TrimSuffix(foundRepoURL, ".git"))
if foundRepoURL == gitRepoURL {
found = true
err := r.TektonClient.TektonV1alpha1().PipelineRuns(namespace).Delete(pipelineRun.Name, &metav1.DeleteOptions{})
if err != nil {
logging.Log.Errorf("failed to delete %s, error: %s", pipelineRun.Name, err.Error())
return err
}
logging.Log.Infof("Deleted PipelineRun %s", pipelineRun.Name)
}
}
}
if !found {
logging.Log.Infof("No matching PipelineRuns found")
}
return nil
}
func (r Resource) getDefaults(request *restful.Request, response *restful.Response) {
logging.Log.Debugf("getDefaults returning: %v", r.Defaults)
response.WriteEntity(r.Defaults)
}
// RespondError ...
func RespondError(response *restful.Response, err error, statusCode int) {
logging.Log.Errorf("Error for RespondError: %s.", err.Error())
logging.Log.Errorf("Response is %v.", *response)
response.AddHeader("Content-Type", "text/plain")
response.WriteError(statusCode, err)
}
// RespondErrorMessage ...
func RespondErrorMessage(response *restful.Response, message string, statusCode int) {
logging.Log.Errorf("Message for RespondErrorMessage: %s.", message)
response.AddHeader("Content-Type", "text/plain")
response.WriteErrorString(statusCode, message)
}
// RespondErrorAndMessage ...
func RespondErrorAndMessage(response *restful.Response, err error, message string, statusCode int) {
logging.Log.Errorf("Error for RespondErrorAndMessage: %s.", err.Error())
logging.Log.Errorf("Message for RespondErrorAndMesage: %s.", message)
response.AddHeader("Content-Type", "text/plain")
response.WriteErrorString(statusCode, message)
}
// RegisterExtensionWebService registers the webhook webservice
func (r Resource) RegisterExtensionWebService(container *restful.Container) {
ws := new(restful.WebService)
ws.
Path("/webhooks").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)
ws.Route(ws.POST("/").To(r.createWebhook))
ws.Route(ws.GET("/").To(r.getAllWebhooks))
ws.Route(ws.GET("/defaults").To(r.getDefaults))
ws.Route(ws.DELETE("/{name}").To(r.deleteWebhook))
ws.Route(ws.POST("/credentials").To(r.createCredential))
ws.Route(ws.GET("/credentials").To(r.getAllCredentials))
ws.Route(ws.DELETE("/credentials/{name}").To(r.deleteCredential))
container.Add(ws)
}
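// exampleRegisterRoutes is a hedged sketch added for illustration (not part of the
// original extension): it shows how the webhook webservice above could be wired into
// a go-restful container; construction of the Resource is assumed to happen elsewhere.
func exampleRegisterRoutes(r Resource) *restful.Container {
	container := restful.NewContainer()
	// Exposes the POST/GET/DELETE /webhooks routes and the /webhooks/credentials routes.
	r.RegisterExtensionWebService(container)
	return container
}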
// RegisterWeb registers extension web bundle on the container
func (r Resource) RegisterWeb(container *restful.Container) {
var handler http.Handler
webResourcesDir := os.Getenv("WEB_RESOURCES_DIR")
koDataPath := os.Getenv("KO_DATA_PATH")
_, err := os.Stat(webResourcesDir)
if err != nil {
if os.IsNotExist(err) {
if koDataPath != "" {
logging.Log.Warnf("WEB_RESOURCES_DIR %s not found, serving static content from KO_DATA_PATH instead.", webResourcesDir)
handler = http.FileServer(http.Dir(koDataPath))
} else {
logging.Log.Errorf("WEB_RESOURCES_DIR %s not found and KO_DATA_PATH not found, static resource (UI) problems to be expected.", webResourcesDir)
}
} else {
logging.Log.Errorf("error returned while checking for WEB_RESOURCES_DIR %s", webResourcesDir)
}
} else {
logging.Log.Infof("Serving static files from WEB_RESOURCES_DIR: %s", webResourcesDir)
handler = http.FileServer(http.Dir(webResourcesDir))
}
container.Handle("/web/", http.StripPrefix("/web/", handler))
}
// createOpenshiftRoute attempts to create an Openshift Route on the service.
// The Route has the same name as the service
func (r Resource) createOpenshiftRoute(serviceName string) error {
annotations := make(map[string]string)
annotations["haproxy.router.openshift.io/timeout"] = "2m"
route := &routesv1.Route{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Annotations: annotations,
},
Spec: routesv1.RouteSpec{
To: routesv1.RouteTargetReference{
Kind: "Service",
Name: serviceName,
},
},
}
_, err := r.RoutesClient.RouteV1().Routes(r.Defaults.Namespace).Create(route)
return err
}
// deleteOpenshiftRoute attempts to delete an Openshift Route
func (r Resource) deleteOpenshiftRoute(routeName string) error {
return r.RoutesClient.RouteV1().Routes(r.Defaults.Namespace).Delete(routeName, &metav1.DeleteOptions{})
}
| [
"\"PLATFORM\"",
"\"WEB_RESOURCES_DIR\"",
"\"KO_DATA_PATH\""
]
| []
| [
"PLATFORM",
"WEB_RESOURCES_DIR",
"KO_DATA_PATH"
]
| [] | ["PLATFORM", "WEB_RESOURCES_DIR", "KO_DATA_PATH"] | go | 3 | 0 | |
yolov5/utils/wandb_logging/wandb_utils.py | """Utilities and tools for tracking runs with Weights & Biases."""
import logging
import os
import sys
from contextlib import contextmanager
from pathlib import Path
import yaml
from tqdm import tqdm
sys.path.append(str(Path(__file__).parent.parent.parent))  # add yolov5/ root to path so the utils imports below resolve
from utils.datasets import LoadImagesAndLabels
from utils.datasets import img2label_paths
from utils.general import colorstr, check_dataset, check_file
try:
import wandb
from wandb import init, finish
except ImportError:
wandb = None
RANK = int(os.getenv('RANK', -1))
WANDB_ARTIFACT_PREFIX = 'wandb-artifact://'
def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX):
return from_string[len(prefix):]
def check_wandb_config_file(data_config_file):
wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path
if Path(wandb_config).is_file():
return wandb_config
return data_config_file
def get_run_info(run_path):
run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
run_id = run_path.stem
project = run_path.parent.stem
entity = run_path.parent.parent.stem
model_artifact_name = 'run_' + run_id + '_model'
return entity, project, run_id, model_artifact_name
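def _example_get_run_info():
    # Hedged example added for illustration (not part of the original module); the
    # entity/project/run id below are made up. It shows how a resume string of the
    # form wandb-artifact://<entity>/<project>/<run_id> is decomposed.
    entity, project, run_id, model_artifact_name = get_run_info('wandb-artifact://my-entity/my-project/abc123')
    assert (entity, project, run_id) == ('my-entity', 'my-project', 'abc123')
    assert model_artifact_name == 'run_abc123_model'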
def check_wandb_resume(opt):
    if RANK not in [-1, 0]:
        process_wandb_config_ddp_mode(opt)
if isinstance(opt.resume, str):
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
if RANK not in [-1, 0]: # For resuming DDP runs
entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
api = wandb.Api()
artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest')
modeldir = artifact.download()
opt.weights = str(Path(modeldir) / "last.pt")
return True
return None
def process_wandb_config_ddp_mode(opt):
with open(check_file(opt.data)) as f:
data_dict = yaml.safe_load(f) # data dict
train_dir, val_dir = None, None
if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
api = wandb.Api()
train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias)
train_dir = train_artifact.download()
train_path = Path(train_dir) / 'data/images/'
data_dict['train'] = str(train_path)
if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX):
api = wandb.Api()
val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias)
val_dir = val_artifact.download()
val_path = Path(val_dir) / 'data/images/'
data_dict['val'] = str(val_path)
if train_dir or val_dir:
ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml')
with open(ddp_data_path, 'w') as f:
yaml.safe_dump(data_dict, f)
opt.data = ddp_data_path
class WandbLogger():
"""Log training runs, datasets, models, and predictions to Weights & Biases.
This logger sends information to W&B at wandb.ai. By default, this information
includes hyperparameters, system configuration and metrics, model metrics,
and basic data metrics and analyses.
By providing additional command line arguments to train.py, datasets,
models and predictions can also be logged.
For more on how this logger is used, see the Weights & Biases documentation:
https://docs.wandb.com/guides/integrations/yolov5
"""
def __init__(self, opt, name, run_id, data_dict, job_type='Training'):
# Pre-training routine --
self.job_type = job_type
self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run
self.val_artifact, self.train_artifact = None, None
self.train_artifact_path, self.val_artifact_path = None, None
self.result_artifact = None
self.val_table, self.result_table = None, None
self.data_dict = data_dict
self.bbox_media_panel_images = []
self.val_table_path_map = None
self.max_imgs_to_log = 16
# It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call
if isinstance(opt.resume, str): # checks resume from artifact
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
entity, project, run_id, model_artifact_name = get_run_info(opt.resume)
model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name
assert wandb, 'install wandb to resume wandb runs'
                # Resume wandb-artifact:// runs here; workaround for not overwriting wandb.config
self.wandb_run = wandb.init(id=run_id,
project=project,
entity=entity,
resume='allow',
allow_val_change=True)
opt.resume = model_artifact_name
elif self.wandb:
self.wandb_run = wandb.init(config=opt,
resume="allow",
project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem,
entity=opt.entity,
name=name,
job_type=job_type,
id=run_id,
allow_val_change=True) if not wandb.run else wandb.run
if self.wandb_run:
if self.job_type == 'Training':
if not opt.resume:
wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict
# Info useful for resuming from artifacts
self.wandb_run.config.update({'opt': vars(opt), 'data_dict': wandb_data_dict}, allow_val_change=True)
self.data_dict = self.setup_training(opt, data_dict)
if self.job_type == 'Dataset Creation':
self.data_dict = self.check_and_upload_dataset(opt)
else:
prefix = colorstr('wandb: ')
print(f"{prefix}Install Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)")
def check_and_upload_dataset(self, opt):
assert wandb, 'Install wandb to upload dataset'
config_path = self.log_dataset_artifact(check_file(opt.data),
opt.single_cls,
'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
print("Created dataset config file ", config_path)
with open(config_path) as f:
wandb_data_dict = yaml.safe_load(f)
return wandb_data_dict
def setup_training(self, opt, data_dict):
self.log_dict, self.current_epoch = {}, 0
self.bbox_interval = opt.bbox_interval
if isinstance(opt.resume, str):
modeldir, _ = self.download_model_artifact(opt)
if modeldir:
self.weights = Path(modeldir) / "last.pt"
config = self.wandb_run.config
opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
config.opt['hyp']
data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume
if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download
self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
opt.artifact_alias)
self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'),
opt.artifact_alias)
if self.train_artifact_path is not None:
train_path = Path(self.train_artifact_path) / 'data/images/'
data_dict['train'] = str(train_path)
if self.val_artifact_path is not None:
val_path = Path(self.val_artifact_path) / 'data/images/'
data_dict['val'] = str(val_path)
if self.val_artifact is not None:
self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
self.val_table = self.val_artifact.get("val")
if self.val_table_path_map is None:
self.map_val_table_path()
wandb.log({"validation dataset": self.val_table})
if opt.bbox_interval == -1:
self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1
return data_dict
def download_dataset_artifact(self, path, alias):
if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX):
artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias)
dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/"))
assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'"
datadir = dataset_artifact.download()
return datadir, dataset_artifact
return None, None
def download_model_artifact(self, opt):
if opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest")
assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist'
modeldir = model_artifact.download()
epochs_trained = model_artifact.metadata.get('epochs_trained')
total_epochs = model_artifact.metadata.get('total_epochs')
is_finished = total_epochs is None
assert not is_finished, 'training is finished, can only resume incomplete runs.'
return modeldir, model_artifact
return None, None
def log_model(self, path, opt, epoch, fitness_score, best_model=False):
model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={
'original_url': str(path),
'epochs_trained': epoch + 1,
'save period': opt.save_period,
'project': opt.project,
'total_epochs': opt.epochs,
'fitness_score': fitness_score
})
model_artifact.add_file(str(path / 'last.pt'), name='last.pt')
wandb.log_artifact(model_artifact,
aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else ''])
print("Saving model artifact on epoch ", epoch + 1)
def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
with open(data_file) as f:
data = yaml.safe_load(f) # data dict
check_dataset(data)
nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
names = {k: v for k, v in enumerate(names)} # to index dictionary
self.train_artifact = self.create_dataset_table(LoadImagesAndLabels(
data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None
self.val_artifact = self.create_dataset_table(LoadImagesAndLabels(
data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None
if data.get('train'):
data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train')
if data.get('val'):
data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val')
path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path
data.pop('download', None)
data.pop('path', None)
with open(path, 'w') as f:
yaml.safe_dump(data, f)
if self.job_type == 'Training': # builds correct artifact pipeline graph
self.wandb_run.use_artifact(self.val_artifact)
self.wandb_run.use_artifact(self.train_artifact)
self.val_artifact.wait()
self.val_table = self.val_artifact.get('val')
self.map_val_table_path()
else:
self.wandb_run.log_artifact(self.train_artifact)
self.wandb_run.log_artifact(self.val_artifact)
return path
def map_val_table_path(self):
self.val_table_path_map = {}
print("Mapping dataset")
for i, data in enumerate(tqdm(self.val_table.data)):
self.val_table_path_map[data[3]] = data[0]
def create_dataset_table(self, dataset, class_to_id, name='dataset'):
        # TODO: Explore multiprocessing to split this loop in parallel; this is essential for speeding up the logging
artifact = wandb.Artifact(name=name, type="dataset")
img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
img_files = tqdm(dataset.img_files) if not img_files else img_files
for img_file in img_files:
if Path(img_file).is_dir():
artifact.add_dir(img_file, name='data/images')
labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
artifact.add_dir(labels_path, name='data/labels')
else:
artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
label_file = Path(img2label_paths([img_file])[0])
                if label_file.exists():
                    artifact.add_file(str(label_file), name='data/labels/' + label_file.name)
table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
box_data, img_classes = [], {}
for cls, *xywh in labels[:, 1:].tolist():
cls = int(cls)
box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
"class_id": cls,
"box_caption": "%s" % (class_to_id[cls])})
img_classes[cls] = class_to_id[cls]
boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space
table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
Path(paths).name)
artifact.add(table, name)
return artifact
def log_training_progress(self, predn, path, names):
class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
box_data = []
total_conf = 0
for *xyxy, conf, cls in predn.tolist():
if conf >= 0.25:
box_data.append(
{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"})
total_conf = total_conf + conf
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
id = self.val_table_path_map[Path(path).name]
self.result_table.add_data(self.current_epoch,
id,
self.val_table.data[id][1],
wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
total_conf / max(1, len(box_data))
)
def val_one_image(self, pred, predn, path, names, im):
if self.val_table and self.result_table: # Log Table if Val dataset is uploaded as artifact
self.log_training_progress(predn, path, names)
        else:  # Default to bbox media panel if Val artifact not found
if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
if self.current_epoch % self.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))
def log(self, log_dict):
if self.wandb_run:
for key, value in log_dict.items():
self.log_dict[key] = value
def end_epoch(self, best_result=False):
if self.wandb_run:
with all_logging_disabled():
if self.bbox_media_panel_images:
self.log_dict["Bounding Box Debugger/Images"] = self.bbox_media_panel_images
wandb.log(self.log_dict)
self.log_dict = {}
self.bbox_media_panel_images = []
if self.result_artifact:
self.result_artifact.add(self.result_table, 'result')
wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch),
('best' if best_result else '')])
wandb.log({"evaluation": self.result_table})
self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"])
self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation")
def finish_run(self):
if self.wandb_run:
if self.log_dict:
with all_logging_disabled():
wandb.log(self.log_dict)
wandb.run.finish()
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
""" source - https://gist.github.com/simon-weber/7853144
A context manager that will prevent any logging messages triggered during the body from being processed.
:param highest_level: the maximum logging level in use.
This would only need to be changed if a custom level greater than CRITICAL is defined.
"""
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
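def _example_quiet_wandb_log(metrics):
    # Hedged usage sketch added for illustration (not part of the original module):
    # emit a W&B log call without letting any stdlib logging noise through, mirroring
    # how end_epoch() and finish_run() wrap wandb.log with all_logging_disabled above.
    if wandb:
        with all_logging_disabled():
            wandb.log(metrics)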
| []
| []
| [
"RANK"
]
| [] | ["RANK"] | python | 1 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.1.15.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&0i@-7np=^#vxs!elg4_1axf(98vv4x0tsypca^_o!%v1g5ct3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
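# Example environment for the database settings above (illustrative values only, not
# part of the original project), e.g. when pointing at a local Postgres container:
#   DB_HOST=db DB_NAME=app DB_USER=postgres DB_PASS=supersecretpassword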
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User'
| []
| []
| [
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
]
| [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
wire/msgblock.go | // Copyright (c) 2013-2016 The essentiaone developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package wire
import (
"bytes"
"fmt"
"io"
"github.com/essentiaone/divid/chaincfg/chainhash"
)
// defaultTransactionAlloc is the default size used for the backing array
// for transactions. The transaction array will dynamically grow as needed, but
// this figure is intended to provide enough space for the number of
// transactions in the vast majority of blocks without needing to grow the
// backing array multiple times.
const defaultTransactionAlloc = 2048
// MaxBlocksPerMsg is the maximum number of blocks allowed per message.
const MaxBlocksPerMsg = 500
// MaxBlockPayload is the maximum bytes a block message can be in bytes.
// After Segregated Witness, the max block payload has been raised to 4MB.
const MaxBlockPayload = 4000000
// maxTxPerBlock is the maximum number of transactions that could
// possibly fit into a block.
const maxTxPerBlock = (MaxBlockPayload / minTxPayload) + 1
// TxLoc holds locator data for the offset and length of where a transaction is
// located within a MsgBlock data buffer.
type TxLoc struct {
TxStart int
TxLen int
}
// MsgBlock implements the Message interface and represents a bitcoin
// block message. It is used to deliver block and transaction information in
// response to a getdata message (MsgGetData) for a given block hash.
type MsgBlock struct {
Header BlockHeader
Transactions []*MsgTx
}
// AddTransaction adds a transaction to the message.
func (msg *MsgBlock) AddTransaction(tx *MsgTx) error {
msg.Transactions = append(msg.Transactions, tx)
return nil
}
// ClearTransactions removes all transactions from the message.
func (msg *MsgBlock) ClearTransactions() {
msg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc)
}
// BtcDecode decodes r using the bitcoin protocol encoding into the receiver.
// This is part of the Message interface implementation.
// See Deserialize for decoding blocks stored to disk, such as in a database, as
// opposed to decoding blocks from the wire.
func (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32, enc MessageEncoding) error {
err := readBlockHeader(r, pver, &msg.Header)
if err != nil {
return err
}
txCount, err := ReadVarInt(r, pver)
if err != nil {
return err
}
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > maxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, maxTxPerBlock)
return messageError("MsgBlock.BtcDecode", str)
}
msg.Transactions = make([]*MsgTx, 0, txCount)
for i := uint64(0); i < txCount; i++ {
tx := MsgTx{}
err := tx.BtcDecode(r, pver, enc)
if err != nil {
return err
}
msg.Transactions = append(msg.Transactions, &tx)
}
return nil
}
// Deserialize decodes a block from r into the receiver using a format that is
// suitable for long-term storage such as a database while respecting the
// Version field in the block. This function differs from BtcDecode in that
// BtcDecode decodes from the bitcoin wire protocol as it was sent across the
// network. The wire encoding can technically differ depending on the protocol
// version and doesn't even really need to match the format of a stored block at
// all. As of the time this comment was written, the encoded block is the same
// in both instances, but there is a distinct difference and separating the two
// allows the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Deserialize(r io.Reader) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of BtcDecode.
//
// Passing an encoding type of WitnessEncoding to BtcEncode for the
// MessageEncoding parameter indicates that the transactions within the
// block are expected to be serialized according to the new
// serialization structure defined in BIP0141.
return msg.BtcDecode(r, 0, WitnessEncoding)
}
// DeserializeNoWitness decodes a block from r into the receiver similar to
// Deserialize, however DeserializeWitness strips all (if any) witness data
// from the transactions within the block before encoding them.
func (msg *MsgBlock) DeserializeNoWitness(r io.Reader) error {
return msg.BtcDecode(r, 0, BaseEncoding)
}
// DeserializeTxLoc decodes r in the same manner Deserialize does, but it takes
// a byte buffer instead of a generic reader and returns a slice containing the
// start and length of each transaction within the raw data that is being
// deserialized.
func (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {
fullLen := r.Len()
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of existing wire protocol functions.
err := readBlockHeader(r, 0, &msg.Header)
if err != nil {
return nil, err
}
txCount, err := ReadVarInt(r, 0)
if err != nil {
return nil, err
}
// Prevent more transactions than could possibly fit into a block.
// It would be possible to cause memory exhaustion and panics without
// a sane upper bound on this count.
if txCount > maxTxPerBlock {
str := fmt.Sprintf("too many transactions to fit into a block "+
"[count %d, max %d]", txCount, maxTxPerBlock)
return nil, messageError("MsgBlock.DeserializeTxLoc", str)
}
// Deserialize each transaction while keeping track of its location
// within the byte stream.
msg.Transactions = make([]*MsgTx, 0, txCount)
txLocs := make([]TxLoc, txCount)
for i := uint64(0); i < txCount; i++ {
txLocs[i].TxStart = fullLen - r.Len()
tx := MsgTx{}
err := tx.Deserialize(r)
if err != nil {
return nil, err
}
msg.Transactions = append(msg.Transactions, &tx)
txLocs[i].TxLen = (fullLen - r.Len()) - txLocs[i].TxStart
}
return txLocs, nil
}
// BtcEncode encodes the receiver to w using the bitcoin protocol encoding.
// This is part of the Message interface implementation.
// See Serialize for encoding blocks to be stored to disk, such as in a
// database, as opposed to encoding blocks for the wire.
func (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32, enc MessageEncoding) error {
err := writeBlockHeader(w, pver, &msg.Header)
if err != nil {
return err
}
err = WriteVarInt(w, pver, uint64(len(msg.Transactions)))
if err != nil {
return err
}
for _, tx := range msg.Transactions {
err = tx.BtcEncode(w, pver, enc)
if err != nil {
return err
}
}
return nil
}
// Serialize encodes the block to w using a format that suitable for long-term
// storage such as a database while respecting the Version field in the block.
// This function differs from BtcEncode in that BtcEncode encodes the block to
// the bitcoin wire protocol in order to be sent across the network. The wire
// encoding can technically differ depending on the protocol version and doesn't
// even really need to match the format of a stored block at all. As of the
// time this comment was written, the encoded block is the same in both
// instances, but there is a distinct difference and separating the two allows
// the API to be flexible enough to deal with changes.
func (msg *MsgBlock) Serialize(w io.Writer) error {
// At the current time, there is no difference between the wire encoding
// at protocol version 0 and the stable long-term storage format. As
// a result, make use of BtcEncode.
//
// Passing WitnessEncoding as the encoding type here indicates that
// each of the transactions should be serialized using the witness
// serialization structure defined in BIP0141.
return msg.BtcEncode(w, 0, WitnessEncoding)
}
// SerializeNoWitness encodes a block to w using an identical format to
// Serialize, with all (if any) witness data stripped from all transactions.
// This method is provided in addition to the regular Serialize, in order to
// allow one to selectively encode transaction witness data to non-upgraded
// peers which are unaware of the new encoding.
func (msg *MsgBlock) SerializeNoWitness(w io.Writer) error {
return msg.BtcEncode(w, 0, BaseEncoding)
}
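// exampleBlockRoundTrip is a hedged sketch added for illustration (not part of the
// original file): it round-trips a block through Serialize and Deserialize, the
// storage-format counterparts of BtcEncode/BtcDecode described above.
func exampleBlockRoundTrip(msg *MsgBlock) (*MsgBlock, error) {
	var buf bytes.Buffer
	if err := msg.Serialize(&buf); err != nil {
		return nil, err
	}
	decoded := &MsgBlock{}
	if err := decoded.Deserialize(&buf); err != nil {
		return nil, err
	}
	return decoded, nil
}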
// SerializeSize returns the number of bytes it would take to serialize the
// block, factoring in any witness data within transaction.
func (msg *MsgBlock) SerializeSize() int {
// Block header bytes + Serialized varint size for the number of
// transactions.
n := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions)))
for _, tx := range msg.Transactions {
n += tx.SerializeSize()
}
return n
}
// SerializeSizeStripped returns the number of bytes it would take to serialize
// the block, excluding any witness data (if any).
func (msg *MsgBlock) SerializeSizeStripped() int {
// Block header bytes + Serialized varint size for the number of
// transactions.
n := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions)))
for _, tx := range msg.Transactions {
n += tx.SerializeSizeStripped()
}
return n
}
// Command returns the protocol command string for the message. This is part
// of the Message interface implementation.
func (msg *MsgBlock) Command() string {
return CmdBlock
}
// MaxPayloadLength returns the maximum length the payload can be for the
// receiver. This is part of the Message interface implementation.
func (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 {
// Block header at 80 bytes + transaction count + max transactions
// which can vary up to the MaxBlockPayload (including the block header
// and transaction count).
return MaxBlockPayload
}
// BlockHash computes the block identifier hash for this block.
func (msg *MsgBlock) BlockHash() chainhash.Hash {
return msg.Header.BlockHash()
}
// TxHashes returns a slice of hashes of all of the transactions in this block.
func (msg *MsgBlock) TxHashes() ([]chainhash.Hash, error) {
hashList := make([]chainhash.Hash, 0, len(msg.Transactions))
for _, tx := range msg.Transactions {
hashList = append(hashList, tx.TxHash())
}
return hashList, nil
}
// NewMsgBlock returns a new bitcoin block message that conforms to the
// Message interface. See MsgBlock for details.
func NewMsgBlock(blockHeader *BlockHeader) *MsgBlock {
return &MsgBlock{
Header: *blockHeader,
Transactions: make([]*MsgTx, 0, defaultTransactionAlloc),
}
}
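// exampleNewBlock is a hedged sketch added for illustration (not part of the original
// file): assemble a block message from an existing header and a set of transactions.
func exampleNewBlock(header *BlockHeader, txs []*MsgTx) *MsgBlock {
	block := NewMsgBlock(header)
	for _, tx := range txs {
		// AddTransaction never fails in the current implementation, so the error is ignored.
		_ = block.AddTransaction(tx)
	}
	return block
}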
| []
| []
| []
| [] | [] | go | null | null | null |
vendor/github.com/containers/storage/store.go | package storage
import (
"encoding/base64"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"time"
// register all of the built-in drivers
_ "github.com/containers/storage/drivers/register"
"github.com/BurntSushi/toml"
drivers "github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/archive"
cfg "github.com/containers/storage/pkg/config"
"github.com/containers/storage/pkg/directory"
"github.com/containers/storage/pkg/homedir"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/stringutils"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
)
var (
// DefaultStoreOptions is a reasonable default set of options.
defaultStoreOptions StoreOptions
stores []*store
storesLock sync.Mutex
)
// ROFileBasedStore wraps up the methods of the various types of file-based
// data stores that we implement which are needed for both read-only and
// read-write files.
type ROFileBasedStore interface {
Locker
// Load reloads the contents of the store from disk. It should be called
// with the lock held.
Load() error
}
// RWFileBasedStore wraps up the methods of various types of file-based data
// stores that we implement using read-write files.
type RWFileBasedStore interface {
// Save saves the contents of the store to disk. It should be called with
// the lock held, and Touch() should be called afterward before releasing the
// lock.
Save() error
}
// FileBasedStore wraps up the common methods of various types of file-based
// data stores that we implement.
type FileBasedStore interface {
ROFileBasedStore
RWFileBasedStore
}
// ROMetadataStore wraps a method for reading metadata associated with an ID.
type ROMetadataStore interface {
// Metadata reads metadata associated with an item with the specified ID.
Metadata(id string) (string, error)
}
// RWMetadataStore wraps a method for setting metadata associated with an ID.
type RWMetadataStore interface {
// SetMetadata updates the metadata associated with the item with the specified ID.
SetMetadata(id, metadata string) error
}
// MetadataStore wraps up methods for getting and setting metadata associated with IDs.
type MetadataStore interface {
ROMetadataStore
RWMetadataStore
}
// An ROBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
type ROBigDataStore interface {
// BigData retrieves a (potentially large) piece of data associated with
// this ID, if it has previously been set.
BigData(id, key string) ([]byte, error)
// BigDataSize retrieves the size of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataSize(id, key string) (int64, error)
// BigDataDigest retrieves the digest of a (potentially large) piece of
// data associated with this ID, if it has previously been set.
BigDataDigest(id, key string) (digest.Digest, error)
// BigDataNames() returns a list of the names of previously-stored pieces of
// data.
BigDataNames(id string) ([]string, error)
}
// A RWImageBigDataStore wraps up how we store big-data associated with images.
type RWImageBigDataStore interface {
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
// Pass github.com/containers/image/manifest.Digest as digestManifest
// to allow ByDigest to find images by their correct digests.
SetBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
}
// A ContainerBigDataStore wraps up how we store big-data associated with containers.
type ContainerBigDataStore interface {
ROBigDataStore
// SetBigData stores a (potentially large) piece of data associated
// with this ID.
SetBigData(id, key string, data []byte) error
}
// A FlaggableStore can have flags set and cleared on items which it manages.
type FlaggableStore interface {
// ClearFlag removes a named flag from an item in the store.
ClearFlag(id string, flag string) error
// SetFlag sets a named flag and its value on an item in the store.
SetFlag(id string, flag string, value interface{}) error
}
// StoreOptions is used for passing initialization options to GetStore(), for
// initializing a Store object and the underlying storage that it controls.
type StoreOptions struct {
// RunRoot is the filesystem path under which we can store run-time
// information, such as the locations of active mount points, that we
// want to lose if the host is rebooted.
RunRoot string `json:"runroot,omitempty"`
// GraphRoot is the filesystem path under which we will store the
// contents of layers, images, and containers.
GraphRoot string `json:"root,omitempty"`
// RootlessStoragePath is the storage path for rootless users
// default $HOME/.local/share/containers/storage
RootlessStoragePath string `toml:"rootless_storage_path"`
// GraphDriverName is the underlying storage driver that we'll be
// using. It only needs to be specified the first time a Store is
// initialized for a given RunRoot and GraphRoot.
GraphDriverName string `json:"driver,omitempty"`
// GraphDriverOptions are driver-specific options.
GraphDriverOptions []string `json:"driver-options,omitempty"`
// UIDMap and GIDMap are used for setting up a container's root filesystem
// for use inside of a user namespace where UID mapping is being used.
UIDMap []idtools.IDMap `json:"uidmap,omitempty"`
GIDMap []idtools.IDMap `json:"gidmap,omitempty"`
// RootAutoNsUser is the user used to pick a subrange when automatically setting
// a user namespace for the root user.
RootAutoNsUser string `json:"root_auto_ns_user,omitempty"`
// AutoNsMinSize is the minimum size for an automatic user namespace.
AutoNsMinSize uint32 `json:"auto_userns_min_size,omitempty"`
// AutoNsMaxSize is the maximum size for an automatic user namespace.
AutoNsMaxSize uint32 `json:"auto_userns_max_size,omitempty"`
}
// Store wraps up the various types of file-based stores that we use into a
// singleton object that initializes and manages them all together.
type Store interface {
// RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve
// settings that were passed to GetStore() when the object was created.
RunRoot() string
GraphRoot() string
GraphDriverName() string
GraphOptions() []string
UIDMap() []idtools.IDMap
GIDMap() []idtools.IDMap
// GraphDriver obtains and returns a handle to the graph Driver object used
// by the Store.
GraphDriver() (drivers.Driver, error)
// CreateLayer creates a new layer in the underlying storage driver,
// optionally having the specified ID (one will be assigned if none is
// specified), with the specified layer (or no layer) as its parent,
// and with optional names. (The writeable flag is ignored.)
CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error)
// PutLayer combines the functions of CreateLayer and ApplyDiff,
// marking the layer for automatic removal if applying the diff fails
// for any reason.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error)
// CreateImage creates a new image, optionally with the specified ID
// (one will be assigned if none is specified), with optional names,
// referring to a specified image, and with optional metadata. An
// image is a record which associates the ID of a layer with a
// additional bookkeeping information which the library stores for the
// convenience of its caller.
CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error)
// CreateContainer creates a new container, optionally with the
// specified ID (one will be assigned if none is specified), with
// optional names, using the specified image's top layer as the basis
// for the container's layer, and assigning the specified ID to that
// layer (one will be created if none is specified). A container is a
// layer which is associated with additional bookkeeping information
// which the library stores for the convenience of its caller.
CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error)
// Metadata retrieves the metadata which is associated with a layer,
// image, or container (whichever the passed-in ID refers to).
Metadata(id string) (string, error)
// SetMetadata updates the metadata which is associated with a layer,
// image, or container (whichever the passed-in ID refers to) to match
// the specified value. The metadata value can be retrieved at any
// time using Metadata, or using Layer, Image, or Container and reading
// the object directly.
SetMetadata(id, metadata string) error
// Exists checks if there is a layer, image, or container which has the
// passed-in ID or name.
Exists(id string) bool
// Status asks for a status report, in the form of key-value pairs,
// from the underlying storage driver. The contents vary from driver
// to driver.
Status() ([][2]string, error)
// Delete removes the layer, image, or container which has the
// passed-in ID or name. Note that no safety checks are performed, so
// this can leave images with references to layers which do not exist,
// and layers with references to parents which no longer exist.
Delete(id string) error
// DeleteLayer attempts to remove the specified layer. If the layer is the
// parent of any other layer, or is referred to by any images, it will return
// an error.
DeleteLayer(id string) error
// DeleteImage removes the specified image if it is not referred to by
// any containers. If its top layer is then no longer referred to by
// any other images and is not the parent of any other layers, its top
// layer will be removed. If that layer's parent is no longer referred
// to by any other images and is not the parent of any other layers,
// then it, too, will be removed. This procedure will be repeated
// until a layer which should not be removed, or the base layer, is
// reached, at which point the list of removed layers is returned. If
// the commit argument is false, the image and layers are not removed,
// but the list of layers which would be removed is still returned.
DeleteImage(id string, commit bool) (layers []string, err error)
// DeleteContainer removes the specified container and its layer. If
// there is no matching container, or if the container exists but its
// layer does not, an error will be returned.
DeleteContainer(id string) error
// Wipe removes all known layers, images, and containers.
Wipe() error
// Mount attempts to mount a layer, image, or container for access, and
// returns the pathname if it succeeds.
// Note if the mountLabel == "", the default label for the container
// will be used.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
Mount(id, mountLabel string) (string, error)
// Unmount attempts to unmount a layer, image, or container, given an ID, a
// name, or a mount path. Returns whether or not the layer is still mounted.
Unmount(id string, force bool) (bool, error)
// Mounted returns number of times the layer has been mounted.
Mounted(id string) (int, error)
// Changes returns a summary of the changes which would need to be made
// to one layer to make its contents the same as a second layer. If
// the first layer is not specified, the second layer's parent is
// assumed. Each Change structure contains a Path relative to the
// layer's root directory, and a Kind which is either ChangeAdd,
// ChangeModify, or ChangeDelete.
Changes(from, to string) ([]archive.Change, error)
// DiffSize returns a count of the size of the tarstream which would
// specify the changes returned by Changes.
DiffSize(from, to string) (int64, error)
// Diff returns the tarstream which would specify the changes returned
// by Changes. If options are passed in, they can override default
// behaviors.
Diff(from, to string, options *DiffOptions) (io.ReadCloser, error)
// ApplyDiff applies a tarstream to a layer. Information about the
// tarstream is cached with the layer. Typically, a layer which is
// populated using a tarstream will be expected to not be modified in
// any other way, either before or after the diff is applied.
//
// Note that we do some of this work in a child process. The calling
// process's main() function needs to import our pkg/reexec package and
// should begin with something like this in order to allow us to
// properly start that child process:
// if reexec.Init() {
// return
// }
ApplyDiff(to string, diff io.Reader) (int64, error)
// LayersByCompressedDigest returns a slice of the layers with the
// specified compressed digest value recorded for them.
LayersByCompressedDigest(d digest.Digest) ([]Layer, error)
// LayersByUncompressedDigest returns a slice of the layers with the
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)
// LayerSize returns a cached approximation of the layer's size, or -1
// if we don't have a value on hand.
LayerSize(id string) (int64, error)
// LayerParentOwners returns the UIDs and GIDs of owners of parents of
// the layer's mountpoint for which the layer's UID and GID maps (if
// any are defined) don't contain corresponding IDs.
LayerParentOwners(id string) ([]int, []int, error)
// Layers returns a list of the currently known layers.
Layers() ([]Layer, error)
// Images returns a list of the currently known images.
Images() ([]Image, error)
// Containers returns a list of the currently known containers.
Containers() ([]Container, error)
// Names returns the list of names for a layer, image, or container.
Names(id string) ([]string, error)
// SetNames changes the list of names for a layer, image, or container.
// Duplicate names are removed from the list automatically.
SetNames(id string, names []string) error
// ListImageBigData retrieves a list of the (possibly large) chunks of
// named data associated with an image.
ListImageBigData(id string) ([]string, error)
// ImageBigData retrieves a (possibly large) chunk of named data
// associated with an image.
ImageBigData(id, key string) ([]byte, error)
// ImageBigDataSize retrieves the size of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataSize(id, key string) (int64, error)
// ImageBigDataDigest retrieves the digest of a (possibly large) chunk
// of named data associated with an image.
ImageBigDataDigest(id, key string) (digest.Digest, error)
// SetImageBigData stores a (possibly large) chunk of named data
// associated with an image. Pass
// github.com/containers/image/manifest.Digest as digestManifest to
// allow ImagesByDigest to find images by their correct digests.
SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error
// ImageSize computes the size of the image's layers and ancillary data.
ImageSize(id string) (int64, error)
// ListContainerBigData retrieves a list of the (possibly large) chunks of
// named data associated with a container.
ListContainerBigData(id string) ([]string, error)
// ContainerBigData retrieves a (possibly large) chunk of named data
// associated with a container.
ContainerBigData(id, key string) ([]byte, error)
// ContainerBigDataSize retrieves the size of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataSize(id, key string) (int64, error)
// ContainerBigDataDigest retrieves the digest of a (possibly large)
// chunk of named data associated with a container.
ContainerBigDataDigest(id, key string) (digest.Digest, error)
// SetContainerBigData stores a (possibly large) chunk of named data
// associated with a container.
SetContainerBigData(id, key string, data []byte) error
// ContainerSize computes the size of the container's layer and ancillary
// data. Warning: this is a potentially expensive operation.
ContainerSize(id string) (int64, error)
// Layer returns a specific layer.
Layer(id string) (*Layer, error)
// Image returns a specific image.
Image(id string) (*Image, error)
// ImagesByTopLayer returns a list of images which reference the specified
// layer as their top layer. They will have different IDs and names
// and may have different metadata, big data items, and flags.
ImagesByTopLayer(id string) ([]*Image, error)
// ImagesByDigest returns a list of images which contain a big data item
// named ImageDigestBigDataKey whose contents have the specified digest.
ImagesByDigest(d digest.Digest) ([]*Image, error)
// Container returns a specific container.
Container(id string) (*Container, error)
// ContainerByLayer returns a specific container based on its layer ID or
// name.
ContainerByLayer(id string) (*Container, error)
// ContainerDirectory returns a path of a directory which the caller
// can use to store data, specific to the container, which the library
// does not directly manage. The directory will be deleted when the
// container is deleted.
ContainerDirectory(id string) (string, error)
// SetContainerDirectoryFile is a convenience function which stores
// a piece of data in the specified file relative to the container's
// directory.
SetContainerDirectoryFile(id, file string, data []byte) error
// FromContainerDirectory is a convenience function which reads
// the contents of the specified file relative to the container's
// directory.
FromContainerDirectory(id, file string) ([]byte, error)
// ContainerRunDirectory returns a path of a directory which the
// caller can use to store data, specific to the container, which the
// library does not directly manage. The directory will be deleted
// when the host system is restarted.
ContainerRunDirectory(id string) (string, error)
// SetContainerRunDirectoryFile is a convenience function which stores
// a piece of data in the specified file relative to the container's
// run directory.
SetContainerRunDirectoryFile(id, file string, data []byte) error
// FromContainerRunDirectory is a convenience function which reads
// the contents of the specified file relative to the container's run
// directory.
FromContainerRunDirectory(id, file string) ([]byte, error)
// ContainerParentOwners returns the UIDs and GIDs of owners of parents
// of the container's layer's mountpoint for which the layer's UID and
// GID maps (if any are defined) don't contain corresponding IDs.
ContainerParentOwners(id string) ([]int, []int, error)
// Lookup returns the ID of a layer, image, or container with the specified
// name or ID.
Lookup(name string) (string, error)
// Shutdown attempts to free any kernel resources which are being used
// by the underlying driver. If "force" is true, any mounted (i.e., in
// use) layers are unmounted beforehand. If "force" is not true, then
// layers being in use is considered to be an error condition. A list
// of still-mounted layers is returned along with possible errors.
Shutdown(force bool) (layers []string, err error)
// Version returns version information, in the form of key-value pairs, from
// the storage package.
Version() ([][2]string, error)
// GetDigestLock returns digest-specific Locker.
GetDigestLock(digest.Digest) (Locker, error)
}
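// exampleStoreSetup is an illustrative sketch, not part of the original file:
// per the Mount and ApplyDiff comments above, the calling program's main()
// must first run
//	if reexec.Init() {
//		return
//	}
// After that, a caller holding a Store can mount a layer, image, or container
// by ID or name. The name "example-container" is hypothetical.
func exampleStoreSetup(s Store) (string, error) {
	return s.Mount("example-container", "")
}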
// AutoUserNsOptions defines how to automatically create a user namespace.
type AutoUserNsOptions struct {
// Size defines the size for the user namespace. If it is set to a
// value bigger than 0, the user namespace will have exactly this size.
// If it is not set, some heuristics will be used to find its size.
Size uint32
// InitialSize defines the minimum size for the user namespace.
// The created user namespace will have at least this size.
InitialSize uint32
// PasswdFile to use if the container uses a volume.
PasswdFile string
// GroupFile to use if the container uses a volume.
GroupFile string
// AdditionalUIDMappings specifies additional UID mappings to include in
// the generated user namespace.
AdditionalUIDMappings []idtools.IDMap
// AdditionalGIDMappings specifies additional GID mappings to include in
// the generated user namespace.
AdditionalGIDMappings []idtools.IDMap
}
// IDMappingOptions are used for specifying how ID mapping should be set up for
// a layer or container.
type IDMappingOptions struct {
// UIDMap and GIDMap are used for setting up a layer's root filesystem
// for use inside of a user namespace where ID mapping is being used.
// If HostUIDMapping/HostGIDMapping is true, no mapping of the
// respective type will be used. Otherwise, if UIDMap and/or GIDMap
// contain at least one mapping, one or both will be used. By default,
// if neither of those conditions apply, if the layer has a parent
// layer, the parent layer's mapping will be used, and if it does not
// have a parent layer, the mapping which was passed to the Store
// object when it was initialized will be used.
HostUIDMapping bool
HostGIDMapping bool
UIDMap []idtools.IDMap
GIDMap []idtools.IDMap
AutoUserNs bool
AutoUserNsOpts AutoUserNsOptions
}
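// newExampleIDMappingOptions is an illustrative sketch, not part of the
// original file, of the precedence described above: explicit UIDMap/GIDMap
// entries take effect, while leaving both empty defers to the parent layer
// or the Store defaults. The single 0->100000 range is hypothetical, and the
// field names follow pkg/idtools as used elsewhere in this package.
func newExampleIDMappingOptions() IDMappingOptions {
	return IDMappingOptions{
		HostUIDMapping: false,
		HostGIDMapping: false,
		UIDMap:         []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
		GIDMap:         []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
	}
}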
// LayerOptions is used for passing options to a Store's CreateLayer() and PutLayer() methods.
type LayerOptions struct {
// IDMappingOptions specifies the type of ID mapping which should be
// used for this layer. If nothing is specified, the layer will
// inherit settings from its parent layer or, if it has no parent
// layer, the Store object.
IDMappingOptions
// TemplateLayer is the ID of a layer whose contents will be used to
// initialize this layer. If set, it should be a child of the layer
// which we want to use as the parent of the new layer.
TemplateLayer string
}
// ImageOptions is used for passing options to a Store's CreateImage() method.
type ImageOptions struct {
// CreationDate, if not zero, will override the default behavior of marking the image as having been
// created when CreateImage() was called, recording CreationDate instead.
CreationDate time.Time
// Digest is a hard-coded digest value that we can use to look up the image. It is optional.
Digest digest.Digest
}
// ContainerOptions is used for passing options to a Store's CreateContainer() method.
type ContainerOptions struct {
// IDMappingOptions specifies the type of ID mapping which should be
// used for this container's layer. If nothing is specified, the
// container's layer will inherit settings from the image's top layer
// or, if it is not being created based on an image, the Store object.
IDMappingOptions
LabelOpts []string
Flags map[string]interface{}
MountOpts []string
}
type store struct {
lastLoaded time.Time
runRoot string
graphLock Locker
usernsLock Locker
graphRoot string
graphDriverName string
graphOptions []string
uidMap []idtools.IDMap
gidMap []idtools.IDMap
autoUsernsUser string
autoUIDMap []idtools.IDMap // Set by getAvailableMappings()
autoGIDMap []idtools.IDMap // Set by getAvailableMappings()
autoNsMinSize uint32
autoNsMaxSize uint32
graphDriver drivers.Driver
layerStore LayerStore
roLayerStores []ROLayerStore
imageStore ImageStore
roImageStores []ROImageStore
containerStore ContainerStore
digestLockRoot string
}
// GetStore attempts to find an already-created Store object matching the
// specified location and graph driver, and if it can't, it creates and
// initializes a new Store object, and the underlying storage that it controls.
//
// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used.
//
// These defaults observe environment variables:
// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use
// * `STORAGE_OPTS` for the string of options to pass to the driver
//
// Note that we do some of this work in a child process. The calling process's
// main() function needs to import our pkg/reexec package and should begin with
// something like this in order to allow us to properly start that child
// process:
// if reexec.Init() {
// return
// }
func GetStore(options StoreOptions) (Store, error) {
if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 {
options = defaultStoreOptions
}
if options.GraphRoot != "" {
dir, err := filepath.Abs(options.GraphRoot)
if err != nil {
return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.GraphRoot)
}
options.GraphRoot = dir
}
if options.RunRoot != "" {
dir, err := filepath.Abs(options.RunRoot)
if err != nil {
return nil, errors.Wrapf(err, "error deriving an absolute path from %q", options.RunRoot)
}
options.RunRoot = dir
}
storesLock.Lock()
defer storesLock.Unlock()
for _, s := range stores {
if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) {
return s, nil
}
}
if options.GraphRoot == "" {
return nil, errors.Wrap(ErrIncompleteOptions, "no storage root specified")
}
if options.RunRoot == "" {
return nil, errors.Wrap(ErrIncompleteOptions, "no storage runroot specified")
}
if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) {
return nil, err
}
for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} {
if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) {
return nil, err
}
}
graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock"))
if err != nil {
return nil, err
}
usernsLock, err := GetLockfile(filepath.Join(options.GraphRoot, "userns.lock"))
if err != nil {
return nil, err
}
autoNsMinSize := options.AutoNsMinSize
autoNsMaxSize := options.AutoNsMaxSize
if autoNsMinSize == 0 {
autoNsMinSize = AutoUserNsMinSize
}
if autoNsMaxSize == 0 {
autoNsMaxSize = AutoUserNsMaxSize
}
s := &store{
runRoot: options.RunRoot,
graphLock: graphLock,
graphRoot: options.GraphRoot,
graphDriverName: options.GraphDriverName,
graphOptions: options.GraphDriverOptions,
uidMap: copyIDMap(options.UIDMap),
gidMap: copyIDMap(options.GIDMap),
autoUsernsUser: options.RootAutoNsUser,
autoNsMinSize: autoNsMinSize,
autoNsMaxSize: autoNsMaxSize,
autoUIDMap: nil,
autoGIDMap: nil,
usernsLock: usernsLock,
}
if err := s.load(); err != nil {
return nil, err
}
stores = append(stores, s)
return s, nil
}
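// exampleGetStore is an illustrative sketch, not part of the original file,
// of calling GetStore with explicit roots; the paths are hypothetical, and an
// entirely empty StoreOptions would fall back to defaultStoreOptions as
// described above.
func exampleGetStore() (Store, error) {
	return GetStore(StoreOptions{
		RunRoot:   "/run/example-storage",
		GraphRoot: "/var/lib/example-storage",
	})
}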
func copyUint32Slice(slice []uint32) []uint32 {
m := []uint32{}
if slice != nil {
m = make([]uint32, len(slice))
copy(m, slice)
}
if len(m) > 0 {
return m[:]
}
return nil
}
func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap {
m := []idtools.IDMap{}
if idmap != nil {
m = make([]idtools.IDMap, len(idmap))
copy(m, idmap)
}
if len(m) > 0 {
return m[:]
}
return nil
}
func (s *store) RunRoot() string {
return s.runRoot
}
func (s *store) GraphDriverName() string {
return s.graphDriverName
}
func (s *store) GraphRoot() string {
return s.graphRoot
}
func (s *store) GraphOptions() []string {
return s.graphOptions
}
func (s *store) UIDMap() []idtools.IDMap {
return copyIDMap(s.uidMap)
}
func (s *store) GIDMap() []idtools.IDMap {
return copyIDMap(s.gidMap)
}
func (s *store) load() error {
driver, err := s.GraphDriver()
if err != nil {
return err
}
s.graphDriver = driver
s.graphDriverName = driver.String()
driverPrefix := s.graphDriverName + "-"
gipath := filepath.Join(s.graphRoot, driverPrefix+"images")
if err := os.MkdirAll(gipath, 0700); err != nil {
return err
}
ris, err := newImageStore(gipath)
if err != nil {
return err
}
s.imageStore = ris
if _, err := s.ROImageStores(); err != nil {
return err
}
gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
if err := os.MkdirAll(gcpath, 0700); err != nil {
return err
}
rcs, err := newContainerStore(gcpath)
if err != nil {
return err
}
rcpath := filepath.Join(s.runRoot, driverPrefix+"containers")
if err := os.MkdirAll(rcpath, 0700); err != nil {
return err
}
s.containerStore = rcs
s.digestLockRoot = filepath.Join(s.runRoot, driverPrefix+"locks")
if err := os.MkdirAll(s.digestLockRoot, 0700); err != nil {
return err
}
return nil
}
// GetDigestLock returns a digest-specific Locker.
func (s *store) GetDigestLock(d digest.Digest) (Locker, error) {
return GetLockfile(filepath.Join(s.digestLockRoot, d.String()))
}
func (s *store) getGraphDriver() (drivers.Driver, error) {
if s.graphDriver != nil {
return s.graphDriver, nil
}
config := drivers.Options{
Root: s.graphRoot,
RunRoot: s.runRoot,
DriverOptions: s.graphOptions,
UIDMaps: s.uidMap,
GIDMaps: s.gidMap,
}
driver, err := drivers.New(s.graphDriverName, config)
if err != nil {
return nil, err
}
s.graphDriver = driver
s.graphDriverName = driver.String()
return driver, nil
}
func (s *store) GraphDriver() (drivers.Driver, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.graphLock.TouchedSince(s.lastLoaded) {
s.graphDriver = nil
s.layerStore = nil
s.lastLoaded = time.Now()
}
return s.getGraphDriver()
}
// LayerStore obtains and returns a handle to the writeable layer store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) LayerStore() (LayerStore, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.graphLock.TouchedSince(s.lastLoaded) {
s.graphDriver = nil
s.layerStore = nil
s.lastLoaded = time.Now()
}
if s.layerStore != nil {
return s.layerStore, nil
}
driver, err := s.getGraphDriver()
if err != nil {
return nil, err
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
if err := os.MkdirAll(rlpath, 0700); err != nil {
return nil, err
}
glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
if err := os.MkdirAll(glpath, 0700); err != nil {
return nil, err
}
rls, err := s.newLayerStore(rlpath, glpath, driver)
if err != nil {
return nil, err
}
s.layerStore = rls
return s.layerStore, nil
}
// ROLayerStores obtains additional read/only layer store objects used by the
// Store. Accessing these stores directly will bypass locking and
// synchronization, so it is not part of the exported Store interface.
func (s *store) ROLayerStores() ([]ROLayerStore, error) {
s.graphLock.Lock()
defer s.graphLock.Unlock()
if s.roLayerStores != nil {
return s.roLayerStores, nil
}
driver, err := s.getGraphDriver()
if err != nil {
return nil, err
}
driverPrefix := s.graphDriverName + "-"
rlpath := filepath.Join(s.runRoot, driverPrefix+"layers")
if err := os.MkdirAll(rlpath, 0700); err != nil {
return nil, err
}
for _, store := range driver.AdditionalImageStores() {
glpath := filepath.Join(store, driverPrefix+"layers")
rls, err := newROLayerStore(rlpath, glpath, driver)
if err != nil {
return nil, err
}
s.roLayerStores = append(s.roLayerStores, rls)
}
return s.roLayerStores, nil
}
// ImageStore obtains and returns a handle to the writable image store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ImageStore() (ImageStore, error) {
if s.imageStore != nil {
return s.imageStore, nil
}
return nil, ErrLoadError
}
// ROImageStores obtains additional read/only image store objects used by the
// Store. Accessing these stores directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ROImageStores() ([]ROImageStore, error) {
if len(s.roImageStores) != 0 {
return s.roImageStores, nil
}
driver, err := s.getGraphDriver()
if err != nil {
return nil, err
}
driverPrefix := s.graphDriverName + "-"
for _, store := range driver.AdditionalImageStores() {
gipath := filepath.Join(store, driverPrefix+"images")
ris, err := newROImageStore(gipath)
if err != nil {
return nil, err
}
s.roImageStores = append(s.roImageStores, ris)
}
return s.roImageStores, nil
}
// ContainerStore obtains and returns a handle to the container store object
// used by the Store. Accessing this store directly will bypass locking and
// synchronization, so it is not a part of the exported Store interface.
func (s *store) ContainerStore() (ContainerStore, error) {
if s.containerStore != nil {
return s.containerStore, nil
}
return nil, ErrLoadError
}
func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions, diff io.Reader) (*Layer, int64, error) {
var parentLayer *Layer
rlstore, err := s.LayerStore()
if err != nil {
return nil, -1, err
}
rlstores, err := s.ROLayerStores()
if err != nil {
return nil, -1, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, -1, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, -1, err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, -1, err
}
}
if id == "" {
id = stringid.GenerateRandomID()
}
if options == nil {
options = &LayerOptions{}
}
if options.HostUIDMapping {
options.UIDMap = nil
}
if options.HostGIDMapping {
options.GIDMap = nil
}
uidMap := options.UIDMap
gidMap := options.GIDMap
if parent != "" {
var ilayer *Layer
for _, l := range append([]ROLayerStore{rlstore}, rlstores...) {
lstore := l
if lstore != rlstore {
lstore.RLock()
defer lstore.Unlock()
if modified, err := lstore.Modified(); modified || err != nil {
if err = lstore.Load(); err != nil {
return nil, -1, err
}
}
}
if l, err := lstore.Get(parent); err == nil && l != nil {
ilayer = l
parent = ilayer.ID
break
}
}
if ilayer == nil {
return nil, -1, ErrLayerUnknown
}
parentLayer = ilayer
containers, err := rcstore.Containers()
if err != nil {
return nil, -1, err
}
for _, container := range containers {
if container.LayerID == parent {
return nil, -1, ErrParentIsContainer
}
}
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = ilayer.UIDMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = ilayer.GIDMap
}
} else {
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = s.gidMap
}
}
var layerOptions *LayerOptions
if s.graphDriver.SupportsShifting() {
layerOptions = &LayerOptions{IDMappingOptions: IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil}}
} else {
layerOptions = &LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(uidMap),
GIDMap: copyIDMap(gidMap),
},
}
}
return rlstore.Put(id, parentLayer, names, mountLabel, nil, layerOptions, writeable, nil, diff)
}
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, options, nil)
return layer, err
}
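// exampleCreateLayer is an illustrative sketch, not part of the original
// file, and assumes CreateLayer is exposed on the Store interface as the
// implementation above suggests: it creates a writeable layer on top of an
// existing parent; with a nil *LayerOptions the new layer inherits the
// parent's ID mappings, as PutLayer arranges. The parent ID and layer name
// are hypothetical.
func exampleCreateLayer(s Store, parentID string) (*Layer, error) {
	return s.CreateLayer("", parentID, []string{"example-layer"}, "", true, nil)
}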
func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) {
if id == "" {
id = stringid.GenerateRandomID()
}
if layer != "" {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
var ilayer *Layer
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
if store == lstore {
store.Lock()
} else {
store.RLock()
}
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
ilayer, err = store.Get(layer)
if err == nil {
break
}
}
if ilayer == nil {
return nil, ErrLayerUnknown
}
layer = ilayer.ID
}
ristore, err := s.ImageStore()
if err != nil {
return nil, err
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return nil, err
}
}
creationDate := time.Now().UTC()
	if options == nil {
		options = &ImageOptions{}
	}
	if !options.CreationDate.IsZero() {
		creationDate = options.CreationDate
	}
	return ristore.Create(id, names, layer, metadata, creationDate, options.Digest)
}
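// exampleCreateImage is an illustrative sketch, not part of the original
// file, and assumes CreateImage is exposed on the Store interface as
// implemented above: it records an image whose top layer already exists,
// overriding the default creation date through ImageOptions. The layer ID
// and image name are hypothetical.
func exampleCreateImage(s Store, topLayerID string) (*Image, error) {
	opts := &ImageOptions{CreationDate: time.Now().UTC()}
	return s.CreateImage("", []string{"localhost/example:latest"}, topLayerID, "", opts)
}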
func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, createMappedLayer bool, rlstore LayerStore, lstores []ROLayerStore, options IDMappingOptions) (*Layer, error) {
layerMatchesMappingOptions := func(layer *Layer, options IDMappingOptions) bool {
// If the driver supports shifting and the layer has no mappings, we can use it.
if s.graphDriver.SupportsShifting() && len(layer.UIDMap) == 0 && len(layer.GIDMap) == 0 {
return true
}
// If we want host mapping, and the layer uses mappings, it's not the best match.
if options.HostUIDMapping && len(layer.UIDMap) != 0 {
return false
}
if options.HostGIDMapping && len(layer.GIDMap) != 0 {
return false
}
// If we don't care about the mapping, it's fine.
if len(options.UIDMap) == 0 && len(options.GIDMap) == 0 {
return true
}
// Compare the maps.
return reflect.DeepEqual(layer.UIDMap, options.UIDMap) && reflect.DeepEqual(layer.GIDMap, options.GIDMap)
}
var layer, parentLayer *Layer
allStores := append([]ROLayerStore{rlstore}, lstores...)
// Locate the image's top layer and its parent, if it has one.
for _, s := range allStores {
store := s
if store != rlstore {
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
}
// Walk the top layer list.
for _, candidate := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
if cLayer, err := store.Get(candidate); err == nil {
// We want the layer's parent, too, if it has one.
var cParentLayer *Layer
if cLayer.Parent != "" {
// Its parent should be in one of the stores, somewhere.
for _, ps := range allStores {
if cParentLayer, err = ps.Get(cLayer.Parent); err == nil {
break
}
}
if cParentLayer == nil {
continue
}
}
// If the layer matches the desired mappings, it's a perfect match,
// so we're actually done here.
if layerMatchesMappingOptions(cLayer, options) {
return cLayer, nil
}
// Record the first one that we found, even if it's not ideal, so that
// we have a starting point.
if layer == nil {
layer = cLayer
parentLayer = cParentLayer
}
}
}
}
if layer == nil {
return nil, ErrLayerUnknown
}
// The top layer's mappings don't match the ones we want, but it's in a read-only
// image store, so we can't create and add a mapped copy of the layer to the image.
// We'll have to do the mapping for the container itself, elsewhere.
if !createMappedLayer {
return layer, nil
}
// The top layer's mappings don't match the ones we want, and it's in an image store
// that lets us edit image metadata...
if istore, ok := ristore.(*imageStore); ok {
// ... so create a duplicate of the layer with the desired mappings, and
// register it as an alternate top layer in the image.
var layerOptions LayerOptions
if s.graphDriver.SupportsShifting() {
layerOptions = LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
},
}
} else {
layerOptions = LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: options.HostUIDMapping,
HostGIDMapping: options.HostGIDMapping,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
},
}
}
layerOptions.TemplateLayer = layer.ID
mappedLayer, _, err := rlstore.Put("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
if err != nil {
return nil, errors.Wrapf(err, "error creating an ID-mapped copy of layer %q", layer.ID)
}
if err = istore.addMappedTopLayer(image.ID, mappedLayer.ID); err != nil {
if err2 := rlstore.Delete(mappedLayer.ID); err2 != nil {
err = errors.WithMessage(err, fmt.Sprintf("error deleting layer %q: %v", mappedLayer.ID, err2))
}
return nil, errors.Wrapf(err, "error registering ID-mapped layer with image %q", image.ID)
}
layer = mappedLayer
}
return layer, nil
}
func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) {
if options == nil {
options = &ContainerOptions{}
}
if options.HostUIDMapping {
options.UIDMap = nil
}
if options.HostGIDMapping {
options.GIDMap = nil
}
rlstore, err := s.LayerStore()
if err != nil {
return nil, err
}
if id == "" {
id = stringid.GenerateRandomID()
}
var imageTopLayer *Layer
imageID := ""
if options.AutoUserNs || options.UIDMap != nil || options.GIDMap != nil {
		// Prevent multiple instances from retrieving the same range when
		// AutoUserNs is used.
		// It doesn't prevent containers that specify an explicit mapping
		// from overlapping with AutoUserNs.
s.usernsLock.Lock()
defer s.usernsLock.Unlock()
}
var imageHomeStore ROImageStore
var istore ImageStore
var istores []ROImageStore
var lstores []ROLayerStore
var cimage *Image
if image != "" {
var err error
lstores, err = s.ROLayerStores()
if err != nil {
return nil, err
}
istore, err = s.ImageStore()
if err != nil {
return nil, err
}
istores, err = s.ROImageStores()
if err != nil {
return nil, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, err
}
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
if store == istore {
store.Lock()
} else {
store.RLock()
}
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
cimage, err = store.Get(image)
if err == nil {
imageHomeStore = store
break
}
}
if cimage == nil {
		return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", image)
}
imageID = cimage.ID
}
if options.AutoUserNs {
var err error
options.UIDMap, options.GIDMap, err = s.getAutoUserNS(id, &options.AutoUserNsOpts, cimage)
if err != nil {
return nil, err
}
}
uidMap := options.UIDMap
gidMap := options.GIDMap
idMappingsOptions := options.IDMappingOptions
if image != "" {
if cimage.TopLayer != "" {
createMappedLayer := imageHomeStore == istore
ilayer, err := s.imageTopLayerForMapping(cimage, imageHomeStore, createMappedLayer, rlstore, lstores, idMappingsOptions)
if err != nil {
return nil, err
}
imageTopLayer = ilayer
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = ilayer.UIDMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = ilayer.GIDMap
}
}
} else {
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, err
}
}
if !options.HostUIDMapping && len(options.UIDMap) == 0 {
uidMap = s.uidMap
}
if !options.HostGIDMapping && len(options.GIDMap) == 0 {
gidMap = s.gidMap
}
}
var layerOptions *LayerOptions
if s.graphDriver.SupportsShifting() {
layerOptions = &LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: true,
HostGIDMapping: true,
UIDMap: nil,
GIDMap: nil,
},
}
} else {
layerOptions = &LayerOptions{
IDMappingOptions: IDMappingOptions{
HostUIDMapping: idMappingsOptions.HostUIDMapping,
HostGIDMapping: idMappingsOptions.HostGIDMapping,
UIDMap: copyIDMap(uidMap),
GIDMap: copyIDMap(gidMap),
},
}
}
if options.Flags == nil {
options.Flags = make(map[string]interface{})
}
plabel, _ := options.Flags["ProcessLabel"].(string)
mlabel, _ := options.Flags["MountLabel"].(string)
if (plabel == "" && mlabel != "") ||
(plabel != "" && mlabel == "") {
return nil, errors.Errorf("ProcessLabel and Mountlabel must either not be specified or both specified")
}
if plabel == "" {
processLabel, mountLabel, err := label.InitLabels(options.LabelOpts)
if err != nil {
return nil, err
}
options.Flags["ProcessLabel"] = processLabel
options.Flags["MountLabel"] = mountLabel
}
clayer, err := rlstore.Create(layer, imageTopLayer, nil, options.Flags["MountLabel"].(string), nil, layerOptions, true)
if err != nil {
return nil, err
}
layer = clayer.ID
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
options.IDMappingOptions = IDMappingOptions{
HostUIDMapping: len(options.UIDMap) == 0,
HostGIDMapping: len(options.GIDMap) == 0,
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
}
container, err := rcstore.Create(id, names, imageID, layer, metadata, options)
if err != nil || container == nil {
rlstore.Delete(layer)
}
return container, err
}
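// exampleCreateContainer is an illustrative sketch, not part of the original
// file, and assumes CreateContainer is exposed on the Store interface as
// implemented above: it creates a container from an existing image with an
// extra mount option. The ProcessLabel and MountLabel flags are left unset
// here, so InitLabels chooses them, per the checks above. The image ID and
// container name are hypothetical.
func exampleCreateContainer(s Store, imageID string) (*Container, error) {
	opts := &ContainerOptions{MountOpts: []string{"nodev"}}
	return s.CreateContainer("", []string{"example-container"}, imageID, "", "", opts)
}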
func (s *store) SetMetadata(id, metadata string) error {
rlstore, err := s.LayerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err := ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rlstore.Exists(id) {
return rlstore.SetMetadata(id, metadata)
}
if ristore.Exists(id) {
return ristore.SetMetadata(id, metadata)
}
if rcstore.Exists(id) {
return rcstore.SetMetadata(id, metadata)
}
return ErrNotAnID
}
func (s *store) Metadata(id string) (string, error) {
lstore, err := s.LayerStore()
if err != nil {
return "", err
}
lstores, err := s.ROLayerStores()
if err != nil {
return "", err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return "", err
}
}
if store.Exists(id) {
return store.Metadata(id)
}
}
istore, err := s.ImageStore()
if err != nil {
return "", err
}
istores, err := s.ROImageStores()
if err != nil {
return "", err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return "", err
}
}
if store.Exists(id) {
return store.Metadata(id)
}
}
cstore, err := s.ContainerStore()
if err != nil {
return "", err
}
cstore.RLock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil {
return "", err
}
}
if cstore.Exists(id) {
return cstore.Metadata(id)
}
return "", ErrNotAnID
}
func (s *store) ListImageBigData(id string) ([]string, error) {
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
bigDataNames, err := store.BigDataNames(id)
if err == nil {
return bigDataNames, err
}
}
return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
func (s *store) ImageBigDataSize(id, key string) (int64, error) {
istore, err := s.ImageStore()
if err != nil {
return -1, err
}
istores, err := s.ROImageStores()
if err != nil {
return -1, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
size, err := store.BigDataSize(id, key)
if err == nil {
return size, nil
}
}
return -1, ErrSizeUnknown
}
func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) {
ristore, err := s.ImageStore()
if err != nil {
return "", err
}
stores, err := s.ROImageStores()
if err != nil {
return "", err
}
stores = append([]ROImageStore{ristore}, stores...)
for _, r := range stores {
ristore := r
ristore.RLock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return "", nil
}
}
d, err := ristore.BigDataDigest(id, key)
if err == nil && d.Validate() == nil {
return d, nil
}
}
return "", ErrDigestUnknown
}
func (s *store) ImageBigData(id, key string) ([]byte, error) {
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
foundImage := false
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
data, err := store.BigData(id, key)
if err == nil {
return data, nil
}
if store.Exists(id) {
foundImage = true
}
}
if foundImage {
return nil, errors.Wrapf(os.ErrNotExist, "error locating item named %q for image with ID %q", key, id)
}
return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
func (s *store) SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error {
ristore, err := s.ImageStore()
if err != nil {
return err
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
			return err
}
}
return ristore.SetBigData(id, key, data, digestManifest)
}
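// exampleSetImageManifest is an illustrative sketch, not part of the original
// file: it stores a manifest as image big data. The interface comment above
// recommends github.com/containers/image/manifest.Digest for digestManifest;
// a plain canonical digest is used here only to keep the sketch
// self-contained, and the "manifest" key name is hypothetical.
func exampleSetImageManifest(s Store, imageID string, manifestBytes []byte) error {
	digestManifest := func(b []byte) (digest.Digest, error) {
		return digest.Canonical.FromBytes(b), nil
	}
	return s.SetImageBigData(imageID, "manifest", manifestBytes, digestManifest)
}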
func (s *store) ImageSize(id string) (int64, error) {
var image *Image
lstore, err := s.LayerStore()
if err != nil {
return -1, errors.Wrapf(err, "error loading primary layer store data")
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, errors.Wrapf(err, "error loading additional layer stores")
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
}
var imageStore ROBigDataStore
istore, err := s.ImageStore()
if err != nil {
return -1, errors.Wrapf(err, "error loading primary image store data")
}
istores, err := s.ROImageStores()
if err != nil {
return -1, errors.Wrapf(err, "error loading additional image stores")
}
// Look for the image's record.
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
if image, err = store.Get(id); err == nil {
imageStore = store
break
}
}
if image == nil {
return -1, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
// Start with a list of the image's top layers, if it has any.
queue := make(map[string]struct{})
for _, layerID := range append([]string{image.TopLayer}, image.MappedTopLayers...) {
if layerID != "" {
queue[layerID] = struct{}{}
}
}
visited := make(map[string]struct{})
// Walk all of the layers.
var size int64
for len(visited) < len(queue) {
for layerID := range queue {
// Visit each layer only once.
if _, ok := visited[layerID]; ok {
continue
}
visited[layerID] = struct{}{}
// Look for the layer and the store that knows about it.
var layerStore ROLayerStore
var layer *Layer
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
if layer, err = store.Get(layerID); err == nil {
layerStore = store
break
}
}
if layer == nil {
return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", layerID)
}
// The UncompressedSize is only valid if there's a digest to go with it.
n := layer.UncompressedSize
if layer.UncompressedDigest == "" {
// Compute the size.
n, err = layerStore.DiffSize("", layer.ID)
if err != nil {
return -1, errors.Wrapf(err, "size/digest of layer with ID %q could not be calculated", layerID)
}
}
// Count this layer.
size += n
// Make a note to visit the layer's parent if we haven't already.
if layer.Parent != "" {
queue[layer.Parent] = struct{}{}
}
}
}
// Count big data items.
names, err := imageStore.BigDataNames(id)
if err != nil {
return -1, errors.Wrapf(err, "error reading list of big data items for image %q", id)
}
for _, name := range names {
n, err := imageStore.BigDataSize(id, name)
if err != nil {
return -1, errors.Wrapf(err, "error reading size of big data item %q for image %q", name, id)
}
size += n
}
return size, nil
}
func (s *store) ContainerSize(id string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
return -1, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
}
// Get the location of the container directory and container run directory.
	// Do it before we lock the container store, because those calls lock it too.
cdir, err := s.ContainerDirectory(id)
if err != nil {
return -1, err
}
rdir, err := s.ContainerRunDirectory(id)
if err != nil {
return -1, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return -1, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return -1, err
}
}
// Read the container record.
container, err := rcstore.Get(id)
if err != nil {
return -1, err
}
// Read the container's layer's size.
var layer *Layer
var size int64
for _, store := range append([]ROLayerStore{lstore}, lstores...) {
if layer, err = store.Get(container.LayerID); err == nil {
size, err = store.DiffSize("", layer.ID)
if err != nil {
return -1, errors.Wrapf(err, "error determining size of layer with ID %q", layer.ID)
}
break
}
}
if layer == nil {
return -1, errors.Wrapf(ErrLayerUnknown, "error locating layer with ID %q", container.LayerID)
}
// Count big data items.
names, err := rcstore.BigDataNames(id)
if err != nil {
return -1, errors.Wrapf(err, "error reading list of big data items for container %q", container.ID)
}
for _, name := range names {
n, err := rcstore.BigDataSize(id, name)
if err != nil {
return -1, errors.Wrapf(err, "error reading size of big data item %q for container %q", name, id)
}
size += n
}
// Count the size of our container directory and container run directory.
n, err := directory.Size(cdir)
if err != nil {
return -1, err
}
size += n
n, err = directory.Size(rdir)
if err != nil {
return -1, err
}
size += n
return size, nil
}
func (s *store) ListContainerBigData(id string) ([]string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
return rcstore.BigDataNames(id)
}
func (s *store) ContainerBigDataSize(id, key string) (int64, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return -1, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return -1, err
}
}
return rcstore.BigDataSize(id, key)
}
func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return "", err
}
}
return rcstore.BigDataDigest(id, key)
}
func (s *store) ContainerBigData(id, key string) ([]byte, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
return rcstore.BigData(id, key)
}
func (s *store) SetContainerBigData(id, key string, data []byte) error {
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
return rcstore.SetBigData(id, key, data)
}
func (s *store) Exists(id string) bool {
lstore, err := s.LayerStore()
if err != nil {
return false
}
lstores, err := s.ROLayerStores()
if err != nil {
return false
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return false
}
}
if store.Exists(id) {
return true
}
}
istore, err := s.ImageStore()
if err != nil {
return false
}
istores, err := s.ROImageStores()
if err != nil {
return false
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return false
}
}
if store.Exists(id) {
return true
}
}
rcstore, err := s.ContainerStore()
if err != nil {
return false
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return false
}
}
if rcstore.Exists(id) {
return true
}
return false
}
func dedupeNames(names []string) []string {
seen := make(map[string]bool)
deduped := make([]string, 0, len(names))
for _, name := range names {
if _, wasSeen := seen[name]; !wasSeen {
seen[name] = true
deduped = append(deduped, name)
}
}
return deduped
}
func (s *store) SetNames(id string, names []string) error {
deduped := dedupeNames(names)
rlstore, err := s.LayerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
if rlstore.Exists(id) {
return rlstore.SetNames(id, deduped)
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
if ristore.Exists(id) {
return ristore.SetNames(id, deduped)
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rcstore.Exists(id) {
return rcstore.SetNames(id, deduped)
}
return ErrLayerUnknown
}
func (s *store) Names(id string) ([]string, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
if l, err := store.Get(id); l != nil && err == nil {
return l.Names, nil
}
}
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
if i, err := store.Get(id); i != nil && err == nil {
return i.Names, nil
}
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
if c, err := rcstore.Get(id); c != nil && err == nil {
return c.Names, nil
}
return nil, ErrLayerUnknown
}
func (s *store) Lookup(name string) (string, error) {
lstore, err := s.LayerStore()
if err != nil {
return "", err
}
lstores, err := s.ROLayerStores()
if err != nil {
return "", err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return "", err
}
}
if l, err := store.Get(name); l != nil && err == nil {
return l.ID, nil
}
}
istore, err := s.ImageStore()
if err != nil {
return "", err
}
istores, err := s.ROImageStores()
if err != nil {
return "", err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return "", err
}
}
if i, err := store.Get(name); i != nil && err == nil {
return i.ID, nil
}
}
cstore, err := s.ContainerStore()
if err != nil {
return "", err
}
cstore.RLock()
defer cstore.Unlock()
if modified, err := cstore.Modified(); modified || err != nil {
if err = cstore.Load(); err != nil {
return "", err
}
}
if c, err := cstore.Get(name); c != nil && err == nil {
return c.ID, nil
}
return "", ErrLayerUnknown
}
func (s *store) DeleteLayer(id string) error {
rlstore, err := s.LayerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rlstore.Exists(id) {
		if l, err := rlstore.Get(id); err == nil {
id = l.ID
}
layers, err := rlstore.Layers()
if err != nil {
return err
}
for _, layer := range layers {
if layer.Parent == id {
return ErrLayerHasChildren
}
}
images, err := ristore.Images()
if err != nil {
return err
}
for _, image := range images {
if image.TopLayer == id || stringutils.InSlice(image.MappedTopLayers, id) {
return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID)
}
}
containers, err := rcstore.Containers()
if err != nil {
return err
}
for _, container := range containers {
if container.LayerID == id {
return errors.Wrapf(ErrLayerUsedByContainer, "Layer %v used by container %v", id, container.ID)
}
}
return rlstore.Delete(id)
}
return ErrNotALayer
}
func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) {
rlstore, err := s.LayerStore()
if err != nil {
return nil, err
}
ristore, err := s.ImageStore()
if err != nil {
return nil, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return nil, err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
layersToRemove := []string{}
if ristore.Exists(id) {
image, err := ristore.Get(id)
if err != nil {
return nil, err
}
id = image.ID
containers, err := rcstore.Containers()
if err != nil {
return nil, err
}
aContainerByImage := make(map[string]string)
for _, container := range containers {
aContainerByImage[container.ImageID] = container.ID
}
if container, ok := aContainerByImage[id]; ok {
return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container)
}
images, err := ristore.Images()
if err != nil {
return nil, err
}
layers, err := rlstore.Layers()
if err != nil {
return nil, err
}
childrenByParent := make(map[string]*[]string)
for _, layer := range layers {
parent := layer.Parent
if list, ok := childrenByParent[parent]; ok {
newList := append(*list, layer.ID)
childrenByParent[parent] = &newList
} else {
childrenByParent[parent] = &([]string{layer.ID})
}
}
otherImagesByTopLayer := make(map[string]string)
for _, img := range images {
if img.ID != id {
otherImagesByTopLayer[img.TopLayer] = img.ID
for _, layerID := range img.MappedTopLayers {
otherImagesByTopLayer[layerID] = img.ID
}
}
}
if commit {
if err = ristore.Delete(id); err != nil {
return nil, err
}
}
layer := image.TopLayer
lastRemoved := ""
for layer != "" {
if rcstore.Exists(layer) {
break
}
if _, ok := otherImagesByTopLayer[layer]; ok {
break
}
parent := ""
if l, err := rlstore.Get(layer); err == nil {
parent = l.Parent
}
hasOtherRefs := func() bool {
layersToCheck := []string{layer}
if layer == image.TopLayer {
layersToCheck = append(layersToCheck, image.MappedTopLayers...)
}
for _, layer := range layersToCheck {
if childList, ok := childrenByParent[layer]; ok && childList != nil {
children := *childList
for _, child := range children {
if child != lastRemoved {
return true
}
}
}
}
return false
}
if hasOtherRefs() {
break
}
lastRemoved = layer
if layer == image.TopLayer {
layersToRemove = append(layersToRemove, image.MappedTopLayers...)
}
layersToRemove = append(layersToRemove, lastRemoved)
layer = parent
}
} else {
return nil, ErrNotAnImage
}
if commit {
for _, layer := range layersToRemove {
if err = rlstore.Delete(layer); err != nil {
return nil, err
}
}
}
return layersToRemove, nil
}
func (s *store) DeleteContainer(id string) error {
rlstore, err := s.LayerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rcstore.Exists(id) {
if container, err := rcstore.Get(id); err == nil {
errChan := make(chan error)
var wg sync.WaitGroup
if rlstore.Exists(container.LayerID) {
wg.Add(1)
go func() {
errChan <- rlstore.Delete(container.LayerID)
wg.Done()
}()
}
wg.Add(1)
go func() {
errChan <- rcstore.Delete(id)
wg.Done()
}()
middleDir := s.graphDriverName + "-containers"
gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID)
wg.Add(1)
go func() {
errChan <- os.RemoveAll(gcpath)
wg.Done()
}()
rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID)
wg.Add(1)
go func() {
errChan <- os.RemoveAll(rcpath)
wg.Done()
}()
go func() {
wg.Wait()
close(errChan)
}()
for {
select {
case err, ok := <-errChan:
if !ok {
return nil
}
if err != nil {
return err
}
}
}
}
}
return ErrNotAContainer
}
func (s *store) Delete(id string) error {
rlstore, err := s.LayerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err := ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if rcstore.Exists(id) {
if container, err := rcstore.Get(id); err == nil {
if rlstore.Exists(container.LayerID) {
if err = rlstore.Delete(container.LayerID); err != nil {
return err
}
if err = rcstore.Delete(id); err != nil {
return err
}
middleDir := s.graphDriverName + "-containers"
gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata")
if err = os.RemoveAll(gcpath); err != nil {
return err
}
rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata")
if err = os.RemoveAll(rcpath); err != nil {
return err
}
return nil
}
return ErrNotALayer
}
}
if ristore.Exists(id) {
return ristore.Delete(id)
}
if rlstore.Exists(id) {
return rlstore.Delete(id)
}
return ErrLayerUnknown
}
func (s *store) Wipe() error {
rcstore, err := s.ContainerStore()
if err != nil {
return err
}
ristore, err := s.ImageStore()
if err != nil {
return err
}
rlstore, err := s.LayerStore()
if err != nil {
return err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return err
}
}
ristore.Lock()
defer ristore.Unlock()
if modified, err := ristore.Modified(); modified || err != nil {
if err = ristore.Load(); err != nil {
return err
}
}
rcstore.Lock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return err
}
}
if err = rcstore.Wipe(); err != nil {
return err
}
if err = ristore.Wipe(); err != nil {
return err
}
return rlstore.Wipe()
}
func (s *store) Status() ([][2]string, error) {
rlstore, err := s.LayerStore()
if err != nil {
return nil, err
}
return rlstore.Status()
}
func (s *store) Version() ([][2]string, error) {
return [][2]string{}, nil
}
func (s *store) Mount(id, mountLabel string) (string, error) {
container, err := s.Container(id)
var (
uidMap, gidMap []idtools.IDMap
mountOpts []string
)
if err == nil {
uidMap, gidMap = container.UIDMap, container.GIDMap
id = container.LayerID
mountOpts = container.MountOpts()
}
rlstore, err := s.LayerStore()
if err != nil {
return "", err
}
s.graphLock.Lock()
defer s.graphLock.Unlock()
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return "", err
}
}
/* We need to make sure the home mount is present when the Mount is done. */
if s.graphLock.TouchedSince(s.lastLoaded) {
s.graphDriver = nil
s.layerStore = nil
s.graphDriver, err = s.getGraphDriver()
if err != nil {
return "", err
}
s.lastLoaded = time.Now()
}
if rlstore.Exists(id) {
options := drivers.MountOpts{
MountLabel: mountLabel,
UidMaps: uidMap,
GidMaps: gidMap,
Options: mountOpts,
}
return rlstore.Mount(id, options)
}
return "", ErrLayerUnknown
}
func (s *store) Mounted(id string) (int, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
rlstore, err := s.LayerStore()
if err != nil {
return 0, err
}
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return 0, err
}
}
return rlstore.Mounted(id)
}
func (s *store) Unmount(id string, force bool) (bool, error) {
if layerID, err := s.ContainerLayerID(id); err == nil {
id = layerID
}
rlstore, err := s.LayerStore()
if err != nil {
return false, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return false, err
}
}
if rlstore.Exists(id) {
return rlstore.Unmount(id, force)
}
return false, ErrLayerUnknown
}
func (s *store) Changes(from, to string) ([]archive.Change, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
if store.Exists(to) {
return store.Changes(from, to)
}
}
return nil, ErrLayerUnknown
}
func (s *store) DiffSize(from, to string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
return -1, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
if store.Exists(to) {
return store.DiffSize(from, to)
}
}
return -1, ErrLayerUnknown
}
func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
if store.Exists(to) {
rc, err := store.Diff(from, to, options)
if rc != nil && err == nil {
wrapped := ioutils.NewReadCloserWrapper(rc, func() error {
err := rc.Close()
store.Unlock()
return err
})
return wrapped, nil
}
store.Unlock()
return rc, err
}
store.Unlock()
}
return nil, ErrLayerUnknown
}
func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) {
rlstore, err := s.LayerStore()
if err != nil {
return -1, err
}
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return -1, err
}
}
if rlstore.Exists(to) {
return rlstore.ApplyDiff(to, diff)
}
return -1, ErrLayerUnknown
}
func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) {
var layers []Layer
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
storeLayers, err := m(store, d)
if err != nil {
if errors.Cause(err) != ErrLayerUnknown {
return nil, err
}
continue
}
layers = append(layers, storeLayers...)
}
if len(layers) == 0 {
return nil, ErrLayerUnknown
}
return layers, nil
}
func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d)
}
func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
if err := d.Validate(); err != nil {
return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d)
}
return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}
func (s *store) LayerSize(id string) (int64, error) {
lstore, err := s.LayerStore()
if err != nil {
return -1, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return -1, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return -1, err
}
}
if store.Exists(id) {
return store.Size(id)
}
}
return -1, ErrLayerUnknown
}
func (s *store) LayerParentOwners(id string) ([]int, []int, error) {
rlstore, err := s.LayerStore()
if err != nil {
return nil, nil, err
}
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, nil, err
}
}
if rlstore.Exists(id) {
return rlstore.ParentOwners(id)
}
return nil, nil, ErrLayerUnknown
}
func (s *store) ContainerParentOwners(id string) ([]int, []int, error) {
rlstore, err := s.LayerStore()
if err != nil {
return nil, nil, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, nil, err
}
rlstore.RLock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, nil, err
}
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, nil, err
}
}
container, err := rcstore.Get(id)
if err != nil {
return nil, nil, err
}
if rlstore.Exists(container.LayerID) {
return rlstore.ParentOwners(container.LayerID)
}
return nil, nil, ErrLayerUnknown
}
func (s *store) Layers() ([]Layer, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
if err := lstore.LoadLocked(); err != nil {
return nil, err
}
layers, err := lstore.Layers()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range lstores {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
storeLayers, err := store.Layers()
if err != nil {
return nil, err
}
layers = append(layers, storeLayers...)
}
return layers, nil
}
func (s *store) Images() ([]Image, error) {
var images []Image
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
storeImages, err := store.Images()
if err != nil {
return nil, err
}
images = append(images, storeImages...)
}
return images, nil
}
func (s *store) Containers() ([]Container, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
return rcstore.Containers()
}
func (s *store) Layer(id string) (*Layer, error) {
lstore, err := s.LayerStore()
if err != nil {
return nil, err
}
lstores, err := s.ROLayerStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROLayerStore{lstore}, lstores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
layer, err := store.Get(id)
if err == nil {
return layer, nil
}
}
return nil, ErrLayerUnknown
}
func (s *store) Image(id string) (*Image, error) {
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
image, err := store.Get(id)
if err == nil {
return image, nil
}
}
return nil, errors.Wrapf(ErrImageUnknown, "error locating image with ID %q", id)
}
func (s *store) ImagesByTopLayer(id string) ([]*Image, error) {
images := []*Image{}
layer, err := s.Layer(id)
if err != nil {
return nil, err
}
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, s := range append([]ROImageStore{istore}, istores...) {
store := s
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
imageList, err := store.Images()
if err != nil {
return nil, err
}
for _, image := range imageList {
if image.TopLayer == layer.ID || stringutils.InSlice(image.MappedTopLayers, layer.ID) {
images = append(images, &image)
}
}
}
return images, nil
}
func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) {
images := []*Image{}
istore, err := s.ImageStore()
if err != nil {
return nil, err
}
istores, err := s.ROImageStores()
if err != nil {
return nil, err
}
for _, store := range append([]ROImageStore{istore}, istores...) {
store.RLock()
defer store.Unlock()
if modified, err := store.Modified(); modified || err != nil {
if err = store.Load(); err != nil {
return nil, err
}
}
imageList, err := store.ByDigest(d)
if err != nil && errors.Cause(err) != ErrImageUnknown {
return nil, err
}
images = append(images, imageList...)
}
return images, nil
}
func (s *store) Container(id string) (*Container, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
return rcstore.Get(id)
}
func (s *store) ContainerLayerID(id string) (string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return "", err
}
}
container, err := rcstore.Get(id)
if err != nil {
return "", err
}
return container.LayerID, nil
}
func (s *store) ContainerByLayer(id string) (*Container, error) {
layer, err := s.Layer(id)
if err != nil {
return nil, err
}
rcstore, err := s.ContainerStore()
if err != nil {
return nil, err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return nil, err
}
}
containerList, err := rcstore.Containers()
if err != nil {
return nil, err
}
for _, container := range containerList {
if container.LayerID == layer.ID {
return &container, nil
}
}
return nil, ErrContainerUnknown
}
func (s *store) ContainerDirectory(id string) (string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return "", err
}
}
id, err = rcstore.Lookup(id)
if err != nil {
return "", err
}
middleDir := s.graphDriverName + "-containers"
gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
if err := os.MkdirAll(gcpath, 0700); err != nil {
return "", err
}
return gcpath, nil
}
func (s *store) ContainerRunDirectory(id string) (string, error) {
rcstore, err := s.ContainerStore()
if err != nil {
return "", err
}
rcstore.RLock()
defer rcstore.Unlock()
if modified, err := rcstore.Modified(); modified || err != nil {
if err = rcstore.Load(); err != nil {
return "", err
}
}
id, err = rcstore.Lookup(id)
if err != nil {
return "", err
}
middleDir := s.graphDriverName + "-containers"
rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
if err := os.MkdirAll(rcpath, 0700); err != nil {
return "", err
}
return rcpath, nil
}
func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error {
dir, err := s.ContainerDirectory(id)
if err != nil {
return err
}
err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
}
func (s *store) FromContainerDirectory(id, file string) ([]byte, error) {
dir, err := s.ContainerDirectory(id)
if err != nil {
return nil, err
}
return ioutil.ReadFile(filepath.Join(dir, file))
}
func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error {
dir, err := s.ContainerRunDirectory(id)
if err != nil {
return err
}
err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700)
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600)
}
func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) {
dir, err := s.ContainerRunDirectory(id)
if err != nil {
return nil, err
}
return ioutil.ReadFile(filepath.Join(dir, file))
}
func (s *store) Shutdown(force bool) ([]string, error) {
mounted := []string{}
modified := false
rlstore, err := s.LayerStore()
if err != nil {
return mounted, err
}
s.graphLock.Lock()
defer s.graphLock.Unlock()
rlstore.Lock()
defer rlstore.Unlock()
if modified, err := rlstore.Modified(); modified || err != nil {
if err = rlstore.Load(); err != nil {
return nil, err
}
}
layers, err := rlstore.Layers()
if err != nil {
return mounted, err
}
for _, layer := range layers {
if layer.MountCount == 0 {
continue
}
mounted = append(mounted, layer.ID)
if force {
for layer.MountCount > 0 {
_, err2 := rlstore.Unmount(layer.ID, force)
if err2 != nil {
if err == nil {
err = err2
}
break
}
modified = true
}
}
}
if len(mounted) > 0 && err == nil {
err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted")
}
if err == nil {
err = s.graphDriver.Cleanup()
s.graphLock.Touch()
modified = true
}
if modified {
rlstore.Touch()
}
return mounted, err
}
// Convert a BigData key name into an acceptable file name.
func makeBigDataBaseName(key string) string {
reader := strings.NewReader(key)
for reader.Len() > 0 {
ch, size, err := reader.ReadRune()
if err != nil || size != 1 {
break
}
if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') {
break
}
}
if reader.Len() > 0 {
return "=" + base64.StdEncoding.EncodeToString([]byte(key))
}
return key
}
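// makeBigDataBaseName examples (illustrative): "manifest" is returned unchanged,
// while "sha256:deadbeef" becomes "=" + base64("sha256:deadbeef") because ':'
// falls outside the allowed '.', '0'-'9', 'a'-'z' set.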
func stringSliceWithoutValue(slice []string, value string) []string {
modified := make([]string, 0, len(slice))
for _, v := range slice {
if v == value {
continue
}
modified = append(modified, v)
}
return modified
}
func copyStringSlice(slice []string) []string {
if len(slice) == 0 {
return nil
}
ret := make([]string, len(slice))
copy(ret, slice)
return ret
}
func copyStringInt64Map(m map[string]int64) map[string]int64 {
ret := make(map[string]int64, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
func copyStringDigestMap(m map[string]digest.Digest) map[string]digest.Digest {
ret := make(map[string]digest.Digest, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
func copyDigestSlice(slice []digest.Digest) []digest.Digest {
if len(slice) == 0 {
return nil
}
ret := make([]digest.Digest, len(slice))
copy(ret, slice)
return ret
}
// copyStringInterfaceMap still forces us to assume that the interface{} is
// a non-pointer scalar value
func copyStringInterfaceMap(m map[string]interface{}) map[string]interface{} {
ret := make(map[string]interface{}, len(m))
for k, v := range m {
ret[k] = v
}
return ret
}
// defaultConfigFile path to the system wide storage.conf file
const defaultConfigFile = "/etc/containers/storage.conf"
// AutoUserNsMinSize is the minimum size for automatically created user namespaces
const AutoUserNsMinSize = 1024
// AutoUserNsMaxSize is the maximum size for automatically created user namespaces
const AutoUserNsMaxSize = 65536
// RootAutoUserNsUser is the default user used for root containers when automatically
// creating a user namespace.
const RootAutoUserNsUser = "containers"
// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
if rootless {
if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" {
return filepath.Join(configHome, "containers/storage.conf"), nil
}
home := homedir.Get()
if home == "" {
return "", errors.New("cannot determine user's homedir")
}
return filepath.Join(home, ".config/containers/storage.conf"), nil
}
return defaultConfigFile, nil
}
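// DefaultConfigFile resolution examples (illustrative): for a rootless user this
// yields $XDG_CONFIG_HOME/containers/storage.conf when XDG_CONFIG_HOME is set,
// otherwise $HOME/.config/containers/storage.conf; for root it is
// /etc/containers/storage.conf.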
// TOML-friendly explicit tables used for conversions.
type tomlConfig struct {
Storage struct {
Driver string `toml:"driver"`
RunRoot string `toml:"runroot"`
GraphRoot string `toml:"graphroot"`
RootlessStoragePath string `toml:"rootless_storage_path"`
Options cfg.OptionsConfig `toml:"options"`
} `toml:"storage"`
}
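// An illustrative storage.conf that this struct can decode (values are examples only):
//
//	[storage]
//	driver = "overlay"
//	runroot = "/run/containers/storage"
//	graphroot = "/var/lib/containers/storage"
//
//	[storage.options]
//	additionalimagestores = []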
// ReloadConfigurationFile parses the specified configuration file and overrides
// the configuration in storeOptions.
func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) {
data, err := ioutil.ReadFile(configFile)
if err != nil {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
return
}
}
config := new(tomlConfig)
if _, err := toml.Decode(string(data), config); err != nil {
fmt.Printf("Failed to parse %s %v\n", configFile, err.Error())
return
}
if os.Getenv("STORAGE_DRIVER") != "" {
config.Storage.Driver = os.Getenv("STORAGE_DRIVER")
}
if config.Storage.Driver != "" {
storeOptions.GraphDriverName = config.Storage.Driver
}
if config.Storage.RunRoot != "" {
storeOptions.RunRoot = config.Storage.RunRoot
}
if config.Storage.GraphRoot != "" {
storeOptions.GraphRoot = config.Storage.GraphRoot
}
if config.Storage.RootlessStoragePath != "" {
storeOptions.RootlessStoragePath = config.Storage.RootlessStoragePath
}
for _, s := range config.Storage.Options.AdditionalImageStores {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s))
}
if config.Storage.Options.Size != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size))
}
if config.Storage.Options.MountProgram != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mount_program=%s", config.Storage.Driver, config.Storage.Options.MountProgram))
}
if config.Storage.Options.IgnoreChownErrors != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.ignore_chown_errors=%s", config.Storage.Driver, config.Storage.Options.IgnoreChownErrors))
}
if config.Storage.Options.MountOpt != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, fmt.Sprintf("%s.mountopt=%s", config.Storage.Driver, config.Storage.Options.MountOpt))
}
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup == "" {
config.Storage.Options.RemapGroup = config.Storage.Options.RemapUser
}
if config.Storage.Options.RemapGroup != "" && config.Storage.Options.RemapUser == "" {
config.Storage.Options.RemapUser = config.Storage.Options.RemapGroup
}
if config.Storage.Options.RemapUser != "" && config.Storage.Options.RemapGroup != "" {
mappings, err := idtools.NewIDMappings(config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup)
if err != nil {
fmt.Printf("Error initializing ID mappings for %s:%s %v\n", config.Storage.Options.RemapUser, config.Storage.Options.RemapGroup, err)
return
}
storeOptions.UIDMap = mappings.UIDs()
storeOptions.GIDMap = mappings.GIDs()
}
uidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapUIDs}, "remap-uids")
if err != nil {
fmt.Print(err)
} else {
storeOptions.UIDMap = append(storeOptions.UIDMap, uidmap...)
}
gidmap, err := idtools.ParseIDMap([]string{config.Storage.Options.RemapGIDs}, "remap-gids")
if err != nil {
fmt.Print(err)
} else {
storeOptions.GIDMap = append(storeOptions.GIDMap, gidmap...)
}
storeOptions.RootAutoNsUser = config.Storage.Options.RootAutoUsernsUser
if config.Storage.Options.AutoUsernsMinSize > 0 {
storeOptions.AutoNsMinSize = config.Storage.Options.AutoUsernsMinSize
}
if config.Storage.Options.AutoUsernsMaxSize > 0 {
storeOptions.AutoNsMaxSize = config.Storage.Options.AutoUsernsMaxSize
}
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, cfg.GetGraphDriverOptions(storeOptions.GraphDriverName, config.Storage.Options)...)
if os.Getenv("STORAGE_OPTS") != "" {
storeOptions.GraphDriverOptions = append(storeOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...)
}
if len(storeOptions.GraphDriverOptions) == 1 && storeOptions.GraphDriverOptions[0] == "" {
storeOptions.GraphDriverOptions = nil
}
}
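// The file-based settings can also be overridden through the environment, e.g.
// (illustrative values): STORAGE_DRIVER=overlay selects the graph driver ahead of
// storage.conf, and STORAGE_OPTS=overlay.mountopt=nodev appends extra comma-separated
// graph driver options.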
var prevReloadConfig = struct {
storeOptions *StoreOptions
mod time.Time
mutex sync.Mutex
configFile string
}{}
func reloadConfigurationFileIfNeeded(configFile string, storeOptions *StoreOptions) {
prevReloadConfig.mutex.Lock()
defer prevReloadConfig.mutex.Unlock()
fi, err := os.Stat(configFile)
if err != nil {
if !os.IsNotExist(err) {
fmt.Printf("Failed to read %s %v\n", configFile, err.Error())
}
return
}
mtime := fi.ModTime()
if prevReloadConfig.storeOptions != nil && prevReloadConfig.mod == mtime && prevReloadConfig.configFile == configFile {
*storeOptions = *prevReloadConfig.storeOptions
return
}
ReloadConfigurationFile(configFile, storeOptions)
prevReloadConfig.storeOptions = storeOptions
prevReloadConfig.mod = mtime
prevReloadConfig.configFile = configFile
}
func init() {
defaultStoreOptions.RunRoot = "/var/run/containers/storage"
defaultStoreOptions.GraphRoot = "/var/lib/containers/storage"
defaultStoreOptions.GraphDriverName = ""
reloadConfigurationFileIfNeeded(defaultConfigFile, &defaultStoreOptions)
}
// GetDefaultMountOptions returns the default mountoptions defined in container/storage
func GetDefaultMountOptions() ([]string, error) {
return GetMountOptions(defaultStoreOptions.GraphDriverName, defaultStoreOptions.GraphDriverOptions)
}
// GetMountOptions returns the mountoptions for the specified driver and graphDriverOptions
func GetMountOptions(driver string, graphDriverOptions []string) ([]string, error) {
mountOpts := []string{
".mountopt",
fmt.Sprintf("%s.mountopt", driver),
}
for _, option := range graphDriverOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
for _, m := range mountOpts {
if m == key {
return strings.Split(val, ","), nil
}
}
}
return nil, nil
}
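// Illustrative use of GetMountOptions (driver and option values are examples):
//
//	opts, err := GetMountOptions("overlay", []string{"overlay.mountopt=nodev,metacopy=on"})
//	// opts == []string{"nodev", "metacopy=on"}, err == nil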
| [
"\"XDG_CONFIG_HOME\"",
"\"STORAGE_DRIVER\"",
"\"STORAGE_DRIVER\"",
"\"STORAGE_OPTS\"",
"\"STORAGE_OPTS\""
]
| []
| [
"STORAGE_OPTS",
"XDG_CONFIG_HOME",
"STORAGE_DRIVER"
]
| [] | ["STORAGE_OPTS", "XDG_CONFIG_HOME", "STORAGE_DRIVER"] | go | 3 | 0 | |
api/smtp_templates.go | package main
import (
"fmt"
"os"
"text/template"
)
var headerTemplate *template.Template
type headerPlugs struct {
FromAddress string
ToName string
ToAddress string
Subject string
}
var templates map[string]*template.Template
func smtpTemplatesLoad() error {
var err error
headerTemplate, err = template.New("header").Parse(`MIME-Version: 1.0
From: EreaDrone <{{.FromAddress}}>
To: {{.ToName}} <{{.ToAddress}}>
Content-Type: text/plain; charset=UTF-8
Subject: {{.Subject}}
`)
if err != nil {
logger.Errorf("cannot parse header template: %v", err)
return errorMalformedTemplate
}
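	// A rendered header would look like this (illustrative values):
	//
	//	MIME-Version: 1.0
	//	From: EreaDrone <[email protected]>
	//	To: Jane Doe <[email protected]>
	//	Content-Type: text/plain; charset=UTF-8
	//	Subject: Confirm your email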
names := []string{
"confirm-hex",
"reset-hex",
"domain-export",
"domain-export-error",
}
templates = make(map[string]*template.Template)
logger.Infof("loading templates: %v", names)
for _, name := range names {
var err error
templates[name] = template.New(name)
templates[name], err = template.ParseFiles(fmt.Sprintf("%s/templates/%s.txt", os.Getenv("STATIC"), name))
if err != nil {
logger.Errorf("cannot parse %s/templates/%s.txt: %v", os.Getenv("STATIC"), name, err)
return errorMalformedTemplate
}
}
return nil
}
| [
"\"STATIC\"",
"\"STATIC\""
]
| []
| [
"STATIC"
]
| [] | ["STATIC"] | go | 1 | 0 | |
Zero/settings.py | """
Django settings for Zero project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
import django_heroku
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-f8b&g4mpv@-2$2+6dvej0eecwmdk8e&dmu%1g^vek!-80v$rr3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['zerolang.herokuapp.com', '127.0.0.1', 'localhost']  # bare host names, not URLs
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'whitenoise.runserver_nostatic',
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'Zero.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Zero.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
BASE_DIR / "static",
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
django_heroku.settings(locals())
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'),
},
},
}
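# Example (illustrative): override the console log level at runtime with
#   DJANGO_LOG_LEVEL=WARNING python manage.py runserver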
| []
| []
| [
"DJANGO_LOG_LEVEL"
]
| [] | ["DJANGO_LOG_LEVEL"] | python | 1 | 0 | |
src/test/java/com/google/jenkins/plugins/k8sengine/KubernetesEngineBuilderPipelineIT.java | /*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.jenkins.plugins.k8sengine;
import static com.google.jenkins.plugins.k8sengine.ITUtil.copyTestFileToDir;
import static com.google.jenkins.plugins.k8sengine.ITUtil.dumpLog;
import static com.google.jenkins.plugins.k8sengine.ITUtil.formatRandomName;
import static com.google.jenkins.plugins.k8sengine.ITUtil.loadResource;
import static org.junit.Assert.assertNotNull;
import com.cloudbees.plugins.credentials.Credentials;
import com.cloudbees.plugins.credentials.CredentialsStore;
import com.cloudbees.plugins.credentials.SystemCredentialsProvider;
import com.cloudbees.plugins.credentials.domains.Domain;
import com.cloudbees.plugins.credentials.domains.DomainRequirement;
import com.google.api.client.http.HttpTransport;
import com.google.api.services.container.model.Cluster;
import com.google.common.collect.ImmutableList;
import com.google.jenkins.plugins.credentials.oauth.GoogleRobotPrivateKeyCredentials;
import com.google.jenkins.plugins.credentials.oauth.ServiceAccountConfig;
import com.google.jenkins.plugins.k8sengine.client.ClientFactory;
import com.google.jenkins.plugins.k8sengine.client.ContainerClient;
import hudson.EnvVars;
import hudson.FilePath;
import hudson.Launcher;
import hudson.model.Result;
import hudson.slaves.EnvironmentVariablesNodeProperty;
import java.util.Optional;
import java.util.logging.Logger;
import org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition;
import org.jenkinsci.plugins.workflow.job.WorkflowJob;
import org.jenkinsci.plugins.workflow.job.WorkflowRun;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.jvnet.hudson.test.JenkinsRule;
/** Tests the {@link KubernetesEngineBuilder} for use-cases involving the Jenkins Pipeline DSL. */
public class KubernetesEngineBuilderPipelineIT {
private static final Logger LOGGER =
Logger.getLogger(KubernetesEngineBuilderPipelineIT.class.getName());
private static final String TEST_DEPLOYMENT_MANIFEST = "testDeployment.yml";
@ClassRule public static JenkinsRule jenkinsRule = new JenkinsRule();
private static EnvVars envVars;
private static String clusterName;
private static String projectId;
private static String testZone;
private static String credentialsId;
private static ContainerClient client;
@BeforeClass
public static void init() throws Exception {
LOGGER.info("Initializing KubernetesEngineBuilderPipelineIT");
projectId = System.getenv("GOOGLE_PROJECT_ID");
assertNotNull("GOOGLE_PROJECT_ID env var must be set", projectId);
testZone = System.getenv("GOOGLE_PROJECT_ZONE");
assertNotNull("GOOGLE_PROJECT_ZONE env var must be set", testZone);
clusterName = System.getenv("GOOGLE_GKE_CLUSTER");
assertNotNull("GOOGLE_GKE_CLUSTER env var must be set", clusterName);
LOGGER.info("Creating credentials");
String serviceAccountKeyJson = System.getenv("GOOGLE_CREDENTIALS");
assertNotNull("GOOGLE_CREDENTIALS env var must be set", serviceAccountKeyJson);
credentialsId = projectId;
ServiceAccountConfig sac = new StringJsonServiceAccountConfig(serviceAccountKeyJson);
Credentials c = (Credentials) new GoogleRobotPrivateKeyCredentials(credentialsId, sac, null);
CredentialsStore store =
new SystemCredentialsProvider.ProviderImpl().getStore(jenkinsRule.jenkins);
store.addCredentials(Domain.global(), c);
client =
new ClientFactory(
jenkinsRule.jenkins,
ImmutableList.<DomainRequirement>of(),
credentialsId,
Optional.<HttpTransport>empty())
.containerClient();
EnvironmentVariablesNodeProperty prop = new EnvironmentVariablesNodeProperty();
envVars = prop.getEnvVars();
envVars.put("PROJECT_ID", projectId);
envVars.put("CLUSTER_NAME", clusterName);
envVars.put("CREDENTIALS_ID", credentialsId);
envVars.put("ZONE", testZone);
jenkinsRule.jenkins.getGlobalNodeProperties().add(prop);
}
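  // Illustrative local setup for these tests (variable names from init() above; values are examples):
  //   export GOOGLE_PROJECT_ID=my-project
  //   export GOOGLE_PROJECT_ZONE=us-central1-a
  //   export GOOGLE_GKE_CLUSTER=test-cluster
  //   export GOOGLE_CREDENTIALS="$(cat service-account-key.json)"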
@Test
public void testWorkspaceDeclarativePipelineDeploysProperly() throws Exception {
envVars.put("MANIFEST_PATTERN", TEST_DEPLOYMENT_MANIFEST);
envVars.put("NAMESPACE", "default");
WorkflowJob testProject =
jenkinsRule.createProject(WorkflowJob.class, formatRandomName("test"));
testProject.setDefinition(
new CpsFlowDefinition(
loadResource(getClass(), "workspaceDeclarativePipeline.groovy"), true));
copyTestFileToDir(
getClass(),
jenkinsRule.jenkins.getWorkspaceFor(testProject).getRemote(),
TEST_DEPLOYMENT_MANIFEST);
WorkflowRun run = testProject.scheduleBuild2(0).waitForStart();
assertNotNull(run);
jenkinsRule.assertBuildStatusSuccess(jenkinsRule.waitForCompletion(run));
dumpLog(LOGGER, run);
kubectlDelete(
jenkinsRule.createLocalLauncher(),
jenkinsRule.jenkins.getWorkspaceFor(testProject),
TEST_DEPLOYMENT_MANIFEST,
"deployment",
"nginx-deployment",
"default");
}
@Test
public void testGitDeclarativePipelineDeploysProperly() throws Exception {
envVars.put("GIT_URL", "https://github.com/jenkinsci/google-kubernetes-engine-plugin.git");
envVars.put("MANIFEST_PATTERN", "docs/resources/manifest.yaml");
envVars.put("NAMESPACE", "default");
WorkflowJob testProject =
jenkinsRule.createProject(WorkflowJob.class, formatRandomName("test"));
testProject.setDefinition(
new CpsFlowDefinition(loadResource(getClass(), "gitDeclarativePipeline.groovy"), true));
WorkflowRun run = testProject.scheduleBuild2(0).waitForStart();
assertNotNull(run);
jenkinsRule.assertBuildStatusSuccess(jenkinsRule.waitForCompletion(run));
dumpLog(LOGGER, run);
kubectlDelete(
jenkinsRule.createLocalLauncher(),
jenkinsRule.jenkins.getWorkspaceFor(testProject),
"docs/resources/manifest.yaml",
"deployment",
"nginx-deployment",
"default");
}
@Test
public void testMalformedDeclarativePipelineFails() throws Exception {
envVars.put("MANIFEST_PATTERN", TEST_DEPLOYMENT_MANIFEST);
envVars.put("NAMESPACE", "default");
WorkflowJob testProject =
jenkinsRule.createProject(WorkflowJob.class, formatRandomName("test"));
testProject.setDefinition(
new CpsFlowDefinition(
loadResource(getClass(), "malformedDeclarativePipeline.groovy"), true));
copyTestFileToDir(
getClass(),
jenkinsRule.jenkins.getWorkspaceFor(testProject).getRemote(),
TEST_DEPLOYMENT_MANIFEST);
WorkflowRun run = testProject.scheduleBuild2(0).waitForStart();
assertNotNull(run);
jenkinsRule.assertBuildStatus(Result.FAILURE, jenkinsRule.waitForCompletion(run));
dumpLog(LOGGER, run);
}
@Test
public void testNoNamespaceDeclarativePipelineDeploysProperly() throws Exception {
envVars.put("MANIFEST_PATTERN", TEST_DEPLOYMENT_MANIFEST);
WorkflowJob testProject =
jenkinsRule.createProject(WorkflowJob.class, formatRandomName("test"));
testProject.setDefinition(
new CpsFlowDefinition(
loadResource(getClass(), "noNamespaceDeclarativePipeline.groovy"), true));
copyTestFileToDir(
getClass(),
jenkinsRule.jenkins.getWorkspaceFor(testProject).getRemote(),
TEST_DEPLOYMENT_MANIFEST);
WorkflowRun run = testProject.scheduleBuild2(0).waitForStart();
assertNotNull(run);
jenkinsRule.assertBuildStatus(Result.SUCCESS, jenkinsRule.waitForCompletion(run));
dumpLog(LOGGER, run);
kubectlDelete(
jenkinsRule.createLocalLauncher(),
jenkinsRule.jenkins.getWorkspaceFor(testProject),
TEST_DEPLOYMENT_MANIFEST,
"deployment",
"nginx-deployment",
"");
}
@Test
public void testCustomNamespaceDeclarativePipelineDeploysProperly() throws Exception {
envVars.put("MANIFEST_PATTERN", TEST_DEPLOYMENT_MANIFEST);
envVars.put("NAMESPACE", "test");
WorkflowJob testProject =
jenkinsRule.createProject(WorkflowJob.class, formatRandomName("test"));
testProject.setDefinition(
new CpsFlowDefinition(
loadResource(getClass(), "workspaceDeclarativePipeline.groovy"), true));
copyTestFileToDir(
getClass(),
jenkinsRule.jenkins.getWorkspaceFor(testProject).getRemote(),
TEST_DEPLOYMENT_MANIFEST);
WorkflowRun run = testProject.scheduleBuild2(0).waitForStart();
assertNotNull(run);
jenkinsRule.assertBuildStatus(Result.SUCCESS, jenkinsRule.waitForCompletion(run));
dumpLog(LOGGER, run);
kubectlDelete(
jenkinsRule.createLocalLauncher(),
jenkinsRule.jenkins.getWorkspaceFor(testProject),
TEST_DEPLOYMENT_MANIFEST,
"deployment",
"nginx-deployment",
"test");
}
private static void kubectlDelete(
Launcher launcher,
FilePath workspace,
String manifestPattern,
String kind,
String name,
String namespace)
throws Exception {
Cluster cluster = client.getCluster(projectId, testZone, clusterName);
KubeConfig kubeConfig = KubeConfig.fromCluster(projectId, cluster);
KubectlWrapper kubectl =
new KubectlWrapper.Builder()
.workspace(workspace)
.launcher(launcher)
.kubeConfig(kubeConfig)
.namespace(namespace)
.build();
FilePath manifestFile = workspace.child(manifestPattern);
kubectl.runKubectlCommand("delete", ImmutableList.<String>of(kind, name));
}
}
| [
"\"GOOGLE_PROJECT_ID\"",
"\"GOOGLE_PROJECT_ZONE\"",
"\"GOOGLE_GKE_CLUSTER\"",
"\"GOOGLE_CREDENTIALS\""
]
| []
| [
"GOOGLE_PROJECT_ID",
"GOOGLE_PROJECT_ZONE",
"GOOGLE_CREDENTIALS",
"GOOGLE_GKE_CLUSTER"
]
| [] | ["GOOGLE_PROJECT_ID", "GOOGLE_PROJECT_ZONE", "GOOGLE_CREDENTIALS", "GOOGLE_GKE_CLUSTER"] | java | 4 | 0 | |
cmd/gloat/main.go | package main
import (
"database/sql"
"errors"
"flag"
"fmt"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/webedx-spark/gloat"
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
)
const usage = `Usage: gloat [OPTION ...] [COMMAND ...]
Gloat is a Go SQL migration utility.
Commands:
new Create a new migration folder
up Apply new migrations
down Revert the last applied migration
  to <version> Migrate down to a given version.
latest Latest migration in the source.
  current      Latest applied migration.
present List all present versions.
Options:
-src The folder with migrations
(default $DATABASE_SRC or database/migrations)
-url The database connection URL
(default $DATABASE_URL)
-help Show this message
`
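// Illustrative invocations (connection URL and migration name are example values):
//
//	gloat -url postgres://localhost/app_dev?sslmode=disable up
//	gloat -src db/migrations new add_users_table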
type arguments struct {
url string
src string
rest []string
}
func main() {
args := parseArguments()
var cmdName string
if len(args.rest) > 0 {
cmdName = args.rest[0]
}
var err error
switch cmdName {
case "up":
err = upCmd(args)
case "down":
err = downCmd(args)
case "new":
err = newCmd(args)
case "to":
err = migrateToCmd(args)
case "latest":
err = latestCmd(args)
case "current":
err = currentCmd(args)
case "present":
err = presentCmd(args)
default:
fmt.Fprintf(os.Stderr, usage)
os.Exit(2)
}
if err != nil {
fmt.Fprintf(os.Stderr, "Error: %+v\n", err)
os.Exit(2)
}
}
func upCmd(args arguments) error {
gl, err := setupGloat(args)
if err != nil {
return err
}
migrations, err := gl.Unapplied()
if err != nil {
return err
}
appliedMigrations := map[int64]bool{}
for _, migration := range migrations {
fmt.Printf("Applying: %d...\n", migration.Version)
if err := gl.Apply(migration); err != nil {
return err
}
appliedMigrations[migration.Version] = true
}
if len(appliedMigrations) == 0 {
fmt.Printf("No migrations to apply\n")
}
return nil
}
func latestCmd(args arguments) error {
gl, err := setupGloat(args)
if err != nil {
return err
}
latest, err := gl.Latest()
if err != nil {
return err
}
if latest != nil {
fmt.Printf("%d", latest.Version)
}
return nil
}
func presentCmd(args arguments) error {
gl, err := setupGloat(args)
if err != nil {
return err
}
migrations, err := gl.Present()
if err != nil {
return err
}
migrations.Sort()
for i, m := range migrations {
fmt.Printf("%d", m.Version)
if i != len(migrations)-1 {
fmt.Print(",")
}
}
return nil
}
func currentCmd(args arguments) error {
gl, err := setupGloat(args)
if err != nil {
return err
}
current, err := gl.Current()
if err != nil {
return err
}
if current != nil {
fmt.Printf("%d", current.Version)
}
return nil
}
func migrateToCmd(args arguments) error {
gl, err := setupGloat(args)
if err != nil {
return err
}
if len(args.rest) < 2 {
return errors.New("migrate to requires a version to migrate to")
}
version, err := strconv.ParseInt(args.rest[1], 10, 64)
if err != nil {
return err
}
migrations, err := gl.AppliedAfter(version)
if err != nil {
return err
}
for _, migration := range migrations {
fmt.Printf("Reverting: %d...\n", migration.Version)
if err := gl.Revert(migration); err != nil {
return err
}
}
return nil
}
func downCmd(args arguments) error {
gl, err := setupGloat(args)
if err != nil {
return err
}
migration, err := gl.Current()
if err != nil {
return err
}
if migration == nil {
fmt.Printf("No migrations to revert\n")
return nil
}
fmt.Printf("Reverting: %d...\n", migration.Version)
if err := gl.Revert(migration); err != nil {
return err
}
return nil
}
func newCmd(args arguments) error {
if _, err := os.Stat(args.src); os.IsNotExist(err) {
return err
}
if len(args.rest) < 2 {
return errors.New("new requires a migration name given as an argument")
}
migration := gloat.GenerateMigration(strings.Join(args.rest[1:], "_"))
migrationDirectoryPath := filepath.Join(args.src, migration.Path)
if err := os.MkdirAll(migrationDirectoryPath, 0755); err != nil {
return err
}
f, err := os.Create(filepath.Join(migrationDirectoryPath, "up.sql"))
if err != nil {
return err
}
f.Close()
f, err = os.Create(filepath.Join(migrationDirectoryPath, "down.sql"))
if err != nil {
return err
}
f.Close()
fmt.Printf("Created %s\n", migrationDirectoryPath)
return nil
}
func parseArguments() arguments {
var args arguments
urlDefault := os.Getenv("DATABASE_URL")
urlUsage := `database connection url`
srcDefault := os.Getenv("DATABASE_SRC")
if srcDefault == "" {
srcDefault = "database/migrations"
}
srcUsage := `the folder with migrations`
flag.StringVar(&args.url, "url", urlDefault, urlUsage)
flag.StringVar(&args.src, "src", srcDefault, srcUsage)
flag.Usage = func() { fmt.Fprintf(os.Stderr, usage) }
flag.Parse()
args.rest = flag.Args()
return args
}
func setupGloat(args arguments) (*gloat.Gloat, error) {
u, err := url.Parse(args.url)
if err != nil {
return nil, err
}
db, err := sql.Open(u.Scheme, args.url)
if err != nil {
return nil, err
}
store, err := databaseStoreFactory(u.Scheme, db)
if err != nil {
return nil, err
}
return &gloat.Gloat{
Store: store,
Source: gloat.NewFileSystemSource(args.src),
Executor: gloat.NewSQLExecutor(db),
}, nil
}
func databaseStoreFactory(driver string, db *sql.DB) (gloat.Store, error) {
switch driver {
case "postgres", "postgresql":
return gloat.NewPostgreSQLStore(db), nil
case "mysql":
return gloat.NewMySQLStore(db), nil
case "sqlite", "sqlite3":
return gloat.NewMySQLStore(db), nil
}
return nil, errors.New("unsupported database driver " + driver)
}
| [
"\"DATABASE_URL\"",
"\"DATABASE_SRC\""
]
| []
| [
"DATABASE_URL",
"DATABASE_SRC"
]
| [] | ["DATABASE_URL", "DATABASE_SRC"] | go | 2 | 0 | |
team5ml/register/register_model.py | """
Copyright (C) Microsoft Corporation. All rights reserved.
Microsoft Corporation (“Microsoft”) grants you a nonexclusive, perpetual,
royalty-free right to use, copy, and modify the software code provided by us
("Software Code"). You may not sublicense the Software Code or any use of it
(except to your affiliates and to vendors to perform work on your behalf)
through distribution, network access, service agreement, lease, rental, or
otherwise. This license does not purport to express any claim of ownership over
data you may have shared with Microsoft in the creation of the Software Code.
Unless applicable law gives you more rights, Microsoft reserves all other
rights not expressly granted herein, whether by implication, estoppel or
otherwise.
THE SOFTWARE CODE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
MICROSOFT OR ITS LICENSORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THE SOFTWARE CODE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import json
import os
import sys
import argparse
import traceback
import joblib
from azureml.core import Run, Experiment, Workspace, Dataset
from azureml.core.model import Model as AMLModel
def main():
run = Run.get_context()
if (run.id.startswith('OfflineRun')):
from dotenv import load_dotenv
# For local development, set values in this section
load_dotenv()
workspace_name = os.environ.get("WORKSPACE_NAME")
experiment_name = os.environ.get("EXPERIMENT_NAME")
resource_group = os.environ.get("RESOURCE_GROUP")
subscription_id = os.environ.get("SUBSCRIPTION_ID")
# run_id useful to query previous runs
run_id = "bd184a18-2ac8-4951-8e78-e290bef3b012"
aml_workspace = Workspace.get(
name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group
)
ws = aml_workspace
exp = Experiment(ws, experiment_name)
else:
ws = run.experiment.workspace
exp = run.experiment
run_id = 'amlcompute'
parser = argparse.ArgumentParser("register")
parser.add_argument(
"--run_id",
type=str,
help="Training run ID",
)
parser.add_argument(
"--model_name",
type=str,
help="Name of the Model",
default="team5ml_model.pkl",
)
parser.add_argument(
"--step_input",
type=str,
help=("input from previous steps")
)
args = parser.parse_args()
if (args.run_id is not None):
run_id = args.run_id
if (run_id == 'amlcompute'):
run_id = run.parent.id
model_name = args.model_name
model_path = args.step_input
print("Getting registration parameters")
# Load the registration parameters from the parameters file
with open("parameters.json") as f:
pars = json.load(f)
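    # Illustrative parameters.json shape assumed by this step (tag names are examples):
    # {
    #     "registration": {
    #         "tags": ["mse"]
    #     }
    # }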
try:
register_args = pars["registration"]
except KeyError:
print("Could not load registration values from file")
register_args = {"tags": []}
model_tags = {}
for tag in register_args["tags"]:
try:
mtag = run.parent.get_metrics()[tag]
model_tags[tag] = mtag
except KeyError:
print(f"Could not find {tag} metric on parent run.")
# load the model
print("Loading model from " + model_path)
model_file = os.path.join(model_path, model_name)
model = joblib.load(model_file)
parent_tags = run.parent.get_tags()
try:
build_id = parent_tags["BuildId"]
except KeyError:
build_id = None
print("BuildId tag not found on parent run.")
print(f"Tags present: {parent_tags}")
try:
build_uri = parent_tags["BuildUri"]
except KeyError:
build_uri = None
print("BuildUri tag not found on parent run.")
print(f"Tags present: {parent_tags}")
if (model is not None):
dataset_id = parent_tags["dataset_id"]
if (build_id is None):
register_aml_model(
model_file,
model_name,
model_tags,
exp,
run_id,
dataset_id)
elif (build_uri is None):
register_aml_model(
model_file,
model_name,
model_tags,
exp,
run_id,
dataset_id,
build_id)
else:
register_aml_model(
model_file,
model_name,
model_tags,
exp,
run_id,
dataset_id,
build_id,
build_uri)
else:
print("Model not found. Skipping model registration.")
sys.exit(0)
def model_already_registered(model_name, exp, run_id):
model_list = AMLModel.list(exp.workspace, name=model_name, run_id=run_id)
if len(model_list) >= 1:
e = ("Model name:", model_name, "in workspace",
exp.workspace, "with run_id ", run_id, "is already registered.")
print(e)
raise Exception(e)
else:
print("Model is not registered for this run.")
def register_aml_model(
model_path,
model_name,
model_tags,
exp,
run_id,
dataset_id,
build_id: str = 'none',
build_uri=None
):
try:
tagsValue = {"area": "team5ml",
"run_id": run_id,
"experiment_name": exp.name}
tagsValue.update(model_tags)
if (build_id != 'none'):
model_already_registered(model_name, exp, run_id)
tagsValue["BuildId"] = build_id
if (build_uri is not None):
tagsValue["BuildUri"] = build_uri
model = AMLModel.register(
workspace=exp.workspace,
model_name=model_name,
model_path=model_path,
tags=tagsValue,
datasets=[('training data',
Dataset.get_by_id(exp.workspace, dataset_id))])
os.chdir("..")
print(
"Model registered: {} \nModel Description: {} "
"\nModel Version: {}".format(
model.name, model.description, model.version
)
)
except Exception:
traceback.print_exc(limit=None, file=None, chain=True)
print("Model registration failed")
raise
if __name__ == '__main__':
main()
| []
| []
| [
"SUBSCRIPTION_ID",
"RESOURCE_GROUP",
"WORKSPACE_NAME",
"EXPERIMENT_NAME"
]
| [] | ["SUBSCRIPTION_ID", "RESOURCE_GROUP", "WORKSPACE_NAME", "EXPERIMENT_NAME"] | python | 4 | 0 | |
AutoML_autoaug.py | # -*- coding: utf-8 -*-
import os
os.environ['OMP_NUM_THREADS'] = '1'
import sys
import math
import random
import shutil
import pickle
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.models as models
import numpy as np
from PIL import Image, ImageEnhance, ImageOps
from hyperas import optim as hyperas_optim
from hyperopt import Trials, STATUS_OK, tpe
from hyperas.distributions import choice, uniform
from hyperas.utils import eval_hyperopt_space
from data_utils import *
from train_tools import *
from models import *
from counting import *
def _logging():
fpath = './results/AutoML/cifar100_autoaug_policy.log'
logger = logging.getLogger('Autoaugment Policy')
logger.setLevel(logging.DEBUG)
if not logger.handlers:
handler = logging.FileHandler(fpath)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
def _get_conf():
with open('./tmp.pickle', 'rb') as f:
conf_name = pickle.load(f)
opt = ConfLoader(conf_name).opt
return opt
def data():
# it just for processing, meaningless
dataloader = None
dataset_size = None
return dataloader, dataset_size
def create_model(dataloader, dataset_size):
class SubPolicy():
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
ranges = {
"shearX": np.linspace(0, 0.3, 10),
"shearY": np.linspace(0, 0.3, 10),
"translateX": np.linspace(0, 150 / 331, 10),
"translateY": np.linspace(0, 150 / 331, 10),
"rotate": np.linspace(0, 30, 10),
"color": np.linspace(0.0, 0.9, 10),
"posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
"solarize": np.linspace(256, 0, 10),
"contrast": np.linspace(0.0, 0.9, 10),
"sharpness": np.linspace(0.0, 0.9, 10),
"brightness": np.linspace(0.0, 0.9, 10),
"autocontrast": [0] * 10,
"equalize": [0] * 10,
"invert": [0] * 10
}
# from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
def rotate_with_fill(img, magnitude):
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
func = {
"shearX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
fillcolor=fillcolor),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
# "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
"posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
"solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
"contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
"equalize": lambda img, magnitude: ImageOps.equalize(img),
"invert": lambda img, magnitude: ImageOps.invert(img)
}
# self.name = "{}_{:.2f}_and_{}_{:.2f}".format(
# operation1, ranges[operation1][magnitude_idx1],
# operation2, ranges[operation2][magnitude_idx2])
self.p1 = p1
self.operation1 = func[operation1]
self.magnitude1 = ranges[operation1][magnitude_idx1]
self.p2 = p2
self.operation2 = func[operation2]
self.magnitude2 = ranges[operation2][magnitude_idx2]
def __call__(self, img):
if random.random() < self.p1: img = self.operation1(img, self.magnitude1)
if random.random() < self.p2: img = self.operation2(img, self.magnitude2)
return img
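    # Illustrative standalone use of a sub-policy (operations and magnitudes are example values):
    #   policy = SubPolicy(0.4, "rotate", 7, 0.6, "color", 3)
    #   augmented = policy(pil_image)  # applies each op with its configured probability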
class Autoaug():
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),
SubPolicy({{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice(["shearX", "shearY", "translateX", "translateY", "rotate", "color", "posterize", "solarize", "contrast", "sharpness", "brightness", "autocontrast", "equalize", "invert"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return 'AutoAugment CIFAR100 Policy'
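# Everything below builds and trains the model from the loaded configuration; the
# hyperas {{...}} markers above and the hyperopt-style return value indicate this
# code is the body of the create_model function optimized by hyperas_optim.minimize.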
opt = _get_conf()
logger = _logging()
if os.path.isdir(opt.data.root):
shutil.rmtree(opt.data.root)
DATASETTER = {'cifar10': cifar_10_setter,
'cifar100': cifar_100_setter}
CRITERION = {'mse': nn.MSELoss,
'cross_entropy': nn.CrossEntropyLoss,
'label_smoothing': LabelSmoothingLoss}
OPTIMIZER = {'sgd': optim.SGD,
'adam': optim.Adam,
'adagrad': optim.Adagrad,
'rmsprop': optim.RMSprop,
'radam': RAdam}
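# The lookup tables above map the string names used in the config file to the
# dataset setters, loss functions, and optimizer constructors they refer to.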
dataloaders, dataset_sizes = DATASETTER[opt.data.dataset](batch_size=opt.data.batch_size,
valid_size=opt.data.valid_size,
root=opt.data.root,
fixed_valid=opt.data.fixed_valid,
autoaugment=opt.data.autoaugment,
aug_policy=Autoaug())
avail_resource = opt.model.param.avail_resource
resolution_coefficient = opt.model.param.resolution_coefficient
resolution_coefficient = round(math.pow(resolution_coefficient, avail_resource), 2)
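# Compound scaling: raise the base resolution coefficient to the available resource
# budget and round to two decimals before building the EfficientNet parameters.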
blocks_args, global_params = efficientnet(blocks_args='default',
activation=opt.model.param.activation,
activation_param=opt.model.param.get('activation_param', {}),
resolution_coefficient=resolution_coefficient,
width_coefficient=opt.model.param.width_coefficient,
depth_coefficient=opt.model.param.depth_coefficient,
image_size=opt.model.param.image_size,
num_classes=opt.model.param.num_classes)
#meaningless = {{choice(['No', 'meaning'])}}
model = EfficientNet(blocks_args,
global_params)
model.to(opt.trainhandler.device)
criterion = CRITERION[opt.criterion.algo](**opt.criterion.param) if opt.criterion.get('param') else CRITERION[opt.criterion.algo]()
optimizer = OPTIMIZER[opt.optimizer.algo](model.parameters(), **opt.optimizer.param) if opt.optimizer.get('param') else OPTIMIZER[opt.optimizer.algo](model.parameters())
    # if no scheduler is configured, this section can be omitted from the config json file
if opt.scheduler.get('enabled', False):
scheduler_type = lr_scheduler.MultiStepLR if opt.scheduler.type == 'multistep' else lr_scheduler.CosineAnnealingLR if opt.scheduler.type == 'cosine' else lr_scheduler.StepLR
scheduler = scheduler_type(optimizer, **opt.scheduler.param)
else:
scheduler = None
train_handler = TrainHandler(model,
dataloaders,
dataset_sizes,
criterion,
optimizer,
scheduler,
device=opt.trainhandler.device,
path=opt.trainhandler.path,
mixup=opt.trainhandler.mixup.enabled,
alpha=opt.trainhandler.mixup.alpha,
precision=opt.trainhandler.precision)
train_handler.set_name(opt.trainhandler.name)
train_losses, valid_losses, train_accs, valid_accs = train_handler.train_model(num_epochs=opt.trainhandler.train.num_epochs)
_, valid_loss = sorted(valid_losses, key = lambda x: x[1])[0]
_, valid_acc = sorted(valid_accs, key = lambda x: x[1], reverse=True)[0]
logger.info('Validation accuracy : %.2f' % (valid_acc * 100))
return {'loss': valid_loss, 'status': STATUS_OK, 'model': train_handler.model}
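# Script entry point: the config filename is pickled to ./tmp.pickle, presumably so
# that _get_conf() can reload it from the module hyperas generates; hyperas_optim.minimize
# then searches over the {{...}} template parameters and the resulting trials are logged.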
if __name__ == '__main__':
conf_name = sys.argv[1]
with open('./tmp.pickle', 'wb') as f:
pickle.dump(conf_name, f)
fpath = './results/AutoML'
if not os.path.isdir(fpath):
os.makedirs(fpath)
if os.path.isfile('./results/AutoML/cifar100_autoaug_policy.log'):
os.remove('./results/AutoML/cifar100_autoaug_policy.log')
opt = ConfLoader(conf_name).opt
logger = _logging()
DATASETTER = {'cifar10': cifar_10_setter,
'cifar100': cifar_100_setter}
CRITERION = {'mse': nn.MSELoss,
'cross_entropy': nn.CrossEntropyLoss,
'label_smoothing': LabelSmoothingLoss}
OPTIMIZER = {'sgd': optim.SGD,
'adam': optim.Adam,
'adagrad': optim.Adagrad,
'rmsprop': optim.RMSprop,
'radam': RAdam}
trials = Trials()
best_run, best_model, space = hyperas_optim.minimize(model=create_model,
data=data,
algo=tpe.suggest,
functions=[_get_conf, _logging],
max_evals=1,
trials=trials,
eval_space=True,
return_space=True)
logger.info('=' * 30)
logger.info('Best performing model chosen hyper-parameters: %s' % best_run)
logger.info('=' * 30)
for t, trial in enumerate(trials):
vals = trial.get('misc').get('vals')
tmp = {}
for k,v in list(vals.items()):
tmp[k] = v[0]
logger.info('Trial %d : %s' % (t, eval_hyperopt_space(space, tmp)))
logger.info('=' * 30)
os.remove('./tmp.pickle') | []
| []
| [
"OMP_NUM_THREADS"
]
| [] | ["OMP_NUM_THREADS"] | python | 1 | 0 | |
ticketdesk/ticketdesk/wsgi.py | """
WSGI config for ticketdesk project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ticketdesk.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/main/java/edu/mit/sipg/gui/ScorePanel.java | /******************************************************************************
* Copyright 2020 Paul T. Grogan
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
package edu.mit.sipg.gui;
import java.awt.BorderLayout;
import java.awt.Color;
import java.awt.Component;
import java.awt.Font;
import java.awt.FontMetrics;
import java.awt.Graphics2D;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.text.NumberFormat;
import java.util.Date;
import javax.imageio.ImageIO;
import javax.swing.JLabel;
import javax.swing.JOptionPane;
import javax.swing.JPanel;
import javax.swing.SwingUtilities;
import org.apache.log4j.Logger;
import org.jfree.chart.ChartPanel;
import org.jfree.data.xy.DefaultTableXYDataset;
import edu.mit.sipg.core.Country;
import edu.mit.sipg.core.agriculture.AgricultureSoS;
import edu.mit.sipg.core.electricity.ElectricitySoS;
import edu.mit.sipg.core.petroleum.PetroleumSoS;
import edu.mit.sipg.core.water.WaterSoS;
import edu.mit.sipg.gui.base.InfrastructureSystemPanel;
import edu.mit.sipg.gui.event.UpdateEvent;
import edu.mit.sipg.io.Icons;
import edu.mit.sipg.scenario.Scenario;
/**
* Graphics panel to display key score information.
*
* @author Paul T. Grogan
*/
public class ScorePanel extends InfrastructureSystemPanel {
private static final long serialVersionUID = 355808870154994451L;
private static Logger logger = Logger.getLogger(ScorePanel.class);
private final Country country;
private final Scenario scenario;
private JPanel agricultureScorePanel;
private final JLabel agricultureScoreLabel = new JLabel("");
DefaultTableXYDataset agriculturePlayerScore = new DefaultTableXYDataset();
private JPanel waterScorePanel;
private final JLabel waterScoreLabel = new JLabel("");
DefaultTableXYDataset waterPlayerScore = new DefaultTableXYDataset();
private JPanel energyScorePanel;
private final JLabel energyScoreLabel = new JLabel("");
DefaultTableXYDataset energyPlayerScore = new DefaultTableXYDataset();
private JPanel teamScorePanel;
private final JLabel teamScoreLabel = new JLabel("");
DefaultTableXYDataset teamScore = new DefaultTableXYDataset();
private double overBudgetValue = 0;
private double overBudgetLimit = 0;
private int overBudgetYear = 0;
private int roundNumber = 0;
private final JLabel scoreLabel;
private File userOutputDir;
/**
* Instantiates a new score panel.
*
* @param scenario the scenario
* @param scoreLabel the score label
*/
public ScorePanel(Scenario scenario, JLabel scoreLabel) {
super(scenario.getCountry().getSocialSystem());
this.scenario = scenario;
this.country = scenario.getCountry();
this.scoreLabel = scoreLabel;
if(System.getenv().containsKey("SIPG_HOME")) {
userOutputDir = new File(System.getenv("SIPG_HOME"));
} else {
userOutputDir = new File(System.getProperty("user.home"), "SIPG");
}
if(!userOutputDir.exists()) {
userOutputDir.mkdir();
}
teamScorePanel = createStackedAreaChart(null, "Score", null,
new Color[]{PlottingUtils.YELLOW_GREEN, PlottingUtils.DODGER_BLUE,
PlottingUtils.DIM_GRAY, PlottingUtils.GOLDENROD,
PlottingUtils.BLACK}, teamScore);
teamScoreLabel.setFont(getFont().deriveFont(20f));
teamScoreLabel.setHorizontalAlignment(JLabel.CENTER);
teamScorePanel.add(teamScoreLabel, BorderLayout.NORTH);
if(scenario.isTeamScoreDisplayed()) {
addTab("Team Score", Icons.COUNTRY, teamScorePanel);
}
if(country.getAgricultureSystem().isLocal()) {
agricultureScorePanel = createStackedAreaChart(null, "Score", null,
new Color[]{PlottingUtils.YELLOW_GREEN, PlottingUtils.TOMATO,
PlottingUtils.GOLDENROD, PlottingUtils.BLACK}, agriculturePlayerScore);
agricultureScoreLabel.setFont(getFont().deriveFont(20f));
agricultureScoreLabel.setHorizontalAlignment(JLabel.CENTER);
agricultureScorePanel.add(agricultureScoreLabel, BorderLayout.NORTH);
addTab("Individual Score", Icons.AGRICULTURE, agricultureScorePanel);
}
if(country.getWaterSystem().isLocal()) {
waterScorePanel = createStackedAreaChart(null, "Score", null,
new Color[]{PlottingUtils.DODGER_BLUE, PlottingUtils.TOMATO,
PlottingUtils.GOLDENROD, PlottingUtils.BLACK}, waterPlayerScore);
waterScoreLabel.setFont(getFont().deriveFont(20f));
waterScoreLabel.setHorizontalAlignment(JLabel.CENTER);
waterScorePanel.add(waterScoreLabel, BorderLayout.NORTH);
addTab("Individual Score", Icons.WATER, waterScorePanel);
}
if(country.getPetroleumSystem().isLocal()
&& country.getElectricitySystem().isLocal()) {
energyScorePanel = createStackedAreaChart(null, "Score", null,
new Color[]{PlottingUtils.DIM_GRAY, PlottingUtils.TOMATO,
PlottingUtils.GOLDENROD, PlottingUtils.BLACK}, energyPlayerScore);
energyScoreLabel.setFont(getFont().deriveFont(20f));
energyScoreLabel.setHorizontalAlignment(JLabel.CENTER);
energyScorePanel.add(energyScoreLabel, BorderLayout.NORTH);
addTab("Individual Score", Icons.ENERGY, energyScorePanel);
}
}
/**
* Initialize.
*/
private void initialize() {
scoreLabel.setText("");
agricultureScoreLabel.setText("");
agriculturePlayerScore.removeAllSeries();
waterScoreLabel.setText("");
waterPlayerScore.removeAllSeries();
energyScoreLabel.setText("");
energyPlayerScore.removeAllSeries();
teamScoreLabel.setText("");
teamScore.removeAllSeries();
overBudgetYear = 0;
overBudgetValue = 0;
overBudgetLimit = 0;
}
@Override
public void simulationCompleted(UpdateEvent event) {
// hack to save chart images
SwingUtilities.invokeLater(new Runnable() {
public void run() {
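				// Export each score chart as a PNG into the user output directory,
				// drawing the corresponding score label text across the top of the image.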
if(agricultureScorePanel != null) {
for(Component c : agricultureScorePanel.getComponents()) {
if(c instanceof ChartPanel) {
ChartPanel chartPanel = (ChartPanel) c;
BufferedImage img = chartPanel.getChart().createBufferedImage(
chartPanel.getWidth(), chartPanel.getHeight());
Graphics2D g2d = img.createGraphics();
g2d.drawImage(img, 0, 0, null);
g2d.setPaint(Color.black);
g2d.setFont(new Font("SansSerif", Font.BOLD, 16));
FontMetrics fm = g2d.getFontMetrics();
String text = agricultureScoreLabel.getText();
g2d.drawString(text, img.getWidth()/2 - fm.stringWidth(text)/2, fm.getHeight() + 5);
g2d.dispose();
File outputFile = new File(userOutputDir,
new Date().getTime() + "-agriculture.png");
try {
ImageIO.write(img, "png", outputFile);
} catch (IOException e) {
logger.error(e);
}
}
}
}
if(waterScorePanel != null) {
for(Component c : waterScorePanel.getComponents()) {
if(c instanceof ChartPanel) {
ChartPanel chartPanel = (ChartPanel) c;
BufferedImage img = chartPanel.getChart().createBufferedImage(
chartPanel.getWidth(), chartPanel.getHeight());
Graphics2D g2d = img.createGraphics();
g2d.drawImage(img, 0, 0, null);
g2d.setPaint(Color.black);
g2d.setFont(new Font("SansSerif", Font.BOLD, 16));
FontMetrics fm = g2d.getFontMetrics();
String text = waterScoreLabel.getText();
g2d.drawString(text, img.getWidth()/2 - fm.stringWidth(text)/2, fm.getHeight() + 5);
g2d.dispose();
File outputFile = new File(userOutputDir,
new Date().getTime() + "-water.png");
try {
ImageIO.write(img, "png", outputFile);
} catch (IOException e) {
logger.error(e);
}
}
}
}
if(energyScorePanel != null) {
for(Component c : energyScorePanel.getComponents()) {
if(c instanceof ChartPanel) {
ChartPanel chartPanel = (ChartPanel) c;
BufferedImage img = chartPanel.getChart().createBufferedImage(
chartPanel.getWidth(), chartPanel.getHeight());
Graphics2D g2d = img.createGraphics();
g2d.drawImage(img, 0, 0, null);
g2d.setPaint(Color.black);
g2d.setFont(new Font("SansSerif", Font.BOLD, 16));
FontMetrics fm = g2d.getFontMetrics();
String text = energyScoreLabel.getText();
g2d.drawString(text, img.getWidth()/2 - fm.stringWidth(text)/2, fm.getHeight() + 5);
g2d.dispose();
File outputFile = new File(userOutputDir,
+ new Date().getTime() + "-energy.png");
try {
ImageIO.write(img, "png", outputFile);
} catch (IOException e) {
logger.error(e);
}
}
}
}
if(scenario.isTeamScoreDisplayed()) {
for(Component c : teamScorePanel.getComponents()) {
if(c instanceof ChartPanel) {
ChartPanel chartPanel = (ChartPanel) c;
BufferedImage img = chartPanel.getChart().createBufferedImage(
chartPanel.getWidth(), chartPanel.getHeight());
Graphics2D g2d = img.createGraphics();
g2d.drawImage(img, 0, 0, null);
g2d.setPaint(Color.black);
g2d.setFont(new Font("SansSerif", Font.BOLD, 16));
FontMetrics fm = g2d.getFontMetrics();
String text = teamScoreLabel.getText();
g2d.drawString(text, img.getWidth()/2 - fm.stringWidth(text)/2, fm.getHeight() + 5);
g2d.dispose();
File outputFile = new File(userOutputDir,
+ new Date().getTime() + "-team.png");
try {
ImageIO.write(img, "png", outputFile);
} catch (IOException e) {
logger.error(e);
}
}
}
}
}
});
if(overBudgetYear > 0) {
NumberFormat format = NumberFormat.getNumberInstance();
format.setMaximumFractionDigits(3);
JOptionPane.showMessageDialog(getTopLevelAncestor(),
"Total capital expenditures in " + overBudgetYear
+ " (\u00a7" + format.format(overBudgetValue/1e9)
+ " billion) was over the limit of \u00a7"
+ format.format(overBudgetLimit/1e9) + " billion.",
"Over-Budget Warning", JOptionPane.ERROR_MESSAGE);
}
}
@Override
public void simulationInitialized(UpdateEvent event) {
initialize();
roundNumber++;
}
@Override
public void simulationUpdated(UpdateEvent event) {
int year = (int) event.getTime();
if(year < 1980) {
return;
}
scoreLabel.setText("");
double foodScore = country.getAgricultureSystem().getFoodSecurityScore();
double aquiferScore = country.getWaterSystem().getAquiferSecurityScore();
double reservoirScore = country.getPetroleumSystem().getReservoirSecurityScore();
if(country.getAgricultureSystem() instanceof AgricultureSoS.Local) {
double politicalScore = ((AgricultureSoS.Local) country.getAgricultureSystem()).getPoliticalPowerScore(year);
double financialScore = ((AgricultureSoS.Local) country.getAgricultureSystem()).getFinancialSecurityScore(year);
double aggregateScore = ((AgricultureSoS.Local) country.getAgricultureSystem()).getAggregateScore(year);
updateSeries(agriculturePlayerScore, "Food Security", year, foodScore);
updateSeries(agriculturePlayerScore, "Agricultural Investment", year, politicalScore);
updateSeries(agriculturePlayerScore, "Agricultural Profit", year, financialScore);
updateSeries(agriculturePlayerScore, "Total Score", year, aggregateScore);
String scoreText = "Round " + roundNumber + " Agriculture Score: "
+ NumberFormat.getIntegerInstance().format(aggregateScore);
agricultureScoreLabel.setText(scoreText);
scoreLabel.setText((scoreLabel.getText().isEmpty()?"":
scoreLabel.getText() + ", ") + scoreText);
}
if(country.getWaterSystem() instanceof WaterSoS.Local) {
double politicalScore = ((WaterSoS.Local) country.getWaterSystem()).getPoliticalPowerScore(year);
double financialScore = ((WaterSoS.Local) country.getWaterSystem()).getFinancialSecurityScore(year);
double aggregateScore = ((WaterSoS.Local) country.getWaterSystem()).getAggregateScore(year);
updateSeries(waterPlayerScore, "Aquifer Security", year, aquiferScore);
updateSeries(waterPlayerScore, "Water Investment", year, politicalScore);
updateSeries(waterPlayerScore, "Water Profit", year, financialScore);
updateSeries(waterPlayerScore, "Total Score", year, aggregateScore);
String scoreText = "Round " + roundNumber + " Water Score: "
+ NumberFormat.getIntegerInstance().format(aggregateScore);
waterScoreLabel.setText(scoreText);
scoreLabel.setText((scoreLabel.getText().isEmpty()?"":
scoreLabel.getText() + ", ") + scoreText);
}
if(country.getPetroleumSystem() instanceof PetroleumSoS.Local
&& country.getElectricitySystem() instanceof ElectricitySoS.Local) {
double politicalScore = ((PetroleumSoS.Local) country.getPetroleumSystem()).getPoliticalPowerScore(
year, (ElectricitySoS.Local) country.getElectricitySystem());
double financialScore = ((PetroleumSoS.Local) country.getPetroleumSystem()).getFinancialSecurityScore(
year, (ElectricitySoS.Local) country.getElectricitySystem());
double aggregateScore = ((PetroleumSoS.Local) country.getPetroleumSystem()).getAggregateScore(
year, (ElectricitySoS.Local) country.getElectricitySystem());
updateSeries(energyPlayerScore, "Oil Reservoir Security", year, reservoirScore);
updateSeries(energyPlayerScore, "Energy Investment", year, politicalScore);
updateSeries(energyPlayerScore, "Energy Profit", year, financialScore);
updateSeries(energyPlayerScore, "Total Score", year, aggregateScore);
String scoreText = "Round " + roundNumber + " Energy Score: "
+ NumberFormat.getIntegerInstance().format(aggregateScore);
energyScoreLabel.setText(scoreText);
scoreLabel.setText((scoreLabel.getText().isEmpty()?"":
scoreLabel.getText() + ", ") + scoreText);
}
if(country.getTotalCapitalExpense() > country.getCapitalBudgetLimit()) {
overBudgetYear = year;
overBudgetValue = country.getTotalCapitalExpense();
overBudgetLimit = country.getCapitalBudgetLimit();
}
double financialScore = country.getFinancialSecurityScore(year);
updateSeries(teamScore, "Food Security", year, foodScore);
updateSeries(teamScore, "Aquifer Security", year, aquiferScore);
updateSeries(teamScore, "Oil Reservoir Security", year, reservoirScore);
updateSeries(teamScore, "National Profit", year, financialScore);
double aggregateScore = country.getAggregatedScore(year);
updateSeries(teamScore, "Total Score", year, aggregateScore);
String scoreText = "Round " + roundNumber + " Team Score: "
+ NumberFormat.getIntegerInstance().format(aggregateScore)
+ (overBudgetYear>0?"* (Over budget in " + overBudgetYear + ")":"");
teamScoreLabel.setText(scoreText);
if(scenario.isTeamScoreDisplayed()) {
scoreLabel.setText((scoreLabel.getText().isEmpty()?"":
scoreLabel.getText() + ", ") + scoreText);
}
}
} | [
"\"SIPG_HOME\""
]
| []
| [
"SIPG_HOME"
]
| [] | ["SIPG_HOME"] | java | 1 | 0 | |
pkg/helmexec/exec.go | package helmexec
import (
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
type decryptedSecret struct {
mutex sync.RWMutex
bytes []byte
}
type execer struct {
helmBinary string
version Version
runner Runner
logger *zap.SugaredLogger
kubeContext string
extra []string
decryptedSecretMutex sync.Mutex
decryptedSecrets map[string]*decryptedSecret
}
func NewLogger(writer io.Writer, logLevel string) *zap.SugaredLogger {
var cfg zapcore.EncoderConfig
cfg.MessageKey = "message"
out := zapcore.AddSync(writer)
var level zapcore.Level
err := level.Set(logLevel)
if err != nil {
panic(err)
}
core := zapcore.NewCore(
zapcore.NewConsoleEncoder(cfg),
out,
level,
)
return zap.New(core).Sugar()
}
func getHelmVersion(helmBinary string, logger *zap.SugaredLogger, runner Runner) Version {
	// Autodetect from `helm version`
bytes, err := runner.Execute(helmBinary, []string{"version", "--client", "--short"}, nil)
if err != nil {
panic(err)
}
if bytes == nil || len(bytes) == 0 {
return Version{}
}
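	// Parse the major/minor/patch numbers from the "vX.Y.Z" portion of the
	// `helm version --client --short` output.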
re := regexp.MustCompile("v(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)")
matches := re.FindStringSubmatch(string(bytes))
result := make(map[string]string)
for i, name := range re.SubexpNames() {
result[name] = matches[i]
}
major, err := strconv.Atoi(result["major"])
if err != nil {
panic(err)
}
minor, err := strconv.Atoi(result["minor"])
if err != nil {
panic(err)
}
patch, err := strconv.Atoi(result["patch"])
if err != nil {
panic(err)
}
// Support explicit helm3 opt-in via environment variable
if os.Getenv("HELMFILE_HELM3") != "" && major < 3 {
return Version{
Major: 3,
Minor: 0,
Patch: 0,
}
}
return Version{
Major: major,
Minor: minor,
Patch: patch,
}
}
// New for running helm commands
func New(helmBinary string, logger *zap.SugaredLogger, kubeContext string, runner Runner) *execer {
return &execer{
helmBinary: helmBinary,
version: getHelmVersion(helmBinary, logger, runner),
logger: logger,
kubeContext: kubeContext,
runner: runner,
decryptedSecrets: make(map[string]*decryptedSecret),
}
}
func (helm *execer) SetExtraArgs(args ...string) {
helm.extra = args
}
func (helm *execer) SetHelmBinary(bin string) {
helm.helmBinary = bin
}
func (helm *execer) AddRepo(name, repository, cafile, certfile, keyfile, username, password string) error {
var args []string
if name == "" && repository != "" {
helm.logger.Infof("empty field name\n")
return fmt.Errorf("empty field name")
}
args = append(args, "repo", "add", name, repository)
if certfile != "" && keyfile != "" {
args = append(args, "--cert-file", certfile, "--key-file", keyfile)
}
if cafile != "" {
args = append(args, "--ca-file", cafile)
}
if username != "" && password != "" {
args = append(args, "--username", username, "--password", password)
}
helm.logger.Infof("Adding repo %v %v", name, repository)
out, err := helm.exec(args, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) UpdateRepo() error {
helm.logger.Info("Updating repo")
out, err := helm.exec([]string{"repo", "update"}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) BuildDeps(name, chart string) error {
helm.logger.Infof("Building dependency release=%v, chart=%v", name, chart)
out, err := helm.exec([]string{"dependency", "build", chart}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) UpdateDeps(chart string) error {
helm.logger.Infof("Updating dependency %v", chart)
out, err := helm.exec([]string{"dependency", "update", chart}, map[string]string{})
helm.info(out)
return err
}
func (helm *execer) SyncRelease(context HelmContext, name, chart string, flags ...string) error {
helm.logger.Infof("Upgrading release=%v, chart=%v", name, chart)
preArgs := context.GetTillerlessArgs(helm)
env := context.getTillerlessEnv()
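	// Helm 3 accepts --history-max directly; for Helm 2 the limit is passed to the
	// tillerless plugin through the HELM_TILLER_HISTORY_MAX environment variable.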
if helm.IsHelm3() {
flags = append(flags, "--history-max", strconv.Itoa(context.HistoryMax))
} else {
env["HELM_TILLER_HISTORY_MAX"] = strconv.Itoa(context.HistoryMax)
}
out, err := helm.exec(append(append(preArgs, "upgrade", "--install", "--reset-values", name, chart), flags...), env)
helm.write(out)
return err
}
func (helm *execer) ReleaseStatus(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Getting status %v", name)
preArgs := context.GetTillerlessArgs(helm)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "status", name), flags...), env)
helm.write(out)
return err
}
func (helm *execer) List(context HelmContext, filter string, flags ...string) (string, error) {
helm.logger.Infof("Listing releases matching %v", filter)
preArgs := context.GetTillerlessArgs(helm)
env := context.getTillerlessEnv()
var args []string
if helm.IsHelm3() {
args = []string{"list", "--filter", filter}
} else {
args = []string{"list", filter}
}
out, err := helm.exec(append(append(preArgs, args...), flags...), env)
	// In v2 we have been expecting `helm list FILTER` to print nothing when no release matches.
	// In v3 helm still prints a header like `NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION`,
	// which confuses helmfile's existing logic that treats any non-empty output from `helm list`
	// as an indication that the release exists.
//
// This fixes it by removing the header from the v3 output, so that the output is formatted the same as that of v2.
if helm.IsHelm3() {
lines := strings.Split(string(out), "\n")
lines = lines[1:]
out = []byte(strings.Join(lines, "\n"))
}
helm.write(out)
return string(out), err
}
func (helm *execer) DecryptSecret(context HelmContext, name string, flags ...string) (string, error) {
absPath, err := filepath.Abs(name)
if err != nil {
return "", err
}
helm.logger.Debugf("Preparing to decrypt secret %v", absPath)
helm.decryptedSecretMutex.Lock()
secret, ok := helm.decryptedSecrets[absPath]
// Cache miss
if !ok {
secret = &decryptedSecret{}
helm.decryptedSecrets[absPath] = secret
secret.mutex.Lock()
defer secret.mutex.Unlock()
helm.decryptedSecretMutex.Unlock()
helm.logger.Infof("Decrypting secret %v", absPath)
preArgs := context.GetTillerlessArgs(helm)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "secrets", "dec", absPath), flags...), env)
helm.info(out)
if err != nil {
return "", err
}
// HELM_SECRETS_DEC_SUFFIX is used by the helm-secrets plugin to define the output file
decSuffix := os.Getenv("HELM_SECRETS_DEC_SUFFIX")
if len(decSuffix) == 0 {
decSuffix = ".yaml.dec"
}
decFilename := strings.Replace(absPath, ".yaml", decSuffix, 1)
secretBytes, err := ioutil.ReadFile(decFilename)
if err != nil {
return "", err
}
secret.bytes = secretBytes
if err := os.Remove(decFilename); err != nil {
return "", err
}
} else {
// Cache hit
helm.logger.Debugf("Found secret in cache %v", absPath)
secret.mutex.RLock()
helm.decryptedSecretMutex.Unlock()
defer secret.mutex.RUnlock()
}
tmpFile, err := ioutil.TempFile("", "secret")
if err != nil {
return "", err
}
_, err = tmpFile.Write(secret.bytes)
if err != nil {
return "", err
}
return tmpFile.Name(), err
}
func (helm *execer) TemplateRelease(name string, chart string, flags ...string) error {
helm.logger.Infof("Templating release=%v, chart=%v", name, chart)
var args []string
if helm.IsHelm3() {
args = []string{"template", name, chart}
} else {
args = []string{"template", chart, "--name", name}
}
out, err := helm.exec(append(args, flags...), map[string]string{})
helm.write(out)
return err
}
func (helm *execer) DiffRelease(context HelmContext, name, chart string, suppressDiff bool, flags ...string) error {
helm.logger.Infof("Comparing release=%v, chart=%v", name, chart)
preArgs := context.GetTillerlessArgs(helm)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "diff", "upgrade", "--reset-values", "--allow-unreleased", name, chart), flags...), env)
// Do our best to write STDOUT only when diff existed
// Unfortunately, this works only when you run helmfile with `--detailed-exitcode`
detailedExitcodeEnabled := false
for _, f := range flags {
if strings.Contains(f, "detailed-exitcode") {
detailedExitcodeEnabled = true
break
}
}
if detailedExitcodeEnabled {
switch e := err.(type) {
case ExitError:
if e.ExitStatus() == 2 {
if !(suppressDiff) {
helm.write(out)
}
return err
}
}
} else if !(suppressDiff) {
helm.write(out)
}
return err
}
func (helm *execer) Lint(name, chart string, flags ...string) error {
helm.logger.Infof("Linting release=%v, chart=%v", name, chart)
out, err := helm.exec(append([]string{"lint", chart}, flags...), map[string]string{})
helm.write(out)
return err
}
func (helm *execer) Fetch(chart string, flags ...string) error {
helm.logger.Infof("Fetching %v", chart)
out, err := helm.exec(append([]string{"fetch", chart}, flags...), map[string]string{})
helm.info(out)
return err
}
func (helm *execer) DeleteRelease(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Deleting %v", name)
preArgs := context.GetTillerlessArgs(helm)
env := context.getTillerlessEnv()
out, err := helm.exec(append(append(preArgs, "delete", name), flags...), env)
helm.write(out)
return err
}
func (helm *execer) TestRelease(context HelmContext, name string, flags ...string) error {
helm.logger.Infof("Testing %v", name)
preArgs := context.GetTillerlessArgs(helm)
env := context.getTillerlessEnv()
args := []string{"test", name}
out, err := helm.exec(append(append(preArgs, args...), flags...), env)
helm.write(out)
return err
}
func (helm *execer) exec(args []string, env map[string]string) ([]byte, error) {
cmdargs := args
if len(helm.extra) > 0 {
cmdargs = append(cmdargs, helm.extra...)
}
if helm.kubeContext != "" {
cmdargs = append(cmdargs, "--kube-context", helm.kubeContext)
}
cmd := fmt.Sprintf("exec: %s %s", helm.helmBinary, strings.Join(cmdargs, " "))
helm.logger.Debug(cmd)
bytes, err := helm.runner.Execute(helm.helmBinary, cmdargs, env)
helm.logger.Debugf("%s: %s", cmd, bytes)
return bytes, err
}
func (helm *execer) info(out []byte) {
if len(out) > 0 {
helm.logger.Infof("%s", out)
}
}
func (helm *execer) write(out []byte) {
if len(out) > 0 {
fmt.Printf("%s\n", out)
}
}
func (helm *execer) IsHelm3() bool {
return helm.version.Major == 3
}
func (helm *execer) GetVersion() Version {
return helm.version
}
func (helm *execer) IsVersionAtLeast(major int, minor int) bool {
return helm.version.Major >= major && helm.version.Minor >= minor
}
| [
"\"HELMFILE_HELM3\"",
"\"HELM_SECRETS_DEC_SUFFIX\""
]
| []
| [
"HELMFILE_HELM3",
"HELM_SECRETS_DEC_SUFFIX"
]
| [] | ["HELMFILE_HELM3", "HELM_SECRETS_DEC_SUFFIX"] | go | 2 | 0 | |
user_test.go | package esa_test
import (
"os"
"testing"
"github.com/k0kubun/pp"
"github.com/yuichiro-h/go-esa"
)
func TestGetUser(t *testing.T) {
client := esa.New(&esa.Config{AccessToken: os.Getenv("ESA_ACCESS_TOKEN")})
res, err := client.GetUser()
if err != nil {
t.Error(err)
}
t.Log(pp.Sprint(res))
}
| [
"\"ESA_ACCESS_TOKEN\""
]
| []
| [
"ESA_ACCESS_TOKEN"
]
| [] | ["ESA_ACCESS_TOKEN"] | go | 1 | 0 | |
sdks/python/apache_beam/runners/interactive/cache_manager.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import tempfile
import urllib
import apache_beam as beam
from apache_beam import coders
from apache_beam.io import filesystems
from apache_beam.transforms import combiners
class CacheManager(object):
"""Abstract class for caching PCollections.
A PCollection cache is identified by labels, which consist of a prefix (either
'full' or 'sample') and a cache_label which is a hash of the PCollection
derivation.
"""
def exists(self, *labels):
"""Returns if the PCollection cache exists."""
raise NotImplementedError
def is_latest_version(self, version, *labels):
"""Returns if the given version number is the latest."""
return version == self._latest_version(*labels)
def _latest_version(self, *labels):
"""Returns the latest version number of the PCollection cache."""
raise NotImplementedError
def read(self, *labels):
"""Return the PCollection as a list as well as the version number.
Returns:
(List[PCollection])
(int) the version number
    It is possible that the version numbers from read() and _latest_version()
are different. This usually means that the cache's been evicted (thus
unavailable => read() returns version = -1), but it had reached version n
before eviction.
"""
raise NotImplementedError
def source(self, *labels):
"""Returns a beam.io.Source that reads the PCollection cache."""
raise NotImplementedError
def sink(self, *labels):
"""Returns a beam.io.Sink that writes the PCollection cache."""
raise NotImplementedError
def cleanup(self):
"""Cleans up all the PCollection caches."""
raise NotImplementedError
class FileBasedCacheManager(CacheManager):
"""Maps PCollections to local temp files for materialization."""
def __init__(self, temp_dir=None):
self._temp_dir = temp_dir or tempfile.mkdtemp(
prefix='interactive-temp-', dir=os.environ.get('TEST_TMPDIR', None))
self._versions = collections.defaultdict(lambda: self._CacheVersion())
def exists(self, *labels):
return bool(self._match(*labels))
def _latest_version(self, *labels):
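    # The latest modification time across all shards of the cache is mapped to a
    # monotonically increasing version number via _CacheVersion.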
timestamp = 0
for path in self._match(*labels):
timestamp = max(timestamp, filesystems.FileSystems.last_updated(path))
result = self._versions["-".join(labels)].get_version(timestamp)
return result
def read(self, *labels):
if not self.exists(*labels):
return [], -1
def _read_helper():
coder = SafeFastPrimitivesCoder()
for path in self._match(*labels):
for line in filesystems.FileSystems.open(path):
yield coder.decode(line.strip())
result, version = list(_read_helper()), self._latest_version(*labels)
return result, version
def source(self, *labels):
return beam.io.ReadFromText(self._glob_path(*labels),
coder=SafeFastPrimitivesCoder())._source
def sink(self, *labels):
return beam.io.WriteToText(self._path(*labels),
coder=SafeFastPrimitivesCoder())._sink
def cleanup(self):
if filesystems.FileSystems.exists(self._temp_dir):
filesystems.FileSystems.delete([self._temp_dir])
def _glob_path(self, *labels):
return self._path(*labels) + '-*-of-*'
def _path(self, *labels):
return filesystems.FileSystems.join(self._temp_dir, *labels)
def _match(self, *labels):
match = filesystems.FileSystems.match([self._glob_path(*labels)])
assert len(match) == 1
return [metadata.path for metadata in match[0].metadata_list]
class _CacheVersion(object):
"""This class keeps track of the timestamp and the corresponding version."""
def __init__(self):
self.current_version = -1
self.current_timestamp = 0
def get_version(self, timestamp):
"""Updates version if necessary and returns the version number.
Args:
timestamp: (int) unix timestamp when the cache is updated. This value is
zero if the cache has been evicted or doesn't exist.
"""
# Do not update timestamp if the cache's been evicted.
if timestamp != 0 and timestamp != self.current_timestamp:
assert timestamp > self.current_timestamp
self.current_version = self.current_version + 1
self.current_timestamp = timestamp
return self.current_version
class ReadCache(beam.PTransform):
"""A PTransform that reads the PCollections from the cache."""
def __init__(self, cache_manager, label):
self._cache_manager = cache_manager
self._label = label
def expand(self, pbegin):
# pylint: disable=expression-not-assigned
return pbegin | 'Read' >> beam.io.Read(
self._cache_manager.source('full', self._label))
class WriteCache(beam.PTransform):
"""A PTransform that writes the PCollections to the cache."""
def __init__(self, cache_manager, label, sample=False, sample_size=0):
self._cache_manager = cache_manager
self._label = label
self._sample = sample
self._sample_size = sample_size
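  # When sampling is enabled, reduce the PCollection to a fixed-size random sample
  # before writing so the cached output stays small.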
def expand(self, pcoll):
prefix = 'sample' if self._sample else 'full'
if not self._cache_manager.exists(prefix, self._label):
if self._sample:
pcoll |= 'Sample' >> (
combiners.Sample.FixedSizeGlobally(self._sample_size)
| beam.FlatMap(lambda sample: sample))
# pylint: disable=expression-not-assigned
return pcoll | 'Write' >> beam.io.Write(
self._cache_manager.sink(prefix, self._label))
class SafeFastPrimitivesCoder(coders.Coder):
"""This class add an quote/unquote step to escape special characters."""
def encode(self, value):
return urllib.quote(coders.coders.FastPrimitivesCoder().encode(value))
def decode(self, value):
return coders.coders.FastPrimitivesCoder().decode(urllib.unquote(value))
| []
| []
| [
"TEST_TMPDIR"
]
| [] | ["TEST_TMPDIR"] | python | 1 | 0 | |
cli/cli.go | package cli
import (
"bytes"
"database/sql"
"flag"
"fmt"
"io/ioutil"
"os"
"strconv"
_ "github.com/lib/pq"
"github.com/shuymn/regend/config"
"github.com/shuymn/regend/static"
)
const name = "regend"
const (
exitCodeOK = iota
exitCodeErr
)
type CLI struct {
conf *config.RedshiftConfig
}
func NewCLI() *CLI {
return &CLI{}
}
func (c *CLI) Run(args []string) int {
conf := config.NewConfig()
fs := flag.NewFlagSet(name, flag.ContinueOnError)
fs.SetOutput(os.Stderr)
if err := fs.Parse(args); err != nil {
if err == flag.ErrHelp {
return exitCodeOK
}
return exitCodeErr
}
args = fs.Args()
if len(args) == 0 || len(args) > 1 {
fmt.Fprintf(os.Stderr, "usage: %s table\n", name)
return exitCodeErr
}
tomlFile := config.LoadTOMLFilename()
if tomlFile != "" {
if err := conf.LoadTOML(tomlFile); err != nil {
fmt.Fprintf(os.Stderr, "%s: %s\n", name, err)
return exitCodeErr
}
}
c.conf = &conf.Redshift
	// environment variables override any values loaded from the TOML config
if redshiftHost := os.Getenv("REGEND_REDSHIFT_HOST"); redshiftHost != "" {
c.conf.Host = redshiftHost
}
redshiftPort, err := strconv.Atoi(os.Getenv("REGEND_REDSHIFT_PORT"))
if err == nil && redshiftPort != 0 {
c.conf.Port = redshiftPort
}
if redshiftUser := os.Getenv("REGEND_REDSHIFT_USER"); redshiftUser != "" {
c.conf.User = redshiftUser
}
if redshiftPassword := os.Getenv("REGEND_REDSHIFT_PASSWORD"); redshiftPassword != "" {
c.conf.Password = redshiftPassword
}
if redshiftDatabase := os.Getenv("REGEND_REDSHIFT_DATABASE"); redshiftDatabase != "" {
c.conf.Database = redshiftDatabase
}
if c.conf.Host == "" {
fmt.Fprintf(os.Stderr, "%s: %s\n", name, "must specify host")
return exitCodeErr
}
if c.conf.Port == 0 {
fmt.Fprintf(os.Stderr, "%s: %s\n", name, "must specify port")
return exitCodeErr
}
if c.conf.User == "" {
fmt.Fprintf(os.Stderr, "%s: %s\n", name, "must specify user")
return exitCodeErr
}
if c.conf.Password == "" {
fmt.Fprintf(os.Stderr, "%s: %s\n", name, "must specify password")
return exitCodeErr
}
if c.conf.Database == "" {
fmt.Fprintf(os.Stderr, "%s: %s\n", name, "must specify database")
return exitCodeErr
}
if err := c.generate(args[0]); err != nil {
fmt.Fprintf(os.Stderr, "%s: %s\n", name, err)
return exitCodeErr
}
return exitCodeOK
}
func (c *CLI) generate(table string) error {
driverName := "postgres"
connStr := fmt.Sprintf("%s://%s:%s@%s:%d/%s", driverName, c.conf.User, c.conf.Password, c.conf.Host, c.conf.Port, c.conf.Database)
db, err := sql.Open(driverName, connStr)
if err != nil {
return err
}
defer db.Close()
file, err := static.Root.Open("/generate_tbl_ddl.sql")
if err != nil {
return err
}
defer file.Close()
content, err := ioutil.ReadAll(file)
if err != nil {
return err
}
b := bytes.NewBuffer(content)
rows, err := db.Query(b.String(), table)
if err != nil {
return err
}
columns, err := rows.Columns()
if err != nil {
return err
}
values := make([]sql.RawBytes, len(columns))
scanArgs := make([]interface{}, len(values))
for i := range values {
scanArgs[i] = &values[i]
}
for rows.Next() {
err := rows.Scan(scanArgs...)
if err != nil {
return err
}
for i, col := range values {
if columns[i] == "ddl" {
fmt.Println(string(col))
}
}
}
return nil
}
| [
"\"REGEND_REDSHIFT_HOST\"",
"\"REGEND_REDSHIFT_PORT\"",
"\"REGEND_REDSHIFT_USER\"",
"\"REGEND_REDSHIFT_PASSWORD\"",
"\"REGEND_REDSHIFT_DATABASE\""
]
| []
| [
"REGEND_REDSHIFT_PASSWORD",
"REGEND_REDSHIFT_PORT",
"REGEND_REDSHIFT_USER",
"REGEND_REDSHIFT_HOST",
"REGEND_REDSHIFT_DATABASE"
]
| [] | ["REGEND_REDSHIFT_PASSWORD", "REGEND_REDSHIFT_PORT", "REGEND_REDSHIFT_USER", "REGEND_REDSHIFT_HOST", "REGEND_REDSHIFT_DATABASE"] | go | 5 | 0 | |
vendor/github.com/Azure/azure-event-hubs-go/v3/storage/credential.go | package storage
// MIT License
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE
import (
"context"
"os"
"sync"
"time"
"github.com/Azure/azure-amqp-common-go/v3/aad"
"github.com/Azure/azure-pipeline-go/pipeline"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2017-10-01/storage"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/date"
)
type (
// AADSASCredential represents a token provider for Azure Storage SAS using AAD to authorize signing
AADSASCredential struct {
azblob.Credential
ResourceGroup string
SubscriptionID string
AccountName string
ContainerName string
aadTokenProvider *adal.ServicePrincipalToken
token *SASToken
env *azure.Environment
lockMu sync.Mutex
}
// SASToken contains the expiry time and token for a given SAS
SASToken struct {
expiry time.Time
sas string
}
// AADSASCredentialOption provides options for configuring AAD SAS Token Providers
AADSASCredentialOption func(*aad.TokenProviderConfiguration) error
)
// AADSASCredentialWithEnvironmentVars configures the TokenProvider using the environment variables available
//
// 1. Client Credentials: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID" and
// "AZURE_CLIENT_SECRET"
//
// 2. Client Certificate: attempt to authenticate with a Service Principal via "AZURE_TENANT_ID", "AZURE_CLIENT_ID",
// "AZURE_CERTIFICATE_PATH" and "AZURE_CERTIFICATE_PASSWORD"
//
// 3. Managed Service Identity (MSI): attempt to authenticate via MSI
//
//
// The Azure Environment used can be specified using the name of the Azure Environment set in "AZURE_ENVIRONMENT" var.
func AADSASCredentialWithEnvironmentVars() AADSASCredentialOption {
return func(config *aad.TokenProviderConfiguration) error {
config.TenantID = os.Getenv("AZURE_TENANT_ID")
config.ClientID = os.Getenv("AZURE_CLIENT_ID")
config.ClientSecret = os.Getenv("AZURE_CLIENT_SECRET")
config.CertificatePath = os.Getenv("AZURE_CERTIFICATE_PATH")
config.CertificatePassword = os.Getenv("AZURE_CERTIFICATE_PASSWORD")
if config.Env == nil {
env, err := azureEnvFromEnvironment()
if err != nil {
return err
}
config.Env = env
}
return nil
}
}
// NewAADSASCredential constructs a SAS token provider for Azure storage using Azure Active Directory credentials
//
// canonicalizedResource should be formed as described here: https://docs.microsoft.com/en-us/rest/api/storagerp/storageaccounts/listservicesas
func NewAADSASCredential(subscriptionID, resourceGroup, accountName, containerName string, opts ...AADSASCredentialOption) (*AADSASCredential, error) {
config := &aad.TokenProviderConfiguration{
ResourceURI: azure.PublicCloud.ResourceManagerEndpoint,
Env: &azure.PublicCloud,
}
for _, opt := range opts {
err := opt(config)
if err != nil {
return nil, err
}
}
spToken, err := config.NewServicePrincipalToken()
if err != nil {
return nil, err
}
return &AADSASCredential{
aadTokenProvider: spToken,
env: config.Env,
SubscriptionID: subscriptionID,
ResourceGroup: resourceGroup,
AccountName: accountName,
ContainerName: containerName,
}, nil
}
// New creates a credential policy object.
func (cred *AADSASCredential) New(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.Policy {
return pipeline.PolicyFunc(func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) {
// Add a x-ms-date header if it doesn't already exist
token, err := cred.getToken(ctx)
if err != nil {
return nil, err
}
if request.URL.RawQuery != "" {
request.URL.RawQuery = request.URL.RawQuery + "&" + token.sas
} else {
request.URL.RawQuery = token.sas
}
response, err := next.Do(ctx, request)
return response, err
})
}
// GetToken fetches a Azure Storage SAS token using an AAD token
func (cred *AADSASCredential) getToken(ctx context.Context) (SASToken, error) {
cred.lockMu.Lock()
defer cred.lockMu.Unlock()
span, ctx := startConsumerSpanFromContext(ctx, "storage.AADSASCredential.getToken")
defer span.End()
if cred.token != nil {
		// Reuse the cached SAS token only while it is still valid for at least five more minutes.
		if cred.token.expiry.After(time.Now().Add(5 * time.Minute)) {
return *cred.token, nil
}
}
token, err := cred.refreshToken(ctx, "/blob/"+cred.AccountName+"/"+cred.ContainerName)
if err != nil {
return SASToken{}, err
}
cred.token = &token
return token, nil
}
func (cred *AADSASCredential) refreshToken(ctx context.Context, canonicalizedResource string) (SASToken, error) {
span, ctx := startConsumerSpanFromContext(ctx, "storage.AADSASCredential.refreshToken")
defer span.End()
now := time.Now().Add(-1 * time.Second)
expiry := now.Add(1 * time.Hour)
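	// Request an account SAS valid for about an hour, starting one second in the past
	// to tolerate minor clock skew.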
client := storage.NewAccountsClientWithBaseURI(cred.env.ResourceManagerEndpoint, cred.SubscriptionID)
client.Authorizer = autorest.NewBearerAuthorizer(cred.aadTokenProvider)
res, err := client.ListAccountSAS(ctx, cred.ResourceGroup, cred.AccountName, storage.AccountSasParameters{
Protocols: storage.HTTPS,
ResourceTypes: storage.SignedResourceTypesS + storage.SignedResourceTypesC + storage.SignedResourceTypesO,
Services: storage.B,
SharedAccessStartTime: &date.Time{Time: now.Round(time.Second).UTC()},
SharedAccessExpiryTime: &date.Time{Time: expiry.Round(time.Second).UTC()},
Permissions: storage.R + storage.W + storage.D + storage.L + storage.A + storage.C + storage.U,
})
if err != nil {
return SASToken{}, err
}
return SASToken{
sas: *res.AccountSasToken,
expiry: expiry,
}, err
}
func azureEnvFromEnvironment() (*azure.Environment, error) {
envName := os.Getenv("AZURE_ENVIRONMENT")
var env azure.Environment
if envName == "" {
env = azure.PublicCloud
} else {
var err error
env, err = azure.EnvironmentFromName(envName)
if err != nil {
return nil, err
}
}
return &env, nil
}
| [
"\"AZURE_TENANT_ID\"",
"\"AZURE_CLIENT_ID\"",
"\"AZURE_CLIENT_SECRET\"",
"\"AZURE_CERTIFICATE_PATH\"",
"\"AZURE_CERTIFICATE_PASSWORD\"",
"\"AZURE_ENVIRONMENT\""
]
| []
| [
"AZURE_CERTIFICATE_PATH",
"AZURE_CLIENT_ID",
"AZURE_ENVIRONMENT",
"AZURE_CLIENT_SECRET",
"AZURE_TENANT_ID",
"AZURE_CERTIFICATE_PASSWORD"
]
| [] | ["AZURE_CERTIFICATE_PATH", "AZURE_CLIENT_ID", "AZURE_ENVIRONMENT", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID", "AZURE_CERTIFICATE_PASSWORD"] | go | 6 | 0 | |
gh/gh.go | package gh
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"regexp"
"strconv"
"strings"
"time"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing/object"
ghttp "github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/google/go-github/v39/github"
"github.com/k1LoW/go-github-client/v39/factory"
"github.com/lestrrat-go/backoff/v2"
)
const DefaultGithubServerURL = "https://github.com"
var octocovNameRe = regexp.MustCompile(`(?i)(octocov|coverage)`)
type Gh struct {
client *github.Client
}
func New() (*Gh, error) {
client, err := factory.NewGithubClient(factory.Timeout(10 * time.Second))
if err != nil {
return nil, err
}
return &Gh{
client: client,
}, nil
}
func (g *Gh) Client() *github.Client {
return g.client
}
func (g *Gh) SetClient(client *github.Client) {
g.client = client
}
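// PushContent commits content to the given branch: it resolves the branch ref, creates
// a blob and a single-entry tree for the path cp (or reuses the parent commit's tree
// when cp is empty), creates a commit with message, and updates the branch ref to it.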
func (g *Gh) PushContent(ctx context.Context, owner, repo, branch, content, cp, message string) error {
srv := g.client.Git
dRef, _, err := srv.GetRef(ctx, owner, repo, path.Join("heads", branch))
if err != nil {
return err
}
parent, _, err := srv.GetCommit(ctx, owner, repo, *dRef.Object.SHA)
if err != nil {
return err
}
var tree *github.Tree
if cp != "" {
blob := &github.Blob{
Content: github.String(content),
Encoding: github.String("utf-8"),
Size: github.Int(len(content)),
}
resB, _, err := srv.CreateBlob(ctx, owner, repo, blob)
if err != nil {
return err
}
entry := &github.TreeEntry{
Path: github.String(cp),
Mode: github.String("100644"),
Type: github.String("blob"),
SHA: resB.SHA,
}
entries := []*github.TreeEntry{entry}
tree, _, err = srv.CreateTree(ctx, owner, repo, *dRef.Object.SHA, entries)
if err != nil {
return err
}
} else {
tree, _, err = srv.GetTree(ctx, owner, repo, *parent.Tree.SHA, false)
if err != nil {
return err
}
}
commit := &github.Commit{
Message: github.String(message),
Tree: tree,
Parents: []*github.Commit{parent},
}
resC, _, err := srv.CreateCommit(ctx, owner, repo, commit)
if err != nil {
return err
}
nref := &github.Reference{
Ref: github.String(path.Join("refs", "heads", branch)),
Object: &github.GitObject{
Type: github.String("commit"),
SHA: resC.SHA,
},
}
if _, _, err := srv.UpdateRef(ctx, owner, repo, nref, false); err != nil {
return err
}
return nil
}
func (g *Gh) GetDefaultBranch(ctx context.Context, owner, repo string) (string, error) {
r, _, err := g.client.Repositories.Get(ctx, owner, repo)
if err != nil {
return "", err
}
return r.GetDefaultBranch(), nil
}
func (g *Gh) GetRawRootURL(ctx context.Context, owner, repo string) (string, error) {
b, err := g.GetDefaultBranch(ctx, owner, repo)
if err != nil {
return "", err
}
if os.Getenv("GITHUB_SERVER_URL") != "" && os.Getenv("GITHUB_SERVER_URL") != DefaultGithubServerURL {
// GitHub Enterprise Server
return fmt.Sprintf("%s/%s/%s/raw/%s", os.Getenv("GITHUB_SERVER_URL"), owner, repo, b), nil
}
baseRef := fmt.Sprintf("refs/heads/%s", b)
ref, _, err := g.client.Git.GetRef(ctx, owner, repo, baseRef)
if err != nil {
return "", err
}
tree, _, err := g.client.Git.GetTree(ctx, owner, repo, ref.GetObject().GetSHA(), false)
if err != nil {
return "", err
}
for _, e := range tree.Entries {
if e.GetType() != "blob" {
continue
}
path := e.GetPath()
fc, _, _, err := g.client.Repositories.GetContents(ctx, owner, repo, path, &github.RepositoryContentGetOptions{})
if err != nil {
return "", err
}
return strings.TrimSuffix(strings.TrimSuffix(fc.GetDownloadURL(), path), "/"), nil
}
	return "", fmt.Errorf("no files found; please commit a file to the root directory and push: %s/%s", owner, repo)
}
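// DetectCurrentJobID resolves the ID of the currently running GitHub Actions job. It
// lists the jobs of the run named by GITHUB_RUN_ID (retrying with exponential backoff)
// and picks either the only job, a job whose name matches the job name from the
// environment, or a job with an in-progress step whose name looks like octocov/coverage.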
func (g *Gh) DetectCurrentJobID(ctx context.Context, owner, repo string) (int64, error) {
if os.Getenv("GITHUB_RUN_ID") == "" {
return 0, fmt.Errorf("env %s is not set", "GITHUB_RUN_ID")
}
runID, err := strconv.ParseInt(os.Getenv("GITHUB_RUN_ID"), 10, 64)
if err != nil {
return 0, err
}
// Although it would be nice if we could get the job_id from an environment variable,
// there is no way to get it at this time, so it uses a heuristic.
p := backoff.Exponential(
backoff.WithMinInterval(time.Second),
backoff.WithMaxInterval(30*time.Second),
backoff.WithJitterFactor(0.05),
backoff.WithMaxRetries(5),
)
b := p.Start(ctx)
for backoff.Continue(b) {
jobs, _, err := g.client.Actions.ListWorkflowJobs(ctx, owner, repo, runID, &github.ListWorkflowJobsOptions{})
if err != nil {
return 0, err
}
if len(jobs.Jobs) == 1 {
return jobs.Jobs[0].GetID(), nil
}
for _, j := range jobs.Jobs {
			if j.GetName() == os.Getenv("GITHUB_JOB") {
return j.GetID(), nil
}
for _, s := range j.Steps {
if s.StartedAt != nil && s.CompletedAt == nil && octocovNameRe.MatchString(s.GetName()) {
return j.GetID(), nil
}
}
}
}
return 0, errors.New("could not detect id of current job")
}
func (g *Gh) DetectCurrentBranch(ctx context.Context) (string, error) {
splitted := strings.Split(os.Getenv("GITHUB_REF"), "/") // refs/pull/8/head or refs/heads/branch-name
if len(splitted) < 3 {
return "", fmt.Errorf("env %s is not set", "GITHUB_REF")
}
if strings.Contains(os.Getenv("GITHUB_REF"), "refs/heads/") {
return splitted[2], nil
}
if os.Getenv("GITHUB_HEAD_REF") == "" {
return "", fmt.Errorf("env %s is not set", "GITHUB_HEAD_REF")
}
return os.Getenv("GITHUB_HEAD_REF"), nil
}
func (g *Gh) DetectCurrentPullRequestNumber(ctx context.Context, owner, repo string) (int, error) {
splitted := strings.Split(os.Getenv("GITHUB_REF"), "/") // refs/pull/8/head or refs/heads/branch-name
if len(splitted) < 3 {
return 0, fmt.Errorf("env %s is not set", "GITHUB_REF")
}
if strings.Contains(os.Getenv("GITHUB_REF"), "refs/pull/") {
prNumber := splitted[2]
return strconv.Atoi(prNumber)
}
b := splitted[2]
l, _, err := g.client.PullRequests.List(ctx, owner, repo, &github.PullRequestListOptions{
State: "open",
})
if err != nil {
return 0, err
}
var d *github.PullRequest
for _, pr := range l {
if pr.GetHead().GetRef() == b {
if d != nil {
return 0, errors.New("could not detect number of pull request")
}
d = pr
}
}
if d != nil {
return d.GetNumber(), nil
}
return 0, errors.New("could not detect number of pull request")
}
type PullRequestFile struct {
Filename string
BlobURL string
}
func (g *Gh) GetPullRequestFiles(ctx context.Context, owner, repo string, number int) ([]*PullRequestFile, error) {
files := []*PullRequestFile{}
page := 1
for {
commitFiles, _, err := g.client.PullRequests.ListFiles(ctx, owner, repo, number, &github.ListOptions{
Page: page,
PerPage: 100,
})
if err != nil {
return nil, err
}
if len(commitFiles) == 0 {
break
}
for _, f := range commitFiles {
files = append(files, &PullRequestFile{
Filename: f.GetFilename(),
BlobURL: f.GetBlobURL(),
})
}
page += 1
}
return files, nil
}
func (g *Gh) GetStepExecutionTimeByTime(ctx context.Context, owner, repo string, jobID int64, t time.Time) (time.Duration, error) {
p := backoff.Exponential(
backoff.WithMinInterval(time.Second),
backoff.WithMaxInterval(30*time.Second),
backoff.WithJitterFactor(0.05),
backoff.WithMaxRetries(5),
)
b := p.Start(ctx)
log.Printf("target time: %v", t)
for backoff.Continue(b) {
job, _, err := g.client.Actions.GetWorkflowJobByID(ctx, owner, repo, jobID)
if err != nil {
return 0, err
}
l := len(job.Steps)
for i, s := range job.Steps {
log.Printf("job step [%d/%d]: %s %v-%v", i+1, l, s.GetName(), s.StartedAt, s.CompletedAt)
if s.StartedAt == nil || s.CompletedAt == nil {
continue
}
// Truncate less than a second
if s.GetStartedAt().Time.Unix() < t.Unix() && t.Unix() <= s.GetCompletedAt().Time.Unix() {
log.Print("detect step")
return s.GetCompletedAt().Time.Sub(s.GetStartedAt().Time), nil
}
}
}
return 0, fmt.Errorf("the step that was executed at the relevant time (%v) does not exist in the job (%d).", t, jobID)
}
func (g *Gh) GetStepByTime(ctx context.Context, owner, repo string, jobID int64, t time.Time) (Step, error) {
p := backoff.Exponential(
backoff.WithMinInterval(time.Second),
backoff.WithMaxInterval(30*time.Second),
backoff.WithJitterFactor(0.05),
backoff.WithMaxRetries(5),
)
b := p.Start(ctx)
log.Printf("target time: %v", t)
for backoff.Continue(b) {
job, _, err := g.client.Actions.GetWorkflowJobByID(ctx, owner, repo, jobID)
if err != nil {
return Step{}, err
}
l := len(job.Steps)
for i, s := range job.Steps {
log.Printf("job step [%d/%d]: %s %v-%v", i+1, l, s.GetName(), s.StartedAt, s.CompletedAt)
if s.StartedAt == nil || s.CompletedAt == nil {
continue
}
// Truncate less than a second
if s.GetStartedAt().Time.Unix() < t.Unix() && t.Unix() <= s.GetCompletedAt().Time.Unix() {
log.Print("detect step")
return Step{
Name: s.GetName(),
StartedAt: s.GetStartedAt().Time,
CompletedAt: s.GetCompletedAt().Time,
}, nil
}
}
}
return Step{}, fmt.Errorf("the step that was executed at the relevant time (%v) does not exist in the job (%d).", t, jobID)
}
type Step struct {
Name string
StartedAt time.Time
CompletedAt time.Time
}
func (g *Gh) GetStepsByName(ctx context.Context, owner, repo string, name string) ([]Step, error) {
if os.Getenv("GITHUB_RUN_ID") == "" {
return nil, fmt.Errorf("env %s is not set", "GITHUB_RUN_ID")
}
runID, err := strconv.ParseInt(os.Getenv("GITHUB_RUN_ID"), 10, 64)
if err != nil {
return nil, err
}
// Although it would be nice if we could get the job_id from an environment variable,
// there is no way to get it at this time, so it uses a heuristic.
p := backoff.Exponential(
backoff.WithMinInterval(time.Second),
backoff.WithMaxInterval(30*time.Second),
backoff.WithJitterFactor(0.05),
backoff.WithMaxRetries(5),
)
b := p.Start(ctx)
steps := []Step{}
max := 0
L:
for backoff.Continue(b) {
max = 0
jobs, _, err := g.client.Actions.ListWorkflowJobs(ctx, owner, repo, runID, &github.ListWorkflowJobsOptions{})
if err != nil {
return nil, err
}
for _, j := range jobs.Jobs {
log.Printf("search job: %d", j.GetID())
l := len(j.Steps)
for i, s := range j.Steps {
if s.GetName() == name {
max += 1
if s.StartedAt == nil || s.CompletedAt == nil {
steps = []Step{}
continue L
}
log.Printf("got job step [%d %d/%d]: %s %v-%v", j.GetID(), i+1, l, s.GetName(), s.StartedAt, s.CompletedAt)
steps = append(steps, Step{
Name: s.GetName(),
StartedAt: s.GetStartedAt().Time,
CompletedAt: s.GetCompletedAt().Time,
})
}
}
}
if max == len(steps) {
return steps, nil
}
}
if max < len(steps) || len(steps) == 0 {
return nil, fmt.Errorf("could not get step times: %s", name)
}
return steps, nil
}
const commentSig = "<!-- octocov -->"
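// PutComment replaces the octocov comment on issue/PR n: any existing comment carrying
// the hidden commentSig marker is deleted first, then the new comment is posted with
// the marker appended.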
func (g *Gh) PutComment(ctx context.Context, owner, repo string, n int, comment string) error {
if err := g.deleteCurrentIssueComment(ctx, owner, repo, n); err != nil {
return err
}
c := strings.Join([]string{comment, commentSig}, "\n")
if _, _, err := g.client.Issues.CreateComment(ctx, owner, repo, n, &github.IssueComment{Body: &c}); err != nil {
return err
}
return nil
}
func (g *Gh) deleteCurrentIssueComment(ctx context.Context, owner, repo string, n int) error {
opts := &github.IssueListCommentsOptions{}
comments, _, err := g.client.Issues.ListComments(ctx, owner, repo, n, opts)
if err != nil {
return err
}
for _, c := range comments {
if strings.Contains(*c.Body, commentSig) {
_, err = g.client.Issues.DeleteComment(ctx, owner, repo, *c.ID)
if err != nil {
return err
}
}
}
return nil
}
func PushUsingLocalGit(ctx context.Context, gitRoot string, addPaths []string, message string) error {
r, err := git.PlainOpen(gitRoot)
if err != nil {
return err
}
w, err := r.Worktree()
if err != nil {
return err
}
status, err := w.Status()
if err != nil {
return err
}
push := false
for _, p := range addPaths {
rel, err := filepath.Rel(gitRoot, p)
if err != nil {
return err
}
if _, ok := status[rel]; ok {
push = true
_, err := w.Add(rel)
if err != nil {
return err
}
}
}
if !push {
return nil
}
opts := &git.CommitOptions{}
switch {
case os.Getenv("GITHUB_SERVER_URL") == DefaultGithubServerURL:
opts.Author = &object.Signature{
Name: "github-actions",
Email: "41898282+github-actions[bot]@users.noreply.github.com",
When: time.Now(),
}
case os.Getenv("GITHUB_ACTOR") != "":
opts.Author = &object.Signature{
Name: os.Getenv("GITHUB_ACTOR"),
		Email: fmt.Sprintf("%s@users.noreply.github.com", os.Getenv("GITHUB_ACTOR")),
When: time.Now(),
}
}
if _, err := w.Commit(message, opts); err != nil {
return err
}
if err := r.PushContext(ctx, &git.PushOptions{
Auth: &ghttp.BasicAuth{
Username: "octocov",
Password: os.Getenv("GITHUB_TOKEN"),
},
}); err != nil && err != git.NoErrAlreadyUpToDate {
return err
}
return nil
}
type GitHubEvent struct {
Name string
Number int
State string
Payload interface{}
}
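// DecodeGitHubEvent reads the event name from GITHUB_EVENT_NAME and the JSON payload
// from the file at GITHUB_EVENT_PATH, extracting the pull request or issue number and
// state when present. A minimal illustrative payload would look like:
//
//	{"pull_request": {"number": 8, "state": "open"}}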
func DecodeGitHubEvent() (*GitHubEvent, error) {
i := &GitHubEvent{}
n := os.Getenv("GITHUB_EVENT_NAME")
if n == "" {
return i, fmt.Errorf("env %s is not set.", "GITHUB_EVENT_NAME")
}
i.Name = n
p := os.Getenv("GITHUB_EVENT_PATH")
if p == "" {
return i, fmt.Errorf("env %s is not set.", "GITHUB_EVENT_PATH")
}
b, err := ioutil.ReadFile(filepath.Clean(p))
if err != nil {
return i, err
}
s := struct {
PullRequest struct {
Number int `json:"number,omitempty"`
State string `json:"state,omitempty"`
} `json:"pull_request,omitempty"`
Issue struct {
Number int `json:"number,omitempty"`
State string `json:"state,omitempty"`
} `json:"issue,omitempty"`
}{}
if err := json.Unmarshal(b, &s); err != nil {
return i, err
}
switch {
case s.PullRequest.Number > 0:
i.Number = s.PullRequest.Number
i.State = s.PullRequest.State
case s.Issue.Number > 0:
i.Number = s.Issue.Number
i.State = s.Issue.State
}
var payload interface{}
if err := json.Unmarshal(b, &payload); err != nil {
return i, err
}
i.Payload = payload
return i, nil
}
type Repository struct {
Owner string
Repo string
Path string
}
func (r *Repository) Reponame() string {
if r.Path == "" {
return r.Repo
}
return fmt.Sprintf("%s/%s", r.Repo, r.Path)
}
func Parse(raw string) (*Repository, error) {
splitted := strings.Split(raw, "/")
if len(splitted) < 2 {
return nil, fmt.Errorf("could not parse: %s", raw)
}
for _, p := range splitted {
if p == "" {
return nil, fmt.Errorf("invalid repository path: %s", raw)
}
if strings.Trim(p, ".") == "" {
return nil, fmt.Errorf("invalid repository path: %s", raw)
}
}
r := &Repository{
Owner: splitted[0],
Repo: splitted[1],
}
if len(splitted) > 2 {
r.Path = strings.Join(splitted[2:], "/")
}
return r, nil
}
| [
"\"GITHUB_SERVER_URL\"",
"\"GITHUB_SERVER_URL\"",
"\"GITHUB_SERVER_URL\"",
"\"GITHUB_RUN_ID\"",
"\"GITHUB_RUN_ID\"",
"\"GITHUB_JOB\"",
"\"GITHUB_REF\"",
"\"GITHUB_REF\"",
"\"GITHUB_HEAD_REF\"",
"\"GITHUB_HEAD_REF\"",
"\"GITHUB_REF\"",
"\"GITHUB_REF\"",
"\"GITHUB_RUN_ID\"",
"\"GITHUB_RUN_ID\"",
"\"GITHUB_SERVER_URL\"",
"\"GITHUB_ACTOR\"",
"\"GITHUB_ACTOR\"",
"\"GITHUB_ACTOR\"",
"\"GITHUB_TOKEN\"",
"\"GITHUB_EVENT_NAME\"",
"\"GITHUB_EVENT_PATH\""
]
| []
| [
"GITHUB_SERVER_URL",
"GITHUB_RUN_ID",
"GITHUB_TOKEN",
"GITHUB_ACTOR",
"GITHUB_REF",
"GITHUB_JOB",
"GITHUB_EVENT_PATH",
"GITHUB_EVENT_NAME",
"GITHUB_HEAD_REF"
]
| [] | ["GITHUB_SERVER_URL", "GITHUB_RUN_ID", "GITHUB_TOKEN", "GITHUB_ACTOR", "GITHUB_REF", "GITHUB_JOB", "GITHUB_EVENT_PATH", "GITHUB_EVENT_NAME", "GITHUB_HEAD_REF"] | go | 9 | 0 |
lambdas/transaction-store/lambda.go | package main
import (
"context"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/mailru/easyjson"
"monzo-transactions/monzo"
"os"
)
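// LambdaFunction consumes SNS events whose message body is a monzo.TransactionCreated
// JSON document and writes each one to a DynamoDB table; the table name is read from
// the LAMBDA_DYNAMODB_TABLE environment variable in main.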
type LambdaFunction struct {
table *string
db *dynamodb.DynamoDB
}
func (f *LambdaFunction) Invoke(ctx context.Context, req events.SNSEvent) error {
for _, e := range req.Records {
var tx monzo.TransactionCreated
err := easyjson.Unmarshal([]byte(e.SNS.Message), &tx)
if err != nil {
return err
}
attr, err := dynamodbattribute.MarshalMap(&tx)
if err != nil {
return err
}
_, err = f.db.PutItemWithContext(ctx, &dynamodb.PutItemInput{
TableName: f.table,
Item: attr,
})
if err != nil {
return err
}
}
return nil
}
func main() {
s := session.New()
f := &LambdaFunction{
table: aws.String(os.Getenv("LAMBDA_DYNAMODB_TABLE")),
db: dynamodb.New(s),
}
lambda.Start(f.Invoke)
}
| [
"\"LAMBDA_DYNAMODB_TABLE\""
]
| []
| [
"LAMBDA_DYNAMODB_TABLE"
]
| [] | ["LAMBDA_DYNAMODB_TABLE"] | go | 1 | 0 | |
compatibility/compatibility_suite_test.go | package compatibility_test
import (
"code.cloudfoundry.org/docker_driver_integration_tests"
"encoding/json"
"errors"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
"io/ioutil"
"os"
"os/exec"
"strings"
"testing"
)
type VolumeServiceBrokerBinding struct {
VolumeMounts []struct {
Device struct {
VolumeID string `json:"volume_id"`
MountConfig map[string]interface{} `json:"mount_config"`
} `json:"device"`
} `json:"volume_mounts"`
}
var (
integrationFixtureTemplate = docker_driver_integration_tests.LoadFixtureTemplate()
bindingsFixture = LoadVolumeServiceBrokerBindingsFixture()
session *gexec.Session
)
func TestCompatibility(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Compatibility Suite")
}
var _ = BeforeSuite(func() {
cmd := exec.Command(os.Getenv("DRIVER_CMD"), strings.Split(os.Getenv("DRIVER_OPTS"), ",")...)
var err error
session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
Eventually(session.Out).Should(gbytes.Say("driver-server.started"))
})
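// LoadVolumeServiceBrokerBindingsFixture reads the JSON file named by the BINDINGS_FILE
// environment variable. An illustrative (made-up) fixture matching the struct above:
//
//	[{"volume_mounts": [{"device": {"volume_id": "vol-1", "mount_config": {"key": "value"}}}]}]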
func LoadVolumeServiceBrokerBindingsFixture() []VolumeServiceBrokerBinding {
var ok bool
var bindingsFile string
if bindingsFile, ok = os.LookupEnv("BINDINGS_FILE"); !ok {
panic(errors.New("BINDINGS_FILE environment variable not set"))
}
bytes, err := ioutil.ReadFile(bindingsFile)
if err != nil {
panic(err.Error())
}
bindings := []VolumeServiceBrokerBinding{}
err = json.Unmarshal(bytes, &bindings)
if err != nil {
panic(err.Error())
}
return bindings
}
| [
"\"DRIVER_CMD\"",
"\"DRIVER_OPTS\""
]
| []
| [
"DRIVER_CMD",
"DRIVER_OPTS"
]
| [] | ["DRIVER_CMD", "DRIVER_OPTS"] | go | 2 | 0 | |
app.py | import os
import platform
from datetime import datetime
from flask import Flask, render_template, request, redirect, url_for
import psycopg2 as psy
from dotenv import dotenv_values
import json
from flask import Response
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import BigInteger, Boolean, CheckConstraint, Column, DateTime, ForeignKey, Index, Integer, Numeric, \
SmallInteger, String, Table, Text, UniqueConstraint, text
from sqlalchemy.dialects.postgresql import TIMESTAMP
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
app = Flask(__name__)
Base = declarative_base()
metadata = Base.metadata
alchemy_env = dotenv_values("/home/en_var.env")
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://' + alchemy_env['DBUSER'] + ':' + alchemy_env['DBPASS'] + '@147.175.150.216/dota2'
#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://' + os.getenv('DBUSER') + ':' + os.getenv('DBPASS') + '@147.175.150.216/dota2'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Ability(db.Model):
__tablename__ = 'abilities'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
class AuthGroup(db.Model):
__tablename__ = 'auth_group'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('auth_group_id_seq'::regclass)"))
name = db.Column(db.String(150), nullable=False, unique=True)
class AuthUser(db.Model):
__tablename__ = 'auth_user'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('auth_user_id_seq'::regclass)"))
password = db.Column(db.String(128), nullable=False)
last_login = db.Column(db.DateTime(True))
is_superuser = db.Column(db.Boolean, nullable=False)
username = db.Column(db.String(150), nullable=False, unique=True)
first_name = db.Column(db.String(150), nullable=False)
last_name = db.Column(db.String(150), nullable=False)
email = db.Column(db.String(254), nullable=False)
is_staff = db.Column(db.Boolean, nullable=False)
is_active = db.Column(db.Boolean, nullable=False)
date_joined = db.Column(db.DateTime(True), nullable=False)
class ClusterRegion(db.Model):
__tablename__ = 'cluster_regions'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
class DjangoContentType(db.Model):
__tablename__ = 'django_content_type'
__table_args__ = (
UniqueConstraint('app_label', 'model'),
)
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('django_content_type_id_seq'::regclass)"))
app_label = db.Column(db.String(100), nullable=False)
model = db.Column(db.String(100), nullable=False)
class DjangoMigration(db.Model):
__tablename__ = 'django_migrations'
id = db.Column(db.BigInteger, primary_key=True, server_default=text("nextval('django_migrations_id_seq'::regclass)"))
app = db.Column(db.String(255), nullable=False)
name = db.Column(db.String(255), nullable=False)
applied = db.Column(db.DateTime(True), nullable=False)
class DjangoSession(db.Model):
__tablename__ = 'django_session'
session_key = db.Column(db.String(40), primary_key=True, index=True)
session_data = db.Column(db.Text, nullable=False)
expire_date = db.Column(db.DateTime(True), nullable=False, index=True)
class DoctrineMigrationVersion(db.Model):
__tablename__ = 'doctrine_migration_versions'
version = db.Column(db.String(191), primary_key=True)
executed_at = db.Column(db.TIMESTAMP(), server_default=text("NULL::timestamp without time zone"))
execution_time = db.Column(db.Integer)
class FlywaySchemaHistory(db.Model):
__tablename__ = 'flyway_schema_history'
installed_rank = db.Column(db.Integer, primary_key=True)
version = db.Column(db.String(50))
description = db.Column(db.String(200), nullable=False)
type = db.Column(db.String(20), nullable=False)
script = db.Column(db.String(1000), nullable=False)
checksum = db.Column(db.Integer)
installed_by = db.Column(db.String(100), nullable=False)
installed_on = db.Column(db.DateTime, nullable=False, server_default=text("now()"))
execution_time = db.Column(db.Integer, nullable=False)
success = db.Column(db.Boolean, nullable=False, index=True)
class Hero(db.Model):
__tablename__ = 'heroes'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
localized_name = db.Column(db.Text)
class Item(db.Model):
__tablename__ = 'items'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
class Migration(db.Model):
__tablename__ = 'migrations'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('migrations_id_seq'::regclass)"))
migration = db.Column(db.String(255), nullable=False)
batch = db.Column(db.Integer, nullable=False)
class Patch(db.Model):
__tablename__ = 'patches'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('patches_id_seq'::regclass)"))
name = db.Column(db.Text, nullable=False)
release_date = db.Column(db.DateTime, nullable=False)
class Player(db.Model):
__tablename__ = 'players'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
nick = db.Column(db.Text)
t_propel_migration = Table(
'propel_migration', metadata,
db.Column('version', db.Integer, server_default=text("0"))
)
class AuthPermission(db.Model):
__tablename__ = 'auth_permission'
__table_args__ = (
UniqueConstraint('content_type_id', 'codename'),
)
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('auth_permission_id_seq'::regclass)"))
name = db.Column(db.String(255), nullable=False)
content_type_id = db.Column(db.ForeignKey('django_content_type.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
codename = db.Column(db.String(100), nullable=False)
content_type = db.relationship('DjangoContentType')
class AuthUserGroup(db.Model):
__tablename__ = 'auth_user_groups'
__table_args__ = (
UniqueConstraint('user_id', 'group_id'),
)
id = db.Column(db.BigInteger, primary_key=True, server_default=text("nextval('auth_user_groups_id_seq'::regclass)"))
user_id = db.Column(db.ForeignKey('auth_user.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
group_id = db.Column(db.ForeignKey('auth_group.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
group = db.relationship('AuthGroup')
user = db.relationship('AuthUser')
class DjangoAdminLog(db.Model):
__tablename__ = 'django_admin_log'
__table_args__ = (
CheckConstraint('action_flag >= 0'),
)
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('django_admin_log_id_seq'::regclass)"))
action_time = db.Column(db.DateTime(True), nullable=False)
object_id = db.Column(db.Text)
object_repr = db.Column(db.String(200), nullable=False)
action_flag = db.Column(db.SmallInteger, nullable=False)
change_message = db.Column(db.Text, nullable=False)
content_type_id = db.Column(db.ForeignKey('django_content_type.id', deferrable=True, initially='DEFERRED'), index=True)
user_id = db.Column(db.ForeignKey('auth_user.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
content_type = db.relationship('DjangoContentType')
user = db.relationship('AuthUser')
class Match(db.Model):
__tablename__ = 'matches'
id = db.Column(db.Integer, primary_key=True)
cluster_region_id = db.Column(db.ForeignKey('cluster_regions.id'))
start_time = db.Column(db.Integer)
duration = db.Column(db.Integer)
tower_status_radiant = db.Column(db.Integer)
tower_status_dire = db.Column(db.Integer)
barracks_status_radiant = db.Column(db.Integer)
barracks_status_dire = db.Column(db.Integer)
first_blood_time = db.Column(db.Integer)
game_mode = db.Column(db.Integer)
radiant_win = db.Column(db.Boolean)
negative_votes = db.Column(db.Integer)
positive_votes = db.Column(db.Integer)
cluster_region = db.relationship('ClusterRegion')
class PlayerRating(db.Model):
__tablename__ = 'player_ratings'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('player_ratings_id_seq'::regclass)"))
player_id = db.Column(db.ForeignKey('players.id'))
total_wins = db.Column(db.Integer)
total_matches = db.Column(db.Integer)
trueskill_mu = db.Column(db.Numeric)
trueskill_sigma = db.Column(db.Numeric)
player = db.relationship('Player')
class AuthGroupPermission(db.Model):
__tablename__ = 'auth_group_permissions'
__table_args__ = (
UniqueConstraint('group_id', 'permission_id'),
)
id = db.Column(db.BigInteger, primary_key=True, server_default=text("nextval('auth_group_permissions_id_seq'::regclass)"))
group_id = db.Column(db.ForeignKey('auth_group.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
permission_id = db.Column(db.ForeignKey('auth_permission.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
group = db.relationship('AuthGroup')
permission = db.relationship('AuthPermission')
class AuthUserUserPermission(db.Model):
__tablename__ = 'auth_user_user_permissions'
__table_args__ = (
UniqueConstraint('user_id', 'permission_id'),
)
id = db.Column(db.BigInteger, primary_key=True, server_default=text("nextval('auth_user_user_permissions_id_seq'::regclass)"))
user_id = db.Column(db.ForeignKey('auth_user.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
permission_id = db.Column(db.ForeignKey('auth_permission.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
permission = db.relationship('AuthPermission')
user = db.relationship('AuthUser')
class MatchesPlayersDetail(db.Model):
__tablename__ = 'matches_players_details'
__table_args__ = (
Index('idx_match_id_player_id', 'match_id', 'player_slot', 'id'),
)
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('matches_players_details_id_seq'::regclass)"))
match_id = db.Column(db.ForeignKey('matches.id'))
player_id = db.Column(db.ForeignKey('players.id'))
hero_id = db.Column(db.ForeignKey('heroes.id'))
player_slot = db.Column(db.Integer)
gold = db.Column(db.Integer)
gold_spent = db.Column(db.Integer)
gold_per_min = db.Column(db.Integer)
xp_per_min = db.Column(db.Integer)
kills = db.Column(db.Integer)
deaths = db.Column(db.Integer)
assists = db.Column(db.Integer)
denies = db.Column(db.Integer)
last_hits = db.Column(db.Integer)
stuns = db.Column(db.Integer)
hero_damage = db.Column(db.Integer)
hero_healing = db.Column(db.Integer)
tower_damage = db.Column(db.Integer)
item_id_1 = db.Column(db.ForeignKey('items.id'))
item_id_2 = db.Column(db.ForeignKey('items.id'))
item_id_3 = db.Column(db.ForeignKey('items.id'))
item_id_4 = db.Column(db.ForeignKey('items.id'))
item_id_5 = db.Column(db.ForeignKey('items.id'))
item_id_6 = db.Column(db.ForeignKey('items.id'))
level = db.Column(db.Integer)
leaver_status = db.Column(db.Integer)
xp_hero = db.Column(db.Integer)
xp_creep = db.Column(db.Integer)
xp_roshan = db.Column(db.Integer)
xp_other = db.Column(db.Integer)
gold_other = db.Column(db.Integer)
gold_death = db.Column(db.Integer)
gold_abandon = db.Column(db.Integer)
gold_sell = db.Column(db.Integer)
gold_destroying_structure = db.Column(db.Integer)
gold_killing_heroes = db.Column(db.Integer)
gold_killing_creeps = db.Column(db.Integer)
gold_killing_roshan = db.Column(db.Integer)
gold_killing_couriers = db.Column(db.Integer)
hero = db.relationship('Hero')
item = db.relationship('Item', primaryjoin='MatchesPlayersDetail.item_id_1 == Item.id')
item1 = db.relationship('Item', primaryjoin='MatchesPlayersDetail.item_id_2 == Item.id')
item2 = db.relationship('Item', primaryjoin='MatchesPlayersDetail.item_id_3 == Item.id')
item3 = db.relationship('Item', primaryjoin='MatchesPlayersDetail.item_id_4 == Item.id')
item4 = db.relationship('Item', primaryjoin='MatchesPlayersDetail.item_id_5 == Item.id')
item5 = db.relationship('Item', primaryjoin='MatchesPlayersDetail.item_id_6 == Item.id')
match = db.relationship('Match')
player = db.relationship('Player')
class Teamfight(db.Model):
__tablename__ = 'teamfights'
__table_args__ = (
Index('teamfights_match_id_start_teamfight_id_idx', 'match_id', 'start_teamfight', 'id'),
)
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('teamfights_id_seq'::regclass)"))
match_id = db.Column(db.ForeignKey('matches.id'))
start_teamfight = db.Column(db.Integer)
end_teamfight = db.Column(db.Integer)
last_death = db.Column(db.Integer)
deaths = db.Column(db.Integer)
match = db.relationship('Match')
class AbilityUpgrade(db.Model):
__tablename__ = 'ability_upgrades'
id = db.Column(Integer, primary_key=True, server_default=text("nextval('ability_upgrades_id_seq'::regclass)"))
ability_id = db.Column(db.ForeignKey('abilities.id'))
match_player_detail_id = db.Column(db.ForeignKey('matches_players_details.id'))
level = db.Column(Integer)
time = db.Column(Integer)
ability = db.relationship('Ability')
match_player_detail = db.relationship('MatchesPlayersDetail')
class Chat(db.Model):
__tablename__ = 'chats'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('chats_id_seq'::regclass)"))
match_player_detail_id = db.Column(db.ForeignKey('matches_players_details.id'))
message = db.Column(db.Text)
time = db.Column(db.Integer)
nick = db.Column(db.Text)
match_player_detail = relationship('MatchesPlayersDetail')
class GameObjective(db.Model):
__tablename__ = 'game_objectives'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('game_objectives_id_seq'::regclass)"))
match_player_detail_id_1 = db.Column(db.ForeignKey('matches_players_details.id'))
match_player_detail_id_2 = db.Column(db.ForeignKey('matches_players_details.id'))
key = db.Column(db.Integer)
subtype = db.Column(db.Text)
team = db.Column(db.Integer)
time = db.Column(db.Integer)
value = db.Column(db.Integer)
slot = db.Column(db.Integer)
matches_players_detail = db.relationship('MatchesPlayersDetail', primaryjoin='GameObjective.match_player_detail_id_1 == MatchesPlayersDetail.id')
matches_players_detail1 = db.relationship('MatchesPlayersDetail', primaryjoin='GameObjective.match_player_detail_id_2 == MatchesPlayersDetail.id')
class PlayerAction(db.Model):
__tablename__ = 'player_actions'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('player_actions_id_seq'::regclass)"))
unit_order_none = db.Column(db.Integer)
unit_order_move_to_position = db.Column(db.Integer)
unit_order_move_to_target = db.Column(db.Integer)
unit_order_attack_move = db.Column(db.Integer)
unit_order_attack_target = db.Column(db.Integer)
unit_order_cast_position = db.Column(db.Integer)
unit_order_cast_target = db.Column(db.Integer)
unit_order_cast_target_tree = db.Column(db.Integer)
unit_order_cast_no_target = db.Column(db.Integer)
unit_order_cast_toggle = db.Column(db.Integer)
unit_order_hold_position = db.Column(db.Integer)
unit_order_train_ability = db.Column(db.Integer)
unit_order_drop_item = db.Column(db.Integer)
unit_order_give_item = db.Column(db.Integer)
unit_order_pickup_item = db.Column(db.Integer)
unit_order_pickup_rune = db.Column(db.Integer)
unit_order_purchase_item = db.Column(db.Integer)
unit_order_sell_item = db.Column(db.Integer)
unit_order_disassemble_item = db.Column(db.Integer)
unit_order_move_item = db.Column(db.Integer)
unit_order_cast_toggle_auto = db.Column(db.Integer)
unit_order_stop = db.Column(db.Integer)
unit_order_buyback = db.Column(db.Integer)
unit_order_glyph = db.Column(db.Integer)
unit_order_eject_item_from_stash = db.Column(db.Integer)
unit_order_cast_rune = db.Column(db.Integer)
unit_order_ping_ability = db.Column(db.Integer)
unit_order_move_to_direction = db.Column(db.Integer)
match_player_detail_id = db.Column(db.ForeignKey('matches_players_details.id'))
match_player_detail = db.relationship('MatchesPlayersDetail')
class PlayerTime(db.Model):
__tablename__ = 'player_times'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('player_times_id_seq'::regclass)"))
match_player_detail_id = db.Column(ForeignKey('matches_players_details.id'))
time = db.Column(db.Integer)
gold = db.Column(db.Integer)
lh = db.Column(db.Integer)
xp = db.Column(db.Integer)
match_player_detail = db.relationship('MatchesPlayersDetail')
class PurchaseLog(db.Model):
__tablename__ = 'purchase_logs'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('purchase_logs_id_seq'::regclass)"))
match_player_detail_id = db.Column(db.ForeignKey('matches_players_details.id'))
item_id = db.Column(db.ForeignKey('items.id'))
time = db.Column(db.Integer)
item = db.relationship('Item')
match_player_detail = db.relationship('MatchesPlayersDetail')
class TeamfightsPlayer(db.Model):
__tablename__ = 'teamfights_players'
id = db.Column(db.Integer, primary_key=True, server_default=text("nextval('teamfights_players_id_seq'::regclass)"))
teamfight_id = db.Column(db.ForeignKey('teamfights.id'))
match_player_detail_id = db.Column(db.ForeignKey('matches_players_details.id'))
buyback = db.Column(db.Integer)
damage = db.Column(db.Integer)
deaths = db.Column(db.Integer)
gold_delta = db.Column(db.Integer)
xp_start = db.Column(db.Integer)
xp_end = db.Column(db.Integer)
match_player_detail = db.relationship('MatchesPlayersDetail')
teamfight = db.relationship('Teamfight')
@app.route('/')
def index():
print('Request for index page received')
return render_template('index.html')
# Task 2
@app.route('/v1/health', methods=['GET'])
def do_stuff():
conn = connect()
cur = conn.cursor()
cur.execute("SELECT VERSION()")
fetched_version = cur.fetchone()
cur.execute("SELECT pg_database_size('dota2')/1024/1024 as dota2_db_size")
fetched_size = cur.fetchone()
dic = {}
dic2 = {}
dic2['pgsql'] = dic
dic["dota2_db_size"] = fetched_size[0]
dic['version'] = fetched_version[0]
json_string = json.dumps(dic2)
cur.close()
return json_string
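# Example response shape for /v1/health (values are illustrative):
# {"pgsql": {"dota2_db_size": 123, "version": "PostgreSQL 13.4 ..."}}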
# TASK 4
@app.route('/v4/players/<string:player_id>/game_exp/', methods=['GET'])
def orm_game_exp(player_id):
player = Player.query.join(MatchesPlayersDetail, MatchesPlayersDetail.player_id == Player.id)\
.join(Hero, MatchesPlayersDetail.hero_id == Hero.id) \
.join(Match, MatchesPlayersDetail.match_id == Match.id) \
.filter(Player.id == player_id) \
.with_entities(
(db.func.round(db.cast(Match.duration, db.Numeric) / 60, 2)).label("match_duration_minutes"),
(db.func.greatest(MatchesPlayersDetail.level)).label('level_gained'),
            db.case([
                (db.and_(MatchesPlayersDetail.player_slot < 5, Match.radiant_win), True),
                (db.and_(db.not_(Match.radiant_win), MatchesPlayersDetail.player_slot > 127), True)
            ], else_=False).label("winner"),
(db.func.coalesce(MatchesPlayersDetail.xp_hero, 0) + db.func.coalesce(MatchesPlayersDetail.xp_creep, 0) +
db.func.coalesce(MatchesPlayersDetail.xp_other, 0) + db.func.coalesce(MatchesPlayersDetail.xp_roshan, 0)).label("experiences_gained")) \
.add_columns(
Player.id,
db.func.coalesce(Player.nick, 'unknown').label('player_nick'),
Hero.localized_name.label('hero_localized_name'),
Match.id.label('match_id'),
) \
.order_by(Match.id).all()
dic = {}
dic["id"] = int(player_id)
dic["player_nick"] = player[0].player_nick
matches = []
for p in player:
match = {}
match['match_id'] = p.match_id
match['hero_localized_name'] = p.hero_localized_name
match['match_duration_minutes'] = float(p.match_duration_minutes)
match['experiences_gained'] = p.experiences_gained
match['level_gained'] = p.level_gained
match['winner'] = p.winner
matches.append(match)
dic['matches'] = matches
return Response(json.dumps(dic), status=200, mimetype="application/json")
@app.route('/v4/players/<string:player_id>/game_objectives/', methods=['GET'])
def orm_game_objectives(player_id):
player = Player.query.join(MatchesPlayersDetail, MatchesPlayersDetail.player_id == Player.id) \
.join(Hero, MatchesPlayersDetail.hero_id == Hero.id) \
.join(GameObjective, GameObjective.match_player_detail_id_1 == MatchesPlayersDetail.id) \
.filter(Player.id == player_id) \
.with_entities (
db.func.coalesce(GameObjective.subtype, 'NO_ACTION').label("subtype"),
) \
.add_columns(
Player.id,
Player.nick.label("player_nick"),
MatchesPlayersDetail.match_id,
Hero.localized_name.label("hero_localized_name")
) \
.order_by(MatchesPlayersDetail.match_id, GameObjective.subtype).all()
dic = {}
dic['id'] = int(player_id)
matches = []
for p in player:
if not 'player_nick' in dic.keys():
dic['player_nick'] = p.player_nick
current_match = None
for match in matches:
if match['match_id'] == p.match_id:
current_match = match
break
if current_match is not None:
current = None
for action in current_match['actions']:
if action['hero_action'] == p.subtype:
current = action
break
if current is not None:
current['count'] += 1
else:
current = {}
current['hero_action'] = p.subtype
current['count'] = 1
current_match['actions'].append(current)
else:
current_match = {}
current_match['match_id'] = p.match_id
current_match['hero_localized_name'] = p.hero_localized_name
matches.append(current_match)
current_match['actions'] = []
action = {}
action['hero_action'] = p.subtype
action['count'] = 1
current_match['actions'].append(action)
dic['matches'] = matches
return Response(json.dumps(dic), status=200, mimetype="application/json")
# Task 3
# 1
@app.route('/v2/patches/', methods=['GET'])
def get_patches():
conn = connect()
cur = conn.cursor()
cur.execute("select vsetkymece.match_id, round (vsetkymece.duration/60.0, 2) as duration, patches.name as patch_version, "
"cast (extract (epoch from patches.release_date) as INT) as patch_start_date, "
"cast (extract (epoch from next_patch.release_date) as INT) as patch_end_date "
"from patches "
"left join patches as next_patch on patches.id = next_patch.id - 1 "
"left join ("
"select matches.id as match_id, duration, start_time "
"from matches "
") as vsetkymece on (vsetkymece.start_time > extract(epoch from patches.release_date) "
"and vsetkymece.start_time < coalesce (extract (epoch from next_patch.release_date) , 9999999999)) "
"order by patches.id"
)
dic = {}
dic['patches'] = []
for column in cur:
current_patch = None
for patch in dic['patches']:
# column patch_version
if patch['patch_version'] == str(column[2]):
current_patch = patch
break
if current_patch is not None:
match = {}
match['match_id'] = column[0]
match['duration'] = float(column[1])
current_patch['matches'].append(match)
else:
current_patch = {}
current_patch['patch_version'] = column[2]
current_patch['patch_start_date'] = column[3]
current_patch['patch_end_date'] = column[4]
current_patch['matches'] = []
if column[0] is not None:
match = {}
match['match_id'] = column[0]
match['duration'] = float(column[1])
current_patch['matches'].append(match)
dic['patches'].append(current_patch)
json_string = json.dumps(dic)
cur.close()
return json_string
# 2
@app.route('/v2/players/<string:player_id>/game_exp/', methods=['GET'])
def get_game_exp(player_id):
conn = connect()
cur = conn.cursor()
cur.execute("select coalesce (nick, 'nick') from players "
"where id = " + player_id)
matches = []
dic = {}
dic['id'] = int(player_id)
dic['player_nick'] = cur.fetchone()[0]
cur.execute("select pl.id, coalesce (pl.nick,'unknown') as player_nick, match_id, "
"localized_name as hero_localized_name, "
"round (m.duration / 60.0, 2) as match_duration_minutes, "
"coalesce (xp_hero, 0) + coalesce(xp_creep,0) + coalesce(xp_other,0) + coalesce(xp_roshan,0) as experiences_gained, "
"greatest(level) as level_gained, "
"case when mpd.player_slot < 5 and m.radiant_win = true or mpd.player_slot > 127 and m.radiant_win = false "
"then true else false end as winner "
"from matches_players_details as mpd "
"join players as pl "
"on pl.id = mpd.player_id "
"join heroes as hero "
"on mpd.hero_id = hero.id "
"join matches as m "
"on mpd.match_id = m.id "
"where pl.id = " + player_id +
" order by m.id"
)
for column in cur:
match = {}
match['match_id'] = column[2]
match['hero_localized_name'] = column[3]
match['match_duration_minutes'] = float(column[4])
match['experiences_gained'] = column[5]
match['level_gained'] = column[6]
match['winner'] = column[7]
matches.append(match)
dic['matches'] = matches
json_string = json.dumps(dic)
cur.close()
return json_string
# 3
@app.route('/v2/players/<string:player_id>/game_objectives/', methods=['GET'])
def game_objectives(player_id):
conn = connect()
cur = conn.cursor()
cur.execute("select pl.id, pl.nick as player_nick, mpd.match_id, heroes.localized_name, "
"coalesce(game_objectives.subtype, 'NO_ACTION') "
"from players as pl "
"left join matches_players_details as mpd on mpd.player_id = pl.id "
"left join heroes on heroes.id = mpd.hero_id "
"left join game_objectives on game_objectives.match_player_detail_id_1 = mpd.id "
"where pl.id = " + player_id +
" order by mpd.match_id, subtype")
dic = {}
matches = []
dic['id'] = int(player_id)
for column in cur:
if not 'player_nick' in dic.keys():
dic['player_nick'] = column[1]
current_match = None
for match in matches:
if match['match_id'] == column[2]:
current_match = match
break
if current_match is not None:
current = None
for action in current_match['actions']:
if action['hero_action'] == column[4]:
current = action
break
if current is not None:
current['count'] += 1
else:
current = {}
current['hero_action'] = column[4]
current['count'] = 1
current_match['actions'].append(current)
else:
current_match = {}
current_match['match_id'] = column[2]
current_match['hero_localized_name'] = column[3]
matches.append(current_match)
current_match['actions'] = []
action = {}
action['hero_action'] = column[4]
action['count'] = 1
current_match['actions'].append(action)
dic['matches'] = matches
json_string = json.dumps(dic)
cur.close()
return json_string
# TASK 5
# 1
@app.route('/v3/matches/<string:match_id>/top_purchases/', methods=['GET'])
def top_purch(match_id):
conn = connect()
cur = conn.cursor()
cur.execute(("WITH query_d as ( "
"SELECT items.name, mt.radiant_win, mt_p_det.id mpd_id, mt.id match_id, "
"heroes.localized_name, mt_p_det.player_id, mt_p_det.hero_id, "
"mt_p_det.player_slot, p.item_id, "
"COUNT (p.item_id) buy_count, "
"ROW_NUMBER() OVER (PARTITION BY mt_p_det.id ORDER BY COUNT(p.item_id) DESC, items.name ASC) "
"FROM matches mt "
"JOIN matches_players_details mt_p_det ON mt_p_det.match_id = mt.id "
"JOIN purchase_logs p ON mt_p_det.id = match_player_detail_id "
"JOIN heroes ON mt_p_det.hero_id = heroes.id "
"JOIN items ON p.item_id = items.id "
"WHERE mt.id = {} AND CASE "
"WHEN mt_p_det.player_slot >= 0 AND mt_p_det.player_slot <= 4 AND mt.radiant_win = true THEN true "
"WHEN mt_p_det.player_slot >= 128 AND mt_p_det.player_slot <= 132 AND mt.radiant_win = false THEN true "
"END "
"GROUP BY(mt.id,heroes.localized_name, items.name, mpd_id, p.item_id) "
"ORDER BY mt_p_det.hero_id ASC, buy_count DESC, items.name ASC) "
"SELECT * FROM query_d "
"WHERE ROW_NUMBER <= 5 ").format(match_id))
dic = {}
dic['id'] = int(match_id)
heroes = []
for column in cur:
act_hero = None
for hero in heroes:
if hero['id'] == column[6]:
act_hero = hero
break
if act_hero is None:
act_hero = {}
act_hero['id'] = column[6]
act_hero['name'] = column[4]
purchases = []
purchase = {}
purchase['id'] = column[8]
purchase['name'] = column[0]
purchase['count'] = column[9]
purchases.append(purchase)
act_hero['top_purchases'] = purchases
heroes.append(act_hero)
else:
purchases = act_hero['top_purchases']
purchase = {}
purchase['id'] = column[8]
purchase['name'] = column[0]
purchase['count'] = column[9]
purchases.append(purchase)
dic['heroes'] = heroes
json_string = json.dumps(dic)
cur.close()
return json_string
def linux_version():
var = dotenv_values("/home/en_var.env")
return psy.connect(
host="147.175.150.216",
database="dota2",
user=var['DBUSER'],
password=var['DBPASS'])
def win_version():
return psy.connect(
host="147.175.150.216",
database="dota2",
user=os.getenv('DBUSER'),
password=os.getenv('DBPASS'))
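# connect() picks the credential source by host OS: on Linux DBUSER/DBPASS are read
# from /home/en_var.env, on anything else from the process environment variables.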
def connect():
if platform.system() == "Linux":
return linux_version()
else:
return win_version()
@app.route('/hello', methods=['POST'])
def hello():
name = request.form.get('name')
if name:
print('Request for hello page received with name=%s' % name)
return render_template('hello.html', name = name)
else:
print('Request for hello page received with no name or blank name -- redirecting')
return redirect(url_for('index'))
if __name__ == '__main__':
app.run() | []
| []
| [
"DBUSER",
"DBPASS"
]
| [] | ["DBUSER", "DBPASS"] | python | 2 | 0 | |
pkg/preparer/orchestrate.go | package preparer
import (
"os"
"path/filepath"
"time"
"github.com/square/p2/Godeps/_workspace/src/github.com/Sirupsen/logrus"
"github.com/square/p2/pkg/auth"
"github.com/square/p2/pkg/hooks"
"github.com/square/p2/pkg/kp"
"github.com/square/p2/pkg/logging"
"github.com/square/p2/pkg/pods"
"github.com/square/p2/pkg/types"
"github.com/square/p2/pkg/util/size"
)
// The Pod ID of the preparer.
// Used because the preparer special-cases itself in a few places.
const POD_ID = types.PodID("p2-preparer")
const svlogdExec = "svlogd -tt ./main"
type Pod interface {
hooks.Pod
Launch(pods.Manifest) (bool, error)
Install(pods.Manifest) error
Uninstall() error
Verify(pods.Manifest, auth.Policy) error
Halt(pods.Manifest) (bool, error)
Prune(size.ByteCount, pods.Manifest)
}
type Hooks interface {
RunHookType(hookType hooks.HookType, pod hooks.Pod, manifest pods.Manifest) error
}
type Store interface {
ListPods(podPrefix kp.PodPrefix, nodeName string) ([]kp.ManifestResult, time.Duration, error)
	SetPod(podPrefix kp.PodPrefix, nodeName string, podManifest pods.Manifest) (time.Duration, error)
Pod(podPrefix kp.PodPrefix, nodeName string, podId types.PodID) (pods.Manifest, time.Duration, error)
DeletePod(podPrefix kp.PodPrefix, nodeName string, podId types.PodID) (time.Duration, error)
WatchPods(
podPrefix kp.PodPrefix,
nodeName string,
quitChan <-chan struct{},
errorChan chan<- error,
podChan chan<- []kp.ManifestResult,
)
}
func (p *Preparer) WatchForHooks(quit chan struct{}) {
hookErrCh := make(chan error)
hookQuitCh := make(chan struct{})
go p.hookListener.Sync(hookQuitCh, hookErrCh)
for {
select {
case <-quit:
hookQuitCh <- struct{}{}
return
case err := <-hookErrCh:
p.Logger.WithError(err).Errorln("Error updating hooks")
}
}
}
// Sync the hooks just once. This is useful at preparer startup so that hooks
// will be synced before normal pod management begins
func (p *Preparer) SyncHooksOnce() error {
return p.hookListener.SyncOnce()
}
func (p *Preparer) WatchForPodManifestsForNode(quitAndAck chan struct{}) {
pods.Log = p.Logger
// This allows us to signal the goroutine watching consul to quit
quitChan := make(chan struct{})
errChan := make(chan error)
podChan := make(chan []kp.ManifestResult)
go p.store.WatchPods(kp.INTENT_TREE, p.node, quitChan, errChan, podChan)
// we will have one long running goroutine for each app installed on this
// host. We keep a map of podId => podChan so we can send the new manifests
// that come in to the appropriate goroutine
podChanMap := make(map[types.PodID]chan ManifestPair)
// we can't use a shared quit channel for all the goroutines - otherwise,
// we would exit the program before the goroutines actually accepted the
// quit signal. to be sure that each goroutine is done, we have to block and
// wait for it to receive the signal
quitChanMap := make(map[types.PodID]chan struct{})
for {
select {
case err := <-errChan:
p.Logger.WithError(err).
Errorln("there was an error reading the manifest")
case intentResults := <-podChan:
realityResults, _, err := p.store.ListPods(kp.REALITY_TREE, p.node)
if err != nil {
p.Logger.WithError(err).Errorln("Could not check reality")
} else {
// if the preparer's own ID is missing from the intent set, we
// assume it was damaged and discard it
if !checkResultsForID(intentResults, POD_ID) {
p.Logger.NoFields().Errorln("Intent results set did not contain p2-preparer pod ID, consul data may be corrupted")
} else {
resultPairs := ZipResultSets(intentResults, realityResults)
for _, pair := range resultPairs {
if _, ok := podChanMap[pair.ID]; !ok {
// spin goroutine for this pod
podChanMap[pair.ID] = make(chan ManifestPair)
quitChanMap[pair.ID] = make(chan struct{})
go p.handlePods(podChanMap[pair.ID], quitChanMap[pair.ID])
}
podChanMap[pair.ID] <- pair
}
}
}
case <-quitAndAck:
for podToQuit, quitCh := range quitChanMap {
p.Logger.WithField("pod", podToQuit).Infoln("Quitting...")
quitCh <- struct{}{}
}
close(quitChan)
p.Logger.NoFields().Infoln("Done, acknowledging quit")
quitAndAck <- struct{}{} // acknowledge quit
return
}
}
}
func (p *Preparer) tryRunHooks(hookType hooks.HookType, pod hooks.Pod, manifest pods.Manifest, logger logging.Logger) {
err := p.hooks.RunHookType(hookType, pod, manifest)
if err != nil {
logger.WithErrorAndFields(err, logrus.Fields{
"hooks": hookType}).Warnln("Could not run hooks")
}
}
// no return value, no output channels. This should do everything it needs to do
// without outside intervention (other than being signalled to quit)
func (p *Preparer) handlePods(podChan <-chan ManifestPair, quit <-chan struct{}) {
// install new launchables
var nextLaunch ManifestPair
// used to track if we have work to do (i.e. pod manifest came through channel
// and we have yet to operate on it)
working := false
var manifestLogger logging.Logger
for {
select {
case <-quit:
return
case nextLaunch = <-podChan:
var sha string
if nextLaunch.Intent != nil {
sha, _ = nextLaunch.Intent.SHA()
} else {
sha, _ = nextLaunch.Reality.SHA()
}
manifestLogger = p.Logger.SubLogger(logrus.Fields{
"pod": nextLaunch.ID,
"sha": sha,
})
manifestLogger.NoFields().Debugln("New manifest received")
if nextLaunch.Intent == nil {
// if intent=nil then reality!=nil and we need to delete the pod
// therefore we must set working=true here
working = true
} else {
// non-nil intent manifests need to be authorized first
working = p.authorize(nextLaunch.Intent, manifestLogger)
if !working {
p.tryRunHooks(
hooks.AFTER_AUTH_FAIL,
pods.NewPod(nextLaunch.ID, pods.PodPath(p.podRoot, nextLaunch.ID)),
nextLaunch.Intent,
manifestLogger,
)
}
}
case <-time.After(1 * time.Second):
if working {
pod := pods.NewPod(nextLaunch.ID, pods.PodPath(p.podRoot, nextLaunch.ID))
// TODO better solution: force the preparer to have a 0s default timeout, prevent KILLs
if pod.Id == POD_ID {
pod.DefaultTimeout = time.Duration(0)
}
for _, testPodId := range p.logExecTestGroup {
if pod.Id == testPodId {
pod.LogExec = logBridgeExec(pod)
}
}
// podChan is being fed values gathered from a kp.Watch() in
// WatchForPodManifestsForNode(). If the watch returns a new pair of
// intent/reality values before the previous change has finished
// processing in resolvePair(), the reality value will be stale. This
// leads to a bug where the preparer will appear to update a package
// and when that is finished, "update" it again.
//
// The correct solution probably involves watching reality and intent
// and feeding updated pairs to a control loop.
//
// This is a quick fix to ensure that the reality value being used is
// up-to-date. The de-bouncing logic in this method should ensure that the
// intent value is fresh (to the extent that Consul is timely). Fetching
// the reality value again ensures its freshness too.
reality, _, err := p.store.Pod(kp.REALITY_TREE, p.node, nextLaunch.ID)
if err == pods.NoCurrentManifest {
nextLaunch.Reality = nil
} else if err != nil {
manifestLogger.WithError(err).Errorln("Error getting reality manifest")
break
} else {
nextLaunch.Reality = reality
}
ok := p.resolvePair(nextLaunch, pod, manifestLogger)
if ok {
nextLaunch = ManifestPair{}
working = false
}
}
}
}
}
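// logBridgeExec builds the argv used as the pod's LogExec: p2-exec runs p2-log-bridge
// as the "nobody" user with the pod's env dir, bridging logs into the svlogd command.
// Assuming POD_HOME=/data/pods (illustrative), the tail of the command would be roughly:
// /data/pods/p2-log-bridge/current/bin/p2-log-bridge bridge -logExec "svlogd -tt ./main"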
func logBridgeExec(pod *pods.Pod) []string {
return []string{
pod.P2Exec,
"-u",
"nobody",
"-e",
pod.EnvDir(),
"--",
filepath.Join(os.Getenv("POD_HOME"), "p2-log-bridge", "current", "bin", "p2-log-bridge"),
"bridge",
"-logExec",
svlogdExec,
}
}
// check if a manifest satisfies the authorization requirement of this preparer
func (p *Preparer) authorize(manifest pods.Manifest, logger logging.Logger) bool {
err := p.authPolicy.AuthorizeApp(manifest, logger)
if err != nil {
if err, ok := err.(auth.Error); ok {
logger.WithFields(err.Fields).Errorln(err)
} else {
logger.NoFields().Errorln(err)
}
return false
}
return true
}
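// resolvePair decides what to do with an intent/reality pair: no reality manifest means
// install and launch, no intent manifest means halt and uninstall, differing SHAs mean
// reinstall and relaunch, and identical SHAs mean nothing to do.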
func (p *Preparer) resolvePair(pair ManifestPair, pod Pod, logger logging.Logger) bool {
// do not remove the logger argument, it's not the same as p.Logger
var oldSHA, newSHA string
if pair.Reality != nil {
oldSHA, _ = pair.Reality.SHA()
}
if pair.Intent != nil {
newSHA, _ = pair.Intent.SHA()
}
if oldSHA == "" {
logger.NoFields().Infoln("manifest is new, will update")
return p.installAndLaunchPod(pair, pod, logger)
}
if newSHA == "" {
logger.NoFields().Infoln("manifest was deleted from intent, will remove")
return p.stopAndUninstallPod(pair, pod, logger)
}
if oldSHA == newSHA {
logger.NoFields().Debugln("manifest is unchanged, no action required")
return true
}
logger.WithField("old_sha", oldSHA).Infoln("manifest SHA has changed, will update")
return p.installAndLaunchPod(pair, pod, logger)
}
func (p *Preparer) installAndLaunchPod(pair ManifestPair, pod Pod, logger logging.Logger) bool {
p.tryRunHooks(hooks.BEFORE_INSTALL, pod, pair.Intent, logger)
logger.NoFields().Infoln("Installing pod and launchables")
err := pod.Install(pair.Intent)
if err != nil {
// install failed, abort and retry
logger.WithError(err).Errorln("Install failed")
return false
}
err = pod.Verify(pair.Intent, p.authPolicy)
if err != nil {
logger.WithError(err).
Errorln("Pod digest verification failed")
p.tryRunHooks(hooks.AFTER_AUTH_FAIL, pod, pair.Intent, logger)
return false
}
p.tryRunHooks(hooks.AFTER_INSTALL, pod, pair.Intent, logger)
if pair.Reality != nil {
success, err := pod.Halt(pair.Reality)
if err != nil {
logger.WithError(err).
Errorln("Pod halt failed")
} else if !success {
logger.NoFields().Warnln("One or more launchables did not halt successfully")
}
}
p.tryRunHooks(hooks.BEFORE_LAUNCH, pod, pair.Intent, logger)
logger.NoFields().Infoln("Calling enable and setting up runit services")
ok, err := pod.Launch(pair.Intent)
if err != nil {
logger.WithError(err).
Errorln("Launch failed")
} else {
duration, err := p.store.SetPod(kp.REALITY_TREE, p.node, pair.Intent)
if err != nil {
logger.WithErrorAndFields(err, logrus.Fields{
"duration": duration}).
Errorln("Could not set pod in reality store")
}
p.tryRunHooks(hooks.AFTER_LAUNCH, pod, pair.Intent, logger)
pod.Prune(p.maxLaunchableDiskUsage, pair.Intent) // errors are logged internally
}
return err == nil && ok
}
func (p *Preparer) stopAndUninstallPod(pair ManifestPair, pod Pod, logger logging.Logger) bool {
success, err := pod.Halt(pair.Reality)
if err != nil {
logger.WithError(err).Errorln("Pod halt failed")
} else if !success {
logger.NoFields().Warnln("One or more launchables did not halt successfully")
}
p.tryRunHooks(hooks.BEFORE_UNINSTALL, pod, pair.Reality, logger)
err = pod.Uninstall()
if err != nil {
logger.WithError(err).Errorln("Uninstall failed")
return false
}
logger.NoFields().Infoln("Successfully uninstalled")
dur, err := p.store.DeletePod(kp.REALITY_TREE, p.node, pair.ID)
if err != nil {
logger.WithErrorAndFields(err, logrus.Fields{"duration": dur}).
Errorln("Could not delete pod from reality store")
}
return true
}
// Close() releases any resources held by a Preparer.
func (p *Preparer) Close() {
p.authPolicy.Close()
// The same verifier is shared twice internally
p.hookListener.authPolicy = nil
p.authPolicy = nil
}
| [
"\"POD_HOME\""
]
| []
| [
"POD_HOME"
]
| [] | ["POD_HOME"] | go | 1 | 0 | |
examples/orcid_app.py | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
r"""Minimal Flask application example for development with orcid handler.
SPHINX-START
1. Register an orcid application with `Authorization callback URL` as
`http://localhost:5000/oauth/authorized/orcid/`
2. Install oauthclient:
.. code-block:: console
cdvirtualenv src/invenio-oauthclient
pip install -e .[orcid]
3. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration as `consumer_key` and
`consumer_secret`.
.. code-block:: console
$ export ORCID_APP_CREDENTIALS_KEY=my_orcid_client_id
$ export ORCID_APP_CREDENTIALS_SECRET=my_orcid_client_secret
4. Create database and tables:
.. code-block:: console
$ pip install -e .[all]
$ cd examples
$ export FLASK_APP=orcid_app.py
$ ./app-setup.sh
You can find the database in `examples/orcid_app.db`.
5. Run the development server:
.. code-block:: console
$ flask run -p 5000 -h '0.0.0.0'
6. Open the page `http://0.0.0.0:5000/orcid` in a browser.
You will be redirected to orcid to authorize the application.
Click on `Authorize application` and you will be redirected back to
`http://0.0.0.0:5000/oauth/authorized/orcid/`, where you will be able to
finalize the local user registration by entering an email address.
Insert e.g. `[email protected]` as the email address and submit the form.
Now you will be back on the homepage, but this time it says: `hello [email protected]`.
You have completed the user registration.
7. To be able to uninstall the example app:
.. code-block:: console
$ ./app-teardown.sh
SPHINX-END
"""
import os
from flask import Flask, redirect, url_for
from flask_babelex import Babel
from flask_login import current_user
from flask_menu import Menu as FlaskMenu
from invenio_accounts import InvenioAccounts
from invenio_accounts.views import blueprint as blueprint_user
from invenio_db import InvenioDB
from invenio_mail import InvenioMail as Mail
from invenio_userprofiles import InvenioUserProfiles
from invenio_userprofiles.views import \
blueprint_api_init as blueprint_userprofile_api_init
from invenio_userprofiles.views import \
blueprint_ui_init as blueprint_userprofile_ui_init
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib import orcid
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
from invenio_oauthclient._compat import monkey_patch_werkzeug # noqa isort:skip
monkey_patch_werkzeug() # noqa isort:skip
from flask_oauthlib.client import OAuth as FlaskOAuth # noqa isort:skip
# [ Configure application credentials ]
ORCID_APP_CREDENTIALS = dict(
consumer_key=os.environ.get('ORCID_APP_CREDENTIALS_KEY'),
consumer_secret=os.environ.get('ORCID_APP_CREDENTIALS_SECRET'),
)
# Create Flask application
app = Flask(__name__)
app.config.update(
SQLALCHEMY_ECHO=False,
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite:///orcid_app.db'
),
OAUTHCLIENT_REMOTE_APPS=dict(
orcid=orcid.REMOTE_SANDBOX_APP,
),
ORCID_APP_CREDENTIALS=ORCID_APP_CREDENTIALS,
DEBUG=True,
SECRET_KEY='TEST',
SECURITY_PASSWORD_SALT='security-password-salt',
SECURITY_LOGIN_WITHOUT_CONFIRMATION=False,
USERPROFILES_EXTEND_SECURITY_FORMS=True,
SQLALCHEMY_TRACK_MODIFICATIONS=False,
APP_THEME=['semantic-ui'],
THEME_ICONS={
'semantic-ui': dict(
link='linkify icon'
)
}
)
Babel(app)
FlaskMenu(app)
Mail(app)
InvenioDB(app)
InvenioAccounts(app)
InvenioUserProfiles(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
app.register_blueprint(blueprint_user)
app.register_blueprint(blueprint_client)
app.register_blueprint(blueprint_settings)
app.register_blueprint(blueprint_userprofile_api_init)
app.register_blueprint(blueprint_userprofile_ui_init)
@app.route('/')
def index():
"""Homepage."""
return 'Home page (without any restrictions)'
@app.route('/orcid')
def orcid():
"""Try to print user email or redirect to login with orcid."""
if not current_user.is_authenticated:
return redirect(url_for('invenio_oauthclient.login',
remote_app='orcid'))
return 'hello {}'.format(current_user.email)
| []
| []
| [
"SQLALCHEMY_DATABASE_URI",
"ORCID_APP_CREDENTIALS_SECRET",
"ORCID_APP_CREDENTIALS_KEY"
]
| [] | ["SQLALCHEMY_DATABASE_URI", "ORCID_APP_CREDENTIALS_SECRET", "ORCID_APP_CREDENTIALS_KEY"] | python | 3 | 0 | |
examples/tracing/honeycomb/ride/ride.go | package ride
import (
"context"
"os"
"time"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"rideshare/log"
)
func FindNearestVehicle(ctx context.Context, searchRadius int64, vehicle string) {
ctx, span := otel.GetTracerProvider().Tracer("").Start(ctx, "FindNearestVehicle")
span.SetAttributes(attribute.String("vehicle", vehicle))
defer span.End()
logger := log.Logger(ctx).WithFields(logrus.Fields{
"radius": searchRadius,
"vehicle": vehicle,
})
logger.Info("looking for nearest vehicle")
burnCPU(searchRadius)
if vehicle == "car" {
checkDriverAvailability(ctx, searchRadius)
}
}
func checkDriverAvailability(ctx context.Context, n int64) {
ctx, span := otel.GetTracerProvider().Tracer("").Start(ctx, "CheckDriverAvailability")
defer span.End()
region := os.Getenv("REGION")
logger := log.Logger(ctx).WithField("region", region)
logger.Info("checking for driver availability")
burnCPU(n / 2)
// On even-numbered minutes this artificially makes requests in the us-west-1 region slow;
// this is just for demonstration purposes to show how performance impacts show
// up in the flamegraph.
if region == "us-west-1" && time.Now().Minute()*4%8 == 0 {
burnCPU(n * 2)
}
logger.Info("vehicle found")
}
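// burnCPU keeps the CPU busy with nested counting loops so the work shows up clearly in the
// profiler; larger n means proportionally more work.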
func burnCPU(n int64) {
var v int
for i := int64(0); i < n*2; i++ {
for j := 0; j < 1<<30; j++ {
v++
}
}
}
| [
"\"REGION\""
]
| []
| [
"REGION"
]
| [] | ["REGION"] | go | 1 | 0 | |
web_reamer.py | #!/usr/bin/env python3
import sys
import os
import re
import argparse
import requests
from bs4 import BeautifulSoup as bs
version=1.1
print("""\033[1;36m
╦ ╦╔═╗╔╗ ╦═╗╔═╗╔═╗╔╦╗╔═╗╦═╗
║║║║╣ ╠╩╗ ╠╦╝║╣ ╠═╣║║║║╣ ╠╦╝
╚╩╝╚═╝╚═╝────╩╚═╚═╝╩ ╩╩ ╩╚═╝╩╚═
🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥🔗🔥
--> Coded by FEBIN 🛡️🌐
\033[1;39m""")
def febrev_fuzz(url):
import requests
os.system("clear")
feblist=open("admin-panel.txt","r+")
text=str(feblist.read())
adminpages=list(text.split())
feblist.close()
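# Each candidate path from admin-panel.txt is appended to the target URL below and probed;
# responses with status 200 or 302 are reported as possible admin panels.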
print(f"""
[\033[1;37m+\033[1;39m] STARTED CRAWLING TO FIND ADMIN PANEL OF URL : \033[1;34m{url}
""")
try:
if url.startswith("https://") or url.startswith("http://"):
url=url
else:
print("Error : INVALID URL ! URL must start with 'http://' or 'https://'")
exit()
if url.endswith("/"):
url=url
server=requests.get(url).headers.get('Server')
print(f"\033[1;37m SERVER Type >> {server}")
print("\n<----------------------------------------------------------------------------------->")
print(" ")
else:
url=f"{url}/"
server=requests.get(url).headers.get('Server')
print(f"\033[1;37mSERVER Type >> {server}")
print("\n<----------------------------------------------------------------------------------->")
print(" ")
for i in range(len(adminpages)):
reqresp=requests.get(f"{url}/{adminpages[i]}",timeout=10)
if reqresp.status_code == 200:
print(f"\033[1;39m FOUND ==> {url}{adminpages[i]} \033[1;34m")
elif reqresp.status_code == 302:
print("\033[1;39m FOUND 302 ==> {url}{adminpages[i]} \033[1;34m")
else:
pass
except requests.exceptions.ConnectionError:
print("[\033[1;31m-\033[1;39m] Connection to the Server Failed, May be invalid URL or bad Internet connection. Check Your Internet connection,URL and try again\n ")
except requests.exceptions.ReadTimeout:
print("\033[1;31m [\033[1;31m-\033[1;39m] Error : EXECUTION STOPPED DUE TO !TIMED OUT! ERROR, YOUR INTERNET MAY BE DISCONNECTED!!!....EXITTED")
print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def sub_brute(domain,sublist):
if os.path.isfile(sublist):
print(f"[\033[1;37m+\033[1;39m] Subdomain wordlist {sublist} loaded -> OK")
print("")
pass
else:
print(f"[\033[1;31m-\033[1;39m] Wordlist {sublist} not found!!")
exit()
sub=open(sublist,"r+")
subs=sub.read().split("\n")
sub.close()
for host in subs:
try:
req=requests.get(f"http://{host}.{domain}")
print(f"\033[1;39m{host}.{domain} --> \033[1;37m{req.status_code}")
except requests.exceptions.ConnectionError:
pass
except UnicodeError:
pass
print("")
print("[\033[1;37m+\033[1;39m] Finshed!")
print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def wordlistgen(url,filepath):
import requests
from bs4 import BeautifulSoup
print("")
try:
webpage=requests.get(url)
pagedata=webpage.text
soup=BeautifulSoup(pagedata,"html.parser")
except requests.exceptions.ConnectionError:
print("\033[1;31m[-] ERROR CONNECTING THE SERVER...")
exit()
for script in soup(["script","style"]):
script.extract()
text1=soup.get_text()
text=str(text1.strip())
feb=text.split()
iscount=feb.count('is')
wascount=feb.count('was')
arecount=feb.count('are')
forcount=feb.count('for')
thecount=feb.count('the')
ofcount=feb.count('of')
tocount=feb.count('to')
try:
isinit=0
while isinit<=iscount:
feb.remove('is')
isinit=isinit+1
wasinit=0
while wasinit<=wascount:
feb.remove('was')
wasinit=wasinit+1
areinit=0
while areinit<=arecount:
feb.remove('are')
areinit=areinit+1
forinit=0
while forinit<=forcount:
feb.remove('for')
forinit=forinit+1
theinit=0
while theinit<=thecount:
feb.remove('the')
theinit=theinit+1
ofinit=0
while ofinit<=ofcount:
feb.remove('of')
ofinit=ofinit+1
toinit=0
while toinit<=tocount:
feb.remove('to')
toinit=toinit+1
except ValueError:
pass
feb.sort()
for string in feb:
count=feb.count(string)
strinit=0
while strinit < count:
feb.remove(string)
strinit=strinit+1
feb.sort()
for i in range(len(feb)):
try:
file=open(filepath,"a+")
file.write("\n"+feb[i])
file.close()
except FileNotFoundError:
homedir=os.environ.get('HOME')
file=open(f"{homedir}/fr-wordlist.txt","a+")
file.write("\n"+feb[i])
file.close()
if os.path.isfile(filepath):
print("")
print(f"\033[1;39m[\033[1;37m+\033[1;39m]Wordlist {filepath} successfully witten")
else:
print("\033[1;31m[-]Sorry:Path not Found!! The Path You Specified Doesn't Exist")
print("So Saved the wordlist as fr-wordlist.txt in the HOME Directory of the current User.....")
print("\033[1;37m WEB_REAMER Execution Completed. \033[1;33m!HAPPY HACKING! \033[1;34m \n")
def word_analyze(url):
import requests
from bs4 import BeautifulSoup
print("")
try:
webpage=requests.get(url)
pagedata=webpage.text
soup=BeautifulSoup(pagedata,"html.parser")
except requests.exceptions.ConnectionError:
print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
exit()
for script in soup(["script","style"]):
script.extract()
text1=soup.get_text()
text=str(text1.strip())
feb=text.split()
iscount=feb.count('is')
wascount=feb.count('was')
arecount=feb.count('are')
forcount=feb.count('for')
thecount=feb.count('the')
ofcount=feb.count('of')
tocount=feb.count('to')
try:
isinit=0
while isinit<=iscount:
feb.remove('is')
isinit=isinit+1
wasinit=0
while wasinit<=wascount:
feb.remove('was')
wasinit=wasinit+1
areinit=0
while areinit<=arecount:
feb.remove('are')
areinit=areinit+1
forinit=0
while forinit<=forcount:
feb.remove('for')
forinit=forinit+1
theinit=0
while theinit<=thecount:
feb.remove('the')
theinit=theinit+1
ofinit=0
while ofinit<=ofcount:
feb.remove('of')
ofinit=ofinit+1
toinit=0
while toinit<=tocount:
feb.remove('to')
toinit=toinit+1
except ValueError:
pass
feb.sort()
print("\033[1;32m-"*74)
print("\033[1;32m| Words | count/frequency | Graph | ")
print("\033[1;32m-"*74)
for string in feb:
count=feb.count(string)
for i in range(count):
feb.remove(string)
print(f"\033[1;34m| {string + ' ' * (22 - len(string)) + '| '}{str(count) +' ' * (22 - len(str(count)))}| \033[1;32m{'█' * count} " )
print("\033[1;33m-"*74)
def endpoint_harvest(url):
print(f"[\033[1;37m+\033[1;39m] Collecting Endpoints / Links from the webpage {url}")
from bs4 import BeautifulSoup
print("")
try:
webpage=requests.get(url)
pagedata=webpage.text
soup=BeautifulSoup(pagedata,"html.parser")
except requests.exceptions.ConnectionError:
print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
exit()
endpoint_pattern1=re.compile('(?:href=")(.*?)"')
endpoint_pattern2=re.compile('(?:src=")(.*?)"')
endpoint1=endpoint_pattern1.findall(pagedata)
endpoint2=endpoint_pattern2.findall(pagedata)
for link in endpoint1:
print(link.replace("href=","").replace("'","").replace(">","").replace('"','').replace("</"," "))
for src in endpoint2:
print(src.replace("src=","").replace("'","").replace(">","").replace('"','').replace("</"," "))
print("")
print("[\033[1;37m+\033[1;39m] Finished!")
def param(url):
from bs4 import BeautifulSoup
print("")
try:
webpage=requests.get(url)
pagedata=webpage.text
soup=BeautifulSoup(pagedata,"html.parser")
except requests.exceptions.ConnectionError:
print("\033[1;31m[\033[1;31m-\033[1;39m] ERROR CONNECTING THE SERVER...")
exit()
params=soup.find_all("input")
print("[\033[1;37m+\033[1;39m] Extracting Parameters from the WebPage!\n")
for param in params:
print(param.get("name"))
print("[\033[1;37m+\033[1;39m] Finished!")
parser = argparse.ArgumentParser(description='Parse the domain, wordlist etc..')
parser.add_argument('-link',dest='link', action='store_true',help='Extract Endpoints from url!')
parser.add_argument('-admin',dest='admin', action='store_true',help='Find Admin Panel of the given URL !')
parser.add_argument('-sub',dest='sub', action='store_true',help='Subdomain brute force of the given domain !')
parser.add_argument('-param',dest='param', action='store_true',help='Find hidden parameters from the given URL !')
parser.add_argument('-wordlist',dest='wordlist', action='store_true',help='Create targeted wordlist from the given URL !')
parser.add_argument('-analyze',dest='analyze', action='store_true',help='Analyze words and their frequencies from the given URL !')
parser.add_argument('-u',"--url",dest='url', action='store',help='The URL of the webpage!')
parser.add_argument('-d',"--domain",dest='domain', action='store',help='The domain name for sub domain brute-force!')
parser.add_argument('-w',"--wordlist",dest='list', action='store',help='Extract Endpoints from url!')
parser.add_argument('-o',"--outfile",dest='outfile', action='store',help='Output file to save the generated wordlist!!')
parser.add_argument('-v',"--version",dest='version', action='store_true',help='Version / Update Check !')
args=parser.parse_args()
try:
if args.link and args.url:
if args.url.startswith("http://") or args.url.startswith("https://"):
endpoint_harvest(args.url)
else:
print("[\033[1;31m-\033[1;39m] Invalid URL !")
exit()
elif args.admin and args.url:
if args.url.startswith("http://") or args.url.startswith("https://"):
febrev_fuzz(args.url)
else:
print("[\033[1;31m-\033[1;39m] Invalid URL !")
exit()
elif args.sub and args.domain and args.list:
if args.domain.startswith("http://") or args.domain.startswith("https://"):
print("[\033[1;31m-\033[1;39m] Expected Domain name not URL!")
exit()
else:
sub_brute(args.domain,args.list)
elif args.wordlist and args.url and args.outfile:
if args.url.startswith("http://") or args.url.startswith("https://"):
wordlistgen(args.url,args.outfile)
else:
print("[\033[1;31m-\033[1;39m] Invalid URL !")
exit()
elif args.analyze and args.url:
if args.url.startswith("http://") or args.url.startswith("https://"):
word_analyze(args.url)
else:
print("[\033[1;31m-\033[1;39m] Invalid URL !")
exit()
elif args.param and args.url:
if args.url.startswith("http://") or args.url.startswith("https://"):
param(args.url)
else:
print("[\033[1;31m-\033[1;39m] Invalid URL !")
exit()
elif args.version:
print(f"CURRENT VERSION : {version}")
try:
verq=requests.get("http://raw.githubusercontent.com/febinrev/web_reamer/master/version")
ver=float(verq.text.split()[0])
if ver > version:
print(f"[\033[1;37m+\033[1;39m] New Version {ver} of WEB_REAMER is available : https://github.com/febinrev/web_reamer.git")
else:
print("[\033[1;37m+\033[1;39m] WEB_REAMER is up-to-date!")
except requests.exceptions.ConnectionError:
print("[\033[1;31m-\033[1;39m] Error Connecting github !")
else:
print("""\033[1;33m
Usage:
\033[1;32m1. Endpoint / Link Extraction:
\033[1;39m ./web_reamer.py -link -u http://sample.com/ \033[1;32m
2. Admin Panel fuzzing:
\033[1;39m ./web_reamer.py -admin -u http://sample.com/ \033[1;32m
3. Subdomain Brute Force:
\033[1;39m ./web_reamer.py -sub -d sample.com -w subdomains.txt \033[1;32m
4. Find hidden parameters from webpage:
\033[1;39m ./web_reamer.py -param -u http://sample.com/ \033[1;32m
5. Create Targetted Wordlist from webpage:
\033[1;39m ./web_reamer.py -wordlist -u http://sample.com/ -o outfile_wordlist.txt \033[1;32m
6. Analyze Word frequencies from the WebPage :
\033[1;39m ./web_reamer.py -analyze -u http://sample.com/ \033[1;32m
7. Help :
\033[1;39m ./web_reamer.py -h \033[1;32m
\033[1;39m ./web_reamer.py --help \033[1;32m
8. Version / Update Check :
\033[1;39m ./web_reamer.py -v \033[1;32m
\033[1;39m ./web_reamer.py --version \033[1;32m
""")
except KeyboardInterrupt:
print("\n\033[1;39m[\033[1;31m-\033[1;39m] User Interruption! Exit!")
exit()
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
lambda/crawler.py | import difflib
import logging
import os
import re
import urllib.request
import boto3
LOG = logging.getLogger()
LOG.setLevel(logging.INFO)
BUCKET = os.environ['BUCKET']
TOPIC = os.environ['TOPIC']
PAGE_ENCODING = 'utf-8'
IGNORED_LINES = re.compile('^\\s*(<!-- Dynamic page generated in [\\d.]+ seconds. -->|<!-- Cached page generated by WP-Super-Cache on \\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2} -->)\\s*$')
# Assumes UTF-8
def main(event, context):
url = event['url']
title = event['title']
LOG.info("Title: {}, Url: {}, Bucket: {} Topic: {}".format(title, url, BUCKET, TOPIC))
resp = urllib.request.urlopen(url, timeout=5)
status = resp.status
LOG.info("Response status code: {}".format(resp.status))
current_page_body_bytes = resp.read()
current_page_body = current_page_body_bytes.decode(PAGE_ENCODING)
# LOG.debug("Response Body: {}", current_page_body_bytes)
s3_client = boto3.client('s3')
# print(s3_client.exceptions.__dict__) # figure out what exception class it was
object_key = url_to_object_key(url)
try:
previous_page_body_bytes = s3_client.get_object(Bucket=BUCKET, Key=object_key)['Body'].read()
previous_page_body = previous_page_body_bytes.decode(PAGE_ENCODING)
# LOG.debug("Previous Page Body: {}", previous_page_body_bytes)
except s3_client.exceptions.NoSuchKey:
LOG.info("No previous page content found in bucket.")
previous_page_body = ""
differ = difflib.Differ()
compare_result = list(differ.compare(previous_page_body.splitlines(keepends=True), current_page_body.splitlines(keepends=True)))
delta = []
# Filter the comparison result to include only changed lines
# (it includes ALL lines by default), and prepend the line number.
line_number = 1
for line in compare_result:
if (line.startswith('-') or line.startswith('+')) and IGNORED_LINES.search(line) is None:
delta.append("{:03} {}".format(line_number, line))
line_number = line_number + 1
delta_str = "".join(delta)
LOG.debug("Delta: %s", delta)
if len(delta) > 0:
LOG.info("Page content changed! Sending alert and storing new version.")
sns_client = boto3.client('sns')
sns_client.publish(
TopicArn=TOPIC,
Subject="{} Changed!".format(title),
Message="Check it out at {}\r\nDelta: \r\n{}".format(url, delta_str)
)
s3_client.put_object(Body=current_page_body, Bucket=BUCKET, Key=object_key)
return {
"message": "{} changed! Notified subscribers and saved new version.".format(title),
"url": url,
"delta": delta,
"s3_object": "{}/{}".format(BUCKET, object_key)
}
else:
LOG.info("No change to {} content.".format(title))
return {
"message": "No change.",
"url": url,
"delta": [],
"s3_object": "{}/{}".format(BUCKET, object_key)
}
def url_to_object_key(url):
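# Collapse every run of non-word characters into "-" and append ".html",
# e.g. "https://example.com/blog/" becomes "https-example-com-blog.html".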
p = re.compile('[^\\w]+')
object_key = p.sub('-', url) + ".html"
object_key = object_key.replace("-.html", ".html", 1)
return object_key
| []
| []
| [
"TOPIC",
"BUCKET"
]
| [] | ["TOPIC", "BUCKET"] | python | 2 | 0 | |
prov/hostspool/hostspool_mgr_allocation.go | // Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package hostspool
import (
"github.com/hashicorp/consul/api"
"github.com/pkg/errors"
"github.com/ystia/yorc/helper/consulutil"
"github.com/ystia/yorc/helper/labelsutil"
"net/url"
"path"
"strconv"
"strings"
"time"
)
func (cm *consulManager) Allocate(allocation *Allocation, filters ...labelsutil.Filter) (string, []labelsutil.Warning, error) {
return cm.allocateWait(maxWaitTimeSeconds*time.Second, allocation, filters...)
}
func (cm *consulManager) allocateWait(maxWaitTime time.Duration, allocation *Allocation, filters ...labelsutil.Filter) (string, []labelsutil.Warning, error) {
// Build allocationID
if err := allocation.buildID(); err != nil {
return "", nil, err
}
lockCh, cleanupFn, err := cm.lockKey("", "allocation", maxWaitTime)
if err != nil {
return "", nil, err
}
defer cleanupFn()
hosts, warnings, _, err := cm.List(filters...)
if err != nil {
return "", warnings, err
}
// Filters only free or allocated hosts in case of shareable allocation
var lastErr error
freeHosts := hosts[:0]
for _, h := range hosts {
select {
case <-lockCh:
return "", warnings, errors.New("admin lock lost on hosts pool during host allocation")
default:
}
err := cm.checkConnection(h)
if err != nil {
lastErr = err
continue
}
hs, err := cm.GetHostStatus(h)
if err != nil {
lastErr = err
} else {
if hs == HostStatusFree {
freeHosts = append(freeHosts, h)
} else if hs == HostStatusAllocated && allocation.Shareable {
allocations, err := cm.GetAllocations(h)
if err != nil {
lastErr = err
continue
}
// Check the host allocation is not unshareable
if len(allocations) == 1 && !allocations[0].Shareable {
continue
}
freeHosts = append(freeHosts, h)
}
}
}
if len(freeHosts) == 0 {
if lastErr != nil {
return "", warnings, lastErr
}
return "", warnings, errors.WithStack(noMatchingHostFoundError{})
}
// Get the first host that match
hostname := freeHosts[0]
select {
case <-lockCh:
return "", warnings, errors.New("admin lock lost on hosts pool during host allocation")
default:
}
if err := cm.addAllocation(hostname, allocation); err != nil {
return "", warnings, errors.Wrapf(err, "failed to add allocation for hostname:%q", hostname)
}
return hostname, warnings, cm.setHostStatus(hostname, HostStatusAllocated)
}
func (cm *consulManager) Release(hostname string, allocation *Allocation) error {
return cm.releaseWait(hostname, allocation, maxWaitTimeSeconds*time.Second)
}
func (cm *consulManager) releaseWait(hostname string, allocation *Allocation, maxWaitTime time.Duration) error {
// Build allocationID
if err := allocation.buildID(); err != nil {
return err
}
_, cleanupFn, err := cm.lockKey(hostname, "release", maxWaitTime)
if err != nil {
return err
}
defer cleanupFn()
if err := cm.removeAllocation(hostname, allocation); err != nil {
return errors.Wrapf(err, "failed to remove allocation with ID:%q and hostname:%q", allocation.ID, hostname)
}
host, err := cm.GetHost(hostname)
if err != nil {
return err
}
// Set the host status to free only for host with no allocations
if len(host.Allocations) == 0 {
if err = cm.setHostStatus(hostname, HostStatusFree); err != nil {
return err
}
}
err = cm.checkConnection(hostname)
if err != nil {
cm.backupHostStatus(hostname)
cm.setHostStatusWithMessage(hostname, HostStatusError, "failed to connect to host")
}
return nil
}
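// getAddAllocationsOperation builds the Consul KV transaction that persists each allocation under
// <HostsPoolPrefix>/<hostname>/allocations/<allocationID>/ with node_name, instance, deployment_id,
// shareable and per-resource keys.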
func getAddAllocationsOperation(hostname string, allocations []Allocation) (api.KVTxnOps, error) {
allocsOps := api.KVTxnOps{}
hostKVPrefix := path.Join(consulutil.HostsPoolPrefix, hostname)
if allocations != nil {
for _, alloc := range allocations {
allocKVPrefix := path.Join(hostKVPrefix, "allocations", alloc.ID)
allocOps := api.KVTxnOps{
&api.KVTxnOp{
Verb: api.KVSet,
Key: path.Join(allocKVPrefix),
Value: []byte(alloc.ID),
},
&api.KVTxnOp{
Verb: api.KVSet,
Key: path.Join(allocKVPrefix, "node_name"),
Value: []byte(alloc.NodeName),
},
&api.KVTxnOp{
Verb: api.KVSet,
Key: path.Join(allocKVPrefix, "instance"),
Value: []byte(alloc.Instance),
},
&api.KVTxnOp{
Verb: api.KVSet,
Key: path.Join(allocKVPrefix, "deployment_id"),
Value: []byte(alloc.DeploymentID),
},
&api.KVTxnOp{
Verb: api.KVSet,
Key: path.Join(allocKVPrefix, "shareable"),
Value: []byte(strconv.FormatBool(alloc.Shareable)),
},
}
for k, v := range alloc.Resources {
k = url.PathEscape(k)
if k == "" {
return nil, errors.WithStack(badRequestError{"empty labels are not allowed"})
}
allocOps = append(allocOps, &api.KVTxnOp{
Verb: api.KVSet,
Key: path.Join(allocKVPrefix, "resources", k),
Value: []byte(v),
})
}
allocsOps = append(allocsOps, allocOps...)
}
}
return allocsOps, nil
}
func (cm *consulManager) addAllocation(hostname string, allocation *Allocation) error {
var allocOps api.KVTxnOps
var err error
if allocOps, err = getAddAllocationsOperation(hostname, []Allocation{*allocation}); err != nil {
return errors.Wrapf(err, "failed to add allocation to host:%q", hostname)
}
ok, response, _, err := cm.cc.KV().Txn(allocOps, nil)
if err != nil {
return errors.Wrap(err, consulutil.ConsulGenericErrMsg)
}
if !ok {
// Check the response
errs := make([]string, 0)
for _, e := range response.Errors {
errs = append(errs, e.What)
}
return errors.Errorf("Failed to add allocation on host %q: %s", hostname, strings.Join(errs, ", "))
}
return nil
}
func (cm *consulManager) removeAllocation(hostname string, allocation *Allocation) error {
_, err := cm.cc.KV().DeleteTree(path.Join(consulutil.HostsPoolPrefix, hostname, "allocations", allocation.ID), nil)
return errors.Wrap(err, consulutil.ConsulGenericErrMsg)
}
func exist(allocations []Allocation, ID string) bool {
for _, alloc := range allocations {
if alloc.ID == ID {
return true
}
}
return false
}
func (cm *consulManager) GetAllocations(hostname string) ([]Allocation, error) {
allocations := make([]Allocation, 0)
if hostname == "" {
return nil, errors.WithStack(badRequestError{`"hostname" missing`})
}
keys, _, err := cm.cc.KV().Keys(path.Join(consulutil.HostsPoolPrefix, hostname, "allocations")+"/", "/", nil)
if err != nil {
return nil, errors.Wrap(err, consulutil.ConsulGenericErrMsg)
}
for _, key := range keys {
id := path.Base(key)
if exist(allocations, id) {
continue
}
alloc := Allocation{}
alloc.ID = id
kvp, _, err := cm.cc.KV().Get(path.Join(key, "node_name"), nil)
if err != nil {
return nil, errors.Wrap(err, consulutil.ConsulGenericErrMsg)
}
if kvp != nil && len(kvp.Value) > 0 {
alloc.NodeName = string(kvp.Value)
}
kvp, _, err = cm.cc.KV().Get(path.Join(key, "instance"), nil)
if err != nil {
return nil, errors.Wrap(err, consulutil.ConsulGenericErrMsg)
}
if kvp != nil && len(kvp.Value) > 0 {
alloc.Instance = string(kvp.Value)
}
kvp, _, err = cm.cc.KV().Get(path.Join(key, "deployment_id"), nil)
if err != nil {
return nil, errors.Wrap(err, consulutil.ConsulGenericErrMsg)
}
if kvp != nil && len(kvp.Value) > 0 {
alloc.DeploymentID = string(kvp.Value)
}
kvp, _, err = cm.cc.KV().Get(path.Join(key, "shareable"), nil)
if err != nil {
return nil, errors.Wrap(err, consulutil.ConsulGenericErrMsg)
}
if kvp != nil && len(kvp.Value) > 0 {
alloc.Shareable, err = strconv.ParseBool(string(kvp.Value))
if err != nil {
return nil, errors.Wrapf(err, "failed to parse boolean from value:%q", string(kvp.Value))
}
}
kvps, _, err := cm.cc.KV().List(path.Join(key, "resources"), nil)
if err != nil {
return nil, errors.Wrap(err, consulutil.ConsulGenericErrMsg)
}
resources := make(map[string]string, len(kvps))
for _, kvp := range kvps {
resources[path.Base(kvp.Key)] = string(kvp.Value)
}
alloc.Resources = resources
allocations = append(allocations, alloc)
}
return allocations, nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
cb-tumblebug/src/api/grpc/server/server.go | package server
import (
"errors"
"fmt"
"net"
"os"
gc "github.com/cloud-barista/cb-tumblebug/src/api/grpc/common"
"github.com/cloud-barista/cb-tumblebug/src/api/grpc/config"
"github.com/cloud-barista/cb-tumblebug/src/api/grpc/logger"
pb "github.com/cloud-barista/cb-tumblebug/src/api/grpc/protobuf/cbtumblebug"
grpc_common "github.com/cloud-barista/cb-tumblebug/src/api/grpc/server/common"
grpc_mcir "github.com/cloud-barista/cb-tumblebug/src/api/grpc/server/mcir"
grpc_mcis "github.com/cloud-barista/cb-tumblebug/src/api/grpc/server/mcis"
"google.golang.org/grpc/reflection"
)
// RunServer starts the gRPC server
func RunServer() {
logger := logger.NewLogger()
configPath := os.Getenv("CBTUMBLEBUG_ROOT") + "/conf/grpc_conf.yaml"
gConf, err := configLoad(configPath)
if err != nil {
logger.Error("failed to load config : ", err)
return
}
tumblebugsrv := gConf.GSL.TumblebugSrv
conn, err := net.Listen("tcp", tumblebugsrv.Addr)
if err != nil {
logger.Error("failed to listen: ", err)
return
}
cbserver, closer, err := gc.NewCBServer(tumblebugsrv)
if err != nil {
logger.Error("failed to create grpc server: ", err)
return
}
if closer != nil {
defer closer.Close()
}
gs := cbserver.Server
pb.RegisterUTILITYServer(gs, &grpc_common.UTILITYService{})
pb.RegisterNSServer(gs, &grpc_common.NSService{})
pb.RegisterMCIRServer(gs, &grpc_mcir.MCIRService{})
pb.RegisterMCISServer(gs, &grpc_mcis.MCISService{})
if tumblebugsrv.Reflection == "enable" {
if tumblebugsrv.Interceptors.AuthJWT != nil {
fmt.Printf("\n\n*** you can run reflection when jwt auth interceptor is not used ***\n\n")
} else {
reflection.Register(gs)
}
}
//fmt.Printf("\n[CB-Tumblebug: Multi-Cloud Infra Service Management]")
//fmt.Printf("\n Initiating GRPC API Server....__^..^__....")
fmt.Printf("⇨ grpc server started on [::]%s\n", tumblebugsrv.Addr)
if err := gs.Serve(conn); err != nil {
logger.Error("failed to serve: ", err)
}
}
func configLoad(cf string) (config.GrpcConfig, error) {
logger := logger.NewLogger()
// Create a configuration parser that uses Viper
parser := config.MakeParser()
var (
gConf config.GrpcConfig
err error
)
if cf == "" {
logger.Error("Please, provide the path to your configuration file")
return gConf, errors.New("configuration file are not specified")
}
logger.Debug("Parsing configuration file: ", cf)
if gConf, err = parser.GrpcParse(cf); err != nil {
logger.Error("ERROR - Parsing the configuration file.\n", err.Error())
return gConf, err
}
// Apply options given on the command line to the configuration (they take precedence)
// Check the required TUMBLEBUG input fields
tumblebugsrv := gConf.GSL.TumblebugSrv
if tumblebugsrv == nil {
return gConf, errors.New("tumblebugsrv field are not specified")
}
if tumblebugsrv.Addr == "" {
return gConf, errors.New("tumblebugsrv.addr field are not specified")
}
if tumblebugsrv.TLS != nil {
if tumblebugsrv.TLS.TLSCert == "" {
return gConf, errors.New("tumblebugsrv.tls.tls_cert field are not specified")
}
if tumblebugsrv.TLS.TLSKey == "" {
return gConf, errors.New("tumblebugsrv.tls.tls_key field are not specified")
}
}
if tumblebugsrv.Interceptors != nil {
if tumblebugsrv.Interceptors.AuthJWT != nil {
if tumblebugsrv.Interceptors.AuthJWT.JWTKey == "" {
return gConf, errors.New("tumblebugsrv.interceptors.auth_jwt.jwt_key field are not specified")
}
}
if tumblebugsrv.Interceptors.PrometheusMetrics != nil {
if tumblebugsrv.Interceptors.PrometheusMetrics.ListenPort == 0 {
return gConf, errors.New("tumblebugsrv.interceptors.prometheus_metrics.listen_port field are not specified")
}
}
if tumblebugsrv.Interceptors.Opentracing != nil {
if tumblebugsrv.Interceptors.Opentracing.Jaeger != nil {
if tumblebugsrv.Interceptors.Opentracing.Jaeger.Endpoint == "" {
return gConf, errors.New("tumblebugsrv.interceptors.opentracing.jaeger.endpoint field are not specified")
}
}
}
}
return gConf, nil
}
| [
"\"CBTUMBLEBUG_ROOT\""
]
| []
| [
"CBTUMBLEBUG_ROOT"
]
| [] | ["CBTUMBLEBUG_ROOT"] | go | 1 | 0 | |
proton-connect-file-loader/src/main/java/io/messaging/amqp/connect/MessagingConfigSource.java | package io.messaging.amqp.connect;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.eclipse.microprofile.config.spi.ConfigSource;
import io.vertx.core.json.JsonObject;
public class MessagingConfigSource implements ConfigSource {
// amqp-port=5672
// amqp-host=localhost
// amqp-username=demo-user
// amqp-password=demo-user
private static final Set<String> propertyNames;
static {
propertyNames = new HashSet<>();
propertyNames.add("amqp-username");
propertyNames.add("amqp-password");
propertyNames.add("amqp-host");
propertyNames.add("amqp-port");
propertyNames.add("amqp-use-ssl");
}
@Override
public Set<String> getPropertyNames() {
return propertyNames;
}
@Override
public Map<String, String> getProperties() {
try {
return loadConfig();
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public String getValue(String key) {
try {
return loadConfig().get(key);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
}
@Override
public String getName() {
return "messaging-credentials-config";
}
private Map<String, String> loadConfig() throws IOException {
System.out.println("Loading config!!!!!!!!!!!!!!");
String file = System.getenv("MESSAGING_CONNECT_FILE");
if (file == null) {
return new HashMap<>();
}
JsonObject config = new JsonObject(new String(Files.readAllBytes(Paths.get(file))));
Map<String, String> options = new HashMap<>();
options.put("amqp-host", config.getString("host"));
options.put("amqp-port", String.valueOf(config.getInteger("port")));
// options.put("amqp-username", config.getString("user"));
// options.put("amqp-password", config.getString("password"));
options.put("amqp-use-ssl", String.valueOf("amqps".equals(config.getString("scheme"))));
options.put("amqp-username", "@@serviceaccount@@");
options.put("amqp-password", readTokenFromFile());
return options;
}
private static String readTokenFromFile() throws IOException {
return new String(Files.readAllBytes(Paths.get("/var/run/secrets/kubernetes.io/serviceaccount/token")), StandardCharsets.UTF_8);
}
}
| [
"\"MESSAGING_CONNECT_FILE\""
]
| []
| [
"MESSAGING_CONNECT_FILE"
]
| [] | ["MESSAGING_CONNECT_FILE"] | java | 1 | 0 | |
bumptag.go | // The bumptag creates a new tag to release a new version of your code.
//
// The tool finds the last git tag, increments it and create new tag with a changelog.
// https://github.com/sv-tools/bumptag/blob/master/README.md
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"strconv"
"strings"
"github.com/coreos/go-semver/semver"
)
var (
version = "0.0.0"
tagPrefix = "v"
)
const (
defaultRemote = "origin"
defaultEditor = "vim"
)
func realGit(input string, arg ...string) (string, error) {
cmd := exec.Command("git", arg...)
if len(input) > 0 {
cmd.Stdin = strings.NewReader(input)
}
var stdout bytes.Buffer
cmd.Stdout = &stdout
var stderr bytes.Buffer
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
text := fmt.Sprintf(
"command '%s' failed: %s",
strings.Join(cmd.Args, " "),
err.Error(),
)
errText := strings.TrimSpace(stderr.String())
if len(errText) > 0 {
text += "\n" + errText
}
return "", errors.New(text)
}
return strings.TrimSpace(stdout.String()), nil
}
var git = realGit
func noOutputGit(input string, arg ...string) error {
_, err := git(input, arg...)
return err
}
func disableGPG() (string, error) {
output, _ := git("", "config", "--local", "--get", "log.showSignature")
if err := noOutputGit("", "config", "--local", "log.showSignature", "false"); err != nil {
return "", err
}
return output, nil
}
func restoreGPG(oldValue string) error {
if len(oldValue) > 0 {
return noOutputGit("", "config", "--local", "log.showSignature", oldValue)
}
return noOutputGit("", "config", "--local", "--unset", "log.showSignature")
}
func setUpGPG() (func(), error) {
oldValue, err := disableGPG()
if err != nil {
return nil, err
}
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, os.Interrupt)
go func() {
<-signalChan
_ = restoreGPG(oldValue)
os.Exit(42)
}()
return func() {
signal.Stop(signalChan)
_ = restoreGPG(oldValue)
}, nil
}
func gitConfig(name, defaultValue string) string {
output, err := git("", "config", "--get", name)
if err != nil {
return defaultValue
}
return output
}
func gitConfigBool(name string, defaultValue bool) bool {
output := gitConfig(name, strconv.FormatBool(defaultValue))
value, err := strconv.ParseBool(output)
if err != nil {
return defaultValue
}
return value
}
func findTag() (*semver.Version, string, error) {
output, err := git("", "tag")
if err != nil {
return nil, "", err
}
if output == "" {
return &semver.Version{}, "", nil
}
output, err = git("", "describe", "--tags", "--abbrev=0")
if err != nil {
return nil, "", err
}
currentTagName := output
if !strings.HasPrefix(output, tagPrefix) {
tagPrefix = ""
}
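// Pad short tags such as "v1" or "v1.2" with ".0" so they parse as full semver versions.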
dotParts := strings.SplitN(output, ".", 3)
for i := 3 - len(dotParts); i > 0; i-- {
output += ".0"
}
tag, err := semver.NewVersion(strings.TrimPrefix(output, tagPrefix))
if err != nil {
return nil, "", err
}
return tag, currentTagName, nil
}
func createTag(tagName, annotation string, sign bool) error {
args := []string{"tag", "-F-"}
if sign {
args = append(args, "--sign")
}
args = append(args, tagName)
return noOutputGit(annotation, args...)
}
func showTag(tagName string) (string, error) {
return git("", "show", tagName)
}
func getChangeLog(tagName string) (string, error) {
stat, _ := os.Stdin.Stat()
if (stat.Mode() & os.ModeCharDevice) == 0 {
output, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return "", err
}
return string(output), nil
}
args := []string{"log", "--pretty=%h %s", "--no-merges"}
if len(tagName) > 0 {
args = append(args, tagName+"..HEAD")
}
output, err := git("", args...)
if err != nil {
return "", err
}
var res []string
for _, line := range strings.Split(output, "\n") {
res = append(res, "* "+line)
}
return strings.Join(res, "\n"), nil
}
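// parseRemote extracts the remote name from a verbose branch listing line,
// e.g. "* master 1a2b3c [origin/master] message" yields "origin".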
func parseRemote(remote string) (string, error) {
for _, part := range strings.Split(remote, " ") {
if strings.HasPrefix(part, "[") {
part = strings.Trim(part, "[]")
names := strings.SplitN(part, "/", 2)
if len(names) != 2 {
return "", fmt.Errorf("cannot determine a remote name: %s", part)
}
return names[0], nil
}
}
return "", fmt.Errorf("remote for the active branch '%s' not found", remote)
}
func getRemote() (string, error) {
output, err := git("", "branch", "--list", "-vv")
if err != nil {
return "", err
}
for _, remote := range strings.Split(output, "\n") {
remote = strings.TrimSpace(remote)
if strings.HasPrefix(remote, "*") {
return parseRemote(remote)
}
}
return defaultRemote, nil
}
func pushTag(remote, tagName string) error {
return noOutputGit("", "push", remote, tagName)
}
func makeAnnotation(changeLog string, tagName string) string {
output := []string{
"Bump version " + tagName,
"",
changeLog,
}
return strings.Join(output, "\n")
}
func createFlag(flagSet *flag.FlagSet, name, short string, value bool, usage string) *bool {
p := flagSet.Bool(name, value, usage)
if len(short) > 0 {
flagSet.BoolVar(p, short, value, usage)
}
return p
}
type bumptagArgs struct {
flagSet *flag.FlagSet
edit *bool
dryRun *bool
silent *bool
autoPush *bool
major *bool
minor *bool
patch *bool
version *bool
findTag *bool
}
func (f *bumptagArgs) usage() {
output := `Usage: bumptag [<tagname>]
<tagname> The name of the tag to create, must be Semantic Versions 2.0.0 (http://semver.org)
-e, --edit Edit an annotation
-r, --dry-run Prints an annotation for the new tag
-s, --silent Do not show the created tag
-a, --auto-push Push the created tag automatically
-m, --major Increment the MAJOR version
-n, --minor Increment the MINOR version (default)
-p, --patch Increment the PATCH version
--version Show a version of the bumptag tool
--find-tag Show the last tag, can be useful for CI tools
The change log is automatically generated from git commits since the previous tag, or can be passed via <stdin>.
fmt.Println(output)
}
func (f *bumptagArgs) parse() error {
f.flagSet.Usage = f.usage
return f.flagSet.Parse(os.Args[1:])
}
func newBumptagArgs() *bumptagArgs {
flagSet := flag.NewFlagSet("Bumptag", flag.ExitOnError)
return &bumptagArgs{
flagSet: flagSet,
edit: createFlag(flagSet, "edit", "e", false, "Edit an annotation"),
dryRun: createFlag(flagSet, "dry-run", "r", false, "Prints an annotation for the new tag"),
silent: createFlag(flagSet, "silent", "s", false, "Do not show the created tag"),
autoPush: createFlag(flagSet, "auto-push", "a", false, "Push the created tag automatically"),
major: createFlag(flagSet, "major", "m", false, "Increment the MAJOR version"),
minor: createFlag(flagSet, "minor", "n", false, "Increment the MINOR version (default)"),
patch: createFlag(flagSet, "patch", "p", false, "Increment the PATCH version"),
version: createFlag(flagSet, "version", "", false, "Show a version of the bumptag tool"),
findTag: createFlag(flagSet, "find-tag", "", false, "Show the latest tag, can be useful for CI tools"),
}
}
func setTag(flagSet *flag.FlagSet, tag *semver.Version, args *bumptagArgs) {
if flagSet.NArg() > 0 {
if err := tag.Set(strings.TrimPrefix(flagSet.Arg(0), tagPrefix)); err != nil {
panic(err)
}
} else {
switch true {
case *args.major:
tag.BumpMajor()
case *args.minor:
tag.BumpMinor()
case *args.patch:
tag.BumpPatch()
default:
tag.BumpMinor()
}
}
}
func panicIfError(err error) {
if err != nil {
panic(err)
}
}
func openEditor(filename string) error {
editor := os.Getenv("EDITOR")
if editor == "" {
editor = defaultEditor
}
executable, err := exec.LookPath(editor)
if err != nil {
return err
}
tty := os.Stdin
stat, _ := tty.Stat()
if (stat.Mode() & os.ModeCharDevice) == 0 {
tty, err = os.Open("/dev/tty")
if err != nil {
return err
}
defer tty.Close()
}
cmd := exec.Command(executable, filename)
cmd.Stdin = tty
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
func edit(annotation string) (string, error) {
file, err := ioutil.TempFile(os.TempDir(), "*")
if err != nil {
return "", err
}
filename := file.Name()
defer os.Remove(filename)
if _, err := file.WriteString(annotation); err != nil {
return "", err
}
if err := file.Close(); err != nil {
return "", err
}
if err := openEditor(filename); err != nil {
return "", err
}
data, err := ioutil.ReadFile(filename)
if err != nil {
return "", err
}
return string(data), nil
}
func main() {
args := newBumptagArgs()
panicIfError(args.parse())
if *args.version {
fmt.Print(version)
return
}
tearDownGPG, err := setUpGPG()
panicIfError(err)
defer tearDownGPG()
tag, currentTagName, err := findTag()
panicIfError(err)
if *args.findTag {
fmt.Print(currentTagName)
return
}
changeLog, err := getChangeLog(currentTagName)
panicIfError(err)
setTag(args.flagSet, tag, args)
tagName := tagPrefix + tag.String()
annotation := makeAnnotation(changeLog, tagName)
if *args.edit {
annotation, err = edit(annotation)
panicIfError(err)
}
if *args.dryRun {
fmt.Println(annotation)
return
}
sign := gitConfigBool("commit.gpgsign", false)
panicIfError(createTag(tagName, annotation, sign))
if *args.autoPush {
remote, err := getRemote()
panicIfError(err)
panicIfError(pushTag(remote, tagName))
if !*args.silent {
fmt.Printf(
"The tag '%s' has been pushed to the remote '%s'",
tagName,
remote,
)
}
}
if !*args.silent {
output, err := showTag(tagName)
panicIfError(err)
fmt.Println(output)
}
}
| [
"\"EDITOR\""
]
| []
| [
"EDITOR"
]
| [] | ["EDITOR"] | go | 1 | 0 | |
pkg/testhelpers/config.go | package testhelpers
import (
"net/http/httptest"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/harrykimpel/newrelic-client-go/pkg/config"
"github.com/harrykimpel/newrelic-client-go/pkg/region"
)
const (
HTTPTimeout = 60 * time.Second // HTTPTimeout increases the timeout for integration tests
LicenseKey = "APMLicenseKey" // LicenseKey used in mock configs
LogLevel = "debug" // LogLevel used in mock configs
PersonalAPIKey = "personalAPIKey" // PersonalAPIKey used in mock configs (from Environment for Integration tests)
UserAgent = "newrelic/newrelic-client-go (automated testing)" // UserAgent used in mock configs
)
// NewTestConfig returns a fully saturated configuration with modified BaseURLs
// for all endpoints based on the test server passed in
func NewTestConfig(t *testing.T, testServer *httptest.Server) config.Config {
cfg := config.New()
// Set some defaults from Testing constants
cfg.LogLevel = LogLevel
cfg.PersonalAPIKey = PersonalAPIKey
cfg.UserAgent = UserAgent
cfg.LicenseKey = LicenseKey
if testServer != nil {
cfg.Region().SetInfrastructureBaseURL(testServer.URL)
cfg.Region().SetNerdGraphBaseURL(testServer.URL)
cfg.Region().SetRestBaseURL(testServer.URL)
cfg.Region().SetSyntheticsBaseURL(testServer.URL)
cfg.Region().SetLogsBaseURL(testServer.URL)
}
return cfg
}
// NewIntegrationTestConfig grabs environment vars for required fields or skips the test.
// returns a fully saturated configuration
func NewIntegrationTestConfig(t *testing.T) config.Config {
envPersonalAPIKey := os.Getenv("NEW_RELIC_API_KEY")
envInsightsInsertKey := os.Getenv("NEW_RELIC_INSIGHTS_INSERT_KEY")
envLicenseKey := os.Getenv("NEW_RELIC_LICENSE_KEY")
envRegion := os.Getenv("NEW_RELIC_REGION")
envLogLevel := os.Getenv("NEW_RELIC_LOG_LEVEL")
if envPersonalAPIKey == "" {
t.Skipf("acceptance testing requires NEW_RELIC_API_KEY")
}
cfg := config.New()
// Set some defaults
if envLogLevel != "" {
cfg.LogLevel = envLogLevel
} else {
cfg.LogLevel = LogLevel
}
cfg.Logger = cfg.GetLogger()
// HTTP Settings
timeout := HTTPTimeout
cfg.Timeout = &timeout
cfg.UserAgent = UserAgent
// Auth
cfg.PersonalAPIKey = envPersonalAPIKey
cfg.InsightsInsertKey = envInsightsInsertKey
cfg.LicenseKey = envLicenseKey
if envRegion != "" {
regName, err := region.Parse(envRegion)
assert.NoError(t, err)
reg, err := region.Get(regName)
assert.NoError(t, err)
err = cfg.SetRegion(reg)
assert.NoError(t, err)
}
return cfg
}
| [
"\"NEW_RELIC_API_KEY\"",
"\"NEW_RELIC_INSIGHTS_INSERT_KEY\"",
"\"NEW_RELIC_LICENSE_KEY\"",
"\"NEW_RELIC_REGION\"",
"\"NEW_RELIC_LOG_LEVEL\""
]
| []
| [
"NEW_RELIC_INSIGHTS_INSERT_KEY",
"NEW_RELIC_API_KEY",
"NEW_RELIC_REGION",
"NEW_RELIC_LICENSE_KEY",
"NEW_RELIC_LOG_LEVEL"
]
| [] | ["NEW_RELIC_INSIGHTS_INSERT_KEY", "NEW_RELIC_API_KEY", "NEW_RELIC_REGION", "NEW_RELIC_LICENSE_KEY", "NEW_RELIC_LOG_LEVEL"] | go | 5 | 0 | |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.0.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm#l3xpc86sb2j6k9m&p($fwat7r^_d_pkwr2rk3i99%8vusqt-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# I needed to commit again
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'user.apps.UserConfig',
'core.apps.CoreConfig',
'recipe.apps.RecipeConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/vol/web/media'
STATIC_ROOT = '/vol/web/static'
AUTH_USER_MODEL = 'core.User'
| []
| []
| [
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
]
| [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 | |
go/src/koding/workers/cmd/tunnelproxymanager/main.go | package main
import (
"fmt"
"koding/workers/tunnelproxymanager"
"log"
"os"
"os/signal"
"syscall"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
awssession "github.com/aws/aws-sdk-go/aws/session"
"github.com/koding/asgd"
"github.com/koding/logging"
"github.com/koding/multiconfig"
)
const Name = "tunnelproxymanager"
func main() {
c, err := configure()
if err != nil {
log.Fatal("Reading config failed: ", err.Error())
}
conf := &asgd.Config{
Name: fmt.Sprintf("%s-%s", "tunnelproxymanager", c.EBEnvName),
AccessKeyID: c.AccessKeyID,
SecretAccessKey: c.SecretAccessKey,
Region: c.Region,
AutoScalingName: c.AutoScalingName,
Debug: c.Debug,
}
session, err := asgd.Configure(conf)
if err != nil {
log.Fatal("Reading config failed: ", err.Error())
}
log := logging.NewCustom(Name, conf.Debug)
// remove formatting from call stack and output correct line
log.SetCallDepth(1)
route53Session := awssession.New(&aws.Config{
Credentials: credentials.NewStaticCredentials(
c.Route53AccessKeyID,
c.Route53SecretAccessKey,
"",
),
Region: aws.String(conf.Region),
MaxRetries: aws.Int(5),
})
// create record manager
recordManager := tunnelproxymanager.NewRecordManager(route53Session, log, conf.Region, c.HostedZone)
if err := recordManager.Init(); err != nil {
log.Fatal(err.Error())
}
// create lifecycle
l := asgd.NewLifeCycle(session, log, conf.AutoScalingName)
// configure lifecycle with system name
if err := l.Configure(conf.Name); err != nil {
log.Fatal(err.Error())
}
done := registerSignalHandler(l, log)
// listen to lifecycle events
if err := l.Listen(recordManager.ProcessFunc); err != nil {
log.Fatal(err.Error())
}
<-done
}
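// registerSignalHandler closes the lifecycle listener and signals completion once the process
// receives a termination signal.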
func registerSignalHandler(l *asgd.LifeCycle, log logging.Logger) chan struct{} {
done := make(chan struct{}, 1)
go func() {
signals := make(chan os.Signal, 1)
signal.Notify(signals)
signal := <-signals
switch signal {
case syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGSTOP, syscall.SIGKILL:
log.Info("recieved exit signal, closing...")
err := l.Close()
if err != nil {
log.Critical(err.Error())
}
close(done)
}
}()
return done
}
func configure() (*tunnelproxymanager.Config, error) {
c := &tunnelproxymanager.Config{}
mc := multiconfig.New()
mc.Loader = multiconfig.MultiLoader(
&multiconfig.TagLoader{},
&multiconfig.EnvironmentLoader{},
&multiconfig.EnvironmentLoader{Prefix: "KONFIG_TUNNELPROXYMANAGER"},
&multiconfig.FlagLoader{},
)
mc.MustLoad(c)
// decide on eb env name
ebEnvName, err := getEBEnvName(c)
if err != nil {
return nil, err
}
c.EBEnvName = ebEnvName
return c, nil
}
// getEBEnvName checks if the Elastic Beanstalk environment name is given in config, if not tries
// to get it from the EB_ENV_NAME environment variable
func getEBEnvName(conf *tunnelproxymanager.Config) (string, error) {
if conf.EBEnvName != "" {
return conf.EBEnvName, nil
}
// get EB_ENV_NAME param
ebEnvName := os.Getenv("EB_ENV_NAME")
if ebEnvName == "" {
return "", fmt.Errorf("EB_ENV_NAME can not be empty")
}
return ebEnvName, nil
}
| [
"\"EB_ENV_NAME\""
]
| []
| [
"EB_ENV_NAME"
]
| [] | ["EB_ENV_NAME"] | go | 1 | 0 | |
pkg/http/handles/handles_test.go | package handles
import (
"fmt"
"io/ioutil"
"net/http/httptest"
"os"
"strings"
"testing"
)
func Test_RootGET(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
w := httptest.NewRecorder()
BaseRoot(w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
fmt.Println(resp.StatusCode)
fmt.Println(resp.Header.Get("Content-Type"))
fmt.Println(string(body))
if !strings.Contains(string(body), "version:") {
t.Fatalf("GET on root failed")
}
}
func Test_RootPUT(t *testing.T) {
req := httptest.NewRequest("PUT", "/", nil)
w := httptest.NewRecorder()
BaseRoot(w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
fmt.Println(resp.StatusCode)
fmt.Println(resp.Header.Get("Content-Type"))
fmt.Println(string(body))
if resp.StatusCode != 200 {
t.Fatalf("PUT is causing error")
}
}
func Test_RootPOST(t *testing.T) {
req := httptest.NewRequest("POST", "/", nil)
w := httptest.NewRecorder()
BaseRoot(w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
fmt.Println(resp.StatusCode)
fmt.Println(resp.Header.Get("Content-Type"))
fmt.Println(string(body))
if string(body) != "post" {
t.Log(string(body))
t.Fatalf("post response not what expected")
}
}
func Test_SubScribe(t *testing.T) {
os.Setenv("PUBSUBTOKEN", "1a3a")
token := os.Getenv("PUBSUBTOKEN")
req := httptest.NewRequest("GET", "/subscript?key="+token, nil)
w := httptest.NewRecorder()
Subscript(w, req)
resp := w.Result()
body, _ := ioutil.ReadAll(resp.Body)
fmt.Println(resp.StatusCode)
fmt.Println(resp.Header.Get("Content-Type"))
fmt.Println(string(body))
if !strings.Contains(string(body), "bad key") {
t.Fatalf("GET on SubScript failed")
}
}
| [
"\"PUBSUBTOKEN\""
]
| []
| [
"PUBSUBTOKEN"
]
| [] | ["PUBSUBTOKEN"] | go | 1 | 0 | |
OcCo_Torch/train_svm.py | # Copyright (c) 2020. Hanchen Wang, [email protected]
# Ref: https://scikit-learn.org/stable/modules/svm.html
# Ref: https://scikit-learn.org/stable/modules/classes.html#module-sklearn.model_selection
import os, sys, torch, argparse, datetime, importlib, numpy as np
sys.path.append('utils')
sys.path.append('models')
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from ModelNetDataLoader import General_CLSDataLoader_HDF5
from Torch_Utility import copy_parameters
# from sklearn.preprocessing import scale
from torch.utils.data import DataLoader
from Dataset_Loc import Dataset_Loc
from sklearn import svm, metrics
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser('SVM on Point Cloud Classification')
''' === Network Model === '''
parser.add_argument('--gpu', type=str, default='0', help='GPU [default: 0]')
parser.add_argument('--model', default='pcn_util', help='model [default: pcn_util]')
parser.add_argument('--batch_size', type=int, default=24, help='batch size [default: 24]')
parser.add_argument('--restore_path', type=str, help="path to pre-trained weights [default: None]")
parser.add_argument('--grid_search', action='store_true', help='opt parameters via Grid Search [default: False]')
''' === Dataset === '''
parser.add_argument('--partial', action='store_true', help='partial objects [default: False]')
parser.add_argument('--bn', action='store_true', help='with background noise [default: False]')
parser.add_argument('--dataset', type=str, default='modelnet40', help='dataset [default: modelnet40]')
parser.add_argument('--fname', type=str, default="", help='filename, used in ScanObjectNN [default: ]')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
_, TRAIN_FILES, TEST_FILES = Dataset_Loc(dataset=args.dataset, fname=args.fname,
partial=args.partial, bn=args.bn)
TRAIN_DATASET = General_CLSDataLoader_HDF5(file_list=TRAIN_FILES)
TEST_DATASET = General_CLSDataLoader_HDF5(file_list=TEST_FILES)
trainDataLoader = DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)
    testDataLoader = DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)
MODEL = importlib.import_module(args.model)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder = MODEL.encoder(args=args, num_channel=3).to(device)
encoder = torch.nn.DataParallel(encoder)
checkpoint = torch.load(args.restore_path)
encoder = copy_parameters(encoder, checkpoint, verbose=True)
X_train, y_train, X_test, y_test = [], [], [], []
with torch.no_grad():
encoder.eval()
for points, target in tqdm(trainDataLoader, total=len(trainDataLoader), smoothing=0.9):
points, target = points.float().transpose(2, 1).cuda(), target.long().cuda()
feats = encoder(points)
X_train.append(feats.cpu().numpy())
y_train.append(target.cpu().numpy())
for points, target in tqdm(testDataLoader, total=len(testDataLoader), smoothing=0.9):
points, target = points.float().transpose(2, 1).cuda(), target.long().cuda()
feats = encoder(points)
X_test.append(feats.cpu().numpy())
y_test.append(target.cpu().numpy())
X_train, y_train = np.concatenate(X_train), np.concatenate(y_train)
X_test, y_test = np.concatenate(X_test), np.concatenate(y_test)
# Optional: Standardize the Feature Space
# X_train, X_test = scale(X_train), scale(X_test)
''' === Simple Trial === '''
linear_svm = svm.SVC(kernel='linear')
linear_svm.fit(X_train, y_train)
y_pred = linear_svm.predict(X_test)
print("\n", "Simple Linear SVC accuracy:", metrics.accuracy_score(y_test, y_pred), "\n")
rbf_svm = svm.SVC(kernel='rbf')
rbf_svm.fit(X_train, y_train)
y_pred = rbf_svm.predict(X_test)
print("Simple RBF SVC accuracy:", metrics.accuracy_score(y_test, y_pred), "\n")
''' === Grid Search for SVM with RBF Kernel === '''
if not args.grid_search:
sys.exit()
print("Now we use Grid Search to opt the parameters for SVM RBF kernel")
# [1e-3, 5e-3, 1e-2, ..., 5e1]
gamma_range = np.outer(np.logspace(-3, 1, 5), np.array([1, 5])).flatten()
# [1e-1, 5e-1, 1e0, ..., 5e1]
C_range = np.outer(np.logspace(-1, 1, 3), np.array([1, 5])).flatten()
parameters = {'kernel': ['rbf'], 'C': C_range, 'gamma': gamma_range}
svm_clsf = svm.SVC()
grid_clsf = GridSearchCV(estimator=svm_clsf, param_grid=parameters, n_jobs=8, verbose=1)
start_time = datetime.datetime.now()
print('Start Param Searching at {}'.format(str(start_time)))
grid_clsf.fit(X_train, y_train)
print('Elapsed time, param searching {}'.format(str(datetime.datetime.now() - start_time)))
sorted(grid_clsf.cv_results_.keys())
# scores = grid_clsf.cv_results_['mean_test_score'].reshape(len(C_range), len(gamma_range))
y_pred = grid_clsf.best_estimator_.predict(X_test)
print("\n\n")
print("="*37)
print("Best Params via Grid Search Cross Validation on Train Split is: ", grid_clsf.best_params_)
print("Best Model's Accuracy on Test Dataset: {}".format(metrics.accuracy_score(y_test, y_pred)))
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
HackerRank/Problem Solving/Algorithms/CycleDetection.java | /*
Available at: https://www.hackerrank.com/challenges/detect-whether-a-linked-list-contains-a-cycle/problem
@author unobatbayar
*/
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
static class SinglyLinkedListNode {
public int data;
public SinglyLinkedListNode next;
public SinglyLinkedListNode(int nodeData) {
this.data = nodeData;
this.next = null;
}
}
static class SinglyLinkedList {
public SinglyLinkedListNode head;
public SinglyLinkedListNode tail;
public SinglyLinkedList() {
this.head = null;
this.tail = null;
}
public void insertNode(int nodeData) {
SinglyLinkedListNode node = new SinglyLinkedListNode(nodeData);
if (this.head == null) {
this.head = node;
} else {
this.tail.next = node;
}
this.tail = node;
}
}
public static void printSinglyLinkedList(SinglyLinkedListNode node, String sep, BufferedWriter bufferedWriter) throws IOException {
while (node != null) {
bufferedWriter.write(String.valueOf(node.data));
node = node.next;
if (node != null) {
bufferedWriter.write(sep);
}
}
}
// Complete the hasCycle function below.
/*
* For your reference:
*
* SinglyLinkedListNode {
* int data;
* SinglyLinkedListNode next;
* }
*
*/
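    // Floyd's tortoise-and-hare: advance one pointer by one node and another by
    // two. If the list contains a cycle the fast pointer eventually meets the
    // slow one; otherwise the fast pointer runs off the end of the list.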
static boolean hasCycle(SinglyLinkedListNode head) {
SinglyLinkedListNode tortoise = head;
SinglyLinkedListNode hare = head;
while(hare != null && hare.next != null){
tortoise = tortoise.next;
hare = hare.next.next;
if(tortoise == hare) return true;
}
return false;
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int tests = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int testsItr = 0; testsItr < tests; testsItr++) {
int index = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
SinglyLinkedList llist = new SinglyLinkedList();
int llistCount = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < llistCount; i++) {
int llistItem = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
llist.insertNode(llistItem);
}
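            // Build the cycle described by `index`: the tail is linked back to the
            // node at that position. If `index` is out of range, the tail points to
            // the detached sentinel node instead, so the list stays acyclic.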
SinglyLinkedListNode extra = new SinglyLinkedListNode(-1);
SinglyLinkedListNode temp = llist.head;
for (int i = 0; i < llistCount; i++) {
if (i == index) {
extra = temp;
}
if (i != llistCount-1) {
temp = temp.next;
}
}
temp.next = extra;
boolean result = hasCycle(llist.head);
bufferedWriter.write(String.valueOf(result ? 1 : 0));
bufferedWriter.newLine();
}
bufferedWriter.close();
scanner.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
django/test/runner.py | import ctypes
import itertools
import logging
import multiprocessing
import os
import pickle
import textwrap
import unittest
from importlib import import_module
from io import StringIO
from django.core.management import call_command
from django.db import connections
from django.test import SimpleTestCase, TestCase
from django.test.utils import (
setup_databases as _setup_databases, setup_test_environment,
teardown_databases as _teardown_databases, teardown_test_environment,
)
from django.utils.datastructures import OrderedSet
from django.utils.version import PY37
try:
import ipdb as pdb
except ImportError:
import pdb
try:
import tblib.pickling_support
except ImportError:
tblib = None
class DebugSQLTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
self.logger = logging.getLogger('django.db.backends')
self.logger.setLevel(logging.DEBUG)
super().__init__(stream, descriptions, verbosity)
def startTest(self, test):
self.debug_sql_stream = StringIO()
self.handler = logging.StreamHandler(self.debug_sql_stream)
self.logger.addHandler(self.handler)
super().startTest(test)
def stopTest(self, test):
super().stopTest(test)
self.logger.removeHandler(self.handler)
if self.showAll:
self.debug_sql_stream.seek(0)
self.stream.write(self.debug_sql_stream.read())
self.stream.writeln(self.separator2)
def addError(self, test, err):
super().addError(test, err)
self.debug_sql_stream.seek(0)
self.errors[-1] = self.errors[-1] + (self.debug_sql_stream.read(),)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug_sql_stream.seek(0)
self.failures[-1] = self.failures[-1] + (self.debug_sql_stream.read(),)
def addSubTest(self, test, subtest, err):
super().addSubTest(test, subtest, err)
if err is not None:
self.debug_sql_stream.seek(0)
errors = self.failures if issubclass(err[0], test.failureException) else self.errors
errors[-1] = errors[-1] + (self.debug_sql_stream.read(),)
def printErrorList(self, flavour, errors):
for test, err, sql_debug in errors:
self.stream.writeln(self.separator1)
self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
self.stream.writeln(self.separator2)
self.stream.writeln(err)
self.stream.writeln(self.separator2)
self.stream.writeln(sql_debug)
class PDBDebugResult(unittest.TextTestResult):
"""
Custom result class that triggers a PDB session when an error or failure
occurs.
"""
def addError(self, test, err):
super().addError(test, err)
self.debug(err)
def addFailure(self, test, err):
super().addFailure(test, err)
self.debug(err)
def debug(self, error):
exc_type, exc_value, traceback = error
print("\nOpening PDB: %r" % exc_value)
pdb.post_mortem(traceback)
class RemoteTestResult:
"""
Record information about which tests have succeeded and which have failed.
The sole purpose of this class is to record events in the child processes
so they can be replayed in the master process. As a consequence it doesn't
inherit unittest.TestResult and doesn't attempt to implement all its API.
The implementation matches the unpythonic coding style of unittest2.
"""
def __init__(self):
if tblib is not None:
tblib.pickling_support.install()
self.events = []
self.failfast = False
self.shouldStop = False
self.testsRun = 0
@property
def test_index(self):
return self.testsRun - 1
def _confirm_picklable(self, obj):
"""
Confirm that obj can be pickled and unpickled as multiprocessing will
need to pickle the exception in the child process and unpickle it in
the parent process. Let the exception rise, if not.
"""
pickle.loads(pickle.dumps(obj))
def _print_unpicklable_subtest(self, test, subtest, pickle_exc):
print("""
Subtest failed:
test: {}
subtest: {}
Unfortunately, the subtest that failed cannot be pickled, so the parallel
test runner cannot handle it cleanly. Here is the pickling error:
> {}
You should re-run this test with --parallel=1 to reproduce the failure
with a cleaner failure message.
""".format(test, subtest, pickle_exc))
def check_picklable(self, test, err):
# Ensure that sys.exc_info() tuples are picklable. This displays a
# clear multiprocessing.pool.RemoteTraceback generated in the child
# process instead of a multiprocessing.pool.MaybeEncodingError, making
# the root cause easier to figure out for users who aren't familiar
# with the multiprocessing module. Since we're in a forked process,
# our best chance to communicate with them is to print to stdout.
try:
self._confirm_picklable(err)
except Exception as exc:
original_exc_txt = repr(err[1])
original_exc_txt = textwrap.fill(original_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
pickle_exc_txt = repr(exc)
pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75, initial_indent=' ', subsequent_indent=' ')
if tblib is None:
print("""
{} failed:
{}
Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.
In order to see the traceback, you should install tblib:
python -m pip install tblib
""".format(test, original_exc_txt))
else:
print("""
{} failed:
{}
Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.
Here's the error encountered while trying to pickle the exception:
{}
You should re-run this test with the --parallel=1 option to reproduce the
failure and get a correct traceback.
""".format(test, original_exc_txt, pickle_exc_txt))
raise
def check_subtest_picklable(self, test, subtest):
try:
self._confirm_picklable(subtest)
except Exception as exc:
self._print_unpicklable_subtest(test, subtest, exc)
raise
def stop_if_failfast(self):
if self.failfast:
self.stop()
def stop(self):
self.shouldStop = True
def startTestRun(self):
self.events.append(('startTestRun',))
def stopTestRun(self):
self.events.append(('stopTestRun',))
def startTest(self, test):
self.testsRun += 1
self.events.append(('startTest', self.test_index))
def stopTest(self, test):
self.events.append(('stopTest', self.test_index))
def addError(self, test, err):
self.check_picklable(test, err)
self.events.append(('addError', self.test_index, err))
self.stop_if_failfast()
def addFailure(self, test, err):
self.check_picklable(test, err)
self.events.append(('addFailure', self.test_index, err))
self.stop_if_failfast()
def addSubTest(self, test, subtest, err):
# Follow Python 3.5's implementation of unittest.TestResult.addSubTest()
# by not doing anything when a subtest is successful.
if err is not None:
# Call check_picklable() before check_subtest_picklable() since
# check_picklable() performs the tblib check.
self.check_picklable(test, err)
self.check_subtest_picklable(test, subtest)
self.events.append(('addSubTest', self.test_index, subtest, err))
self.stop_if_failfast()
def addSuccess(self, test):
self.events.append(('addSuccess', self.test_index))
def addSkip(self, test, reason):
self.events.append(('addSkip', self.test_index, reason))
def addExpectedFailure(self, test, err):
# If tblib isn't installed, pickling the traceback will always fail.
# However we don't want tblib to be required for running the tests
# when they pass or fail as expected. Drop the traceback when an
# expected failure occurs.
if tblib is None:
err = err[0], err[1], None
self.check_picklable(test, err)
self.events.append(('addExpectedFailure', self.test_index, err))
def addUnexpectedSuccess(self, test):
self.events.append(('addUnexpectedSuccess', self.test_index))
self.stop_if_failfast()
class RemoteTestRunner:
"""
Run tests and record everything but don't display anything.
The implementation matches the unpythonic coding style of unittest2.
"""
resultclass = RemoteTestResult
def __init__(self, failfast=False, resultclass=None):
self.failfast = failfast
if resultclass is not None:
self.resultclass = resultclass
def run(self, test):
result = self.resultclass()
unittest.registerResult(result)
result.failfast = self.failfast
test(result)
return result
def default_test_processes():
"""Default number of test processes when using the --parallel option."""
# The current implementation of the parallel test runner requires
# multiprocessing to start subprocesses with fork().
if multiprocessing.get_start_method() != 'fork':
return 1
try:
return int(os.environ['DJANGO_TEST_PROCESSES'])
except KeyError:
return multiprocessing.cpu_count()
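# Example: with DJANGO_TEST_PROCESSES=4 set in the environment,
# default_test_processes() returns 4, which is what a bare --parallel flag
# defaults to (provided multiprocessing uses the fork start method).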
_worker_id = 0
def _init_worker(counter):
"""
Switch to databases dedicated to this worker.
This helper lives at module-level because of the multiprocessing module's
requirements.
"""
global _worker_id
with counter.get_lock():
counter.value += 1
_worker_id = counter.value
for alias in connections:
connection = connections[alias]
settings_dict = connection.creation.get_test_db_clone_settings(str(_worker_id))
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. If the following line assigned
# connection.settings_dict = settings_dict, new threads would connect
# to the default database instead of the appropriate clone.
connection.settings_dict.update(settings_dict)
connection.close()
def _run_subsuite(args):
"""
Run a suite of tests with a RemoteTestRunner and return a RemoteTestResult.
This helper lives at module-level and its arguments are wrapped in a tuple
because of the multiprocessing module's requirements.
"""
runner_class, subsuite_index, subsuite, failfast = args
runner = runner_class(failfast=failfast)
result = runner.run(subsuite)
return subsuite_index, result.events
class ParallelTestSuite(unittest.TestSuite):
"""
Run a series of tests in parallel in several processes.
While the unittest module's documentation implies that orchestrating the
execution of tests is the responsibility of the test runner, in practice,
it appears that TestRunner classes are more concerned with formatting and
displaying test results.
Since there are fewer use cases for customizing TestSuite than TestRunner,
implementing parallelization at the level of the TestSuite improves
interoperability with existing custom test runners. A single instance of a
test runner can still collect results from all tests without being aware
that they have been run in parallel.
"""
# In case someone wants to modify these in a subclass.
init_worker = _init_worker
run_subsuite = _run_subsuite
runner_class = RemoteTestRunner
def __init__(self, suite, processes, failfast=False):
self.subsuites = partition_suite_by_case(suite)
self.processes = processes
self.failfast = failfast
super().__init__()
def run(self, result):
"""
Distribute test cases across workers.
Return an identifier of each test case with its result in order to use
imap_unordered to show results as soon as they're available.
To minimize pickling errors when getting results from workers:
- pass back numeric indexes in self.subsuites instead of tests
- make tracebacks picklable with tblib, if available
Even with tblib, errors may still occur for dynamically created
exception classes which cannot be unpickled.
"""
counter = multiprocessing.Value(ctypes.c_int, 0)
pool = multiprocessing.Pool(
processes=self.processes,
initializer=self.init_worker.__func__,
initargs=[counter],
)
args = [
(self.runner_class, index, subsuite, self.failfast)
for index, subsuite in enumerate(self.subsuites)
]
test_results = pool.imap_unordered(self.run_subsuite.__func__, args)
while True:
if result.shouldStop:
pool.terminate()
break
try:
subsuite_index, events = test_results.next(timeout=0.1)
except multiprocessing.TimeoutError:
continue
except StopIteration:
pool.close()
break
tests = list(self.subsuites[subsuite_index])
for event in events:
event_name = event[0]
handler = getattr(result, event_name, None)
if handler is None:
continue
test = tests[event[1]]
args = event[2:]
handler(test, *args)
pool.join()
return result
def __iter__(self):
return iter(self.subsuites)
class DiscoverRunner:
"""A Django test runner that uses unittest2 test discovery."""
test_suite = unittest.TestSuite
parallel_test_suite = ParallelTestSuite
test_runner = unittest.TextTestRunner
test_loader = unittest.defaultTestLoader
reorder_by = (TestCase, SimpleTestCase)
def __init__(self, pattern=None, top_level=None, verbosity=1,
interactive=True, failfast=False, keepdb=False,
reverse=False, debug_mode=False, debug_sql=False, parallel=0,
tags=None, exclude_tags=None, test_name_patterns=None,
pdb=False, buffer=False, **kwargs):
self.pattern = pattern
self.top_level = top_level
self.verbosity = verbosity
self.interactive = interactive
self.failfast = failfast
self.keepdb = keepdb
self.reverse = reverse
self.debug_mode = debug_mode
self.debug_sql = debug_sql
self.parallel = parallel
self.tags = set(tags or [])
self.exclude_tags = set(exclude_tags or [])
self.pdb = pdb
if self.pdb and self.parallel > 1:
raise ValueError('You cannot use --pdb with parallel tests; pass --parallel=1 to use it.')
self.buffer = buffer
if self.buffer and self.parallel > 1:
raise ValueError(
'You cannot use -b/--buffer with parallel tests; pass '
'--parallel=1 to use it.'
)
self.test_name_patterns = None
if test_name_patterns:
# unittest does not export the _convert_select_pattern function
# that converts command-line arguments to patterns.
self.test_name_patterns = {
pattern if '*' in pattern else '*%s*' % pattern
for pattern in test_name_patterns
}
@classmethod
def add_arguments(cls, parser):
parser.add_argument(
'-t', '--top-level-directory', dest='top_level',
help='Top level of project for unittest discovery.',
)
parser.add_argument(
'-p', '--pattern', default="test*.py",
help='The test matching pattern. Defaults to test*.py.',
)
parser.add_argument(
'--keepdb', action='store_true',
help='Preserves the test DB between runs.'
)
parser.add_argument(
'-r', '--reverse', action='store_true',
help='Reverses test cases order.',
)
parser.add_argument(
'--debug-mode', action='store_true',
help='Sets settings.DEBUG to True.',
)
parser.add_argument(
'-d', '--debug-sql', action='store_true',
help='Prints logged SQL queries on failure.',
)
parser.add_argument(
'--parallel', nargs='?', default=1, type=int,
const=default_test_processes(), metavar='N',
help='Run tests using up to N parallel processes.',
)
parser.add_argument(
'--tag', action='append', dest='tags',
help='Run only tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--exclude-tag', action='append', dest='exclude_tags',
help='Do not run tests with the specified tag. Can be used multiple times.',
)
parser.add_argument(
'--pdb', action='store_true',
help='Runs a debugger (pdb, or ipdb if installed) on error or failure.'
)
parser.add_argument(
'-b', '--buffer', action='store_true',
help='Discard output from passing tests.',
)
if PY37:
parser.add_argument(
'-k', action='append', dest='test_name_patterns',
help=(
'Only run test methods and classes that match the pattern '
'or substring. Can be used multiple times. Same as '
'unittest -k option.'
),
)
def setup_test_environment(self, **kwargs):
setup_test_environment(debug=self.debug_mode)
unittest.installHandler()
def build_suite(self, test_labels=None, extra_tests=None, **kwargs):
suite = self.test_suite()
test_labels = test_labels or ['.']
extra_tests = extra_tests or []
self.test_loader.testNamePatterns = self.test_name_patterns
discover_kwargs = {}
if self.pattern is not None:
discover_kwargs['pattern'] = self.pattern
if self.top_level is not None:
discover_kwargs['top_level_dir'] = self.top_level
for label in test_labels:
kwargs = discover_kwargs.copy()
tests = None
label_as_path = os.path.abspath(label)
# if a module, or "module.ClassName[.method_name]", just run those
if not os.path.exists(label_as_path):
tests = self.test_loader.loadTestsFromName(label)
elif os.path.isdir(label_as_path) and not self.top_level:
# Try to be a bit smarter than unittest about finding the
# default top-level for a given directory path, to avoid
# breaking relative imports. (Unittest's default is to set
# top-level equal to the path, which means relative imports
# will result in "Attempted relative import in non-package.").
# We'd be happy to skip this and require dotted module paths
# (which don't cause this problem) instead of file paths (which
# do), but in the case of a directory in the cwd, which would
# be equally valid if considered as a top-level module or as a
# directory path, unittest unfortunately prefers the latter.
top_level = label_as_path
while True:
init_py = os.path.join(top_level, '__init__.py')
if os.path.exists(init_py):
try_next = os.path.dirname(top_level)
if try_next == top_level:
# __init__.py all the way down? give up.
break
top_level = try_next
continue
break
kwargs['top_level_dir'] = top_level
if not (tests and tests.countTestCases()) and is_discoverable(label):
# Try discovery if path is a package or directory
tests = self.test_loader.discover(start_dir=label, **kwargs)
# Make unittest forget the top-level dir it calculated from this
# run, to support running tests from two different top-levels.
self.test_loader._top_level_dir = None
suite.addTests(tests)
for test in extra_tests:
suite.addTest(test)
if self.tags or self.exclude_tags:
if self.verbosity >= 2:
if self.tags:
print('Including test tag(s): %s.' % ', '.join(sorted(self.tags)))
if self.exclude_tags:
print('Excluding test tag(s): %s.' % ', '.join(sorted(self.exclude_tags)))
suite = filter_tests_by_tags(suite, self.tags, self.exclude_tags)
suite = reorder_suite(suite, self.reorder_by, self.reverse)
if self.parallel > 1:
parallel_suite = self.parallel_test_suite(suite, self.parallel, self.failfast)
# Since tests are distributed across processes on a per-TestCase
# basis, there's no need for more processes than TestCases.
parallel_units = len(parallel_suite.subsuites)
self.parallel = min(self.parallel, parallel_units)
# If there's only one TestCase, parallelization isn't needed.
if self.parallel > 1:
suite = parallel_suite
return suite
def setup_databases(self, **kwargs):
return _setup_databases(
self.verbosity, self.interactive, self.keepdb, self.debug_sql,
self.parallel, **kwargs
)
def get_resultclass(self):
if self.debug_sql:
return DebugSQLTextTestResult
elif self.pdb:
return PDBDebugResult
def get_test_runner_kwargs(self):
return {
'failfast': self.failfast,
'resultclass': self.get_resultclass(),
'verbosity': self.verbosity,
'buffer': self.buffer,
}
def run_checks(self):
# Checks are run after database creation since some checks require
# database access.
call_command('check', verbosity=self.verbosity)
def run_suite(self, suite, **kwargs):
kwargs = self.get_test_runner_kwargs()
runner = self.test_runner(**kwargs)
return runner.run(suite)
def teardown_databases(self, old_config, **kwargs):
"""Destroy all the non-mirror databases."""
_teardown_databases(
old_config,
verbosity=self.verbosity,
parallel=self.parallel,
keepdb=self.keepdb,
)
def teardown_test_environment(self, **kwargs):
unittest.removeHandler()
teardown_test_environment()
def suite_result(self, suite, result, **kwargs):
return len(result.failures) + len(result.errors)
def _get_databases(self, suite):
databases = set()
for test in suite:
if isinstance(test, unittest.TestCase):
test_databases = getattr(test, 'databases', None)
if test_databases == '__all__':
return set(connections)
if test_databases:
databases.update(test_databases)
else:
databases.update(self._get_databases(test))
return databases
def get_databases(self, suite):
databases = self._get_databases(suite)
if self.verbosity >= 2:
unused_databases = [alias for alias in connections if alias not in databases]
if unused_databases:
print('Skipping setup of unused database(s): %s.' % ', '.join(sorted(unused_databases)))
return databases
def run_tests(self, test_labels, extra_tests=None, **kwargs):
"""
Run the unit tests for all the test labels in the provided list.
Test labels should be dotted Python paths to test modules, test
classes, or test methods.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Return the number of tests that failed.
"""
self.setup_test_environment()
suite = self.build_suite(test_labels, extra_tests)
databases = self.get_databases(suite)
old_config = self.setup_databases(aliases=databases)
run_failed = False
try:
self.run_checks()
result = self.run_suite(suite)
except Exception:
run_failed = True
raise
finally:
try:
self.teardown_databases(old_config)
self.teardown_test_environment()
except Exception:
# Silence teardown exceptions if an exception was raised during
# runs to avoid shadowing it.
if not run_failed:
raise
return self.suite_result(suite, result)
def is_discoverable(label):
"""
Check if a test label points to a Python package or file directory.
Relative labels like "." and ".." are seen as directories.
"""
try:
mod = import_module(label)
except (ImportError, TypeError):
pass
else:
return hasattr(mod, '__path__')
return os.path.isdir(os.path.abspath(label))
def reorder_suite(suite, classes, reverse=False):
"""
Reorder a test suite by test type.
`classes` is a sequence of types
All tests of type classes[0] are placed first, then tests of type
classes[1], etc. Tests with no match in classes are placed last.
If `reverse` is True, sort tests within classes in opposite order but
don't reverse test classes.
"""
class_count = len(classes)
suite_class = type(suite)
bins = [OrderedSet() for i in range(class_count + 1)]
partition_suite_by_type(suite, classes, bins, reverse=reverse)
reordered_suite = suite_class()
for i in range(class_count + 1):
reordered_suite.addTests(bins[i])
return reordered_suite
def partition_suite_by_type(suite, classes, bins, reverse=False):
"""
Partition a test suite by test type. Also prevent duplicated tests.
classes is a sequence of types
bins is a sequence of TestSuites, one more than classes
reverse changes the ordering of tests within bins
Tests of type classes[i] are added to bins[i],
    tests with no match found in classes are placed in bins[-1]
"""
suite_class = type(suite)
if reverse:
suite = reversed(tuple(suite))
for test in suite:
if isinstance(test, suite_class):
partition_suite_by_type(test, classes, bins, reverse=reverse)
else:
for i in range(len(classes)):
if isinstance(test, classes[i]):
bins[i].add(test)
break
else:
bins[-1].add(test)
def partition_suite_by_case(suite):
"""Partition a test suite by test case, preserving the order of tests."""
groups = []
suite_class = type(suite)
for test_type, test_group in itertools.groupby(suite, type):
if issubclass(test_type, unittest.TestCase):
groups.append(suite_class(test_group))
else:
for item in test_group:
groups.extend(partition_suite_by_case(item))
return groups
def filter_tests_by_tags(suite, tags, exclude_tags):
suite_class = type(suite)
filtered_suite = suite_class()
for test in suite:
if isinstance(test, suite_class):
filtered_suite.addTests(filter_tests_by_tags(test, tags, exclude_tags))
else:
test_tags = set(getattr(test, 'tags', set()))
test_fn_name = getattr(test, '_testMethodName', str(test))
test_fn = getattr(test, test_fn_name, test)
test_fn_tags = set(getattr(test_fn, 'tags', set()))
all_tags = test_tags.union(test_fn_tags)
matched_tags = all_tags.intersection(tags)
if (matched_tags or not tags) and not all_tags.intersection(exclude_tags):
filtered_suite.addTest(test)
return filtered_suite
| []
| []
| [
"DJANGO_TEST_PROCESSES"
]
| [] | ["DJANGO_TEST_PROCESSES"] | python | 1 | 0 | |
modules/tools/prediction/data_pipelines/cruiseMLP_train.py | ###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
@requirement:
pytorch 0.4.1
"""
import os
import h5py
import numpy as np
import logging
import argparse
import proto.cruise_model_pb2
from proto.cruise_model_pb2 import TensorParameter, InputParameter,\
Conv1dParameter, DenseParameter, ActivationParameter, MaxPool1dParameter,\
AvgPool1dParameter, LaneFeatureConvParameter, ObsFeatureFCParameter,\
ClassifyParameter, RegressParameter, CruiseModelParameter
from cruise_models import FullyConn_NN, FCNN_CNN1D
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader, sampler
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from common.configure import parameters
# TODO(panjiacheng): the data-loader part needs to be modified.
# Constants
dim_input = parameters['cruise_mlp']['dim_input']
dim_hidden_1 = parameters['cruise_mlp']['dim_hidden_1']
dim_hidden_2 = parameters['cruise_mlp']['dim_hidden_2']
dim_output = parameters['cruise_mlp']['dim_output']
# Setup
cuda_is_available = torch.cuda.is_available()
logging.basicConfig(filename='training.log', level=logging.INFO)
def load_Conv1dParameter(model, key, stride=1):
model_pb = Conv1dParameter()
model_pb.shape.extend(list(model.state_dict()[key+'.weight'].shape))
model_pb.use_bias = True
kernel_param = TensorParameter()
kernel_param.shape.extend(list(model.state_dict()[key+'.weight'].shape))
kernel_param.data.extend(
list(model.state_dict()[key+'.weight'].numpy().reshape(-1)))
model_pb.kernel.CopyFrom(kernel_param)
bias_param = TensorParameter()
bias_param.shape.extend(list(model.state_dict()[key+'.bias'].shape))
bias_param.data.extend(
list(model.state_dict()[key+'.bias'].numpy().reshape(-1)))
model_pb.bias.CopyFrom(bias_param)
model_pb.stride = stride
return model_pb
def load_DenseParameter(model, key):
model_pb = DenseParameter()
model_pb.use_bias = True
weights_param = TensorParameter()
weights_param.shape.extend(
list(model.state_dict()[key+'.weight'].numpy().T.shape))
weights_param.data.extend(
list(model.state_dict()[key+'.weight'].numpy().T.reshape(-1)))
model_pb.weights.CopyFrom(weights_param)
bias_param = TensorParameter()
bias_param.shape.extend(
list(model.state_dict()[key+'.bias'].numpy().shape))
bias_param.data.extend(list(model.state_dict()[key+'.bias'].numpy()))
model_pb.bias.CopyFrom(bias_param)
model_pb.units = model_pb.bias.shape[0]
return model_pb
def save_FCNN_CNN1D(model, filename):
model_pb = CruiseModelParameter()
lane_feature_conv = LaneFeatureConvParameter()
lane_feature_conv.conv1d_0.CopyFrom(
load_Conv1dParameter(model, 'lane_feature_conv.0', stride=1))
lane_feature_conv.activation_1.activation = 'relu'
lane_feature_conv.conv1d_2.CopyFrom(
load_Conv1dParameter(model, 'lane_feature_conv.2', stride=2))
lane_feature_conv.activation_3.activation = 'relu'
lane_feature_conv.conv1d_4.CopyFrom(
load_Conv1dParameter(model, 'lane_feature_conv.4', stride=2))
lane_feature_maxpool = MaxPool1dParameter()
lane_feature_maxpool.kernel_size = 3
lane_feature_maxpool.stride = 3
lane_feature_avgpool = AvgPool1dParameter()
lane_feature_avgpool.kernel_size = 3
lane_feature_avgpool.stride = 3
obs_feature_fc = ObsFeatureFCParameter()
obs_feature_fc.linear_0.CopyFrom(
load_DenseParameter(model, 'obs_feature_fc.0'))
obs_feature_fc.activation_1.activation = 'sigmoid'
obs_feature_fc.linear_3.CopyFrom(
load_DenseParameter(model, 'obs_feature_fc.3'))
obs_feature_fc.activation_4.activation = 'sigmoid'
classify = ClassifyParameter()
classify.linear_0.CopyFrom(load_DenseParameter(model, 'classify.0'))
classify.activation_1.activation = 'sigmoid'
classify.linear_3.CopyFrom(load_DenseParameter(model, 'classify.3'))
classify.activation_4.activation = 'sigmoid'
classify.linear_6.CopyFrom(load_DenseParameter(model, 'classify.6'))
classify.activation_7.activation = 'sigmoid'
classify.linear_9.CopyFrom(load_DenseParameter(model, 'classify.9'))
classify.activation_10.activation = 'sigmoid'
regress = RegressParameter()
regress.linear_0.CopyFrom(load_DenseParameter(model, 'regress.0'))
regress.activation_1.activation = 'relu'
regress.linear_3.CopyFrom(load_DenseParameter(model, 'regress.3'))
regress.activation_4.activation = 'relu'
regress.linear_6.CopyFrom(load_DenseParameter(model, 'regress.6'))
regress.activation_7.activation = 'relu'
regress.linear_9.CopyFrom(load_DenseParameter(model, 'regress.9'))
regress.activation_10.activation = 'relu'
model_pb.lane_feature_conv.CopyFrom(lane_feature_conv)
model_pb.lane_feature_maxpool.CopyFrom(lane_feature_maxpool)
model_pb.lane_feature_avgpool.CopyFrom(lane_feature_avgpool)
model_pb.obs_feature_fc.CopyFrom(obs_feature_fc)
model_pb.classify.CopyFrom(classify)
model_pb.regress.CopyFrom(regress)
with open(filename, 'wb') as params_file:
params_file.write(model_pb.SerializeToString())
'''
Custom defined loss function that lumps the loss of classification and
of regression together.
'''
def loss_fn(c_pred, r_pred, target, balance):
loss_C = nn.BCEWithLogitsLoss(
pos_weight=torch.FloatTensor([balance]).cuda()) # nn.BCELoss()
loss_R = nn.MSELoss()
#loss = loss_C(c_pred, target[:,0].view(target.shape[0],1))
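    # The total loss combines a weighted BCE on the classification label
    # (target[:, 0]) with an MSE on the regression output: r_pred only
    # contributes where 0 < target[:, 2] <= 3.0; outside that range the target
    # is substituted for the prediction, so those samples add zero regression
    # loss (and no gradient).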
loss = 4 * loss_C(c_pred, target[:, 0].view(target.shape[0], 1)) + \
loss_R(((target[:, 2] > 0.0) * (target[:, 2] <= 3.0)).float().view(target.shape[0], 1) * r_pred +
((target[:, 2] <= 0.0) + (target[:, 2] > 3.0)).float().view(
target.shape[0], 1) * target[:, 2].view(target.shape[0], 1),
target[:, 2].view(target.shape[0], 1))
#loss_R((target[:,1] < 10.0).float().view(target.shape[0],1) * r_pred + \
# (target[:,1] >= 10.0).float().view(target.shape[0],1) * target[:,1].view(target.shape[0],1), \
# target[:,1].view(target.shape[0],1))
return loss
# ========================================================================
# Helper functions
'''
Get the full path of all files under the directory: 'dirName'
'''
def getListOfFiles(dirName):
listOfFiles = os.listdir(dirName)
allFiles = list()
for entry in listOfFiles:
fullPath = os.path.join(dirName, entry)
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
allFiles.append(fullPath)
return allFiles
'''
Print the distribution of data labels.
'''
def print_dist(label):
unique_labels = np.unique(label)
for l in unique_labels:
print ('Label = {}: {}%'.format(l, np.sum(label == l)/len(label)*100))
# ========================================================================
# ========================================================================
# Data Loading and preprocessing (Non Data-Loader case)
def load_data(filename):
'''
Load the data from h5 file to the numpy format.
(Only for non data-loader case)
'''
if not (os.path.exists(filename)):
logging.error("file: {}, does not exist".format(filename))
os._exit(1)
if os.path.splitext(filename)[1] != '.h5':
logging.error("file: {} is not an hdf5 file".format(filename))
os._exit(1)
samples = dict()
h5_file = h5py.File(filename, 'r')
for key in h5_file.keys():
samples[key] = h5_file[key][:]
print("load file success")
return samples['data']
def load_npy_data(dir):
    '''
    Load all .npy files under a certain dir;
    merge them together into one;
    return.
    '''
    # Minimal sketch: gather every .npy file under `dir` (recursively) and
    # stack the loaded arrays along the first axis.
    npy_files = [f for f in getListOfFiles(dir) if f.endswith('.npy')]
    return np.concatenate([np.load(f) for f in npy_files], axis=0) if npy_files else None
def data_preprocessing(data):
'''
Preprocess the data.
(Only for non data-loader case)
- separate input X and output y
- process output label from {-1,0,1,2,3,4} to {0,1}
- Take out only those meaningful features
- shuffle data
'''
# Various input features separation
X_obs_old_features = data[:, 0:23]
X_surround_obs = data[:, -dim_output-8:-dim_output]
X_obs_now = data[:, 23:32]
X_obs_hist_5 = data[:, 23:68]
X_lane = data[:, 68:-dim_output-8]
# mask out those that don't have any history
# mask5 = (data[:,53] != 100)
X = np.concatenate((X_obs_old_features, X_obs_hist_5, X_lane), axis=1)
# X = X[mask5, :]
y = data[:, -dim_output:]
# y = y[mask5, :]
# Binary classification
y[:, 0] = (y[:, 0] > 0).astype(float)
#y[:, 0] = np.logical_and((y[:, 0] > 0), (y[:, 1] < 1.0))
# Random shuffling
X_new, X_dummy, y_new, y_dummy = train_test_split(
X, y, test_size=0.0, random_state=233)
return X_new, y_new # , X_dummy, y_dummy
# ========================================================================
# ========================================================================
# Data Loading and preprocessing (Data-Loader case)
'''
TODO: implement custom collate_fn to incorporate down-sampling function
for certain labels.
'''
def collate_wDownSample(batch):
return None
'''
If datasets are too large, use Dataloader to load from disk.
'''
class TrainValidDataset(Dataset):
'''
Args:
- root_dir (string): Directory containing all folders with different
dates, each folder containing .cruise.h5 data files.
'''
def __init__(self, list_of_files):
self.list_of_files_ = list_of_files
self.data_size_until_this_file_ = []
self.dataset_size = 0
for file in self.list_of_files_:
with h5py.File(file, 'r') as h5_file:
data_size = h5_file[list(h5_file.keys())[0]].shape[0]
self.dataset_size += data_size
self.data_size_until_this_file_.append(self.dataset_size)
#print ('Total size of dataset: {}'.format(self.data_size_until_this_file_))
def __len__(self):
return self.dataset_size
def __getitem__(self, index):
bin_idx = self.FindBin(index, 0, len(
self.data_size_until_this_file_)-1)
with h5py.File(self.list_of_files_[bin_idx], 'r') as h5_file:
idx_offset = self.data_size_until_this_file_[bin_idx] - \
h5_file[list(h5_file.keys())[0]].shape[0]
data = h5_file[list(h5_file.keys())[0]][index-idx_offset]
label = data[-dim_output:]
label[0] = (label[0] > 0.0).astype(float)
return data[:-dim_output], label
# Binary search to expedite the data-loading process.
def FindBin(self, index, start, end):
if (start == end):
return start
mid = int((start+end)/2.0)
if (self.data_size_until_this_file_[mid] <= index):
return self.FindBin(index, mid+1, end)
else:
return self.FindBin(index, start, mid)
# ========================================================================
# ========================================================================
# Data training and validation
'''
Train the data. (vanilla version without dataloader)
'''
def train_vanilla(train_X, train_y, model, optimizer, epoch, batch_size=2048, balance=1.0):
model.train()
loss_history = []
logging.info('Epoch: {}'.format(epoch+1))
print ('Epoch: {}.'.format(epoch+1))
num_of_data = train_X.shape[0]
num_of_batch = int(num_of_data / batch_size) + 1
pred_y = None
for i in range(num_of_batch):
optimizer.zero_grad()
X = train_X[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
y = train_y[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
c_pred, r_pred = model(X)
loss = loss_fn(c_pred, r_pred, y, balance)
loss_history.append(loss.data)
loss.backward()
optimizer.step()
c_pred = c_pred.data.cpu().numpy()
c_pred = c_pred.reshape(c_pred.shape[0], 1)
pred_y = np.concatenate((pred_y, c_pred), axis=0) if pred_y is not None \
else c_pred
if (i > 0) and (i % 100 == 0):
logging.info('Step: {}, train_loss: {}'.format(
i, np.mean(loss_history[-100:])))
print ("Step: {}, training loss: {}".format(
i, np.mean(loss_history[-100:])))
pred_y = (pred_y > 0.0)
train_y = train_y.data.cpu().numpy()
training_accuracy = sklearn.metrics.accuracy_score(
train_y[:, 0], pred_y.reshape(-1))
train_loss = np.mean(loss_history)
logging.info('Training loss: {}'.format(train_loss))
logging.info('Training Accuracy: {}.'.format(training_accuracy))
print ('Training Loss: {}. Training Accuracy: {}'
.format(train_loss, training_accuracy))
'''
Validation (vanilla version without dataloader)
'''
def validate_vanilla(valid_X, valid_y, model, batch_size=2048, balance=1.0, pos_label=1.0):
model.eval()
loss_history = []
num_of_data = valid_X.shape[0]
num_of_batch = int(num_of_data / batch_size) + 1
pred_y = None
for i in range(num_of_batch):
X = valid_X[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
y = valid_y[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
c_pred, r_pred = model(X)
valid_loss = loss_fn(c_pred, r_pred, y, balance)
loss_history.append(valid_loss.data)
c_pred = c_pred.data.cpu().numpy()
c_pred = c_pred.reshape(c_pred.shape[0], 1)
pred_y = np.concatenate((pred_y, c_pred), axis=0) if pred_y is not None \
else c_pred
valid_y = valid_y.data.cpu().numpy()
valid_auc = sklearn.metrics.roc_auc_score(
valid_y[:, 0], pred_y.reshape(-1))
pred_y = (pred_y > 0.0)
valid_accuracy = sklearn.metrics.accuracy_score(
valid_y[:, 0], pred_y.reshape(-1))
valid_precision = sklearn.metrics.precision_score(
valid_y[:, 0], pred_y.reshape(-1), pos_label=pos_label)
valid_recall = sklearn.metrics.recall_score(
valid_y[:, 0], pred_y.reshape(-1), pos_label=pos_label)
logging.info('Validation loss: {}. Accuracy: {}.\
Precision: {}. Recall: {}. AUC: {}.'
.format(np.mean(loss_history), valid_accuracy, valid_precision,
valid_recall, valid_auc))
print ('Validation loss: {}. Accuracy: {}.\
Precision: {}. Recall: {}. AUC: {}.'
.format(np.mean(loss_history), valid_accuracy, valid_precision,
valid_recall, valid_auc))
return np.mean(loss_history)
'''
Train the data. (using dataloader)
'''
def train_dataloader(train_loader, model, optimizer, epoch, balance=1.0):
model.train()
loss_history = []
train_correct_class = 0
total_size = 0
logging.info('Epoch: {}'.format(epoch))
for i, (inputs, targets) in enumerate(train_loader):
total_size += targets.shape[0]
optimizer.zero_grad()
        X = inputs.float()
        y = targets.float()
        if cuda_is_available:
            X = X.cuda()
            y = y.cuda()
c_pred, r_pred = model(X)
loss = loss_fn(c_pred, r_pred, y, balance)
#loss.data[0].cpu().numpy()
loss_history.append(loss.data)
loss.backward()
optimizer.step()
train_correct_class += \
np.sum((c_pred.data.cpu().numpy() > 0.5).astype(float) ==
y[:, 0].data.cpu().numpy().reshape(c_pred.data.cpu().numpy().shape[0], 1))
#if i > 100:
# break
if i % 100 == 0:
logging.info('Step: {}, train_loss: {}'.format(
i, np.mean(loss_history[-100:])))
print ("Step: {}, training loss: {}".format(
i, np.mean(loss_history[-100:])))
train_loss = np.mean(loss_history)
logging.info('Training loss: {}'.format(train_loss))
print ('Epoch: {}. Training Loss: {}'.format(epoch, train_loss))
'''
Validation (using dataloader)
'''
def validate_dataloader(valid_loader, model, balance=1.0):
model.eval()
loss_history = []
valid_correct_class = 0.0
total_size = 0
for i, (X, y) in enumerate(valid_loader):
total_size += y.shape[0]
        X = X.float()
        y = y.float()
        if cuda_is_available:
            X = X.cuda()
            y = y.cuda()
c_pred, r_pred = model(X)
valid_loss = loss_fn(c_pred, r_pred, y, balance)
loss_history.append(valid_loss.data)
valid_correct_class += \
np.sum((c_pred.data.cpu().numpy() > 0.5).astype(float) ==
y[:, 0].data.cpu().numpy().reshape(c_pred.data.cpu().numpy().shape[0], 1))
valid_classification_accuracy = valid_correct_class / total_size
logging.info('Validation loss: {}. Validation classification accuracy: {}'
.format(np.mean(loss_history), valid_classification_accuracy))
print ('Validation loss: {}. Classification accuracy: {}.'
.format(np.mean(loss_history), valid_classification_accuracy))
return valid_loss
# ========================================================================
# ========================================================================
# Main function:
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train neural network based on feature files and save parameters')
parser.add_argument('train_file', type=str, help='training data (h5)')
parser.add_argument('valid_file', type=str, help='validation data (h5)')
parser.add_argument('-n', '--network-structure', type=int, default=1,
help='Specify which network to use:\n \
\t 0: Fully connected neural network.\n \
\t 1: 1D-CNN for lane feature extraction.')
parser.add_argument('-d', '--data-loader', action='store_true',
help='Use the dataloader (when memory size is smaller than dataset size)')
parser.add_argument('-s', '--save-path', type=str, default='./',
help='Specify the directory to save trained models.')
parser.add_argument('-g', '--go', action='store_true',
help='It is training lane-follow (go) cases.')
parser.add_argument('-b', '--balance', type=float, default=1.0,
help='Specify the weight for positive predictions.')
#parser.add_argument('-g', '--gpu_num', type=int, default=0, \
# help='Specify which GPU to use.')
args = parser.parse_args()
#os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' #specifies the same order as nvidia-smi
#os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_num)
if not args.data_loader:
# Load from file and print out general information of the data.
train_file = args.train_file
valid_file = args.valid_file
train_data = load_data(train_file)
valid_data = load_data(valid_file)
print ('Data loaded successfully.')
classes_train = np.asarray(train_data[:, -dim_output])
print ('Total number of training samples: {}'.format(len(classes_train)))
print ('Training set distribution:')
print_dist(classes_train)
classes_valid = np.asarray(valid_data[:, -dim_output])
print ('Total number of validation samples: {}'.format(len(classes_valid)))
print ('Validation set distribution:')
print_dist(classes_valid)
# Data preprocessing
X_train, y_train = data_preprocessing(train_data)
X_valid, y_valid = data_preprocessing(valid_data)
# Model declaration
model = None
if args.network_structure == 0:
model = FullyConn_NN()
elif args.network_structure == 1:
model = FCNN_CNN1D()
print ("The model used is: ")
print (model)
learning_rate = 6.561e-4
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.3, patience=2, min_lr=1e-8, verbose=1, mode='min')
# CUDA set-up:
cuda_is_available = torch.cuda.is_available()
if (cuda_is_available):
print ("Using CUDA to speed up training.")
model.cuda()
X_train = Variable(torch.FloatTensor(X_train).cuda())
X_valid = Variable(torch.FloatTensor(X_valid).cuda())
y_train = Variable(torch.FloatTensor(y_train).cuda())
y_valid = Variable(torch.FloatTensor(y_valid).cuda())
# Model training:
pos_label = 1.0
if args.go:
pos_label = 0.0
best_valid_loss = float('+inf')
for epoch in range(50):
train_vanilla(X_train, y_train, model, optimizer,
epoch, balance=args.balance)
valid_loss = validate_vanilla(
X_valid, y_valid, model, balance=args.balance, pos_label=pos_label)
scheduler.step(valid_loss)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), args.save_path + 'cruise_model{}_epoch{}_valloss{:.6f}.pt'
.format(args.network_structure, epoch+1, valid_loss))
else:
train_dir = args.train_file
valid_dir = args.valid_file
# Data preprocessing (training data balancing).
list_of_training_files = getListOfFiles(train_dir)
list_of_validation_files = getListOfFiles(valid_dir)
classes_train = []
for file in list_of_training_files:
with h5py.File(file, 'r') as h5_file:
data = h5_file[list(h5_file.keys())[0]][:, -2]
classes_train.append(data.tolist())
# "Flattening" the list of lists
classes_train = [item for sublist in classes_train for item in sublist]
classes_train = np.asarray(classes_train)
print ('Total number of training samples: {}'.format(len(classes_train)))
print ('Training set distribution:')
print_dist(classes_train)
classes_valid = []
for file in list_of_validation_files:
with h5py.File(file, 'r') as h5_file:
data = h5_file[list(h5_file.keys())[0]][:, -2]
classes_valid.append(data.tolist())
# "Flattening" the list of lists
classes_valid = [item for sublist in classes_valid for item in sublist]
classes_valid = np.asarray(classes_valid)
print ('Total number of validation samples: {}'.format(len(classes_valid)))
print ('Validation set distribution:')
print_dist(classes_valid)
#class_weights = class_weight.compute_class_weight('balanced', np.unique(classes_train), classes_train)
#weights = [class_weights[int(i+1)] for i in classes_train]
#weights = torch.DoubleTensor(weights)
#train_sampler = sampler.WeightedRandomSampler(weights, int(len(weights)/1), replacement=True)
model = FCNN_CNN1D()
learning_rate = 6.561e-4
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.3, patience=2, min_lr=1e-8, verbose=1, mode='min')
if (cuda_is_available):
print ('Using CUDA to speed up training.')
model.cuda()
train_dataset = TrainValidDataset(list_of_training_files)
valid_dataset = TrainValidDataset(list_of_validation_files)
train_loader = DataLoader(train_dataset, batch_size=1024, num_workers=8,
pin_memory=True, shuffle=True) # sampler=train_sampler)
valid_loader = DataLoader(
valid_dataset, batch_size=1024, num_workers=8, pin_memory=True)
for epoch in range(100):
train_dataloader(train_loader, model, optimizer, epoch)
valid_loss = validate_dataloader(valid_loader, model)
scheduler.step(valid_loss)
# ========================================================================
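# Example invocation (file names are illustrative):
#   python cruiseMLP_train.py train.h5 valid.h5 -n 1 -s ./saved_models/ -b 1.5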
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
test/integration/maincluster_test.go | package integration
import (
"fmt"
"log"
"os"
"reflect"
"regexp"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/kubernetes-incubator/kube-aws/builtin"
"github.com/kubernetes-incubator/kube-aws/cfnstack"
"github.com/kubernetes-incubator/kube-aws/core/root"
"github.com/kubernetes-incubator/kube-aws/core/root/config"
"github.com/kubernetes-incubator/kube-aws/pkg/api"
"github.com/kubernetes-incubator/kube-aws/pkg/model"
"github.com/kubernetes-incubator/kube-aws/test/helper"
)
type ConfigTester func(c *config.Config, t *testing.T)
type ClusterTester func(c *root.Cluster, t *testing.T)
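// ConfigTester and ClusterTester are small validation hooks: the helpers
// defined below (hasDefaultEtcdSettings, hasDefaultExperimentalFeatures, ...)
// all match the ConfigTester signature and assert on individual fields of the
// parsed cluster configuration.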
// Integration testing with real AWS services including S3, KMS, CloudFormation
func TestMainClusterConfig(t *testing.T) {
kubeAwsSettings := newKubeAwsSettingsFromEnv(t)
s3URI, s3URIExists := os.LookupEnv("KUBE_AWS_S3_DIR_URI")
if !s3URIExists || s3URI == "" {
s3URI = "s3://mybucket/mydir"
t.Logf(`Falling back s3URI to a stub value "%s" for tests of validating stack templates. No assets will actually be uploaded to S3`, s3URI)
} else {
log.Printf("s3URI is %s", s3URI)
}
s3Loc, err := cfnstack.S3URIFromString(s3URI)
if err != nil {
t.Errorf("failed to parse s3 uri: %v", err)
t.FailNow()
}
s3Bucket := s3Loc.Bucket()
s3Dir := s3Loc.KeyComponents()[0]
firstAz := kubeAwsSettings.region + "c"
hasDefaultEtcdSettings := func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := api.EtcdSettings{
Etcd: api.Etcd{
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
Tenancy: "default",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Subnets: api.Subnets{
subnet1,
},
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if diff := cmp.Diff(actual, expected); diff != "" {
t.Errorf("EtcdSettings didn't match: %s", diff)
}
}
hasDefaultExperimentalFeatures := func(c *config.Config, t *testing.T) {
expected := api.Experimental{
Admission: api.Admission{
PodSecurityPolicy: api.PodSecurityPolicy{
Enabled: false,
},
AlwaysPullImages: api.AlwaysPullImages{
Enabled: false,
},
DenyEscalatingExec: api.DenyEscalatingExec{
Enabled: false,
},
Priority: api.Priority{
Enabled: false,
},
MutatingAdmissionWebhook: api.MutatingAdmissionWebhook{
Enabled: false,
},
ValidatingAdmissionWebhook: api.ValidatingAdmissionWebhook{
Enabled: false,
},
PersistentVolumeClaimResize: api.PersistentVolumeClaimResize{
Enabled: false,
},
},
AuditLog: api.AuditLog{
Enabled: false,
LogPath: "/var/log/kube-apiserver-audit.log",
MaxAge: 30,
MaxBackup: 1,
MaxSize: 100,
},
Authentication: api.Authentication{
Webhook: api.Webhook{
Enabled: false,
CacheTTL: "5m0s",
Config: "",
},
},
AwsEnvironment: api.AwsEnvironment{
Enabled: false,
},
AwsNodeLabels: api.AwsNodeLabels{
Enabled: false,
},
ClusterAutoscalerSupport: api.ClusterAutoscalerSupport{
Enabled: true,
Options: map[string]string{},
},
TLSBootstrap: api.TLSBootstrap{
Enabled: false,
},
EphemeralImageStorage: api.EphemeralImageStorage{
Enabled: false,
Disk: "xvdb",
Filesystem: "xfs",
},
KIAMSupport: api.KIAMSupport{
Enabled: false,
Image: api.Image{Repo: "quay.io/uswitch/kiam", Tag: "v2.8", RktPullDocker: false},
SessionDuration: "15m",
ServerAddresses: api.KIAMServerAddresses{ServerAddress: "localhost:443", AgentAddress: "kiam-server:443"},
},
Kube2IamSupport: api.Kube2IamSupport{
Enabled: false,
},
GpuSupport: api.GpuSupport{
Enabled: false,
Version: "",
InstallImage: "shelmangroup/coreos-nvidia-driver-installer:latest",
},
LoadBalancer: api.LoadBalancer{
Enabled: false,
},
Oidc: api.Oidc{
Enabled: false,
IssuerUrl: "https://accounts.google.com",
ClientId: "kubernetes",
UsernameClaim: "email",
GroupsClaim: "groups",
},
NodeDrainer: api.NodeDrainer{
Enabled: false,
DrainTimeout: 5,
},
}
actual := c.Experimental
if !reflect.DeepEqual(expected, actual) {
t.Errorf("experimental settings didn't match :\nexpected=%v\nactual=%v", expected, actual)
}
if !c.WaitSignal.Enabled() {
t.Errorf("waitSignal should be enabled but was not: %v", c.WaitSignal)
}
if c.WaitSignal.MaxBatchSize() != 1 {
t.Errorf("waitSignal.maxBatchSize should be 1 but was %d: %v", c.WaitSignal.MaxBatchSize(), c.WaitSignal)
}
if len(c.NodePools) > 0 && c.NodePools[0].ClusterAutoscalerSupport.Enabled {
t.Errorf("ClusterAutoscalerSupport must be disabled by default on node pools")
}
}
everyPublicSubnetHasRouteToIGW := func(c *config.Config, t *testing.T) {
for i, s := range c.PublicSubnets() {
if !s.ManageRouteToInternet() {
t.Errorf("Public subnet %d should have a route to the IGW but it doesn't: %+v", i, s)
}
}
}
hasDefaultLaunchSpecifications := func(c *config.Config, t *testing.T) {
expected := []api.LaunchSpecification{
{
WeightedCapacity: 1,
InstanceType: "c4.large",
SpotPrice: "",
RootVolume: api.NewGp2RootVolume(30),
},
{
WeightedCapacity: 2,
InstanceType: "c4.xlarge",
SpotPrice: "",
RootVolume: api.NewGp2RootVolume(60),
},
}
p := c.NodePools[0]
actual := p.WorkerNodePool.SpotFleet.LaunchSpecifications
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"LaunchSpecifications didn't match: expected=%v actual=%v",
expected,
actual,
)
}
globalSpotPrice := p.WorkerNodePool.SpotFleet.SpotPrice
if globalSpotPrice != "0.06" {
t.Errorf("Default spot price is expected to be 0.06 but was: %s", globalSpotPrice)
}
}
spotFleetBasedNodePoolHasWaitSignalDisabled := func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
if !p.SpotFleet.Enabled() {
t.Errorf("1st node pool is expected to be a spot fleet based one but was not: %+v", p)
}
if p.WaitSignal.Enabled() {
t.Errorf(
"WaitSignal should be enabled but was not: %v",
p.WaitSignal,
)
}
}
asgBasedNodePoolHasWaitSignalEnabled := func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
if p.SpotFleet.Enabled() {
t.Errorf("1st node pool is expected to be an asg-based one but was not: %+v", p)
}
if !p.WaitSignal.Enabled() {
t.Errorf(
"WaitSignal should be disabled but was not: %v",
p.WaitSignal,
)
}
}
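	// hasDefaultNodePoolRollingStrategy asserts that the first node pool uses the default "Parallel" rolling strategy.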
hasDefaultNodePoolRollingStrategy := func(c *config.Config, t *testing.T) {
s := c.NodePools[0].NodePoolRollingStrategy
if s != "Parallel" {
t.Errorf("Default nodePool rolling strategy should be 'Parallel' but is not: %v", s)
}
}
hasSpecificNodePoolRollingStrategy := func(expRollingStrategy string) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
actRollingStrategy := c.NodePools[0].NodePoolRollingStrategy
if actRollingStrategy != expRollingStrategy {
t.Errorf("The nodePool Rolling Strategy (%s) does not match with the expected one: %s", actRollingStrategy, expRollingStrategy)
}
}
}
hasWorkerAndNodePoolStrategy := func(expWorkerStrategy, expNodePoolStrategy string) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
actWorkerStrategy := c.NodePools[0].NodePoolRollingStrategy
actNodePoolStrategy := c.NodePools[1].NodePoolRollingStrategy
if expWorkerStrategy != actWorkerStrategy {
t.Errorf("The nodePool Rolling Strategy (%s) does not match with the expected one: %s", actWorkerStrategy, expWorkerStrategy)
}
if expNodePoolStrategy != actNodePoolStrategy {
t.Errorf("The nodePool Rolling Strategy (%s) does not match with the expected one: %s", actNodePoolStrategy, expNodePoolStrategy)
}
}
}
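	// hasPrivateSubnetsWithManagedNGWs asserts that every private subnet gets a kube-aws managed NAT gateway and no route to the internet gateway.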
hasPrivateSubnetsWithManagedNGWs := func(numExpectedNum int) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the existing private subnet #%d should be created by kube-aws but was not", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
}
}
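	// hasSpecificNumOfManagedNGWsWithUnmanagedEIPs asserts the expected number of NAT gateways, each managed by kube-aws with a managed route but an unmanaged EIP.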
hasSpecificNumOfManagedNGWsWithUnmanagedEIPs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
ngwActualNum := len(c.NATGateways())
if ngwActualNum != ngwExpectedNum {
t.Errorf("Number of NAT gateways(%d) doesn't match with the expexted one: %d", ngwActualNum, ngwExpectedNum)
}
for i, n := range c.NATGateways() {
if !n.ManageNATGateway() {
t.Errorf("NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
if n.ManageEIP() {
t.Errorf("EIP for NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
}
if !n.ManageRoute() {
t.Errorf("Routes for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
}
}
}
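	// hasSpecificNumOfManagedNGWsAndEIPs asserts the expected number of NAT gateways, each with its gateway, EIP and route all managed by kube-aws.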
hasSpecificNumOfManagedNGWsAndEIPs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
ngwActualNum := len(c.NATGateways())
if ngwActualNum != ngwExpectedNum {
t.Errorf("Number of NAT gateways(%d) doesn't match with the expexted one: %d", ngwActualNum, ngwExpectedNum)
}
for i, n := range c.NATGateways() {
if !n.ManageNATGateway() {
t.Errorf("NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
if !n.ManageEIP() {
t.Errorf("EIP for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
if !n.ManageRoute() {
t.Errorf("Routes for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
}
}
}
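	// hasTwoManagedNGWsAndEIPs is a shorthand for asserting exactly two managed NAT gateways with managed EIPs.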
hasTwoManagedNGWsAndEIPs := hasSpecificNumOfManagedNGWsAndEIPs(2)
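	// hasNoManagedNGWsButSpecificNumOfRoutesToUnmanagedNGWs asserts that every NAT gateway and EIP is pre-existing (unmanaged by kube-aws) while the routes to them are still managed.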
hasNoManagedNGWsButSpecificNumOfRoutesToUnmanagedNGWs := func(ngwExpectedNum int) func(c *config.Config, t *testing.T) {
return func(c *config.Config, t *testing.T) {
ngwActualNum := len(c.NATGateways())
if ngwActualNum != ngwExpectedNum {
t.Errorf("Number of NAT gateways(%d) doesn't match with the expexted one: %d", ngwActualNum, ngwExpectedNum)
}
for i, n := range c.NATGateways() {
if n.ManageNATGateway() {
t.Errorf("NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
}
if n.ManageEIP() {
t.Errorf("EIP for NGW #%d is expected to be unmanaged by kube-aws but was not: %+v", i, n)
}
if !n.ManageRoute() {
t.Errorf("Routes for NGW #%d is expected to be managed by kube-aws but was not: %+v", i, n)
}
}
}
}
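	// hasNoNGWsOrEIPsOrRoutes asserts that the cluster defines no NAT gateways at all.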
hasNoNGWsOrEIPsOrRoutes := func(c *config.Config, t *testing.T) {
ngwActualNum := len(c.NATGateways())
ngwExpectedNum := 0
if ngwActualNum != ngwExpectedNum {
t.Errorf("Number of NAT gateways(%d) doesn't match with the expexted one: %d", ngwActualNum, ngwExpectedNum)
}
}
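	// hasDefaultCluster asserts that the generated assets include the root and control-plane stack templates at the expected S3 locations.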
hasDefaultCluster := func(c *root.Cluster, t *testing.T) {
assets, err := c.EnsureAllAssetsGenerated()
if err != nil {
t.Errorf("failed to list assets: %v", err)
t.FailNow()
}
t.Run("Assets/RootStackTemplate", func(t *testing.T) {
cluster := kubeAwsSettings.clusterName
stack := kubeAwsSettings.clusterName
file := "stack.json"
expected := api.Asset{
Content: "",
AssetLocation: api.AssetLocation{
ID: api.NewAssetID(stack, file),
Bucket: s3Bucket,
Key: s3Dir + "/kube-aws/clusters/" + cluster + "/exported/stacks/" + stack + "/" + file,
Path: stack + "/stack.json",
},
}
actual, err := assets.FindAssetByStackAndFileName(stack, file)
if err != nil {
t.Errorf("failed to find asset: %v", err)
}
if expected.ID != actual.ID {
t.Errorf(
"Asset id didn't match: expected=%v actual=%v",
expected.ID,
actual.ID,
)
}
if expected.Key != actual.Key {
t.Errorf(
"Asset key didn't match: expected=%v actual=%v",
expected.Key,
actual.Key,
)
}
})
t.Run("Assets/ControlplaneStackTemplate", func(t *testing.T) {
cluster := kubeAwsSettings.clusterName
stack := "control-plane"
file := "stack.json"
expected := api.Asset{
Content: builtin.String("stack-templates/control-plane.json.tmpl"),
AssetLocation: api.AssetLocation{
ID: api.NewAssetID(stack, file),
Bucket: s3Bucket,
Key: s3Dir + "/kube-aws/clusters/" + cluster + "/exported/stacks/" + stack + "/" + file,
Path: stack + "/stack.json",
},
}
actual, err := assets.FindAssetByStackAndFileName(stack, file)
if err != nil {
t.Errorf("failed to find asset: %v", err)
}
if expected.ID != actual.ID {
t.Errorf(
"Asset id didn't match: expected=%v actual=%v",
expected.ID,
actual.ID,
)
}
if expected.Key != actual.Key {
t.Errorf(
"Asset key didn't match: expected=%v actual=%v",
expected.Key,
actual.Key,
)
}
})
}
mainClusterYaml := kubeAwsSettings.mainClusterYaml()
minimalValidConfigYaml := kubeAwsSettings.minimumValidClusterYamlWithAZ("c")
configYamlWithoutExernalDNSName := kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
availabilityZone: us-west-1c
`
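	// validCases is a table of cluster.yaml variations that must load successfully; each entry pairs a config snippet with assertions against the parsed config and, optionally, the generated cluster.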
validCases := []struct {
context string
configYaml string
assertConfig []ConfigTester
assertCluster []ClusterTester
}{
{
context: "WithAddons",
configYaml: minimalValidConfigYaml + `
addons:
rescheduler:
enabled: true
clusterAutoscaler:
enabled: true
options:
v: 5
test: present
metricsServer:
enabled: true
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
expected := api.Addons{
Rescheduler: api.Rescheduler{
Enabled: true,
},
ClusterAutoscaler: api.ClusterAutoscalerSupport{
Enabled: true,
Options: map[string]string{"v": "5", "test": "present"},
},
MetricsServer: api.MetricsServer{
Enabled: true,
},
APIServerAggregator: api.APIServerAggregator{
Enabled: true,
},
}
actual := c.Addons
if !reflect.DeepEqual(expected, actual) {
t.Errorf("addons didn't match : expected=%+v actual=%+v", expected, actual)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithAutoscalingByClusterAutoscaler",
configYaml: minimalValidConfigYaml + `
addons:
clusterAutoscaler:
enabled: true
worker:
nodePools:
- name: pool1
autoscaling:
clusterAutoscaler:
enabled: true
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
expected := true
actual := p.Autoscaling.ClusterAutoscaler.Enabled
if !reflect.DeepEqual(expected, actual) {
t.Errorf("autoscaling.clusterAutoscaler.enabled didn't match : expected=%v actual=%v", expected, actual)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsSpecified",
configYaml: configYamlWithoutExernalDNSName + `
apiEndpoints:
- name: default
dnsName: k8s.example.com
loadBalancer:
apiAccessAllowedSourceCIDRs:
- 1.2.3.255/32
hostedZone:
id: a1b2c4
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs)
if l != 1 {
t.Errorf("unexpected size of apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs: %d", l)
t.FailNow()
}
actual := c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs[0].String()
expected := "1.2.3.255/32"
if actual != expected {
t.Errorf("unexpected cidr in apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs[0]. expected = %s, actual = %s", expected, actual)
}
},
},
},
{
context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsOmitted",
configYaml: configYamlWithoutExernalDNSName + `
apiEndpoints:
- name: default
dnsName: k8s.example.com
loadBalancer:
hostedZone:
id: a1b2c4
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs)
if l != 1 {
t.Errorf("unexpected size of apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs: %d", l)
t.FailNow()
}
actual := c.APIEndpointConfigs[0].LoadBalancer.APIAccessAllowedSourceCIDRs[0].String()
expected := "0.0.0.0/0"
if actual != expected {
t.Errorf("unexpected cidr in apiEndpoints[0].loadBalancer.apiAccessAllowedSourceCIDRs[0]. expected = %s, actual = %s", expected, actual)
}
},
},
},
{
context: "WithKubeProxyIPVSModeDisabledByDefault",
configYaml: minimalValidConfigYaml,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.KubeProxy.IPVSMode.Enabled != false {
t.Errorf("kube-proxy IPVS mode must be disabled by default")
}
expectedScheduler := "rr"
if c.KubeProxy.IPVSMode.Scheduler != expectedScheduler {
t.Errorf("IPVS scheduler should be by default set to: %s (actual = %s)", expectedScheduler, c.KubeProxy.IPVSMode.Scheduler)
}
expectedSyncPeriod := "60s"
if c.KubeProxy.IPVSMode.SyncPeriod != expectedSyncPeriod {
t.Errorf("Sync period should be by default set to: %s (actual = %s)", expectedSyncPeriod, c.KubeProxy.IPVSMode.SyncPeriod)
}
expectedMinSyncPeriod := "10s"
if c.KubeProxy.IPVSMode.MinSyncPeriod != expectedMinSyncPeriod {
t.Errorf("Minimal sync period should be by default set to: %s (actual = %s)", expectedMinSyncPeriod, c.KubeProxy.IPVSMode.MinSyncPeriod)
}
},
},
},
{
context: "WithKubeProxyIPVSModeEnabled",
configYaml: minimalValidConfigYaml + `
kubeProxy:
ipvsMode:
enabled: true
scheduler: lc
syncPeriod: 90s
minSyncPeriod: 15s
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.KubeProxy.IPVSMode.Enabled != true {
t.Errorf("kube-proxy IPVS mode must be enabled")
}
expectedScheduler := "lc"
if c.KubeProxy.IPVSMode.Scheduler != expectedScheduler {
t.Errorf("IPVS scheduler should be set to: %s (actual = %s)", expectedScheduler, c.KubeProxy.IPVSMode.Scheduler)
}
expectedSyncPeriod := "90s"
if c.KubeProxy.IPVSMode.SyncPeriod != expectedSyncPeriod {
t.Errorf("Sync period should be set to: %s (actual = %s)", expectedSyncPeriod, c.KubeProxy.IPVSMode.SyncPeriod)
}
expectedMinSyncPeriod := "15s"
if c.KubeProxy.IPVSMode.MinSyncPeriod != expectedMinSyncPeriod {
t.Errorf("Minimal sync period should be set to: %s (actual = %s)", expectedMinSyncPeriod, c.KubeProxy.IPVSMode.MinSyncPeriod)
}
},
},
},
{
// See https://github.com/kubernetes-incubator/kube-aws/issues/365
context: "WithClusterNameContainsHyphens",
configYaml: kubeAwsSettings.withClusterName("my-cluster").minimumValidClusterYaml(),
},
{
context: "WithCustomSettings",
configYaml: minimalValidConfigYaml + `
customSettings:
stack-type: control-plane
worker:
nodePools:
- name: pool1
customSettings:
stack-type: node-pool
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
{
expected := map[string]interface{}{
"stack-type": "control-plane",
}
actual := c.CustomSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf("customSettings didn't match : expected=%v actual=%v", expected, actual)
}
}
{
expected := map[string]interface{}{
"stack-type": "node-pool",
}
actual := p.CustomSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf("customSettings didn't match : expected=%v actual=%v", expected, actual)
}
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithDifferentReleaseChannels",
configYaml: minimalValidConfigYaml + `
releaseChannel: stable
worker:
nodePools:
- name: pool1
releaseChannel: alpha
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
},
assertCluster: []ClusterTester{
func(c *root.Cluster, t *testing.T) {
cp := c.ControlPlane().Config.AMI
np := c.NodePools()[0].NodePoolConfig.AMI
if cp == "" {
t.Error("the default AMI ID should not be empty but it was")
}
if np == "" {
t.Error("the AMI ID for the node pool should not be empty but it was")
}
if cp == np {
t.Errorf("the default AMI ID and the AMI ID for the node pool should not match but they did: default=%s, nodepool=%s", cp, np)
}
},
},
},
{
context: "WithElasticFileSystemId",
configYaml: minimalValidConfigYaml + `
elasticFileSystemId: efs-12345
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.NodePools[0].ElasticFileSystemID != "efs-12345" {
t.Errorf("The value of worker.nodePools[0].elasticFileSystemId should match the one for the top-leve elasticFileSystemId, but it wan't: worker.nodePools[0].elasticFileSystemId=%s", c.NodePools[0].ElasticFileSystemID)
}
},
},
},
{
context: "WithElasticFileSystemIdInSpecificNodePool",
configYaml: mainClusterYaml + `
subnets:
- name: existing1
id: subnet-12345
availabilityZone: us-west-1a
worker:
nodePools:
- name: pool1
subnets:
- name: existing1
elasticFileSystemId: efs-12345
- name: pool2
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.NodePools[0].ElasticFileSystemID != "efs-12345" {
t.Errorf("Unexpected worker.nodePools[0].elasticFileSystemId: %s", c.NodePools[0].ElasticFileSystemID)
}
if c.NodePools[1].ElasticFileSystemID != "" {
t.Errorf("Unexpected worker.nodePools[1].elasticFileSystemId: %s", c.NodePools[1].ElasticFileSystemID)
}
},
},
},
{
context: "WithEtcdDataVolumeEncrypted",
configYaml: minimalValidConfigYaml + `
etcd:
dataVolume:
encrypted: true
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if !c.Etcd.DataVolume.Encrypted {
t.Errorf("Etcd data volume should be encrypted but was not: %v", c.Etcd)
}
},
},
},
{
context: "WithEtcdDataVolumeEncryptedKMSKeyARN",
configYaml: minimalValidConfigYaml + `
etcd:
dataVolume:
encrypted: true
kmsKeyArn: arn:aws:kms:eu-west-1:XXX:key/XXX
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
expected := "arn:aws:kms:eu-west-1:XXX:key/XXX"
if c.Etcd.KMSKeyARN() != expected {
t.Errorf("Etcd data volume KMS Key ARN didn't match : expected=%v actual=%v", expected, c.Etcd.KMSKeyARN())
}
if !c.Etcd.DataVolume.Encrypted {
t.Error("Etcd data volume should be encrypted but was not")
}
},
},
},
{
context: "WithEtcdMemberIdentityProviderEIP",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eip
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := api.EtcdSettings{
Etcd: api.Etcd{
Cluster: api.EtcdCluster{
MemberIdentityProvider: "eip",
},
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
Tenancy: "default",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Subnets: api.Subnets{
subnet1,
},
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if diff := cmp.Diff(actual, expected); diff != "" {
t.Errorf("EtcdSettings didn't match: %s", diff)
}
if !actual.NodeShouldHaveEIP() {
t.Errorf(
"NodeShouldHaveEIP returned unexpected value: %v",
actual.NodeShouldHaveEIP(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENI",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := api.EtcdSettings{
Etcd: api.Etcd{
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Cluster: api.EtcdCluster{
MemberIdentityProvider: "eni",
},
Subnets: api.Subnets{
subnet1,
},
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if diff := cmp.Diff(actual, expected); diff != "" {
t.Errorf("EtcdSettings didn't match: %s", diff)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithCustomDomain",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := api.EtcdSettings{
Etcd: api.Etcd{
Cluster: api.EtcdCluster{
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Subnets: api.Subnets{
subnet1,
},
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if diff := cmp.Diff(actual, expected); diff != "" {
t.Errorf("EtcdSettings didn't match: %s", diff)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithCustomFQDNs",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
nodes:
- fqdn: etcd1a.internal.example.com
- fqdn: etcd1b.internal.example.com
- fqdn: etcd1c.internal.example.com
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := api.EtcdSettings{
Etcd: api.Etcd{
Cluster: api.EtcdCluster{
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Nodes: []api.EtcdNode{
api.EtcdNode{
FQDN: "etcd1a.internal.example.com",
},
api.EtcdNode{
FQDN: "etcd1b.internal.example.com",
},
api.EtcdNode{
FQDN: "etcd1c.internal.example.com",
},
},
Subnets: api.Subnets{
subnet1,
},
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if diff := cmp.Diff(actual, expected); diff != "" {
t.Errorf("EtcdSettings didn't match: %s", diff)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithCustomNames",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
nodes:
- name: etcd1a
- name: etcd1b
- name: etcd1c
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := api.EtcdSettings{
Etcd: api.Etcd{
Cluster: api.EtcdCluster{
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Nodes: []api.EtcdNode{
api.EtcdNode{
Name: "etcd1a",
},
api.EtcdNode{
Name: "etcd1b",
},
api.EtcdNode{
Name: "etcd1c",
},
},
Subnets: api.Subnets{
subnet1,
},
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if diff := cmp.Diff(actual, expected); diff != "" {
t.Errorf("EtcdSettings didn't match: %s", diff)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithoutRecordSets",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
manageRecordSets: false
nodes:
- name: etcd1a
- name: etcd1b
- name: etcd1c
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
manageRecordSets := false
expected := api.EtcdSettings{
Etcd: api.Etcd{
Cluster: api.EtcdCluster{
ManageRecordSets: &manageRecordSets,
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Nodes: []api.EtcdNode{
api.EtcdNode{
Name: "etcd1a",
},
api.EtcdNode{
Name: "etcd1b",
},
api.EtcdNode{
Name: "etcd1c",
},
},
Subnets: api.Subnets{
subnet1,
},
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if diff := cmp.Diff(actual, expected); diff != "" {
t.Errorf("EtcdSettings didn't match: %s", diff)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithEtcdMemberIdentityProviderENIWithHostedZoneID",
configYaml: minimalValidConfigYaml + `
etcd:
memberIdentityProvider: eni
internalDomainName: internal.example.com
hostedZone:
id: hostedzone-abcdefg
nodes:
- name: etcd1a
- name: etcd1b
- name: etcd1c
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnet(firstAz, "10.0.0.0/24")
subnet1.Name = "Subnet0"
expected := api.EtcdSettings{
Etcd: api.Etcd{
Cluster: api.EtcdCluster{
HostedZone: api.Identifier{ID: "hostedzone-abcdefg"},
MemberIdentityProvider: "eni",
InternalDomainName: "internal.example.com",
},
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Nodes: []api.EtcdNode{
api.EtcdNode{
Name: "etcd1a",
},
api.EtcdNode{
Name: "etcd1b",
},
api.EtcdNode{
Name: "etcd1c",
},
},
Subnets: api.Subnets{
subnet1,
},
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if diff := cmp.Diff(actual, expected); diff != "" {
t.Errorf("EtcdSettings didn't match: %s", diff)
}
if !actual.NodeShouldHaveSecondaryENI() {
t.Errorf(
"NodeShouldHaveSecondaryENI returned unexpected value: %v",
actual.NodeShouldHaveSecondaryENI(),
)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
},
},
{
context: "WithExperimentalFeatures",
configYaml: minimalValidConfigYaml + `
experimental:
admission:
podSecurityPolicy:
enabled: true
denyEscalatingExec:
enabled: true
alwaysPullImages:
enabled: true
priority:
enabled: true
mutatingAdmissionWebhook:
enabled: true
validatingAdmissionWebhook:
enabled: true
persistentVolumeClaimResize:
enabled: true
auditLog:
enabled: true
logPath: "/var/log/audit.log"
maxAge: 100
maxBackup: 10
maxSize: 5
authentication:
webhook:
enabled: true
cacheTTL: "1234s"
configBase64: "e30k"
awsEnvironment:
enabled: true
environment:
CFNSTACK: '{ "Ref" : "AWS::StackId" }'
awsNodeLabels:
enabled: true
tlsBootstrap:
enabled: true
ephemeralImageStorage:
enabled: true
kiamSupport:
enabled: false
kube2IamSupport:
enabled: true
gpuSupport:
enabled: true
version: "375.66"
installImage: "shelmangroup/coreos-nvidia-driver-installer:latest"
kubeletOpts: '--image-gc-low-threshold 60 --image-gc-high-threshold 70'
loadBalancer:
enabled: true
names:
- manuallymanagedlb
securityGroupIds:
- sg-12345678
targetGroup:
enabled: true
arns:
- arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx
securityGroupIds:
- sg-12345678
oidc:
enabled: true
oidc-issuer-url: "https://accounts.google.com"
oidc-client-id: "kubernetes"
oidc-username-claim: "email"
oidc-groups-claim: "groups"
nodeDrainer:
enabled: true
drainTimeout: 3
cloudWatchLogging:
enabled: true
amazonSsmAgent:
enabled: true
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
expected := api.Experimental{
Admission: api.Admission{
PodSecurityPolicy: api.PodSecurityPolicy{
Enabled: true,
},
AlwaysPullImages: api.AlwaysPullImages{
Enabled: true,
},
DenyEscalatingExec: api.DenyEscalatingExec{
Enabled: true,
},
Priority: api.Priority{
Enabled: true,
},
MutatingAdmissionWebhook: api.MutatingAdmissionWebhook{
Enabled: true,
},
ValidatingAdmissionWebhook: api.ValidatingAdmissionWebhook{
Enabled: true,
},
PersistentVolumeClaimResize: api.PersistentVolumeClaimResize{
Enabled: true,
},
},
AuditLog: api.AuditLog{
Enabled: true,
LogPath: "/var/log/audit.log",
MaxAge: 100,
MaxBackup: 10,
MaxSize: 5,
},
Authentication: api.Authentication{
Webhook: api.Webhook{
Enabled: true,
CacheTTL: "1234s",
Config: "e30k",
},
},
AwsEnvironment: api.AwsEnvironment{
Enabled: true,
Environment: map[string]string{
"CFNSTACK": `{ "Ref" : "AWS::StackId" }`,
},
},
AwsNodeLabels: api.AwsNodeLabels{
Enabled: true,
},
ClusterAutoscalerSupport: api.ClusterAutoscalerSupport{
Enabled: true,
Options: map[string]string{},
},
TLSBootstrap: api.TLSBootstrap{
Enabled: true,
},
EphemeralImageStorage: api.EphemeralImageStorage{
Enabled: true,
Disk: "xvdb",
Filesystem: "xfs",
},
KIAMSupport: api.KIAMSupport{
Enabled: false,
Image: api.Image{Repo: "quay.io/uswitch/kiam", Tag: "v2.8", RktPullDocker: false},
SessionDuration: "15m",
ServerAddresses: api.KIAMServerAddresses{ServerAddress: "localhost:443", AgentAddress: "kiam-server:443"},
},
Kube2IamSupport: api.Kube2IamSupport{
Enabled: true,
},
GpuSupport: api.GpuSupport{
Enabled: true,
Version: "375.66",
InstallImage: "shelmangroup/coreos-nvidia-driver-installer:latest",
},
KubeletOpts: "--image-gc-low-threshold 60 --image-gc-high-threshold 70",
LoadBalancer: api.LoadBalancer{
Enabled: true,
Names: []string{"manuallymanagedlb"},
SecurityGroupIds: []string{"sg-12345678"},
},
TargetGroup: api.TargetGroup{
Enabled: true,
Arns: []string{"arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx"},
SecurityGroupIds: []string{"sg-12345678"},
},
Oidc: api.Oidc{
Enabled: true,
IssuerUrl: "https://accounts.google.com",
ClientId: "kubernetes",
UsernameClaim: "email",
GroupsClaim: "groups",
},
NodeDrainer: api.NodeDrainer{
Enabled: true,
DrainTimeout: 3,
},
}
actual := c.Experimental
if !reflect.DeepEqual(expected, actual) {
t.Errorf("experimental settings didn't match : expected=%+v actual=%+v", expected, actual)
}
p := c.NodePools[0]
if reflect.DeepEqual(expected, p.Experimental) {
t.Errorf("experimental settings shouldn't be inherited to a node pool but it did : toplevel=%v nodepool=%v", expected, p.Experimental)
}
},
},
assertCluster: []ClusterTester{
hasDefaultCluster,
func(c *root.Cluster, t *testing.T) {
cp := c.ControlPlane()
controllerUserdataS3Part := cp.UserData["Controller"].Parts[api.USERDATA_S3].Asset.Content
if !strings.Contains(controllerUserdataS3Part, `--feature-gates=PodPriority=true`) {
t.Error("missing controller feature gate: PodPriority=true")
}
if !strings.Contains(controllerUserdataS3Part, `scheduling.k8s.io/v1alpha1=true`) {
t.Error("missing controller runtime config: scheduling.k8s.io/v1alpha1=true")
}
re, _ := regexp.Compile("--enable-admission-plugins=[a-zA-z,]*,Priority")
if len(re.FindString(controllerUserdataS3Part)) == 0 {
t.Error("missing controller --enable-admission-plugins config: Priority")
}
},
},
},
{
context: "WithExperimentalFeaturesForWorkerNodePool",
configYaml: minimalValidConfigYaml + `
addons:
clusterAutoscaler:
enabled: true
worker:
nodePools:
- name: pool1
admission:
podSecurityPolicy:
enabled: true
auditLog:
enabled: true
maxage: 100
logpath: "/var/log/audit.log"
awsEnvironment:
enabled: true
environment:
CFNSTACK: '{ "Ref" : "AWS::StackId" }'
awsNodeLabels:
enabled: true
clusterAutoscalerSupport:
enabled: true
tlsBootstrap:
enabled: true # Must be ignored, value is synced with the one from control plane
ephemeralImageStorage:
enabled: true
kube2IamSupport:
enabled: true
loadBalancer:
enabled: true
names:
- manuallymanagedlb
securityGroupIds:
- sg-12345678
targetGroup:
enabled: true
arns:
- arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx
securityGroupIds:
- sg-12345678
# Ignored, uses global setting
nodeDrainer:
enabled: true
drainTimeout: 5
nodeLabels:
kube-aws.coreos.com/role: worker
taints:
- key: reservation
value: spot
effect: NoSchedule
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
expected := api.Experimental{
AwsEnvironment: api.AwsEnvironment{
Enabled: true,
Environment: map[string]string{
"CFNSTACK": `{ "Ref" : "AWS::StackId" }`,
},
},
AwsNodeLabels: api.AwsNodeLabels{
Enabled: true,
},
ClusterAutoscalerSupport: api.ClusterAutoscalerSupport{
Enabled: true,
Options: map[string]string{},
},
TLSBootstrap: api.TLSBootstrap{
Enabled: false,
},
EphemeralImageStorage: api.EphemeralImageStorage{
Enabled: true,
Disk: "xvdb",
Filesystem: "xfs",
},
Kube2IamSupport: api.Kube2IamSupport{
Enabled: true,
},
LoadBalancer: api.LoadBalancer{
Enabled: true,
Names: []string{"manuallymanagedlb"},
SecurityGroupIds: []string{"sg-12345678"},
},
TargetGroup: api.TargetGroup{
Enabled: true,
Arns: []string{"arn:aws:elasticloadbalancing:eu-west-1:xxxxxxxxxxxx:targetgroup/manuallymanagedetg/xxxxxxxxxxxxxxxx"},
SecurityGroupIds: []string{"sg-12345678"},
},
NodeDrainer: api.NodeDrainer{
Enabled: false,
DrainTimeout: 0,
},
}
p := c.NodePools[0]
					if !reflect.DeepEqual(expected, p.Experimental) {
t.Errorf("experimental settings for node pool didn't match : expected=%v actual=%v", expected, p.Experimental)
}
expectedNodeLabels := api.NodeLabels{
"kube-aws.coreos.com/cluster-autoscaler-supported": "true",
"kube-aws.coreos.com/role": "worker",
}
actualNodeLabels := c.NodePools[0].NodeLabels()
if !reflect.DeepEqual(expectedNodeLabels, actualNodeLabels) {
t.Errorf("worker node labels didn't match: expected=%v, actual=%v", expectedNodeLabels, actualNodeLabels)
}
expectedTaints := api.Taints{
{Key: "reservation", Value: "spot", Effect: "NoSchedule"},
}
actualTaints := c.NodePools[0].Taints
if !reflect.DeepEqual(expectedTaints, actualTaints) {
t.Errorf("worker node taints didn't match: expected=%v, actual=%v", expectedTaints, actualTaints)
}
},
},
},
{
context: "WithExperimentalFeatureKiam",
configYaml: minimalValidConfigYaml + `
experimental:
kiamSupport:
enabled: true
image:
repo: quay.io/uswitch/kiam
tag: v2.6
sessionDuration: 30m
serverAddresses:
serverAddress: localhost
agentAddress: kiam-server
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
expected := api.KIAMSupport{
Enabled: true,
Image: api.Image{Repo: "quay.io/uswitch/kiam", Tag: "v2.6", RktPullDocker: false},
SessionDuration: "30m",
ServerAddresses: api.KIAMServerAddresses{ServerAddress: "localhost", AgentAddress: "kiam-server"},
}
actual := c.Experimental
if !reflect.DeepEqual(expected, actual.KIAMSupport) {
t.Errorf("experimental settings didn't match : expected=%+v actual=%+v", expected, actual)
}
p := c.NodePools[0]
if reflect.DeepEqual(expected, p.Experimental.KIAMSupport) {
t.Errorf("experimental settings shouldn't be inherited to a node pool but it did : toplevel=%v nodepool=%v", expected, p.Experimental)
}
},
},
},
{
context: "WithExperimentalFeatureKiamForWorkerNodePool",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
kiamSupport:
enabled: true
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
expected := api.Experimental{
KIAMSupport: api.KIAMSupport{
Enabled: true,
Image: api.Image{Repo: "quay.io/uswitch/kiam", Tag: "v2.8", RktPullDocker: false},
SessionDuration: "15m",
ServerAddresses: api.KIAMServerAddresses{ServerAddress: "localhost:443", AgentAddress: "kiam-server:443"},
},
}
p := c.NodePools[0]
					if !reflect.DeepEqual(expected, p.Experimental) {
t.Errorf("experimental settings for node pool didn't match : expected=%v actual=%v", expected, p.Experimental)
}
},
},
},
{
context: "WithKube2IamSupport",
configYaml: minimalValidConfigYaml + `
controller:
iam:
role:
name: myrole1
experimental:
kube2IamSupport:
enabled: true
worker:
nodePools:
- name: pool1
iam:
role:
name: myrole2
kube2IamSupport:
enabled: true
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
asgBasedNodePoolHasWaitSignalEnabled,
func(c *config.Config, t *testing.T) {
expectedControllerRoleName := "myrole1"
expectedWorkerRoleName := "myrole2"
if expectedControllerRoleName != c.Controller.IAMConfig.Role.Name {
t.Errorf("controller's iam.role.name didn't match : expected=%v actual=%v", expectedControllerRoleName, c.Controller.IAMConfig.Role.Name)
}
if !c.Experimental.Kube2IamSupport.Enabled {
t.Errorf("controller's experimental.kube2IamSupport should be enabled but was not: %+v", c.Experimental)
}
p := c.NodePools[0]
if expectedWorkerRoleName != p.IAMConfig.Role.Name {
t.Errorf("worker node pool's iam.role.name didn't match : expected=%v actual=%v", expectedWorkerRoleName, p.IAMConfig.Role.Name)
}
if !p.Kube2IamSupport.Enabled {
t.Errorf("worker node pool's kube2IamSupport should be enabled but was not: %+v", p.Experimental)
}
},
},
},
{
context: "WithControllerIAMDefaultManageExternally",
configYaml: minimalValidConfigYaml,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
expectedValue := false
if c.Controller.IAMConfig.Role.ManageExternally != expectedValue {
t.Errorf("controller's iam.role.manageExternally didn't match : expected=%v actual=%v", expectedValue, c.Controller.IAMConfig.Role.ManageExternally)
}
},
},
},
{
context: "WithControllerIAMEnabledManageExternally",
configYaml: minimalValidConfigYaml + `
controller:
iam:
role:
name: myrole1
manageExternally: true
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
expectedManageExternally := true
expectedRoleName := "myrole1"
if expectedRoleName != c.Controller.IAMConfig.Role.Name {
t.Errorf("controller's iam.role.name didn't match : expected=%v actual=%v", expectedRoleName, c.Controller.IAMConfig.Role.Name)
}
if expectedManageExternally != c.Controller.IAMConfig.Role.ManageExternally {
t.Errorf("controller's iam.role.manageExternally didn't matchg : expected=%v actual=%v", expectedManageExternally, c.Controller.IAMConfig.Role.ManageExternally)
}
},
},
},
{
context: "WithControllerIAMEnabledStrictName",
configYaml: minimalValidConfigYaml + `
controller:
iam:
role:
name: myrole1
strictName: true
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
expectedRoleName := "myrole1"
if expectedRoleName != c.Controller.IAMConfig.Role.Name {
t.Errorf("controller's iam.role.name didn't match : expected=%v actual=%v", expectedRoleName, c.Controller.IAMConfig.Role.Name)
}
},
},
},
{
context: "WithWaitSignalDisabled",
configYaml: minimalValidConfigYaml + `
waitSignal:
enabled: false
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
if c.WaitSignal.Enabled() {
t.Errorf("waitSignal should be disabled but was not: %v", c.WaitSignal)
}
},
},
},
{
context: "WithWaitSignalEnabled",
configYaml: minimalValidConfigYaml + `
waitSignal:
enabled: true
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
if !c.WaitSignal.Enabled() {
t.Errorf("waitSignal should be enabled but was not: %v", c.WaitSignal)
}
},
},
},
{
context: "WithNodePoolWithWaitSignalDisabled",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
waitSignal:
enabled: false
- name: pool2
waitSignal:
enabled: false
maxBatchSize: 2
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].WaitSignal.Enabled() {
t.Errorf("waitSignal should be disabled for node pool at index %d but was not", 0)
}
if c.NodePools[1].WaitSignal.Enabled() {
t.Errorf("waitSignal should be disabled for node pool at index %d but was not", 1)
}
},
},
},
{
context: "WithNodePoolWithWaitSignalEnabled",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
waitSignal:
enabled: true
- name: pool2
waitSignal:
enabled: true
maxBatchSize: 2
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
if !c.NodePools[0].WaitSignal.Enabled() {
t.Errorf("waitSignal should be enabled for node pool at index %d but was not", 0)
}
if c.NodePools[0].WaitSignal.MaxBatchSize() != 1 {
t.Errorf("waitSignal.maxBatchSize should be 1 for node pool at index %d but was %d", 0, c.NodePools[0].WaitSignal.MaxBatchSize())
}
if !c.NodePools[1].WaitSignal.Enabled() {
t.Errorf("waitSignal should be enabled for node pool at index %d but was not", 1)
}
if c.NodePools[1].WaitSignal.MaxBatchSize() != 2 {
t.Errorf("waitSignal.maxBatchSize should be 2 for node pool at index %d but was %d", 1, c.NodePools[1].WaitSignal.MaxBatchSize())
}
},
},
},
{
context: "WithDefaultNodePoolRollingStrategy",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
hasDefaultNodePoolRollingStrategy,
},
},
{
context: "WithSpecificNodePoolRollingStrategy",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
nodePoolRollingStrategy: Sequential`,
assertConfig: []ConfigTester{
hasSpecificNodePoolRollingStrategy("Sequential"),
},
},
{
context: "WithSpecificWorkerRollingStrategy",
configYaml: minimalValidConfigYaml + `
worker:
nodePoolRollingStrategy: Sequential
nodePools:
- name: pool1`,
assertConfig: []ConfigTester{
hasSpecificNodePoolRollingStrategy("Sequential"),
},
},
{
context: "WithWorkerAndNodePoolStrategy",
configYaml: minimalValidConfigYaml + `
worker:
nodePoolRollingStrategy: Sequential
nodePools:
- name: pool1
- name: pool2
nodePoolRollingStrategy: Parallel
`,
assertConfig: []ConfigTester{
hasWorkerAndNodePoolStrategy("Sequential", "Parallel"),
},
},
{
context: "WithMinimalValidConfig",
configYaml: minimalValidConfigYaml,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
},
},
{
context: "WithVaryingWorkerCountPerNodePool",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
- name: pool2
count: 2
- name: pool3
count: 0
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].Count != 1 {
t.Errorf("default worker count should be 1 but was: %d", c.NodePools[0].Count)
}
if c.NodePools[1].Count != 2 {
t.Errorf("worker count should be set to 2 but was: %d", c.NodePools[1].Count)
}
if c.NodePools[2].Count != 0 {
t.Errorf("worker count should be be set to 0 but was: %d", c.NodePools[2].Count)
}
},
},
},
{
context: "WithVaryingWorkerASGSizePerNodePool",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
- name: pool2
count: 2
- name: pool3
autoScalingGroup:
minSize: 0
maxSize: 10
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].MaxCount() != 1 {
t.Errorf("worker max count should be 1 but was: %d", c.NodePools[0].MaxCount())
}
if c.NodePools[0].MinCount() != 1 {
t.Errorf("worker min count should be 1 but was: %d", c.NodePools[0].MinCount())
}
if c.NodePools[1].MaxCount() != 2 {
t.Errorf("worker max count should be 2 but was: %d", c.NodePools[1].MaxCount())
}
if c.NodePools[1].MinCount() != 2 {
t.Errorf("worker min count should be 2 but was: %d", c.NodePools[1].MinCount())
}
if c.NodePools[2].MaxCount() != 10 {
t.Errorf("worker max count should be 10 but was: %d", c.NodePools[2].MaxCount())
}
if c.NodePools[2].MinCount() != 0 {
t.Errorf("worker min count should be 0 but was: %d", c.NodePools[2].MinCount())
}
},
},
},
{
context: "WithMultiAPIEndpoints",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: privateSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: privateSubnet2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: publicSubnet2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
worker:
  # this can't possibly be the "unversioned" endpoint backed by an existing elb, because doing so would give a worker kubelet
  # a chance to connect to multiple masters from different clusters!
apiEndpointName: versionedPrivate
  # btw apiEndpointName can be defaulted to a private/public managed (hence unstable/possibly versioned but not stable/unversioned)
  # elb/round-robin if and only if there is only one. However, we don't do complex defaulting like that for now.
adminAPIEndpointName: versionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
id: elb-internet-facing
    ## you can't configure an existing elb like below
#private: true
#subnets:
#- name: privateSubnet1
    ## hostedZone must be omitted when the elb id is specified.
    ## in other words, it's your responsibility to create an alias record for the elb
#hostedZone:
# id: hostedzone-private
- name: unversionedPrivate
dnsName: api.internal.example.com
loadBalancer:
id: elb-internal
- name: versionedPublic
dnsName: v1api.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
- name: versionedPrivate
dnsName: v1api.internal.example.com
loadBalancer:
private: true
subnets:
- name: privateSubnet1
hostedZone:
id: hostedzone-private
- name: versionedPublicAlt
dnsName: v1apialt.example.com
loadBalancer:
# "private: false" implies all the private subnets defined in the top-level "subnets"
#subnets:
#- name: publicSubnet1
#- name: publicSubnet2
hostedZone:
id: hostedzone-public
- name: versionedPrivateAlt
dnsName: v1apialt.internal.example.com
loadBalancer:
private: true
# "private: true" implies all the private subnets defined in the top-level "subnets"
#subnets:
#- name: privateSubnet1
#- name: privateSubnet2
hostedZone:
id: hostedzone-private
- name: addedToCertCommonNames
dnsName: api-alt.example.com
loadBalancer:
managed: false
- name: elbOnly
dnsName: registerme.example.com
loadBalancer:
recordSetManaged: false
`,
assertCluster: []ClusterTester{
func(rootCluster *root.Cluster, t *testing.T) {
c := rootCluster.ControlPlane().Config
private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "privateSubnet1"
private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "privateSubnet2"
public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "publicSubnet1"
public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "publicSubnet2"
//private1 := api.NewPrivateSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-PrivateSubnet1"}}`)
//private1.Name = "privateSubnet1"
//
//private2 := api.NewPrivateSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-PrivateSubnet2"}}`)
//private2.Name = "privateSubnet2"
//
//public1 := api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-PublicSubnet1"}}`)
//public1.Name = "publicSubnet1"
//
//public2 := api.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-PublicSubnet2"}}`)
//public2.Name = "publicSubnet2"
subnets := api.Subnets{
private1,
private2,
public1,
public2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%+v actual=%+v", subnets, c.AllSubnets())
}
publicSubnets := api.Subnets{
public1,
public2,
}
privateSubnets := api.Subnets{
private1,
private2,
}
unversionedPublic := c.APIEndpoints["unversionedPublic"]
unversionedPrivate := c.APIEndpoints["unversionedPrivate"]
versionedPublic := c.APIEndpoints["versionedPublic"]
versionedPrivate := c.APIEndpoints["versionedPrivate"]
versionedPublicAlt := c.APIEndpoints["versionedPublicAlt"]
versionedPrivateAlt := c.APIEndpoints["versionedPrivateAlt"]
addedToCertCommonNames := c.APIEndpoints["addedToCertCommonNames"]
elbOnly := c.APIEndpoints["elbOnly"]
if len(unversionedPublic.LoadBalancer.Subnets) != 0 {
t.Errorf("unversionedPublic: subnets shuold be empty but was not: actual=%+v", unversionedPublic.LoadBalancer.Subnets)
}
if !unversionedPublic.LoadBalancer.Enabled() {
t.Errorf("unversionedPublic: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", unversionedPublic.LoadBalancer)
}
if len(unversionedPrivate.LoadBalancer.Subnets) != 0 {
t.Errorf("unversionedPrivate: subnets shuold be empty but was not: actual=%+v", unversionedPrivate.LoadBalancer.Subnets)
}
if !unversionedPrivate.LoadBalancer.Enabled() {
t.Errorf("unversionedPrivate: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", unversionedPrivate.LoadBalancer)
}
if diff := cmp.Diff(versionedPublic.LoadBalancer.Subnets, api.Subnets{public1}); diff != "" {
t.Errorf("versionedPublic: subnets didn't match: %s", diff)
}
if !versionedPublic.LoadBalancer.Enabled() {
t.Errorf("versionedPublic: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPublic.LoadBalancer)
}
if diff := cmp.Diff(versionedPrivate.LoadBalancer.Subnets, api.Subnets{private1}); diff != "" {
t.Errorf("versionedPrivate: subnets didn't match: %s", diff)
}
if !versionedPrivate.LoadBalancer.Enabled() {
t.Errorf("versionedPrivate: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPrivate.LoadBalancer)
}
if diff := cmp.Diff(versionedPublicAlt.LoadBalancer.Subnets, publicSubnets); diff != "" {
t.Errorf("versionedPublicAlt: subnets didn't match: %s", diff)
}
if !versionedPublicAlt.LoadBalancer.Enabled() {
t.Errorf("versionedPublicAlt: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPublicAlt.LoadBalancer)
}
if diff := cmp.Diff(versionedPrivateAlt.LoadBalancer.Subnets, privateSubnets); diff != "" {
t.Errorf("versionedPrivateAlt: subnets didn't match: %s", diff)
}
if !versionedPrivateAlt.LoadBalancer.Enabled() {
t.Errorf("versionedPrivateAlt: it should be enabled as the lb to which controller nodes are added, but it was not: loadBalancer=%+v", versionedPrivateAlt.LoadBalancer)
}
if len(addedToCertCommonNames.LoadBalancer.Subnets) != 0 {
t.Errorf("addedToCertCommonNames: subnets should be empty but was not: actual=%+v", addedToCertCommonNames.LoadBalancer.Subnets)
}
if addedToCertCommonNames.LoadBalancer.Enabled() {
t.Errorf("addedToCertCommonNames: it should not be enabled as the lb to which controller nodes are added, but it was: loadBalancer=%+v", addedToCertCommonNames.LoadBalancer)
}
if diff := cmp.Diff(elbOnly.LoadBalancer.Subnets, publicSubnets); diff != "" {
t.Errorf("elbOnly: subnets didn't match: %s", diff)
}
if !elbOnly.LoadBalancer.Enabled() {
t.Errorf("elbOnly: it should be enabled but it was not: loadBalancer=%+v", elbOnly.LoadBalancer)
}
if elbOnly.LoadBalancer.ManageELBRecordSet() {
t.Errorf("elbOnly: record set should not be managed but it was: loadBalancer=%+v", elbOnly.LoadBalancer)
}
if diff := cmp.Diff(c.ExternalDNSNames(), []string{"api-alt.example.com", "api.example.com", "api.internal.example.com", "registerme.example.com", "v1api.example.com", "v1api.internal.example.com", "v1apialt.example.com", "v1apialt.internal.example.com"}); diff != "" {
t.Errorf("unexpected external DNS names: %s", diff)
}
if !reflect.DeepEqual(c.APIEndpoints.ManagedELBLogicalNames(), []string{"APIEndpointElbOnlyELB", "APIEndpointVersionedPrivateAltELB", "APIEndpointVersionedPrivateELB", "APIEndpointVersionedPublicAltELB", "APIEndpointVersionedPublicELB"}) {
t.Errorf("unexpected managed ELB logical names: %s", strings.Join(c.APIEndpoints.ManagedELBLogicalNames(), ", "))
}
},
},
},
{
context: "WithNetworkTopologyExplicitSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# routeTableId must be omitted
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
# routeTableId: rtb-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
controller:
subnets:
- name: private1
- name: private2
loadBalancer:
subnets:
- name: public1
- name: public2
private: false
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
everyPublicSubnetHasRouteToIGW,
hasTwoManagedNGWsAndEIPs,
func(c *config.Config, t *testing.T) {
private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "private1"
private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "private2"
public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := api.Subnets{
private1,
private2,
public1,
public2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
publicSubnets := api.Subnets{
public1,
public2,
}
importedPublicSubnets := api.Subnets{
api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
api.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public2"}}`),
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
}
privateSubnets := api.Subnets{
private1,
private2,
}
if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets)
}
if diff := cmp.Diff(c.Etcd.Subnets, privateSubnets); diff != "" {
t.Errorf("Etcd subnets didn't match: %s", diff)
}
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyImplicitSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# routeTableId must be omitted
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
# routeTableId: rtb-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
everyPublicSubnetHasRouteToIGW,
hasTwoManagedNGWsAndEIPs,
func(c *config.Config, t *testing.T) {
private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "private1"
private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "private2"
public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := api.Subnets{
private1,
private2,
public1,
public2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
publicSubnets := api.Subnets{
public1,
public2,
}
if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", publicSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, publicSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", publicSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyControllerPrivateLB",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# routeTableId must be omitted
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
# routeTableId: rtb-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
controller:
subnets:
- name: private1
- name: private2
loadBalancer:
private: true
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
everyPublicSubnetHasRouteToIGW,
hasTwoManagedNGWsAndEIPs,
func(c *config.Config, t *testing.T) {
private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "private1"
private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "private2"
public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := api.Subnets{
private1,
private2,
public1,
public2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
importedPublicSubnets := api.Subnets{
api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
api.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public2"}}`),
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
}
privateSubnets := api.Subnets{
private1,
private2,
}
if !reflect.DeepEqual(c.Controller.Subnets, privateSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, privateSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyControllerPublicLB",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# routeTableId must be omitted
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-275962332
# routeTableId: rtb-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
everyPublicSubnetHasRouteToIGW,
hasTwoManagedNGWsAndEIPs,
func(c *config.Config, t *testing.T) {
private1 := api.NewPrivateSubnet("us-west-1a", "10.0.1.0/24")
private1.Name = "private1"
private2 := api.NewPrivateSubnet("us-west-1b", "10.0.2.0/24")
private2.Name = "private2"
public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := api.Subnets{
private1,
private2,
public1,
public2,
}
publicSubnets := api.Subnets{
public1,
public2,
}
privateSubnets := api.Subnets{
private1,
private2,
}
importedPublicSubnets := api.Subnets{
api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
api.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public2"}}`),
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, importedPublicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", importedPublicSubnets, p.Subnets)
}
if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if !s.ManageNATGateway() {
t.Errorf("NAT gateway for the private subnet #%d should be created by kube-aws but it is not going to be", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyExistingVaryingSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
id: subnet-1
private: true
- name: private2
availabilityZone: us-west-1b
idFromStackOutput: mycluster-private-subnet-1
private: true
- name: public1
availabilityZone: us-west-1a
id: subnet-2
- name: public2
availabilityZone: us-west-1b
idFromStackOutput: mycluster-public-subnet-1
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasNoNGWsOrEIPsOrRoutes,
func(c *config.Config, t *testing.T) {
private1 := api.NewExistingPrivateSubnet("us-west-1a", "subnet-1")
private1.Name = "private1"
private2 := api.NewImportedPrivateSubnet("us-west-1b", "mycluster-private-subnet-1")
private2.Name = "private2"
public1 := api.NewExistingPublicSubnet("us-west-1a", "subnet-2")
public1.Name = "public1"
public2 := api.NewImportedPublicSubnet("us-west-1b", "mycluster-public-subnet-1")
public2.Name = "public2"
subnets := api.Subnets{
private1,
private2,
public1,
public2,
}
publicSubnets := api.Subnets{
public1,
public2,
}
privateSubnets := api.Subnets{
private1,
private2,
}
if !reflect.DeepEqual(c.AllSubnets(), subnets) {
t.Errorf("Managed subnets didn't match: expected=%v actual=%v", subnets, c.AllSubnets())
}
p := c.NodePools[0]
if !reflect.DeepEqual(p.Subnets, publicSubnets) {
t.Errorf("Worker subnets didn't match: expected=%v actual=%v", publicSubnets, p.Subnets)
}
if !reflect.DeepEqual(c.Controller.Subnets, publicSubnets) {
t.Errorf("Controller subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.Subnets)
}
if !reflect.DeepEqual(c.Controller.LoadBalancer.Subnets, publicSubnets) {
t.Errorf("Controller loadbalancer subnets didn't match: expected=%v actual=%v", privateSubnets, c.Controller.LoadBalancer.Subnets)
}
if !reflect.DeepEqual(c.Etcd.Subnets, privateSubnets) {
t.Errorf("Etcd subnets didn't match: expected=%v actual=%v", privateSubnets, c.Etcd.Subnets)
}
for i, s := range c.PrivateSubnets() {
if s.ManageNATGateway() {
t.Errorf("NAT gateway for the existing private subnet #%d should not be created by kube-aws", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyAllExistingPrivateSubnets",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + fmt.Sprintf(`
vpc:
id: vpc-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
id: subnet-1
private: true
- name: private2
availabilityZone: us-west-1b
idFromStackOutput: mycluster-private-subnet-1
private: true
controller:
subnets:
- name: private1
- name: private2
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: private1
- name: private2
apiEndpoints:
- name: public
dnsName: "%s"
loadBalancer:
hostedZone:
id: hostedzone-xxxx
private: true
`, kubeAwsSettings.externalDNSName),
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasNoNGWsOrEIPsOrRoutes,
},
},
{
context: "WithNetworkTopologyAllExistingPublicSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
id: subnet-2
- name: public2
availabilityZone: us-west-1b
idFromStackOutput: mycluster-public-subnet-1
etcd:
subnets:
- name: public1
- name: public2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasNoNGWsOrEIPsOrRoutes,
},
},
{
context: "WithNetworkTopologyExistingNATGateways",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
natGateway:
id: ngw-11111111
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
natGateway:
id: ngw-22222222
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasNoManagedNGWsButSpecificNumOfRoutesToUnmanagedNGWs(2),
func(c *config.Config, t *testing.T) {
private1 := api.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1a", "10.0.1.0/24", "ngw-11111111")
private1.Name = "private1"
private2 := api.NewPrivateSubnetWithPreconfiguredNATGateway("us-west-1b", "10.0.2.0/24", "ngw-22222222")
private2.Name = "private2"
public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := api.Subnets{
private1,
private2,
public1,
public2,
}
publicSubnets := api.Subnets{
public1,
public2,
}
privateSubnets := api.Subnets{
private1,
private2,
}
importedPublicSubnets := api.Subnets{
api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
api.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public2"}}`),
}
if diff := cmp.Diff(c.AllSubnets(), subnets); diff != "" {
t.Errorf("Managed subnets didn't match: %s", diff)
}
p := c.NodePools[0]
if diff := cmp.Diff(p.Subnets, importedPublicSubnets); diff != "" {
t.Errorf("Worker subnets didn't match: %s", diff)
}
if diff := cmp.Diff(c.Controller.Subnets, publicSubnets); diff != "" {
t.Errorf("Controller subnets didn't match: %s", diff)
}
if diff := cmp.Diff(c.Controller.LoadBalancer.Subnets, publicSubnets); diff != "" {
t.Errorf("Controller loadbalancer subnets didn't match: %s", diff)
}
if diff := cmp.Diff(c.Etcd.Subnets, privateSubnets); diff != "" {
t.Errorf("Etcd subnets didn't match: %s", diff)
}
for i, s := range c.PrivateSubnets() {
if s.ManageNATGateway() {
t.Errorf("NAT gateway for the existing private subnet #%d should not be created by kube-aws", i)
}
if s.ManageRouteToInternet() {
t.Errorf("Route to IGW shouldn't be created for a private subnet: %v", s)
}
}
},
},
},
{
context: "WithNetworkTopologyExistingNATGatewayEIPs",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
private: true
natGateway:
eipAllocationId: eipalloc-11111111
- name: private2
availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
private: true
natGateway:
eipAllocationId: eipalloc-22222222
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.3.0/24"
- name: public2
availabilityZone: us-west-1b
instanceCIDR: "10.0.4.0/24"
etcd:
subnets:
- name: private1
- name: private2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasSpecificNumOfManagedNGWsWithUnmanagedEIPs(2),
hasPrivateSubnetsWithManagedNGWs(2),
func(c *config.Config, t *testing.T) {
private1 := api.NewPrivateSubnetWithPreconfiguredNATGatewayEIP("us-west-1a", "10.0.1.0/24", "eipalloc-11111111")
private1.Name = "private1"
private2 := api.NewPrivateSubnetWithPreconfiguredNATGatewayEIP("us-west-1b", "10.0.2.0/24", "eipalloc-22222222")
private2.Name = "private2"
public1 := api.NewPublicSubnet("us-west-1a", "10.0.3.0/24")
public1.Name = "public1"
public2 := api.NewPublicSubnet("us-west-1b", "10.0.4.0/24")
public2.Name = "public2"
subnets := api.Subnets{
private1,
private2,
public1,
public2,
}
publicSubnets := api.Subnets{
public1,
public2,
}
privateSubnets := api.Subnets{
private1,
private2,
}
importedPublicSubnets := api.Subnets{
api.NewPublicSubnetFromFn("us-west-1a", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public1"}}`),
api.NewPublicSubnetFromFn("us-west-1b", `{"Fn::ImportValue":{"Fn::Sub":"${NetworkStackName}-Public2"}}`),
}
if diff := cmp.Diff(c.AllSubnets(), subnets); diff != "" {
t.Errorf("Managed subnets didn't match: %s", diff)
}
p := c.NodePools[0]
if diff := cmp.Diff(p.Subnets, importedPublicSubnets); diff != "" {
t.Errorf("Worker subnets didn't match: %s", diff)
}
if diff := cmp.Diff(c.Controller.Subnets, publicSubnets); diff != "" {
t.Errorf("Controller subnets didn't match: %s", diff)
}
if diff := cmp.Diff(c.Controller.LoadBalancer.Subnets, publicSubnets); diff != "" {
t.Errorf("Controller loadbalancer subnets didn't match: %s", diff)
}
if diff := cmp.Diff(c.Etcd.Subnets, privateSubnets); diff != "" {
t.Errorf("Etcd subnets didn't match: %s", diff)
}
},
},
},
{
context: "WithNetworkTopologyVaryingPublicSubnets",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
#required only for the managed subnet "public1"
# "public2" is assumed to have an existing route table and an igw already associated to it
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
- name: public2
availabilityZone: us-west-1b
id: subnet-2
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: public1
- name: public2
worker:
nodePools:
- name: pool1
subnets:
- name: public1
- name: public2
`,
assertConfig: []ConfigTester{},
},
{
context: "WithSpotFleetEnabled",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasDefaultLaunchSpecifications,
spotFleetBasedNodePoolHasWaitSignalDisabled,
},
},
{
context: "WithSpotFleetEnabledWithCustomIamRole",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
iamFleetRoleArn: custom-iam-role
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
hasDefaultLaunchSpecifications,
spotFleetBasedNodePoolHasWaitSignalDisabled,
},
},
{
context: "WithSpotFleetWithCustomGp2RootVolumeSettings",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
unitRootVolumeSize: 40
launchSpecifications:
- weightedCapacity: 1
instanceType: c4.large
- weightedCapacity: 2
instanceType: c4.xlarge
rootVolume:
size: 100
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
spotFleetBasedNodePoolHasWaitSignalDisabled,
func(c *config.Config, t *testing.T) {
expected := []api.LaunchSpecification{
{
WeightedCapacity: 1,
InstanceType: "c4.large",
SpotPrice: "",
// RootVolumeSize was not specified in the configYaml but should default to workerRootVolumeSize * weightedCapacity
// RootVolumeType was not specified in the configYaml but should default to "gp2"
RootVolume: api.NewGp2RootVolume(40),
},
{
WeightedCapacity: 2,
InstanceType: "c4.xlarge",
SpotPrice: "",
RootVolume: api.NewGp2RootVolume(100),
},
}
p := c.NodePools[0]
actual := p.WorkerNodePool.SpotFleet.LaunchSpecifications
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"LaunchSpecifications didn't match: expected=%v actual=%v",
expected,
actual,
)
}
},
},
},
{
context: "WithSpotFleetWithCustomInstanceTypes",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
unitRootVolumeSize: 40
launchSpecifications:
- weightedCapacity: 1
instanceType: m4.large
- weightedCapacity: 2
instanceType: m4.xlarge
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
spotFleetBasedNodePoolHasWaitSignalDisabled,
func(c *config.Config, t *testing.T) {
expected := []api.LaunchSpecification{
{
WeightedCapacity: 1,
InstanceType: "m4.large",
SpotPrice: "",
// RootVolumeType was not specified in the configYaml but should default to "gp2"
RootVolume: api.NewGp2RootVolume(40),
},
{
WeightedCapacity: 2,
InstanceType: "m4.xlarge",
SpotPrice: "",
RootVolume: api.NewGp2RootVolume(80),
},
}
p := c.NodePools[0]
actual := p.WorkerNodePool.SpotFleet.LaunchSpecifications
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"LaunchSpecifications didn't match: expected=%v actual=%v",
expected,
actual,
)
}
},
},
},
{
context: "WithSpotFleetWithCustomIo1RootVolumeSettings",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
targetCapacity: 10
rootVolumeType: io1
unitRootVolumeSize: 40
unitRootVolumeIOPS: 100
launchSpecifications:
- weightedCapacity: 1
instanceType: c4.large
- weightedCapacity: 2
instanceType: c4.xlarge
rootVolume:
iops: 500
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
spotFleetBasedNodePoolHasWaitSignalDisabled,
func(c *config.Config, t *testing.T) {
expected := []api.LaunchSpecification{
{
WeightedCapacity: 1,
InstanceType: "c4.large",
SpotPrice: "",
// RootVolumeSize was not specified in the configYaml but should default to workerRootVolumeSize * weightedCapacity
// RootVolumeIOPS was not specified in the configYaml but should default to workerRootVolumeIOPS * weightedCapacity
// RootVolumeType was not specified in the configYaml but should default to "io1"
RootVolume: api.NewIo1RootVolume(40, 100),
},
{
WeightedCapacity: 2,
InstanceType: "c4.xlarge",
SpotPrice: "",
// RootVolumeType was not specified in the configYaml but should default to "io1"
RootVolume: api.NewIo1RootVolume(80, 500),
},
}
p := c.NodePools[0]
actual := p.WorkerNodePool.SpotFleet.LaunchSpecifications
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"LaunchSpecifications didn't match: expected=%v actual=%v",
expected,
actual,
)
}
},
},
},
{
context: "WithVpcIdSpecified",
configYaml: minimalValidConfigYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
vpcId := "vpc-1a2b3c4d"
if c.VPC.ID != vpcId {
t.Errorf("vpc id didn't match: expected=%v actual=%v", vpcId, c.VPC.ID)
}
igwId := "igw-1a2b3c4d"
if c.InternetGateway.ID != igwId {
t.Errorf("internet gateway id didn't match: expected=%v actual=%v", igwId, c.InternetGateway.ID)
}
},
},
},
{
context: "WithLegacyVpcAndIGWIdSpecified",
configYaml: minimalValidConfigYaml + `
vpcId: vpc-1a2b3c4d
internetGatewayId: igw-1a2b3c4d
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
vpcId := "vpc-1a2b3c4d"
if c.VPC.ID != vpcId {
t.Errorf("vpc id didn't match: expected=%v actual=%v", vpcId, c.VPC.ID)
}
igwId := "igw-1a2b3c4d"
if c.InternetGateway.ID != igwId {
t.Errorf("internet gateway id didn't match: expected=%v actual=%v", igwId, c.InternetGateway.ID)
}
},
},
},
{
context: "WithVpcIdAndRouteTableIdSpecified",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
subnets:
- name: Subnet0
availabilityZone: ` + firstAz + `
instanceCIDR: "10.0.0.0/24"
routeTable:
id: rtb-1a2b3c4d
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
subnet1 := api.NewPublicSubnetWithPreconfiguredRouteTable(firstAz, "10.0.0.0/24", "rtb-1a2b3c4d")
subnet1.Name = "Subnet0"
subnets := api.Subnets{
subnet1,
}
expected := api.EtcdSettings{
Etcd: api.Etcd{
EC2Instance: api.EC2Instance{
Count: 1,
InstanceType: "t2.medium",
RootVolume: api.RootVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
},
Tenancy: "default",
},
DataVolume: api.DataVolume{
Size: 30,
Type: "gp2",
IOPS: 0,
Ephemeral: false,
},
Subnets: subnets,
UserSuppliedArgs: api.UserSuppliedArgs{
QuotaBackendBytes: api.DefaultQuotaBackendBytes,
},
},
}
actual := c.EtcdSettings
if !reflect.DeepEqual(expected, actual) {
t.Errorf(
"EtcdSettings didn't match: expected=%v actual=%v",
expected,
actual,
)
}
},
},
},
{
context: "WithWorkerManagedIamRoleName",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
role:
name: "myManagedRole"
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].IAMConfig.Role.Name != "myManagedRole" {
t.Errorf("iam.role.name: expected=myManagedRole actual=%s", c.NodePools[0].IAMConfig.Role.Name)
}
},
},
},
{
context: "WithWorkerManagedPolicies",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
role:
managedPolicies:
- arn: "arn:aws:iam::aws:policy/AdministratorAccess"
- arn: "arn:aws:iam::000000000000:policy/myManagedPolicy"
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if len(c.NodePools[0].IAMConfig.Role.ManagedPolicies) < 2 {
t.Errorf("iam.role.managedPolicies: incorrect number of policies expected=2 actual=%d", len(c.NodePools[0].IAMConfig.Role.ManagedPolicies))
}
if c.NodePools[0].IAMConfig.Role.ManagedPolicies[0].Arn != "arn:aws:iam::aws:policy/AdministratorAccess" {
t.Errorf("iam.role.managedPolicies: expected=arn:aws:iam::aws:policy/AdministratorAccess actual=%s", c.NodePools[0].IAMConfig.Role.ManagedPolicies[0].Arn)
}
if c.NodePools[0].IAMConfig.Role.ManagedPolicies[1].Arn != "arn:aws:iam::000000000000:policy/myManagedPolicy" {
t.Errorf("iam.role.managedPolicies: expected=arn:aws:iam::000000000000:policy/myManagedPolicy actual=%s", c.NodePools[0].IAMConfig.Role.ManagedPolicies[1].Arn)
}
},
},
},
{
context: "WithWorkerExistingInstanceProfile",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
instanceProfile:
arn: "arn:aws:iam::000000000000:instance-profile/myInstanceProfile"
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.NodePools[0].IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myInstanceProfile" {
t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myInstanceProfile actual=%s", c.NodePools[0].IAMConfig.InstanceProfile.Arn)
}
},
},
},
{
context: "WithWorkerAndControllerExistingInstanceProfile",
configYaml: minimalValidConfigYaml + `
controller:
iam:
instanceProfile:
arn: "arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile"
worker:
nodePools:
- name: pool1
iam:
instanceProfile:
arn: "arn:aws:iam::000000000000:instance-profile/myInstanceProfile"
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
if c.Controller.IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile" {
t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myControllerInstanceProfile actual=%s", c.Controller.IAMConfig.InstanceProfile.Arn)
}
if c.NodePools[0].IAMConfig.InstanceProfile.Arn != "arn:aws:iam::000000000000:instance-profile/myInstanceProfile" {
t.Errorf("existingInstanceProfile: expected=arn:aws:iam::000000000000:instance-profile/myInstanceProfile actual=%s", c.NodePools[0].IAMConfig.InstanceProfile.Arn)
}
},
},
},
{
context: "WithWorkerSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
- sg-23456789
- sg-bcdefabc
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
expectedWorkerSecurityGroupIds := []string{
`sg-12345678`, `sg-abcdefab`, `sg-23456789`, `sg-bcdefabc`,
}
if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
}
expectedWorkerSecurityGroupRefs := []string{
`"sg-12345678"`, `"sg-abcdefab"`, `"sg-23456789"`, `"sg-bcdefabc"`,
`{"Fn::ImportValue" : {"Fn::Sub" : "${NetworkStackName}-WorkerSecurityGroup"}}`,
}
if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
}
},
},
},
{
context: "WithWorkerAndLBSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
loadBalancer:
enabled: true
securityGroupIds:
- sg-23456789
- sg-bcdefabc
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
expectedWorkerSecurityGroupIds := []string{
`sg-12345678`, `sg-abcdefab`,
}
if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
}
expectedLBSecurityGroupIds := []string{
`sg-23456789`, `sg-bcdefabc`,
}
if !reflect.DeepEqual(p.LoadBalancer.SecurityGroupIds, expectedLBSecurityGroupIds) {
t.Errorf("LBSecurityGroupIds didn't match: expected=%v actual=%v", expectedLBSecurityGroupIds, p.LoadBalancer.SecurityGroupIds)
}
expectedWorkerSecurityGroupRefs := []string{
`"sg-23456789"`, `"sg-bcdefabc"`, `"sg-12345678"`, `"sg-abcdefab"`,
`{"Fn::ImportValue" : {"Fn::Sub" : "${NetworkStackName}-WorkerSecurityGroup"}}`,
}
if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
}
},
},
},
{
context: "WithWorkerAndALBSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
targetGroup:
enabled: true
securityGroupIds:
- sg-23456789
- sg-bcdefabc
`,
assertConfig: []ConfigTester{
hasDefaultEtcdSettings,
func(c *config.Config, t *testing.T) {
p := c.NodePools[0]
expectedWorkerSecurityGroupIds := []string{
`sg-12345678`, `sg-abcdefab`,
}
if !reflect.DeepEqual(p.SecurityGroupIds, expectedWorkerSecurityGroupIds) {
t.Errorf("WorkerSecurityGroupIds didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupIds, p.SecurityGroupIds)
}
expectedALBSecurityGroupIds := []string{
`sg-23456789`, `sg-bcdefabc`,
}
if !reflect.DeepEqual(p.TargetGroup.SecurityGroupIds, expectedALBSecurityGroupIds) {
t.Errorf("LBSecurityGroupIds didn't match: expected=%v actual=%v", expectedALBSecurityGroupIds, p.TargetGroup.SecurityGroupIds)
}
expectedWorkerSecurityGroupRefs := []string{
`"sg-23456789"`, `"sg-bcdefabc"`, `"sg-12345678"`, `"sg-abcdefab"`,
`{"Fn::ImportValue" : {"Fn::Sub" : "${NetworkStackName}-WorkerSecurityGroup"}}`,
}
if !reflect.DeepEqual(p.SecurityGroupRefs(), expectedWorkerSecurityGroupRefs) {
t.Errorf("SecurityGroupRefs didn't match: expected=%v actual=%v", expectedWorkerSecurityGroupRefs, p.SecurityGroupRefs())
}
},
},
},
{
context: "WithDedicatedInstanceTenancy",
configYaml: minimalValidConfigYaml + `
workerTenancy: dedicated
controller:
tenancy: dedicated
etcd:
tenancy: dedicated
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
if c.Etcd.Tenancy != "dedicated" {
t.Errorf("Etcd.Tenancy didn't match: expected=dedicated actual=%s", c.Etcd.Tenancy)
}
if c.WorkerTenancy != "dedicated" {
t.Errorf("WorkerTenancy didn't match: expected=dedicated actual=%s", c.WorkerTenancy)
}
if c.Controller.Tenancy != "dedicated" {
t.Errorf("Controller.Tenancy didn't match: expected=dedicated actual=%s", c.Controller.Tenancy)
}
},
},
},
{
context: "WithControllerNodeLabels",
configYaml: minimalValidConfigYaml + `
controller:
nodeLabels:
kube-aws.coreos.com/role: controller
`,
assertConfig: []ConfigTester{
hasDefaultExperimentalFeatures,
func(c *config.Config, t *testing.T) {
expected := api.NodeLabels{"kube-aws.coreos.com/role": "controller"}
actual := c.NodeLabels()
if !reflect.DeepEqual(expected, actual) {
t.Errorf("unexpected controller node labels: expected=%v, actual=%v", expected, actual)
}
},
},
},
{
context: "WithSSHAccessAllowedSourceCIDRsSpecified",
configYaml: minimalValidConfigYaml + `
sshAccessAllowedSourceCIDRs:
- 1.2.3.255/32
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.SSHAccessAllowedSourceCIDRs)
if l != 1 {
t.Errorf("unexpected size of sshAccessAllowedSouceCIDRs: %d", l)
t.FailNow()
}
actual := c.SSHAccessAllowedSourceCIDRs[0].String()
expected := "1.2.3.255/32"
if actual != expected {
t.Errorf("unexpected cidr in sshAccessAllowedSourecCIDRs[0]. expected = %s, actual = %s", expected, actual)
}
},
},
},
{
context: "WithSSHAccessAllowedSourceCIDRsOmitted",
configYaml: minimalValidConfigYaml,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.SSHAccessAllowedSourceCIDRs)
if l != 1 {
t.Errorf("unexpected size of sshAccessAllowedSouceCIDRs: %d", l)
t.FailNow()
}
actual := c.SSHAccessAllowedSourceCIDRs[0].String()
expected := "0.0.0.0/0"
if actual != expected {
t.Errorf("unexpected cidr in sshAccessAllowedSourecCIDRs[0]. expected = %s, actual = %s", expected, actual)
}
},
},
},
{
context: "WithSSHAccessAllowedSourceCIDRsEmptied",
configYaml: minimalValidConfigYaml + `
sshAccessAllowedSourceCIDRs:
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
l := len(c.SSHAccessAllowedSourceCIDRs)
if l != 0 {
t.Errorf("unexpected size of sshAccessAllowedSouceCIDRs: %d", l)
t.FailNow()
}
},
},
},
{
context: "WithWorkerWithoutGPUSettings",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
enabled := c.NodePools[0].Gpu.Nvidia.Enabled
if enabled {
t.Errorf("unexpected enabled of gpu.nvidia: %v. its default value should be false", enabled)
t.FailNow()
}
},
},
},
{
context: "WithGPUEnabledWorker",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
instanceType: p2.xlarge
gpu:
nvidia:
enabled: true
version: "123.45"
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
enabled := c.NodePools[0].Gpu.Nvidia.Enabled
version := c.NodePools[0].Gpu.Nvidia.Version
if !enabled {
t.Errorf("unexpected enabled value of gpu.nvidia: %v.", enabled)
t.FailNow()
}
if version != "123.45" {
t.Errorf("unexpected version value of gpu.nvidia: %v.", version)
t.FailNow()
}
},
},
},
{
context: "WithGPUDisabledWorker",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
gpu:
nvidia:
enabled: false
version: "123.45"
`,
assertConfig: []ConfigTester{
func(c *config.Config, t *testing.T) {
enabled := c.NodePools[0].Gpu.Nvidia.Enabled
version := c.NodePools[0].Gpu.Nvidia.Version
if enabled {
t.Errorf("unexpected enabled value of gpu.nvidia: %v.", enabled)
t.FailNow()
}
if version != "123.45" {
t.Errorf("unexpected version value of gpu.nvidia: %v.", version)
t.FailNow()
}
},
},
},
}
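// Run each valid case: parse the cluster config, apply the per-case assertions, then compile the stacks with dummy credentials and validate the generated templates.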
for _, validCase := range validCases {
t.Run(validCase.context, func(t *testing.T) {
configBytes := validCase.configYaml
// TODO Allow including plugins in test data?
plugins := []*api.Plugin{}
providedConfig, err := config.ConfigFromBytes([]byte(configBytes), plugins)
if err != nil {
t.Errorf("failed to parse config %s: %+v", configBytes, err)
t.FailNow()
}
t.Run("AssertConfig", func(t *testing.T) {
for _, assertion := range validCase.assertConfig {
assertion(providedConfig, t)
}
})
helper.WithDummyCredentials(func(dummyAssetsDir string) {
var stackTemplateOptions = root.NewOptions(false, false)
stackTemplateOptions.AssetsDir = dummyAssetsDir
stackTemplateOptions.ControllerTmplFile = "../../builtin/files/userdata/cloud-config-controller"
stackTemplateOptions.WorkerTmplFile = "../../builtin/files/userdata/cloud-config-worker"
stackTemplateOptions.EtcdTmplFile = "../../builtin/files/userdata/cloud-config-etcd"
stackTemplateOptions.RootStackTemplateTmplFile = "../../builtin/files/stack-templates/root.json.tmpl"
stackTemplateOptions.NodePoolStackTemplateTmplFile = "../../builtin/files/stack-templates/node-pool.json.tmpl"
stackTemplateOptions.ControlPlaneStackTemplateTmplFile = "../../builtin/files/stack-templates/control-plane.json.tmpl"
stackTemplateOptions.NetworkStackTemplateTmplFile = "../../builtin/files/stack-templates/network.json.tmpl"
stackTemplateOptions.EtcdStackTemplateTmplFile = "../../builtin/files/stack-templates/etcd.json.tmpl"
cl, err := root.CompileClusterFromConfig(providedConfig, stackTemplateOptions, false)
if err != nil {
t.Errorf("failed to create cluster driver : %v", err)
t.FailNow()
}
cl.Context = &model.Context{
ProvidedEncryptService: helper.DummyEncryptService{},
ProvidedCFInterrogator: helper.DummyCFInterrogator{},
ProvidedEC2Interrogator: helper.DummyEC2Interrogator{},
StackTemplateGetter: helper.DummyStackTemplateGetter{},
}
_, err = cl.EnsureAllAssetsGenerated()
if err != nil {
t.Errorf("%v", err)
t.FailNow()
}
t.Run("AssertCluster", func(t *testing.T) {
for _, assertion := range validCase.assertCluster {
assertion(cl, t)
}
})
t.Run("ValidateTemplates", func(t *testing.T) {
if err := cl.ValidateTemplates(); err != nil {
t.Errorf("failed to render stack template: %v", err)
}
})
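// Stack validation talks to real AWS APIs, so it only runs when KUBE_AWS_INTEGRATION_TEST is set.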
if os.Getenv("KUBE_AWS_INTEGRATION_TEST") == "" {
t.Skipf("`export KUBE_AWS_INTEGRATION_TEST=1` is required to run integration tests. Skipping.")
t.SkipNow()
} else {
t.Run("ValidateStack", func(t *testing.T) {
if !s3URIExists {
t.Errorf("failed to obtain value for KUBE_AWS_S3_DIR_URI")
t.FailNow()
}
report, err := cl.ValidateStack()
if err != nil {
t.Errorf("failed to validate stack: %s %v", report, err)
}
})
}
})
})
}
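// Each case below is expected to fail config parsing or validation with an error containing expectedErrorMessage.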
parseErrorCases := []struct {
context string
configYaml string
expectedErrorMessage string
}{
{
context: "WithAPIEndpointLBAPIAccessAllowedSourceCIDRsEmptied",
configYaml: configYamlWithoutExernalDNSName + `
apiEndpoints:
- name: default
dnsName: k8s.example.com
loadBalancer:
apiAccessAllowedSourceCIDRs:
hostedZone:
id: a1b2c4
`,
expectedErrorMessage: `invalid cluster: invalid apiEndpoint "default" at index 0: invalid loadBalancer: either apiAccessAllowedSourceCIDRs or securityGroupIds must be present. Try not to explicitly empty apiAccessAllowedSourceCIDRs or set one or more securityGroupIDs`,
},
{
context: "WithAutoscalingEnabledButClusterAutoscalerIsDefault",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
autoscaling:
clusterAutoscaler:
enabled: true
`,
expectedErrorMessage: "Autoscaling with cluster-autoscaler can't be enabled for node pools because " +
"you didn't enabled the cluster-autoscaler addon. Enable it by turning on `addons.clusterAutoscaler.enabled`",
},
{
context: "WithAutoscalingEnabledButClusterAutoscalerIsNot",
configYaml: minimalValidConfigYaml + `
addons:
clusterAutoscaler:
enabled: false
worker:
nodePools:
- name: pool1
autoscaling:
clusterAutoscaler:
enabled: true
`,
expectedErrorMessage: "Autoscaling with cluster-autoscaler can't be enabled for node pools because " +
"you didn't enabled the cluster-autoscaler addon. Enable it by turning on `addons.clusterAutoscaler.enabled`",
},
{
context: "WithClusterAutoscalerEnabledForControlPlane",
configYaml: minimalValidConfigYaml + `
controller:
autoscaling:
clusterAutoscaler:
enabled: true
`,
expectedErrorMessage: "cluster-autoscaler can't be enabled for a control plane because " +
"allowing so for a group of controller nodes spreading over 2 or more availability zones " +
"results in unreliability while scaling nodes out.",
},
{
// See https://github.com/kubernetes-incubator/kube-aws/issues/365
context: "WithClusterNameContainsDots",
configYaml: kubeAwsSettings.withClusterName("my.cluster").minimumValidClusterYaml(),
expectedErrorMessage: "clusterName(=my.cluster) is malformed. It must consist only of alphanumeric characters, colons, or hyphens",
},
{
context: "WithControllerTaint",
configYaml: minimalValidConfigYaml + `
controller:
taints:
- key: foo
value: bar
effect: NoSchedule
`,
expectedErrorMessage: "`controller.taints` must not be specified because tainting controller nodes breaks the cluster",
},
{
context: "WithElasticFileSystemIdInSpecificNodePoolWithManagedSubnets",
configYaml: mainClusterYaml + `
subnets:
- name: managed1
availabilityZone: us-west-1a
instanceCIDR: 10.0.1.0/24
worker:
nodePools:
- name: pool1
subnets:
- name: managed1
elasticFileSystemId: efs-12345
- name: pool2
`,
expectedErrorMessage: "invalid node pool at index 0: elasticFileSystemId cannot be specified for a node pool in managed subnet(s), but was: efs-12345",
},
{
context: "WithEtcdAutomatedDisasterRecoveryRequiresAutomatedSnapshot",
configYaml: minimalValidConfigYaml + `
etcd:
version: 3
snapshot:
automated: false
disasterRecovery:
automated: true
`,
expectedErrorMessage: "`etcd.disasterRecovery.automated` is set to true but `etcd.snapshot.automated` is not - automated disaster recovery requires snapshot to be also automated",
},
{
context: "WithEtcdAutomatedDisasterRecoveryDoesntSupportEtcd2",
configYaml: minimalValidConfigYaml + `
etcd:
version: 2
snapshot:
automated: true
disasterRecovery:
automated: false
`,
expectedErrorMessage: "`etcd.snapshot.automated` is set to true for enabling automated snapshot. However the feature is available only for etcd version 3",
},
{
context: "WithEtcdAutomatedSnapshotDoesntSupportEtcd2",
configYaml: minimalValidConfigYaml + `
etcd:
version: 2
snapshot:
automated: false
disasterRecovery:
automated: true
`,
expectedErrorMessage: "`etcd.disasterRecovery.automated` is set to true for enabling automated disaster recovery. However the feature is available only for etcd version 3",
},
{
context: "WithInvalidNodeDrainTimeout",
configYaml: minimalValidConfigYaml + `
experimental:
nodeDrainer:
enabled: true
drainTimeout: 100
`,
expectedErrorMessage: "Drain timeout must be an integer between 1 and 60, but was 100",
},
{
context: "WithInvalidTaint",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
taints:
- key: foo
value: bar
effect: UnknownEffect
`,
expectedErrorMessage: "invalid taint effect: UnknownEffect",
},
{
context: "WithLegacyControllerSettingKeys",
configYaml: minimalValidConfigYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
routeTableId: rtb-1a2b3c4d
controllerCount: 2
controllerCreateTimeout: PT10M
controllerInstanceType: t2.large
controllerRootVolumeSize: 101
controllerRootVolumeType: io1
controllerRootVolumeIOPS: 102
controllerTenancy: dedicated
`,
expectedErrorMessage: "unknown keys found: controllerCount, controllerCreateTimeout, controllerInstanceType, controllerRootVolumeIOPS, controllerRootVolumeSize, controllerRootVolumeType, controllerTenancy",
},
{
context: "WithLegacyEtcdSettingKeys",
configYaml: minimalValidConfigYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
routeTableId: rtb-1a2b3c4d
etcdCount: 2
etcdTenancy: dedicated
etcdInstanceType: t2.large
etcdRootVolumeSize: 101
etcdRootVolumeType: io1
etcdRootVolumeIOPS: 102
etcdDataVolumeSize: 103
etcdDataVolumeType: io1
etcdDataVolumeIOPS: 104
etcdDataVolumeEncrypted: true
`,
expectedErrorMessage: "unknown keys found: etcdCount, etcdDataVolumeEncrypted, etcdDataVolumeIOPS, etcdDataVolumeSize, etcdDataVolumeType, etcdInstanceType, etcdRootVolumeIOPS, etcdRootVolumeSize, etcdRootVolumeType, etcdTenancy",
},
{
context: "WithAwsNodeLabelEnabledForTooLongClusterNameAndPoolName",
configYaml: minimalValidConfigYaml + `
# clusterName + nodePools[].name should be less than or equal to 25 characters or the launch configuration name
# "mykubeawsclustername-mynestedstackname-1N2C4K3LLBEDZ-WorkersLC-BC2S9P3JG2QD" exceeds the limit of 63 characters
# See https://kubernetes.io/docs/user-guide/labels/#syntax-and-character-set
clusterName: my-cluster1 # 11 characters
worker:
nodePools:
- name: workernodepool1 # 15 characters
awsNodeLabels:
enabled: true
`,
expectedErrorMessage: "awsNodeLabels can't be enabled for node pool because the total number of characters in clusterName(=\"my-cluster1\") + node pool's name(=\"workernodepool1\") exceeds the limit of 25",
},
{
context: "WithAwsNodeLabelEnabledForTooLongClusterName",
configYaml: minimalValidConfigYaml + `
# clusterName should be less than or equal to 21 characters or the launch configuration name
# "mykubeawsclustername-mynestedstackname-1N2C4K3LLBEDZ-ControllersLC-BC2S9P3JG2QD" exceeds the limit of 63 characters
# See https://kubernetes.io/docs/user-guide/labels/#syntax-and-character-set
clusterName: mycluster # 9
experimental:
awsNodeLabels:
enabled: true
`,
expectedErrorMessage: "awsNodeLabels can't be enabled for controllers because the total number of characters in clusterName(=\"mycluster\") exceeds the limit of 8",
},
{
context: "WithMultiAPIEndpointsInvalidLB",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
apiEndpointName: unversionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
id: elb-internet-facing
private: true
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: type, private, subnets, hostedZone must be omitted when id is specified to reuse an existing ELB",
},
{
context: "WithMultiAPIEndpointsInvalidWorkerAPIEndpointName",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
# no api endpoint named like that exists!
apiEndpointName: unknownEndpoint
adminAPIEndpointName: versionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
- name: versionedPublic
dnsName: apiv1.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "invalid value for worker.apiEndpointName: no API endpoint named \"unknownEndpoint\" found",
},
{
context: "WithMultiAPIEndpointsInvalidWorkerNodePoolAPIEndpointName",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
# this one is ok but...
apiEndpointName: versionedPublic
nodePools:
- name: pool1
# this one is ng; no api endpoint named this exists!
apiEndpointName: unknownEndpoint
adminAPIEndpointName: versionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
- name: versionedPublic
dnsName: apiv1.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "invalid node pool at index 0: failed to find an API endpoint named \"unknownEndpoint\": no API endpoint named \"unknownEndpoint\" defined under the `apiEndpoints[]`",
},
{
context: "WithMultiAPIEndpointsMissingDNSName",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
apiEndpoints:
- name: unversionedPublic
dnsName:
loadBalancer:
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: dnsName must be set",
},
{
context: "WithMultiAPIEndpointsMissingGlobalAPIEndpointName",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
nodePools:
- name: pool1
# this one is ng; no api endpoint named this exists!
apiEndpointName: unknownEndpoint
- name: pool1
# this one is ng; missing apiEndpointName
adminAPIEndpointName: versionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
- name: versionedPublic
dnsName: apiv1.example.com
loadBalancer:
subnets:
- name: publicSubnet1
hostedZone:
id: hostedzone-public
`,
expectedErrorMessage: "worker.apiEndpointName must not be empty when there're 2 or more API endpoints under the key `apiEndpoints` and one of worker.nodePools[] are missing apiEndpointName",
},
{
context: "WithMultiAPIEndpointsRecordSetImpliedBySubnetsMissingHostedZoneID",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
apiEndpointName: unversionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
# an internet-facing(which is the default) lb in the public subnet is going to be created with a corresponding record set
# however no hosted zone for the record set is provided!
subnets:
- name: publicSubnet1
# missing hosted zone id here!
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
},
{
context: "WithMultiAPIEndpointsRecordSetImpliedByExplicitPublicMissingHostedZoneID",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
worker:
apiEndpointName: unversionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
# an internet-facing lb is going to be created with a corresponding record set
# however no hosted zone for the record set is provided!
private: false
# missing hosted zone id here!
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
},
{
context: "WithMultiAPIEndpointsRecordSetImpliedByExplicitPrivateMissingHostedZoneID",
configYaml: kubeAwsSettings.mainClusterYamlWithoutAPIEndpoint() + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: publicSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
- name: privateSubnet1
availabilityZone: us-west-1a
instanceCIDR: "10.0.2.0/24"
worker:
apiEndpointName: unversionedPublic
apiEndpoints:
- name: unversionedPublic
dnsName: api.example.com
loadBalancer:
# an internal lb is going to be created with a corresponding record set
# however no hosted zone for the record set is provided!
private: true
# missing hosted zone id here!
`,
expectedErrorMessage: "invalid apiEndpoint \"unversionedPublic\" at index 0: invalid loadBalancer: missing hostedZone.id",
},
{
context: "WithNetworkTopologyAllExistingPrivateSubnetsRejectingExistingIGW",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: private1
availabilityZone: us-west-1a
id: subnet-1
private: true
controller:
loadBalancer:
private: true
etcd:
subnets:
- name: private1
worker:
nodePools:
- name: pool1
subnets:
- name: private1
`,
expectedErrorMessage: `internet gateway id can't be specified when all the subnets are existing private subnets`,
},
{
context: "WithNetworkTopologyAllExistingPublicSubnetsRejectingExistingIGW",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
id: subnet-1
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: public1
worker:
nodePools:
- name: pool1
subnets:
- name: public1
`,
expectedErrorMessage: `internet gateway id can't be specified when all the public subnets have existing route tables associated. kube-aws doesn't try to modify an exisinting route table to include a route to the internet gateway`,
},
{
context: "WithNetworkTopologyAllManagedPublicSubnetsWithExistingRouteTableRejectingExistingIGW",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
instanceCIDR: 10.0.1.0/24
routeTable:
id: subnet-1
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: public1
worker:
nodePools:
- name: pool1
subnets:
- name: public1
`,
expectedErrorMessage: `internet gateway id can't be specified when all the public subnets have existing route tables associated. kube-aws doesn't try to modify an exisinting route table to include a route to the internet gateway`,
},
{
context: "WithNetworkTopologyAllManagedPublicSubnetsMissingExistingIGW",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
#misses this
#internetGateway:
# id: igw-1a2b3c4d
subnets:
- name: public1
availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
controller:
loadBalancer:
private: false
etcd:
subnets:
- name: public1
worker:
nodePools:
- name: pool1
subnets:
- name: public1
`,
expectedErrorMessage: `internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC`,
},
{
context: "WithNetworkTopologyAllPreconfiguredPrivateDeprecatedAndThenRemoved",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
# This, in combination with mapPublicIPs=false, had been implying that the route table contains a route to a preconfigured NAT gateway
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-276008202
routeTableId: rtb-1a2b3c4d
# This had implied that all the subnets created by kube-aws should be private
mapPublicIPs: false
subnets:
- availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
# implies
# private: true
# routeTable
# id: rtb-1a2b3c4d
- availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
# implies
# private: true
# routeTable
# id: rtb-1a2b3c4d
`,
expectedErrorMessage: "internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC",
},
{
context: "WithNetworkTopologyAllPreconfiguredPublicDeprecatedAndThenRemoved",
configYaml: mainClusterYaml + `
vpc:
id: vpc-1a2b3c4d
# This, in combination with mapPublicIPs=true, had been implying that the route table contains a route to a preconfigured internet gateway
# See https://github.com/kubernetes-incubator/kube-aws/pull/284#issuecomment-276008202
routeTableId: rtb-1a2b3c4d
# This had implied that all the subnets created by kube-aws should be public
mapPublicIPs: true
# internetGateway.id should be omitted as we assume that the route table specified by routeTableId already contain a route to one
#internetGateway:
# id:
subnets:
- availabilityZone: us-west-1a
instanceCIDR: "10.0.1.0/24"
# #implies
# private: false
# routeTable
# id: rtb-1a2b3c4d
- availabilityZone: us-west-1b
instanceCIDR: "10.0.2.0/24"
# #implies
# private: false
# routeTable
# id: rtb-1a2b3c4d
`,
expectedErrorMessage: "internet gateway id can't be omitted when there're one or more managed public subnets in an existing VPC",
},
{
context: "WithVpcIdAndVPCCIDRSpecified",
configYaml: minimalValidConfigYaml + `
vpc:
id: vpc-1a2b3c4d
internetGateway:
id: igw-1a2b3c4d
# vpcCIDR (10.1.0.0/16) does not contain instanceCIDR (10.0.1.0/24)
vpcCIDR: "10.1.0.0/16"
`,
},
{
context: "WithRouteTableIdSpecified",
configYaml: minimalValidConfigYaml + `
# vpc.id must be specified if routeTableId is specified
routeTableId: rtb-1a2b3c4d
`,
},
{
context: "WithWorkerSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
- sg-23456789
- sg-bcdefabc
- sg-34567890
`,
expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
},
{
context: "WithWorkerAndLBSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
- sg-23456789
loadBalancer:
enabled: true
securityGroupIds:
- sg-bcdefabc
- sg-34567890
`,
expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
},
{
context: "WithWorkerAndALBSecurityGroupIds",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
securityGroupIds:
- sg-12345678
- sg-abcdefab
- sg-23456789
targetGroup:
enabled: true
securityGroupIds:
- sg-bcdefabc
- sg-34567890
`,
expectedErrorMessage: "number of user provided security groups must be less than or equal to 4 but was 5",
},
{
context: "WithUnknownKeyInRoot",
configYaml: minimalValidConfigYaml + `
foo: bar
`,
expectedErrorMessage: "unknown keys found: foo",
},
{
context: "WithUnknownKeyInController",
configYaml: minimalValidConfigYaml + `
controller:
foo: 1
`,
expectedErrorMessage: "unknown keys found in controller: foo",
},
{
context: "WithUnknownKeyInControllerASG",
configYaml: minimalValidConfigYaml + `
controller:
autoScalingGroup:
foo: 1
`,
expectedErrorMessage: "unknown keys found in controller.autoScalingGroup: foo",
},
{
context: "WithUnknownKeyInEtcd",
configYaml: minimalValidConfigYaml + `
etcd:
foo: 1
`,
expectedErrorMessage: "unknown keys found in etcd: foo",
},
{
context: "WithUnknownKeyInWorkerNodePool",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
clusterAutoscaler:
enabled: true
`,
expectedErrorMessage: "unknown keys found in worker.nodePools[0]: clusterAutoscaler",
},
{
context: "WithUnknownKeyInWorkerNodePoolASG",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
autoScalingGroup:
foo: 1
`,
expectedErrorMessage: "unknown keys found in worker.nodePools[0].autoScalingGroup: foo",
},
{
context: "WithUnknownKeyInWorkerNodePoolSpotFleet",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
spotFleet:
bar: 1
`,
expectedErrorMessage: "unknown keys found in worker.nodePools[0].spotFleet: bar",
},
{
context: "WithUnknownKeyInWorkerNodePoolCA",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
autoscaling:
clusterAutoscaler:
baz: 1
`,
expectedErrorMessage: "unknown keys found in worker.nodePools[0].autoscaling.clusterAutoscaler: baz",
},
{
context: "WithUnknownKeyInAddons",
configYaml: minimalValidConfigYaml + `
addons:
blah: 5
`,
expectedErrorMessage: "unknown keys found in addons: blah",
},
{
context: "WithUnknownKeyInReschedulerAddon",
configYaml: minimalValidConfigYaml + `
addons:
rescheduler:
foo: yeah
`,
expectedErrorMessage: "unknown keys found in addons.rescheduler: foo",
},
{
context: "WithUnknownKeyInClusterAutoscalerAddon",
configYaml: minimalValidConfigYaml + `
addons:
clusterAutoscaler:
foo: yeah
`,
expectedErrorMessage: "unknown keys found in addons.clusterAutoscaler: foo",
},
{
context: "WithTooLongControllerIAMRoleName",
configYaml: kubeAwsSettings.withClusterName("kubeaws-it-main").withRegion("ap-northeast-1").minimumValidClusterYaml() + `
controller:
iam:
role:
name: foobarba-foobarba-foobarba-foobarba-foobarba-foobarba
`,
expectedErrorMessage: "IAM role name(=kubeaws-it-main-ap-northeast-1-foobarba-foobarba-foobarba-foobarba-foobarba-foobarba) will be 84 characters long. It exceeds the AWS limit of 64 characters: clusterName(=kubeaws-it-main) + region name(=ap-northeast-1) + managed iam role name(=foobarba-foobarba-foobarba-foobarba-foobarba-foobarba) should be less than or equal to 33",
},
{
context: "WithTooLongWorkerIAMRoleName",
configYaml: kubeAwsSettings.withClusterName("kubeaws-it-main").withRegion("ap-northeast-1").minimumValidClusterYaml() + `
worker:
nodePools:
- name: pool1
iam:
role:
name: foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz
`,
expectedErrorMessage: "IAM role name(=kubeaws-it-main-ap-northeast-1-foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz) will be 87 characters long. It exceeds the AWS limit of 64 characters: clusterName(=kubeaws-it-main) + region name(=ap-northeast-1) + managed iam role name(=foobarba-foobarba-foobarba-foobarba-foobarba-foobarbazzz) should be less than or equal to 33",
},
{
context: "WithInvalidEtcdInstanceProfileArn",
configYaml: minimalValidConfigYaml + `
etcd:
iam:
instanceProfile:
arn: "badArn"
`,
expectedErrorMessage: "invalid etcd settings: invalid instance profile, your instance profile must match (=arn:aws:iam::YOURACCOUNTID:instance-profile/INSTANCEPROFILENAME), provided (badArn)",
},
{
context: "WithInvalidEtcdManagedPolicyArn",
configYaml: minimalValidConfigYaml + `
etcd:
iam:
role:
managedPolicies:
- arn: "badArn"
`,
expectedErrorMessage: "invalid etcd settings: invalid managed policy arn, your managed policy must match this (=arn:aws:iam::(YOURACCOUNTID|aws):policy/POLICYNAME), provided this (badArn)",
},
{
context: "WithInvalidWorkerInstanceProfileArn",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
instanceProfile:
arn: "badArn"
`,
expectedErrorMessage: "invalid instance profile, your instance profile must match (=arn:aws:iam::YOURACCOUNTID:instance-profile/INSTANCEPROFILENAME), provided (badArn)",
},
{
context: "WithInvalidWorkerManagedPolicyArn",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
iam:
role:
managedPolicies:
- arn: "badArn"
`,
expectedErrorMessage: "invalid managed policy arn, your managed policy must match this (=arn:aws:iam::(YOURACCOUNTID|aws):policy/POLICYNAME), provided this (badArn)",
},
{
context: "WithGPUEnabledWorkerButEmptyVersion",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
instanceType: p2.xlarge
gpu:
nvidia:
enabled: true
version: ""
`,
expectedErrorMessage: `gpu.nvidia.version must not be empty when gpu.nvidia is enabled.`,
},
{
context: "WithGPUDisabledWorkerButIntallationSupportEnabled",
configYaml: minimalValidConfigYaml + `
worker:
nodePools:
- name: pool1
instanceType: t2.medium
gpu:
nvidia:
enabled: true
version: ""
`,
expectedErrorMessage: `instance type t2.medium doesn't support GPU. You can enable Nvidia driver intallation support only when use [p2 p3 g2 g3] instance family.`,
},
}
for _, invalidCase := range parseErrorCases {
t.Run(invalidCase.context, func(t *testing.T) {
configBytes := invalidCase.configYaml
// TODO Allow including plugins in test data?
plugins := []*api.Plugin{}
providedConfig, err := config.ConfigFromBytes([]byte(configBytes), plugins)
if err == nil {
t.Errorf("expected to fail parsing config %s: %+v: %+v", configBytes, *providedConfig, err)
t.FailNow()
}
errorMsg := fmt.Sprintf("%v", err)
if !strings.Contains(errorMsg, invalidCase.expectedErrorMessage) {
t.Errorf(`expected "%s" to be contained in the error message : %s`, invalidCase.expectedErrorMessage, errorMsg)
}
})
}
}
| [
"\"KUBE_AWS_INTEGRATION_TEST\""
]
| []
| [
"KUBE_AWS_INTEGRATION_TEST"
]
| [] | ["KUBE_AWS_INTEGRATION_TEST"] | go | 1 | 0 | |
main.py | import requests
import os
import json
import sys
def actionColor(status):
"""
    Get an action color based on the workflow status.
"""
if status == 'success':
return 'good'
elif status == 'failure':
return 'danger'
return 'warning'
def actionStatus(status):
"""
Get a transformed status based on the workflow status.
"""
if status == 'success':
return 'works'
elif status == 'failure':
return 'failed <!here>'
return 'passed with warnings'
def actionEmoji(status):
"""
Get an emoji based on the workflow status.
"""
if status == 'success':
return ':sunglasses:'
elif status == 'failure':
return ':fire:'
return ':zipper_mouth_face:'
def notify_slack(job_status, notify_when):
url = os.getenv('SLACK_WEBHOOK_URL')
workflow = os.getenv('GITHUB_WORKFLOW')
repo = os.getenv('GITHUB_REPOSITORY')
branch = os.getenv('GITHUB_REF')
commit = os.getenv('GITHUB_SHA')
commit_url = f'https://github.com/{repo}/commit/{commit}'
repo_url = f'https://github.com/{repo}/tree/{branch}'
color = actionColor(job_status)
status_message = actionStatus(job_status)
emoji = actionEmoji(job_status)
message = f'{emoji} {workflow} {status_message} for the test <{repo_url}|{repo}@{branch}> on <{commit_url}|{commit[:7]}>.'
payload = {
'attachments': [
{
'text': message,
'fallback': 'New Github Action Run',
'color': color,
'mrkdwn_in': ['text'],
}
]
}
payload = json.dumps(payload)
headers = {'Content-Type': 'application/json'}
if notify_when is None:
notify_when = 'success,failure,warnings'
if job_status in notify_when and not testing:
requests.post(url, data=payload, headers=headers)
def main():
job_status = os.getenv('INPUT_STATUS')
notify_when = os.getenv('INPUT_NOTIFY_WHEN')
notify_slack(job_status, notify_when)
if __name__ == '__main__':
try:
testing = True if sys.argv[1] == '--test' else False
except IndexError as e:
testing = False
main()
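# Illustrative only, not part of the original action: the script is configured entirely through
# environment variables, so a local dry run could look roughly like the sketch below
# (all values are hypothetical placeholders).
#
#   INPUT_STATUS=success INPUT_NOTIFY_WHEN=success,failure \
#   SLACK_WEBHOOK_URL=https://hooks.slack.com/services/T000/B000/XXXX \
#   GITHUB_WORKFLOW=CI GITHUB_REPOSITORY=org/repo GITHUB_REF=refs/heads/main GITHUB_SHA=abc1234def0000 \
#   python main.py --test
#
# Passing --test sets testing=True, so the Slack payload is built but never POSTed.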
| []
| []
| [
"INPUT_NOTIFY_WHEN",
"GITHUB_REF",
"GITHUB_WORKFLOW",
"GITHUB_REPOSITORY",
"GITHUB_SHA",
"SLACK_WEBHOOK_URL",
"INPUT_STATUS"
]
| [] | ["INPUT_NOTIFY_WHEN", "GITHUB_REF", "GITHUB_WORKFLOW", "GITHUB_REPOSITORY", "GITHUB_SHA", "SLACK_WEBHOOK_URL", "INPUT_STATUS"] | python | 7 | 0 | |
pkg/docker.go | package docker
import (
"bufio"
"encoding/base64"
"encoding/json"
"github.com/docker/docker/api/types"
log "github.com/sirupsen/logrus"
"golang.org/x/net/context"
"os"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/mitchellh/go-homedir"
"io"
)
// Thank you Justin Wilson for your comment on
// https://forums.docker.com/t/how-to-create-registryauth-for-private-registry-login-credentials/29235/2
func PushImage(imageName string, tag string) {
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
panic(err)
}
registryUrl := os.Getenv("DOCKER_REGISTRY_URL")
if registryUrl == "" {
panic("Registry URL cannot be null")
}
log.WithField("Registry URL", registryUrl).Info("Found Registry URL")
fqin := registryUrl + "/" + imageName
username, password := GetAuthToken(DockerRegistry{
url: registryUrl,
})
authConfig := types.AuthConfig{
Username: *username,
Password: *password,
}
encodedJSON, err := json.Marshal(authConfig)
if err != nil {
panic(err)
}
authStr := base64.URLEncoding.EncodeToString(encodedJSON)
//log.WithField("token", *token).Info("Token:")
reader, err := cli.ImagePush(ctx, fqin, types.ImagePushOptions{
RegistryAuth: authStr,
})
if err != nil {
panic(err)
}
io.Copy(os.Stdout, reader)
_, _ = ctx, cli
}
// Pull a docker image provided as an argument.
func PullImage(imageName string) {
ctx := context.Background()
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
panic(err)
}
registryUrl := os.Getenv("DOCKER_REGISTRY_URL")
if registryUrl == "" {
panic("Registry URL cannot be null")
}
log.WithField("Registry URL", registryUrl).Info("Found Registry URL")
fqin := registryUrl + "/" + imageName
username, password := GetAuthToken(DockerRegistry{
url: registryUrl,
})
authConfig := types.AuthConfig{
Username: *username,
Password: *password,
}
encodedJSON, err := json.Marshal(authConfig)
if err != nil {
panic(err)
}
authStr := base64.URLEncoding.EncodeToString(encodedJSON)
reader, err := cli.ImagePull(ctx, fqin, types.ImagePullOptions{
RegistryAuth: authStr,
})
if err != nil {
panic(err)
}
io.Copy(os.Stdout, reader)
_, _ = ctx, cli
}
// Build the Docker Context.
// Essentially, Docker uses a tar archive as the build context.
func GetContext(filePath string) io.Reader {
// Use homedir.Expand to resolve paths like '~/repos/myrepo'
filePath, _ = homedir.Expand(filePath)
log.Println(filePath)
ctx, _ := archive.TarWithOptions(filePath, &archive.TarOptions{})
return ctx
}
// Building docker image
// Refer to https://stackoverflow.com/questions/38804313/build-docker-image-from-go-code
func BuildImage(dockerfilePath string, dockerTag string) {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
if err != nil {
panic(err)
}
registryUrl := os.Getenv("DOCKER_REGISTRY_URL")
if registryUrl == "" {
panic("Registry URL cannot be null")
}
log.WithField("Registry URL", registryUrl).Info("Found Registry URL")
tag := registryUrl + "/" + dockerTag
buildResp, err := cli.ImageBuild(context.Background(), GetContext(dockerfilePath),
types.ImageBuildOptions{
Dockerfile: "Dockerfile", // optional, is the default
SuppressOutput: false,
Tags: []string{tag},
PullParent: true,
})
if err != nil {
log.Fatal(err)
}
writeToLog(buildResp.Body)
log.Println("Build Completed")
}
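// Illustrative usage sketch, not part of the original package. It assumes a registry reachable
// through the DOCKER_REGISTRY_URL environment variable and a hypothetical image name; GetAuthToken
// and the DockerRegistry type are whatever the surrounding project provides.
//
//	os.Setenv("DOCKER_REGISTRY_URL", "registry.example.com")
//	BuildImage("~/repos/myapp", "myapp:latest") // tars ~/repos/myapp and builds registry.example.com/myapp:latest
//	PushImage("myapp:latest", "latest")         // pushes the freshly built image using GetAuthToken credentials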
// https://medium.com/faun/how-to-build-docker-images-on-the-fly-2a1fd696c3fd
// Write LOG to the console.
func writeToLog(reader io.ReadCloser) error {
defer reader.Close()
rd := bufio.NewReader(reader)
for {
n, _, err := rd.ReadLine()
if err != nil && err == io.EOF {
break
} else if err != nil {
return err
}
log.Println(string(n))
}
return nil
}
| [
"\"DOCKER_REGISTRY_URL\"",
"\"DOCKER_REGISTRY_URL\"",
"\"DOCKER_REGISTRY_URL\""
]
| []
| [
"DOCKER_REGISTRY_URL"
]
| [] | ["DOCKER_REGISTRY_URL"] | go | 1 | 0 | |
src/data/fetch_database.py | # import motor.motor_asyncio
from pymongo import MongoClient
from dotenv import load_dotenv
import os
import pandas as pd
from dagster import solid
def load_env_variables():
"""
Function to load environment variables from .env file
:return: database password and database name
"""
load_dotenv()
database_password = os.environ.get('PASSWORD')
database_name = os.environ.get('DATABASE')
return database_password, database_name
def configure_database_collection(collection_name: str):
"""
Configure the database connection, database and collection by passing the collection name
:return: the collection
"""
# load database password and name from environment variables
database_password, database_name = load_env_variables()
MONGO_DETAILS = "mongodb+srv://admin:" + database_password + "@wineestimations.ycvrd.mongodb.net/" + database_name + \
"?retryWrites=true "
client = MongoClient(MONGO_DETAILS)
database = client[database_name]
collection = database.get_collection(collection_name)
return collection
# def estimation_helper(estimation) -> dict:
# return {
# "id": str(estimation["_id"]),
# "wineName": estimation["wineName"],
# "designation": estimation["designation"],
# "vineyard": estimation["vineyard"],
# "cuvee": estimation["cuvee"],
# "bottleType": estimation["bottleType"],
# "color": estimation["color"],
# "vintage": estimation["vintage"],
# "wineSearcherMin": estimation["wineSearcherMin"],
# "wineSearcherMax": estimation["wineSearcherMax"],
# "idealWinePrice": estimation["idealWinePrice"],
# "correctedMin": estimation["correctedMin"],
# "correctedMax": estimation["correctedMax"],
# "weightedMin": estimation["weightedMin"],
# "weightedMax": estimation["weightedMax"],
# "wineLevel": estimation["wineLevel"],
# "label": estimation["label"],
# "cap": estimation["cap"],
# "limpidity": estimation["limpidity"],
# "date": estimation["date"],
# }
@solid
def retrieve_filtered_estimations(collection_name: str, condition: dict):
"""
Retrieve records from mongo database by passing collection name and condition for filtering
:return: list of retrieved records
example: collection_name:'estimations_collection', condition:{"wineLevel": 1, "label": 1, "cap": 1, "limpidity": 1}
"""
collection = configure_database_collection(collection_name)
filtered_estimations = []
for estimation in collection.find(condition):
filtered_estimations.append(estimation)
return filtered_estimations
@solid
def convert_to_csv(collection_name: str, condition: dict, filename: str):
"""
Convert the retrieved data from the database to csv format by passing collection name, condition, and filename in
order to save it in data/raw as a centralised directory for data
"""
records = retrieve_filtered_estimations(collection_name, condition)
records_df = pd.DataFrame.from_records(records)
records_df.to_csv(path_or_buf="../../data/raw/" + filename + ".csv",
index=False)
# convert_to_csv("estimations_collection", {"wineLevel": 1, "label": 1, "cap": 1, "limpidity": 1}, "wine_estimations")
convert_to_csv("add_weight_collection", {"updatedWeight": True, "caps_score": 1, "label_score": 1, "limpidity_score": 1,
"wineLevel_score": 1}, "weighted_wine_estimations")
| []
| []
| [
"DATABASE",
"PASSWORD"
]
| [] | ["DATABASE", "PASSWORD"] | python | 2 | 0 | |
cmd/abapEnvironmentCheckoutBranch_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type abapEnvironmentCheckoutBranchOptions struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
RepositoryName string `json:"repositoryName,omitempty"`
BranchName string `json:"branchName,omitempty"`
Host string `json:"host,omitempty"`
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
CfServiceInstance string `json:"cfServiceInstance,omitempty"`
CfServiceKeyName string `json:"cfServiceKeyName,omitempty"`
}
// AbapEnvironmentCheckoutBranchCommand Switches between branches of a git repository on a SAP Cloud Platform ABAP Environment system
func AbapEnvironmentCheckoutBranchCommand() *cobra.Command {
const STEP_NAME = "abapEnvironmentCheckoutBranch"
metadata := abapEnvironmentCheckoutBranchMetadata()
var stepConfig abapEnvironmentCheckoutBranchOptions
var startTime time.Time
var createAbapEnvironmentCheckoutBranchCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Switches between branches of a git repository on a SAP Cloud Platform ABAP Environment system",
Long: `This step switches between branches of a git repository (Software Component) on a SAP Cloud Platform ABAP Environment system.
Please provide either of the following options:
* The host and credentials the Cloud Platform ABAP Environment system itself. The credentials must be configured for the Communication Scenario SAP_COM_0510.
* The Cloud Foundry parameters (API endpoint, organization, space), credentials, the service instance for the ABAP service and the service key for the Communication Scenario SAP_COM_0510.
* Only provide one of those options with the respective credentials. If all values are provided, the direct communication (via host) has priority.`,
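		// Illustrative configuration sketch, not part of the generated code: for the direct-connection
		// option the relevant step parameters could be supplied roughly like the hypothetical YAML below
		// (host, repository and branch values are placeholders).
		//
		//   abapEnvironmentCheckoutBranch:
		//     host: 'https://1234-abcd.abap.eu10.hana.ondemand.com'
		//     repositoryName: '/DMO/GIT_REPOSITORY'
		//     branchName: 'feature-branch'
		//     # username/password are usually injected from credentials rather than written here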
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetry.Send(&telemetryData)
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
abapEnvironmentCheckoutBranch(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapEnvironmentCheckoutBranchFlags(createAbapEnvironmentCheckoutBranchCmd, &stepConfig)
return createAbapEnvironmentCheckoutBranchCmd
}
func addAbapEnvironmentCheckoutBranchFlags(cmd *cobra.Command, stepConfig *abapEnvironmentCheckoutBranchOptions) {
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0510")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for either the Cloud Foundry API or the Communication Arrangement for SAP_COM_0510")
cmd.Flags().StringVar(&stepConfig.RepositoryName, "repositoryName", os.Getenv("PIPER_repositoryName"), "Specifies a Repository (Software Component) on the SAP Cloud Platform ABAP Environment system")
cmd.Flags().StringVar(&stepConfig.BranchName, "branchName", os.Getenv("PIPER_branchName"), "Specifies a Branch of a Repository (Software Component) on the SAP Cloud Platform ABAP Environment system")
cmd.Flags().StringVar(&stepConfig.Host, "host", os.Getenv("PIPER_host"), "Specifies the host address of the SAP Cloud Platform ABAP Environment system")
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", os.Getenv("PIPER_cfApiEndpoint"), "Cloud Foundry API Enpoint")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "Cloud Foundry target organization")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "Cloud Foundry target space")
cmd.Flags().StringVar(&stepConfig.CfServiceInstance, "cfServiceInstance", os.Getenv("PIPER_cfServiceInstance"), "Cloud Foundry Service Instance")
cmd.Flags().StringVar(&stepConfig.CfServiceKeyName, "cfServiceKeyName", os.Getenv("PIPER_cfServiceKeyName"), "Cloud Foundry Service Key")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("repositoryName")
cmd.MarkFlagRequired("branchName")
}
// retrieve step metadata
func abapEnvironmentCheckoutBranchMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapEnvironmentCheckoutBranch",
Aliases: []config.Alias{},
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "username",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "repositoryName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "branchName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "host",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
},
{
Name: "cfServiceInstance",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstance"}},
},
{
Name: "cfServiceKeyName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceKey"}, {Name: "cloudFoundry/serviceKeyName"}, {Name: "cfServiceKeyName"}},
},
},
},
},
}
return theMetaData
}
| [
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_repositoryName\"",
"\"PIPER_branchName\"",
"\"PIPER_host\"",
"\"PIPER_cfApiEndpoint\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_cfServiceInstance\"",
"\"PIPER_cfServiceKeyName\""
]
| []
| [
"PIPER_repositoryName",
"PIPER_cfSpace",
"PIPER_host",
"PIPER_password",
"PIPER_cfApiEndpoint",
"PIPER_username",
"PIPER_cfServiceInstance",
"PIPER_cfServiceKeyName",
"PIPER_cfOrg",
"PIPER_branchName"
]
| [] | ["PIPER_repositoryName", "PIPER_cfSpace", "PIPER_host", "PIPER_password", "PIPER_cfApiEndpoint", "PIPER_username", "PIPER_cfServiceInstance", "PIPER_cfServiceKeyName", "PIPER_cfOrg", "PIPER_branchName"] | go | 10 | 0 | |
proxy-node-gateway/src/main/java/uk/gov/ida/notification/GatewayApplication.java | package uk.gov.ida.notification;
import io.dropwizard.Application;
import io.dropwizard.configuration.EnvironmentVariableSubstitutor;
import io.dropwizard.configuration.SubstitutingSourceProvider;
import io.dropwizard.setup.Bootstrap;
import io.dropwizard.setup.Environment;
import io.dropwizard.views.ViewBundle;
import net.shibboleth.utilities.java.support.security.SecureRandomIdentifierGenerationStrategy;
import org.eclipse.jetty.server.session.SessionHandler;
import uk.gov.ida.dropwizard.logstash.LogstashBundle;
import uk.gov.ida.notification.configuration.RedisServiceConfiguration;
import uk.gov.ida.notification.exceptions.mappers.ErrorPageExceptionMapper;
import uk.gov.ida.notification.exceptions.mappers.ExceptionToSamlErrorResponseMapper;
import uk.gov.ida.notification.exceptions.mappers.GenericExceptionMapper;
import uk.gov.ida.notification.healthcheck.ProxyNodeHealthCheck;
import uk.gov.ida.notification.proxy.EidasSamlParserProxy;
import uk.gov.ida.notification.proxy.TranslatorProxy;
import uk.gov.ida.notification.resources.EidasAuthnRequestResource;
import uk.gov.ida.notification.resources.HubResponseResource;
import uk.gov.ida.notification.session.storage.InMemoryStorage;
import uk.gov.ida.notification.session.storage.RedisStorage;
import uk.gov.ida.notification.session.storage.SessionStore;
import uk.gov.ida.notification.shared.IstioHeaderMapperFilter;
import uk.gov.ida.notification.shared.ProxyNodeLoggingFilter;
import uk.gov.ida.notification.shared.Urls;
import uk.gov.ida.notification.shared.proxy.VerifyServiceProviderProxy;
import javax.servlet.DispatcherType;
import java.net.URI;
import java.util.EnumSet;
public class GatewayApplication extends Application<GatewayConfiguration> {
@SuppressWarnings("WeakerAccess") // Needed for DropwizardAppRules
public GatewayApplication() {
}
public static void main(final String[] args) throws Exception {
if (args == null || args.length == 0) {
String configFile = System.getenv("CONFIG_FILE");
if (configFile == null) {
throw new RuntimeException("CONFIG_FILE environment variable should be set with path to configuration file");
}
new GatewayApplication().run("server", configFile);
} else {
new GatewayApplication().run(args);
}
}
@Override
public String getName() {
return "EidasProxyNode";
}
@Override
public void initialize(final Bootstrap<GatewayConfiguration> bootstrap) {
// Needed to correctly interpolate environment variables in config file
bootstrap.setConfigurationSourceProvider(
new SubstitutingSourceProvider(bootstrap.getConfigurationSourceProvider(),
new EnvironmentVariableSubstitutor(false)
)
);
bootstrap.addBundle(new ViewBundle<>());
bootstrap.addBundle(new LogstashBundle());
}
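    // Illustrative only, not part of the original class: with the SubstitutingSourceProvider above,
    // a hypothetical entry in the YAML configuration such as
    //
    //     translatorService:
    //       url: ${TRANSLATOR_URL}
    //
    // is resolved from the TRANSLATOR_URL environment variable when the configuration is read. The
    // 'false' argument disables strict mode, so an unset variable does not abort startup.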
@Override
public void run(final GatewayConfiguration configuration,
final Environment environment) {
final ProxyNodeHealthCheck proxyNodeHealthCheck = new ProxyNodeHealthCheck("gateway");
environment.healthChecks().register(proxyNodeHealthCheck.getName(), proxyNodeHealthCheck);
final RedisServiceConfiguration redisService = configuration.getRedisService();
final SessionStore sessionStorage = redisService.isLocal() ?
new InMemoryStorage() : new RedisStorage(redisService);
final SamlFormViewBuilder samlFormViewBuilder = new SamlFormViewBuilder();
final TranslatorProxy translatorProxy = configuration
.getTranslatorServiceConfiguration()
.buildTranslatorProxy(environment);
registerProviders(environment);
registerResources(configuration, environment, samlFormViewBuilder, translatorProxy, sessionStorage);
registerExceptionMappers(environment, samlFormViewBuilder, translatorProxy, sessionStorage, configuration.getErrorPageRedirectUrl());
}
private void registerProviders(Environment environment) {
SessionHandler sessionHandler = new SessionHandler();
sessionHandler.setSessionCookie("gateway-session");
environment.servlets().setSessionHandler(sessionHandler);
setRequestServletFilter(environment);
setResponseServletFilter(environment);
environment.jersey().register(IstioHeaderMapperFilter.class);
environment.jersey().register(ProxyNodeLoggingFilter.class);
}
private void setRequestServletFilter(Environment environment) {
JourneyIdGeneratingServletFilter requestFilter = new JourneyIdGeneratingServletFilter(new SecureRandomIdentifierGenerationStrategy());
environment.servlets()
.addFilter(requestFilter.getClass().getSimpleName(), requestFilter)
.addMappingForUrlPatterns(
EnumSet.of(DispatcherType.REQUEST),
true,
Urls.GatewayUrls.GATEWAY_ROOT + Urls.GatewayUrls.GATEWAY_EIDAS_AUTHN_REQUEST_POST_PATH,
Urls.GatewayUrls.GATEWAY_ROOT + Urls.GatewayUrls.GATEWAY_EIDAS_AUTHN_REQUEST_REDIRECT_PATH);
}
private void setResponseServletFilter(Environment environment) {
JourneyIdHubResponseServletFilter responseFilter = new JourneyIdHubResponseServletFilter();
environment.servlets()
.addFilter(responseFilter.getClass().getSimpleName(), responseFilter)
.addMappingForUrlPatterns(
EnumSet.of(DispatcherType.REQUEST),
true,
Urls.GatewayUrls.GATEWAY_HUB_RESPONSE_RESOURCE);
}
private void registerExceptionMappers(
Environment environment,
SamlFormViewBuilder samlFormViewBuilder,
TranslatorProxy translatorProxy,
SessionStore sessionStore,
URI errorPageRedirectUrl) {
environment.jersey().register(new ExceptionToSamlErrorResponseMapper(samlFormViewBuilder, translatorProxy, sessionStore));
environment.jersey().register(new ErrorPageExceptionMapper(errorPageRedirectUrl));
environment.jersey().register(new GenericExceptionMapper(errorPageRedirectUrl));
}
private void registerResources(
GatewayConfiguration configuration,
Environment environment,
SamlFormViewBuilder samlFormViewBuilder,
TranslatorProxy translatorProxy,
SessionStore sessionStorage) {
EidasSamlParserProxy espProxy = configuration
.getEidasSamlParserServiceConfiguration()
.buildEidasSamlParserService(environment);
VerifyServiceProviderProxy vspProxy = configuration
.getVerifyServiceProviderConfiguration()
.buildVerifyServiceProviderProxy(environment);
environment.lifecycle().manage(sessionStorage);
environment.jersey().register(new EidasAuthnRequestResource(
espProxy,
vspProxy,
samlFormViewBuilder,
sessionStorage));
environment.jersey().register(new HubResponseResource(
samlFormViewBuilder,
translatorProxy,
sessionStorage
));
}
}
| [
"\"CONFIG_FILE\""
]
| []
| [
"CONFIG_FILE"
]
| [] | ["CONFIG_FILE"] | java | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "foodalliance.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
serverless_functions/aws/java/cycle_translator/sentence_translation/src/main/java/sentence_translation/Handler.java | package sentence_translation;
import com.amazonaws.auth.AWSStaticCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestStreamHandler;
import com.amazonaws.services.translate.AmazonTranslate;
import com.amazonaws.services.translate.AmazonTranslateClient;
import com.amazonaws.services.translate.model.TranslateTextRequest;
import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import com.google.gson.JsonSyntaxException;
import org.jetbrains.annotations.NotNull;
import javax.json.Json;
import javax.json.JsonObjectBuilder;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
@SuppressWarnings("rawtypes")
public class Handler implements RequestStreamHandler {
@Override
public void handleRequest(InputStream inputStream, OutputStream outputStream, Context context) {
// request reading
HashMap event;
BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8));
Gson gson = new GsonBuilder().setPrettyPrinting().create();
try {
event = gson.fromJson(reader, HashMap.class);
} catch (JsonSyntaxException ignored) {
event = new HashMap();
}
// extract string to recognize from request
String sentence;
if (event.containsKey("sentence")) {
sentence = (String)event.get("sentence");
} else {
returnResult(outputStream, null, null);
return;
}
String languageCode;
if (event.containsKey("language_code")) {
languageCode = (String)event.get("language_code");
} else {
returnResult(outputStream, null, null);
return;
}
// translate string and return result
returnResult(outputStream, sentence, translateText(sentence, languageCode));
}
private static String translateText(String text, String languageCode) {
// prepare request
AmazonTranslate client = AmazonTranslateClient.builder()
.withCredentials(new AWSStaticCredentialsProvider(
DefaultAWSCredentialsProviderChain.getInstance().getCredentials()))
.withRegion(System.getenv("AWS_REGION"))
.build();
TranslateTextRequest request = new TranslateTextRequest()
.withText(text)
.withSourceLanguageCode(languageCode)
.withTargetLanguageCode("en");
// return result
return client.translateText(request).getTranslatedText();
}
private static void returnResult(@NotNull OutputStream outputStream, String originalSentence, String sentence) {
// response creation
String result;
if (originalSentence == null || sentence == null) {
result = "Error";
} else {
JsonObjectBuilder job = Json.createObjectBuilder();
job.add("original_sentence", originalSentence);
job.add("sentence", sentence);
result = job.build().toString();
}
// response writing
PrintWriter writer = new PrintWriter(new BufferedWriter(new OutputStreamWriter(outputStream,
StandardCharsets.UTF_8)));
writer.write(result);
writer.close();
}
}
| [
"\"AWS_REGION\""
]
| []
| [
"AWS_REGION"
]
| [] | ["AWS_REGION"] | java | 1 | 0 | |
rqalpha/utils/config.py | # -*- coding: utf-8 -*-
# Copyright 2019 Shenzhen Ricequant Technology Co., Ltd. ("Ricequant")
#
# You may not use this software except in compliance with the current license.
#
# * Non-commercial use (i.e. use by individuals for non-commercial purposes, or by universities, research institutes and other non-profit institutions for education, research and similar purposes):
#     Comply with the Apache License 2.0 (the "Apache 2.0 License"); you may obtain a copy of the Apache 2.0 License at http://www.apache.org/licenses/LICENSE-2.0.
#     Unless required by law or agreed to in writing, this software must be distributed under the current license "as is", with no additional conditions attached.
#
# * Commercial use (i.e. use by individuals for any commercial purpose, or use by legal persons or other organizations for any purpose):
#     Without authorization from Ricequant, no individual may use this software for any commercial purpose (including but not limited to providing, selling, renting, lending or transferring this software, its derivatives, or products or services that reference or draw on its functionality or source code to third parties), and no legal person or other organization may use this software for any purpose; otherwise Ricequant reserves the right to pursue liability for the corresponding intellectual property infringement.
#     Subject to the above, use of this software must also comply with the Apache 2.0 License; where the Apache 2.0 License conflicts with this license, this license prevails.
#     For the detailed authorization process, please contact [email protected].
import os
import locale
import codecs
import pandas as pd
import yaml
import simplejson as json
import six
from rqalpha.const import RUN_TYPE, PERSIST_MODE, MARKET, COMMISSION_TYPE
from rqalpha.utils import RqAttrDict, logger
from rqalpha.utils.i18n import gettext as _, localization
from rqalpha.utils.dict_func import deep_update
from rqalpha.utils.py2 import to_utf8
from rqalpha.utils.logger import system_log
from rqalpha.mod.utils import mod_config_value_parse
rqalpha_path = "~/.rqalpha"
def load_yaml(path):
with codecs.open(path, encoding='utf-8') as f:
return yaml.safe_load(f)
def load_json(path):
with codecs.open(path, encoding='utf-8') as f:
return json.loads(f.read())
default_config_path = os.path.join(os.path.dirname(__file__), '..', 'config.yml')
default_mod_config_path = os.path.join(os.path.dirname(__file__), '..', 'mod_config.yml')
def user_mod_conf_path():
return os.path.join(os.path.expanduser(rqalpha_path), 'mod_config.yml')
def get_mod_conf():
base = load_yaml(default_mod_config_path)
user_mod_conf = os.path.join(os.path.expanduser(rqalpha_path), 'mod_config.yml')
user = load_yaml(user_mod_conf) if os.path.exists(user_mod_conf) else {}
deep_update(user, base)
return base
def load_config_from_folder(folder):
folder = os.path.expanduser(folder)
path = os.path.join(folder, 'config.yml')
base = load_yaml(path) if os.path.exists(path) else {}
mod_path = os.path.join(folder, 'mod_config.yml')
mod = load_yaml(mod_path) if os.path.exists(mod_path) else {}
deep_update(mod, base)
return base
def default_config():
base = load_yaml(default_config_path)
base['base']['source_code'] = None
mod = load_yaml(default_mod_config_path)
deep_update(mod, base)
return base
def user_config():
return load_config_from_folder(rqalpha_path)
def project_config():
return load_config_from_folder(os.getcwd())
def code_config(config, source_code=None):
try:
if source_code is None:
with codecs.open(config["base"]["strategy_file"], encoding="utf-8") as f:
source_code = f.read()
# FIXME: hardcode for parametric mod
def noop(*args, **kwargs):
pass
scope = {'define_parameter': noop}
code = compile(source_code, config["base"]["strategy_file"], 'exec')
six.exec_(code, scope)
return scope.get('__config__', {})
except Exception as e:
system_log.error(_(u"in parse_user_config, exception: {e}").format(e=e))
return {}
def dump_config(config_path, config, dumper=yaml.Dumper):
dirname = os.path.dirname(config_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
with codecs.open(config_path, mode='w', encoding='utf-8') as stream:
stream.write(to_utf8(yaml.dump(config, Dumper=dumper)))
def set_locale(lc):
    # FIXME: It should depend on the system and locale config
try:
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
locale.setlocale(locale.LC_CTYPE, "en_US.UTF-8")
os.environ['TZ'] = 'Asia/Shanghai'
except Exception as e:
if os.name != 'nt':
raise
localization.set_locale([lc])
def parse_config(config_args, config_path=None, click_type=False, source_code=None, user_funcs=None):
conf = default_config()
deep_update(user_config(), conf)
deep_update(project_config(), conf)
if config_path is not None:
deep_update(load_yaml(config_path), conf)
if 'base__strategy_file' in config_args and config_args['base__strategy_file']:
# FIXME: ugly, we need this to get code
conf['base']['strategy_file'] = config_args['base__strategy_file']
elif ('base' in config_args and 'strategy_file' in config_args['base'] and
config_args['base']['strategy_file']):
conf['base']['strategy_file'] = config_args['base']['strategy_file']
if user_funcs is None:
for k, v in code_config(conf, source_code).items():
if k in conf['whitelist']:
deep_update(v, conf[k])
mod_configs = config_args.pop('mod_configs', [])
for k, v in mod_configs:
key = 'mod__{}'.format(k.replace('.', '__'))
config_args[key] = mod_config_value_parse(v)
if click_type:
for k, v in config_args.items():
if v is None:
continue
if k == 'base__accounts' and not v:
continue
key_path = k.split('__')
sub_dict = conf
for p in key_path[:-1]:
if p not in sub_dict:
sub_dict[p] = {}
sub_dict = sub_dict[p]
sub_dict[key_path[-1]] = v
else:
deep_update(config_args, conf)
config = RqAttrDict(conf)
set_locale(config.extra.locale)
def _to_date(v):
return pd.Timestamp(v).date()
config.base.start_date = _to_date(config.base.start_date)
config.base.end_date = _to_date(config.base.end_date)
if config.base.data_bundle_path is None:
config.base.data_bundle_path = os.path.join(os.path.expanduser(rqalpha_path), "bundle")
config.base.run_type = parse_run_type(config.base.run_type)
config.base.accounts = parse_accounts(config.base.accounts)
config.base.init_positions = parse_init_positions(config.base.init_positions)
config.base.persist_mode = parse_persist_mode(config.base.persist_mode)
config.base.market = parse_market(config.base.market)
config.base.future_info = parse_future_info(config.base.future_info)
if config.extra.context_vars:
if isinstance(config.extra.context_vars, six.string_types):
config.extra.context_vars = json.loads(to_utf8(config.extra.context_vars))
if config.base.frequency == "1d":
logger.DATETIME_FORMAT = "%Y-%m-%d"
return config
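# Illustrative only, not part of the original module: when click_type is True, flat CLI-style keys are
# unflattened along '__' into the nested config, e.g. a hypothetical
#     {"base__frequency": "1m", "mod__sys_analyser__enabled": True}
# ends up as conf["base"]["frequency"] = "1m" and conf["mod"]["sys_analyser"]["enabled"] = True.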
def parse_future_info(future_info):
new_info = {}
for underlying_symbol, info in future_info.items():
try:
underlying_symbol = underlying_symbol.upper()
except AttributeError:
raise RuntimeError(_("Invalid future info: underlying_symbol {} is illegal.".format(underlying_symbol)))
for field, value in info.items():
if field in (
"open_commission_ratio", "close_commission_ratio", "close_commission_today_ratio"
):
new_info.setdefault(underlying_symbol, {})[field] = float(value)
elif field == "commission_type":
if isinstance(value, six.string_types) and value.upper() == "BY_MONEY":
new_info.setdefault(underlying_symbol, {})[field] = COMMISSION_TYPE.BY_MONEY
elif isinstance(value, six.string_types) and value.upper() == "BY_VOLUME":
new_info.setdefault(underlying_symbol, {})[field] = COMMISSION_TYPE.BY_VOLUME
elif isinstance(value, COMMISSION_TYPE):
new_info.setdefault(underlying_symbol, {})[field] = value
else:
raise RuntimeError(_(
"Invalid future info: commission_type is suppose to be BY_MONEY or BY_VOLUME"
))
else:
raise RuntimeError(_("Invalid future info: field {} is not valid".format(field)))
return new_info
def parse_accounts(accounts):
a = {}
if isinstance(accounts, tuple):
accounts = {account_type: starting_cash for account_type, starting_cash in accounts}
for account_type, starting_cash in accounts.items():
if starting_cash is None:
continue
starting_cash = float(starting_cash)
a[account_type.upper()] = starting_cash
# if len(a) == 0:
# raise RuntimeError(_(u"None account type has been selected."))
return a
def parse_init_positions(positions):
# --position 000001.XSHE:1000,IF1701:-1
result = []
if not isinstance(positions, str):
return result
for s in positions.split(','):
try:
order_book_id, quantity = s.split(':')
except ValueError:
raise RuntimeError(_(u"invalid init position {}, should be in format 'order_book_id:quantity'").format(s))
try:
result.append((order_book_id, float(quantity)))
except ValueError:
raise RuntimeError(_(u"invalid quantity for instrument {order_book_id}: {quantity}").format(
order_book_id=order_book_id, quantity=quantity))
return result
def parse_run_type(rt_str):
assert isinstance(rt_str, six.string_types)
mapping = {
"b": RUN_TYPE.BACKTEST,
"p": RUN_TYPE.PAPER_TRADING,
"r": RUN_TYPE.LIVE_TRADING,
}
try:
return mapping[rt_str]
except KeyError:
raise RuntimeError(_(u"unknown run type: {}").format(rt_str))
def parse_persist_mode(persist_mode):
assert isinstance(persist_mode, six.string_types)
mapping = {
"real_time": PERSIST_MODE.REAL_TIME,
"on_crash": PERSIST_MODE.ON_CRASH,
"on_normal_exit": PERSIST_MODE.ON_NORMAL_EXIT,
}
try:
return mapping[persist_mode]
except KeyError:
raise RuntimeError(_(u"unknown persist mode: {}").format(persist_mode))
def parse_market(market):
assert isinstance(market, six.string_types)
mapping = {
"cn": MARKET.CN,
"hk": MARKET.HK
}
try:
return mapping[market.lower()]
except KeyError:
raise RuntimeError(_(u"unknown market type: {}".format(market)))
| []
| []
| [
"TZ"
]
| [] | ["TZ"] | python | 1 | 0 | |
fadderanmalan/admin/forms.py | from django.forms import ValidationError, ModelForm
class JobAdminForm(ModelForm):
def clean(self):
cleaned_data = super(JobAdminForm, self).clean()
if cleaned_data.get("hidden_until") > cleaned_data.get("hidden_after"):
raise ValidationError(dict(
hidden_until="'Hidden until' has to be before 'Hidden after'.",
hidden_after="'Hidden until' has to be before 'Hidden after'.",
))
if cleaned_data.get("locked_until") > cleaned_data.get("locked_after"):
raise ValidationError(dict(
locked_until="'Locked until' has to be before 'Locked after'.",
locked_after="'Locked until' has to be before 'Locked after'.",
))
if cleaned_data.get("start_date") == cleaned_data.get("end_date"):
if cleaned_data.get("start_time") > cleaned_data.get("end_time"):
raise ValidationError(dict(
start_time="'Start time' has to be before 'End time'.",
end_time="'Start time' has to be before 'End time'.",
))
elif cleaned_data.get("start_date") > cleaned_data.get("end_date"):
raise ValidationError(dict(
start_date="'Start date' has to be before 'End date'.",
end_date="'Start date' has to be before 'End date'.",
))
return cleaned_data
| []
| []
| []
| [] | [] | python | null | null | null |
vacation_rental/vacation_rental/settings.py | """
Django settings for vacation_rental project.
Generated by 'django-admin startproject' using Django 2.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('DEBUG', ''))
ALLOWED_HOSTS = ['localhost', 'testserver']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'vacation_rental'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'vacation_rental.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'vacation_rental.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER', ''),
'PASSWORD': os.environ.get('DB_PASSWORD', ''),
'HOST': os.environ.get('DB_HOST', 'localhost'),
'PORT': '5432',
'TEST': {
'NAME': os.environ.get('TEST_DB')
}
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
| []
| []
| [
"DB_PASSWORD",
"DB_HOST",
"DB_NAME",
"TEST_DB",
"SECRET_KEY",
"DEBUG",
"DB_USER"
]
| [] | ["DB_PASSWORD", "DB_HOST", "DB_NAME", "TEST_DB", "SECRET_KEY", "DEBUG", "DB_USER"] | python | 7 | 0 | |
program/object-detection-onnx-py/detect.py | #
# Copyright (c) 2018 cTuning foundation.
# See CK COPYRIGHT.txt for copyright details.
#
# See CK LICENSE for licensing details.
# See CK COPYRIGHT for copyright details.
#
# import sys
import os
import json
import numpy as np
import time
import onnxruntime as rt
from coco_helper import (load_preprocessed_batch, image_filenames, original_w_h,
class_labels, num_classes, bg_class_offset, class_map,
MODEL_DATA_LAYOUT, MODEL_COLOURS_BGR, MODEL_INPUT_DATA_TYPE, MODEL_DATA_TYPE, MODEL_USE_DLA,
MODEL_IMAGE_WIDTH, MODEL_IMAGE_HEIGHT, MODEL_IMAGE_CHANNELS,
IMAGE_DIR, IMAGE_LIST_FILE, MODEL_NORMALIZE_DATA, SUBTRACT_MEAN, GIVEN_CHANNEL_MEANS, BATCH_SIZE, BATCH_COUNT)
## Model properties:
#
MODEL_PATH = os.environ['CK_ENV_ONNX_MODEL_ONNX_FILEPATH']
INPUT_LAYER_NAME = os.environ['CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME']
OUTPUT_LAYER_BBOXES = os.environ['CK_ENV_ONNX_MODEL_OUTPUT_LAYER_BBOXES']
OUTPUT_LAYER_LABELS = os.environ['CK_ENV_ONNX_MODEL_OUTPUT_LAYER_LABELS']
OUTPUT_LAYER_SCORES = os.environ['CK_ENV_ONNX_MODEL_OUTPUT_LAYER_SCORES']
# Program parameters
SCORE_THRESHOLD = float(os.getenv('CK_DETECTION_THRESHOLD', 0.0))
CPU_THREADS = int(os.getenv('CK_HOST_CPU_NUMBER_OF_PROCESSORS',0))
## Writing the results out:
#
CUR_DIR = os.getcwd()
DETECTIONS_OUT_DIR = os.path.join(CUR_DIR, os.environ['CK_DETECTIONS_OUT_DIR'])
ANNOTATIONS_OUT_DIR = os.path.join(CUR_DIR, os.environ['CK_ANNOTATIONS_OUT_DIR'])
FULL_REPORT = os.getenv('CK_SILENT_MODE', '0') in ('NO', 'no', 'OFF', 'off', '0')
TIMER_JSON = 'tmp-ck-timer.json'
ENV_JSON = 'env.json'
def main():
global INPUT_LAYER_NAME
OPENME = {}
setup_time_begin = time.time()
# Load the ONNX model from file
sess_options = rt.SessionOptions()
# sess_options.session_log_verbosity_level = 0
if CPU_THREADS > 0:
sess_options.enable_sequential_execution = False
sess_options.session_thread_pool_size = CPU_THREADS
graph_load_time_begin = time.time()
sess = rt.InferenceSession(MODEL_PATH, sess_options)
graph_load_time = time.time() - graph_load_time_begin
input_layer_names = [x.name for x in sess.get_inputs()] # FIXME: check that INPUT_LAYER_NAME belongs to this list
INPUT_LAYER_NAME = INPUT_LAYER_NAME or input_layer_names[0]
output_layer_names = [x.name for x in sess.get_outputs()] # FIXME: check that OUTPUT_LAYER_NAME belongs to this list
model_input_shape = sess.get_inputs()[0].shape
model_input_type = sess.get_inputs()[0].type
model_input_type = np.uint8 if model_input_type == 'tensor(uint8)' else np.float32 # FIXME: there must be a more humane way!
# a more portable way to detect the number of classes
for output in sess.get_outputs():
if output.name == OUTPUT_LAYER_LABELS:
model_classes = output.shape[1]
print("Data layout: {}".format(MODEL_DATA_LAYOUT) )
print("Input layers: {}".format(input_layer_names))
print("Output layers: {}".format(output_layer_names))
print("Input layer name: " + INPUT_LAYER_NAME)
print("Expected input shape: {}".format(model_input_shape))
print("Expected input type: {}".format(model_input_type))
print("Output layer names: " + ", ".join([OUTPUT_LAYER_BBOXES, OUTPUT_LAYER_LABELS, OUTPUT_LAYER_SCORES]))
print("Data normalization: {}".format(MODEL_NORMALIZE_DATA))
print("Background/unlabelled classes to skip: {}".format(bg_class_offset))
print("")
try:
expected_batch_size = int(model_input_shape[0])
if BATCH_SIZE!=expected_batch_size:
raise Exception("expected_batch_size={}, desired CK_BATCH_SIZE={}, they do not match - exiting.".format(expected_batch_size, BATCH_SIZE))
except ValueError:
max_batch_size = None
setup_time = time.time() - setup_time_begin
# Run batched mode
test_time_begin = time.time()
total_load_time = 0
next_batch_offset = 0
total_inference_time = 0
first_inference_time = 0
images_loaded = 0
for batch_index in range(BATCH_COUNT):
batch_number = batch_index+1
begin_time = time.time()
current_batch_offset = next_batch_offset
batch_data, next_batch_offset = load_preprocessed_batch(image_filenames, current_batch_offset)
load_time = time.time() - begin_time
total_load_time += load_time
images_loaded += BATCH_SIZE
# Detect batch
begin_time = time.time()
run_options = rt.RunOptions()
# run_options.run_log_verbosity_level = 0
batch_results = sess.run([OUTPUT_LAYER_BBOXES, OUTPUT_LAYER_LABELS, OUTPUT_LAYER_SCORES], {INPUT_LAYER_NAME: batch_data}, run_options)
inference_time = time.time() - begin_time
print("[batch {} of {}] loading={:.2f} ms, inference={:.2f} ms".format(
batch_number, BATCH_COUNT, load_time*1000, inference_time*1000))
total_inference_time += inference_time
# Remember first batch prediction time
if batch_index == 0:
first_inference_time = inference_time
# Process results
for index_in_batch in range(BATCH_SIZE):
global_image_index = current_batch_offset + index_in_batch
width_orig, height_orig = original_w_h[global_image_index]
filename_orig = image_filenames[global_image_index]
detections_filename = os.path.splitext(filename_orig)[0] + '.txt'
detections_filepath = os.path.join(DETECTIONS_OUT_DIR, detections_filename)
with open(detections_filepath, 'w') as f:
f.write('{:d} {:d}\n'.format(width_orig, height_orig))
for i in range(len(batch_results[2][index_in_batch])):
confidence = batch_results[2][index_in_batch][i]
if confidence > SCORE_THRESHOLD:
class_number = int(batch_results[1][index_in_batch][i])
if class_map:
class_number = class_map[class_number]
else:
class_number = class_number
box = batch_results[0][index_in_batch][i]
x1 = box[0] * width_orig
y1 = box[1] * height_orig
x2 = box[2] * width_orig
y2 = box[3] * height_orig
class_label = class_labels[class_number - bg_class_offset]
f.write('{:.2f} {:.2f} {:.2f} {:.2f} {:.3f} {} {}\n'.format(x1,
y1,
x2,
y2,
confidence,
class_number,
class_label
)
)
test_time = time.time() - test_time_begin
if BATCH_COUNT > 1:
avg_inference_time = (total_inference_time - first_inference_time) / (images_loaded - BATCH_SIZE)
else:
avg_inference_time = total_inference_time / images_loaded
avg_load_time = total_load_time / images_loaded
# Save processed images ids list to be able to run
# evaluation without repeating detections (CK_SKIP_DETECTION=YES)
# with open(IMAGE_LIST_FILE, 'w') as f:
# f.write(json.dumps(processed_image_ids))
OPENME['setup_time_s'] = setup_time
OPENME['test_time_s'] = test_time
OPENME['load_images_time_total_s'] = total_load_time
OPENME['load_images_time_avg_s'] = avg_load_time
OPENME['prediction_time_total_s'] = total_inference_time
OPENME['prediction_time_avg_s'] = avg_inference_time
OPENME['avg_time_ms'] = avg_inference_time * 1000
OPENME['avg_fps'] = 1.0 / avg_inference_time if avg_inference_time > 0 else 0
run_time_state = {"run_time_state": OPENME}
with open(TIMER_JSON, 'w') as o:
json.dump(run_time_state, o, indent=2, sort_keys=True)
if __name__ == '__main__':
main()
| []
| []
| [
"CK_ENV_ONNX_MODEL_OUTPUT_LAYER_LABELS",
"CK_DETECTION_THRESHOLD",
"CK_ENV_ONNX_MODEL_OUTPUT_LAYER_SCORES",
"CK_ENV_ONNX_MODEL_ONNX_FILEPATH",
"CK_SILENT_MODE",
"CK_ENV_ONNX_MODEL_OUTPUT_LAYER_BBOXES",
"CK_HOST_CPU_NUMBER_OF_PROCESSORS",
"CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME",
"CK_DETECTIONS_OUT_DIR",
"CK_ANNOTATIONS_OUT_DIR"
]
| [] | ["CK_ENV_ONNX_MODEL_OUTPUT_LAYER_LABELS", "CK_DETECTION_THRESHOLD", "CK_ENV_ONNX_MODEL_OUTPUT_LAYER_SCORES", "CK_ENV_ONNX_MODEL_ONNX_FILEPATH", "CK_SILENT_MODE", "CK_ENV_ONNX_MODEL_OUTPUT_LAYER_BBOXES", "CK_HOST_CPU_NUMBER_OF_PROCESSORS", "CK_ENV_ONNX_MODEL_INPUT_LAYER_NAME", "CK_DETECTIONS_OUT_DIR", "CK_ANNOTATIONS_OUT_DIR"] | python | 10 | 0 | |
terraform/ibm/vendor/github.com/IBM-Cloud/terraform-provider-ibm/ibm/resource_ibm_is_vpn_gateway.go | // Copyright IBM Corp. 2017, 2021 All Rights Reserved.
// Licensed under the Mozilla Public License v2.0
package ibm
import (
"context"
"fmt"
"log"
"os"
"time"
"github.com/IBM/vpc-go-sdk/vpcv1"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (
isVPNGatewayName = "name"
isVPNGatewayResourceGroup = "resource_group"
isVPNGatewayMode = "mode"
isVPNGatewayCRN = "crn"
isVPNGatewayTags = "tags"
isVPNGatewaySubnet = "subnet"
isVPNGatewayStatus = "status"
isVPNGatewayDeleting = "deleting"
isVPNGatewayDeleted = "done"
isVPNGatewayProvisioning = "provisioning"
isVPNGatewayProvisioningDone = "done"
isVPNGatewayPublicIPAddress = "public_ip_address"
isVPNGatewayMembers = "members"
isVPNGatewayCreatedAt = "created_at"
isVPNGatewayPublicIPAddress2 = "public_ip_address2"
isVPNGatewayPrivateIPAddress = "private_ip_address"
isVPNGatewayPrivateIPAddress2 = "private_ip_address2"
)
func resourceIBMISVPNGateway() *schema.Resource {
return &schema.Resource{
Create: resourceIBMISVPNGatewayCreate,
Read: resourceIBMISVPNGatewayRead,
Update: resourceIBMISVPNGatewayUpdate,
Delete: resourceIBMISVPNGatewayDelete,
Exists: resourceIBMISVPNGatewayExists,
Importer: &schema.ResourceImporter{},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(10 * time.Minute),
},
CustomizeDiff: customdiff.Sequence(
func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
return resourceTagsCustomizeDiff(diff)
},
),
Schema: map[string]*schema.Schema{
isVPNGatewayName: {
Type: schema.TypeString,
Required: true,
ForceNew: false,
ValidateFunc: InvokeValidator("ibm_is_route", isVPNGatewayName),
Description: "VPN Gateway instance name",
},
isVPNGatewaySubnet: {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "VPNGateway subnet info",
},
isVPNGatewayResourceGroup: {
Type: schema.TypeString,
ForceNew: true,
Optional: true,
Computed: true,
Description: "The resource group for this VPN gateway",
},
isVPNGatewayStatus: {
Type: schema.TypeString,
Computed: true,
Description: "The status of the VPN gateway",
},
isVPNGatewayPublicIPAddress: {
Type: schema.TypeString,
Computed: true,
Description: "The public IP address assigned to the VPN gateway member.",
},
isVPNGatewayPublicIPAddress2: {
Type: schema.TypeString,
Computed: true,
Description: "The second public IP address assigned to the VPN gateway member.",
},
isVPNGatewayPrivateIPAddress: {
Type: schema.TypeString,
Computed: true,
Description: "The Private IP address assigned to the VPN gateway member.",
},
isVPNGatewayPrivateIPAddress2: {
Type: schema.TypeString,
Computed: true,
Description: "The Second Private IP address assigned to the VPN gateway member.",
},
isVPNGatewayTags: {
Type: schema.TypeSet,
Optional: true,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString, ValidateFunc: InvokeValidator("ibm_is_vpn_gateway", "tag")},
Set: resourceIBMVPCHash,
Description: "VPN Gateway tags list",
},
ResourceControllerURL: {
Type: schema.TypeString,
Computed: true,
Description: "The URL of the IBM Cloud dashboard that can be used to explore and view details about this instance",
},
ResourceName: {
Type: schema.TypeString,
Computed: true,
Description: "The name of the resource",
},
ResourceCRN: {
Type: schema.TypeString,
Computed: true,
Description: "The crn of the resource",
},
isVPNGatewayCRN: {
Type: schema.TypeString,
Computed: true,
Description: "The crn of the resource",
},
ResourceStatus: {
Type: schema.TypeString,
Computed: true,
Description: "The status of the resource",
},
ResourceGroupName: {
Type: schema.TypeString,
Computed: true,
Description: "The resource group name in which resource is provisioned",
},
isVPNGatewayCreatedAt: {
Type: schema.TypeString,
Computed: true,
Description: "Created Time of the VPN Gateway",
},
isVPNGatewayMode: {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Default: "route",
ValidateFunc: InvokeValidator("ibm_is_vpn_gateway", isVPNGatewayMode),
Description: "mode in VPN gateway(route/policy)",
},
isVPNGatewayMembers: {
Type: schema.TypeList,
Computed: true,
Description: "Collection of VPN gateway members",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"address": {
Type: schema.TypeString,
Computed: true,
Description: "The public IP address assigned to the VPN gateway member",
},
"private_address": {
Type: schema.TypeString,
Computed: true,
Description: "The private IP address assigned to the VPN gateway member",
},
"role": {
Type: schema.TypeString,
Computed: true,
Description: "The high availability role assigned to the VPN gateway member",
},
"status": {
Type: schema.TypeString,
Computed: true,
Description: "The status of the VPN gateway member",
},
},
},
},
},
}
}
func resourceIBMISVPNGatewayValidator() *ResourceValidator {
modeCheckTypes := "route,policy"
validateSchema := make([]ValidateSchema, 0)
validateSchema = append(validateSchema,
ValidateSchema{
Identifier: isVPNGatewayName,
ValidateFunctionIdentifier: ValidateRegexpLen,
Type: TypeString,
Required: true,
Regexp: `^([a-z]|[a-z][-a-z0-9]*[a-z0-9])$`,
MinValueLength: 1,
MaxValueLength: 63})
validateSchema = append(validateSchema,
ValidateSchema{
Identifier: isVPNGatewayMode,
ValidateFunctionIdentifier: ValidateAllowedStringValue,
Type: TypeString,
Required: false,
AllowedValues: modeCheckTypes})
validateSchema = append(validateSchema,
ValidateSchema{
Identifier: "tag",
ValidateFunctionIdentifier: ValidateRegexpLen,
Type: TypeString,
Optional: true,
Regexp: `^[A-Za-z0-9:_ .-]+$`,
MinValueLength: 1,
MaxValueLength: 128})
ibmISVPNGatewayResourceValidator := ResourceValidator{ResourceName: "ibm_is_vpn_gateway", Schema: validateSchema}
return &ibmISVPNGatewayResourceValidator
}
func resourceIBMISVPNGatewayCreate(d *schema.ResourceData, meta interface{}) error {
log.Printf("[DEBUG] VPNGateway create")
name := d.Get(isVPNGatewayName).(string)
subnetID := d.Get(isVPNGatewaySubnet).(string)
mode := d.Get(isVPNGatewayMode).(string)
err := vpngwCreate(d, meta, name, subnetID, mode)
if err != nil {
return err
}
return resourceIBMISVPNGatewayRead(d, meta)
}
func vpngwCreate(d *schema.ResourceData, meta interface{}, name, subnetID, mode string) error {
sess, err := vpcClient(meta)
if err != nil {
return err
}
vpnGatewayPrototype := &vpcv1.VPNGatewayPrototype{
Subnet: &vpcv1.SubnetIdentity{
ID: &subnetID,
},
Name: &name,
Mode: &mode,
}
options := &vpcv1.CreateVPNGatewayOptions{
VPNGatewayPrototype: vpnGatewayPrototype,
}
if rgrp, ok := d.GetOk(isVPNGatewayResourceGroup); ok {
rg := rgrp.(string)
vpnGatewayPrototype.ResourceGroup = &vpcv1.ResourceGroupIdentity{
ID: &rg,
}
}
vpnGatewayIntf, response, err := sess.CreateVPNGateway(options)
if err != nil {
return fmt.Errorf("[DEBUG] Create vpc VPN Gateway %s\n%s", err, response)
}
vpnGateway := vpnGatewayIntf.(*vpcv1.VPNGateway)
_, err = isWaitForVpnGatewayAvailable(sess, *vpnGateway.ID, d.Timeout(schema.TimeoutCreate))
if err != nil {
return err
}
d.SetId(*vpnGateway.ID)
log.Printf("[INFO] VPNGateway : %s", *vpnGateway.ID)
v := os.Getenv("IC_ENV_TAGS")
if _, ok := d.GetOk(isVPNGatewayTags); ok || v != "" {
oldList, newList := d.GetChange(isVPNGatewayTags)
err = UpdateTagsUsingCRN(oldList, newList, meta, *vpnGateway.CRN)
if err != nil {
log.Printf(
"Error on create of resource vpc VPN Gateway (%s) tags: %s", d.Id(), err)
}
}
return nil
}
func isWaitForVpnGatewayAvailable(vpnGateway *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) {
log.Printf("Waiting for vpn gateway (%s) to be available.", id)
stateConf := &resource.StateChangeConf{
Pending: []string{"retry", isVPNGatewayProvisioning},
Target: []string{isVPNGatewayProvisioningDone, ""},
Refresh: isVpnGatewayRefreshFunc(vpnGateway, id),
Timeout: timeout,
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}
return stateConf.WaitForState()
}
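// isVpnGatewayRefreshFunc polls the gateway status; "available", "failed" or
// "running" end the wait, any other status keeps it in the provisioning state.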
func isVpnGatewayRefreshFunc(vpnGateway *vpcv1.VpcV1, id string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{
ID: &id,
}
vpnGatewayIntf, response, err := vpnGateway.GetVPNGateway(getVpnGatewayOptions)
if err != nil {
return nil, "", fmt.Errorf("Error Getting Vpn Gateway: %s\n%s", err, response)
}
vpnGateway := vpnGatewayIntf.(*vpcv1.VPNGateway)
if *vpnGateway.Status == "available" || *vpnGateway.Status == "failed" || *vpnGateway.Status == "running" {
return vpnGateway, isVPNGatewayProvisioningDone, nil
}
return vpnGateway, isVPNGatewayProvisioning, nil
}
}
func resourceIBMISVPNGatewayRead(d *schema.ResourceData, meta interface{}) error {
id := d.Id()
err := vpngwGet(d, meta, id)
if err != nil {
return err
}
return nil
}
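// vpngwGet fetches the VPN gateway and copies its attributes, members and tags into
// the Terraform state; a 404 response clears the resource ID instead of failing.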
func vpngwGet(d *schema.ResourceData, meta interface{}, id string) error {
sess, err := vpcClient(meta)
if err != nil {
return err
}
getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{
ID: &id,
}
vpnGatewayIntf, response, err := sess.GetVPNGateway(getVpnGatewayOptions)
if err != nil {
if response != nil && response.StatusCode == 404 {
d.SetId("")
return nil
}
return fmt.Errorf("Error Getting Vpn Gateway (%s): %s\n%s", id, err, response)
}
vpnGateway := vpnGatewayIntf.(*vpcv1.VPNGateway)
d.Set(isVPNGatewayName, *vpnGateway.Name)
d.Set(isVPNGatewaySubnet, *vpnGateway.Subnet.ID)
d.Set(isVPNGatewayStatus, *vpnGateway.Status)
members := vpnGateway.Members
if len(members) > 0 {
d.Set(isVPNGatewayPublicIPAddress, *members[0].PublicIP.Address)
if members[0].PrivateIP != nil && members[0].PrivateIP.Address != nil {
d.Set(isVPNGatewayPrivateIPAddress, *members[0].PrivateIP.Address)
}
}
if len(members) > 1 {
d.Set(isVPNGatewayPublicIPAddress2, *members[1].PublicIP.Address)
if members[1].PrivateIP != nil && members[1].PrivateIP.Address != nil {
d.Set(isVPNGatewayPrivateIPAddress2, *members[1].PrivateIP.Address)
}
}
tags, err := GetTagsUsingCRN(meta, *vpnGateway.CRN)
if err != nil {
log.Printf(
"Error on get of resource vpc VPN Gateway (%s) tags: %s", d.Id(), err)
}
d.Set(isVPNGatewayTags, tags)
controller, err := getBaseController(meta)
if err != nil {
return err
}
d.Set(ResourceControllerURL, controller+"/vpc/network/vpngateways")
d.Set(ResourceName, *vpnGateway.Name)
d.Set(ResourceCRN, *vpnGateway.CRN)
d.Set(isVPNGatewayCRN, *vpnGateway.CRN)
d.Set(ResourceStatus, *vpnGateway.Status)
if vpnGateway.ResourceGroup != nil {
d.Set(ResourceGroupName, *vpnGateway.ResourceGroup.Name)
d.Set(isVPNGatewayResourceGroup, *vpnGateway.ResourceGroup.ID)
}
d.Set(isVPNGatewayMode, *vpnGateway.Mode)
if vpnGateway.Members != nil {
vpcMembersIpsList := make([]map[string]interface{}, 0)
for _, memberIP := range vpnGateway.Members {
currentMemberIP := map[string]interface{}{}
if memberIP.PublicIP != nil {
currentMemberIP["address"] = *memberIP.PublicIP.Address
currentMemberIP["role"] = *memberIP.Role
currentMemberIP["status"] = *memberIP.Status
vpcMembersIpsList = append(vpcMembersIpsList, currentMemberIP)
}
if memberIP.PrivateIP != nil {
currentMemberIP["private_address"] = *memberIP.PrivateIP.Address
}
}
d.Set(isVPNGatewayMembers, vpcMembersIpsList)
}
if vpnGateway.CreatedAt != nil {
d.Set(isVPNGatewayCreatedAt, (vpnGateway.CreatedAt).String())
}
return nil
}
func resourceIBMISVPNGatewayUpdate(d *schema.ResourceData, meta interface{}) error {
id := d.Id()
name := ""
hasChanged := false
if d.HasChange(isVPNGatewayName) {
name = d.Get(isVPNGatewayName).(string)
hasChanged = true
}
err := vpngwUpdate(d, meta, id, name, hasChanged)
if err != nil {
return err
}
return resourceIBMISVPNGatewayRead(d, meta)
}
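// vpngwUpdate applies tag changes via the gateway CRN and, when the name has
// changed, patches the gateway with the new name.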
func vpngwUpdate(d *schema.ResourceData, meta interface{}, id, name string, hasChanged bool) error {
sess, err := vpcClient(meta)
if err != nil {
return err
}
if d.HasChange(isVPNGatewayTags) {
getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{
ID: &id,
}
vpnGatewayIntf, response, err := sess.GetVPNGateway(getVpnGatewayOptions)
if err != nil {
return fmt.Errorf("Error getting Volume : %s\n%s", err, response)
}
vpnGateway := vpnGatewayIntf.(*vpcv1.VPNGateway)
oldList, newList := d.GetChange(isVPNGatewayTags)
err = UpdateTagsUsingCRN(oldList, newList, meta, *vpnGateway.CRN)
if err != nil {
log.Printf(
"Error on update of resource vpc Vpn Gateway (%s) tags: %s", id, err)
}
}
if hasChanged {
options := &vpcv1.UpdateVPNGatewayOptions{
ID: &id,
}
vpnGatewayPatchModel := &vpcv1.VPNGatewayPatch{
Name: &name,
}
vpnGatewayPatch, err := vpnGatewayPatchModel.AsPatch()
if err != nil {
return fmt.Errorf("Error calling asPatch for VPNGatewayPatch: %s", err)
}
options.VPNGatewayPatch = vpnGatewayPatch
_, response, err := sess.UpdateVPNGateway(options)
if err != nil {
return fmt.Errorf("Error updating vpc Vpn Gateway: %s\n%s", err, response)
}
}
return nil
}
func resourceIBMISVPNGatewayDelete(d *schema.ResourceData, meta interface{}) error {
id := d.Id()
err := vpngwDelete(d, meta, id)
if err != nil {
return err
}
return nil
}
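// vpngwDelete confirms the gateway still exists (a 404 is treated as already
// deleted), issues the delete request and waits for the deletion to finish before
// clearing the resource ID.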
func vpngwDelete(d *schema.ResourceData, meta interface{}, id string) error {
sess, err := vpcClient(meta)
if err != nil {
return err
}
getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{
ID: &id,
}
_, response, err := sess.GetVPNGateway(getVpnGatewayOptions)
if err != nil {
if response != nil && response.StatusCode == 404 {
return nil
}
return fmt.Errorf("Error Getting Vpn Gateway (%s): %s\n%s", id, err, response)
}
options := &vpcv1.DeleteVPNGatewayOptions{
ID: &id,
}
response, err = sess.DeleteVPNGateway(options)
if err != nil {
return fmt.Errorf("Error Deleting Vpn Gateway : %s\n%s", err, response)
}
_, err = isWaitForVpnGatewayDeleted(sess, id, d.Timeout(schema.TimeoutDelete))
if err != nil {
return err
}
d.SetId("")
return nil
}
func isWaitForVpnGatewayDeleted(vpnGateway *vpcv1.VpcV1, id string, timeout time.Duration) (interface{}, error) {
log.Printf("Waiting for VPNGateway (%s) to be deleted.", id)
stateConf := &resource.StateChangeConf{
Pending: []string{"retry", isVPNGatewayDeleting},
Target: []string{isVPNGatewayDeleted, ""},
Refresh: isVpnGatewayDeleteRefreshFunc(vpnGateway, id),
Timeout: timeout,
Delay: 10 * time.Second,
MinTimeout: 10 * time.Second,
}
return stateConf.WaitForState()
}
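// isVpnGatewayDeleteRefreshFunc polls the gateway and reports it as deleted once
// the API returns a 404 for it.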
func isVpnGatewayDeleteRefreshFunc(vpnGateway *vpcv1.VpcV1, id string) resource.StateRefreshFunc {
return func() (interface{}, string, error) {
getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{
ID: &id,
}
vpngw, response, err := vpnGateway.GetVPNGateway(getVpnGatewayOptions)
if err != nil {
if response != nil && response.StatusCode == 404 {
return "", isVPNGatewayDeleted, nil
}
return "", "", fmt.Errorf("Error Getting Vpn Gateway: %s\n%s", err, response)
}
return vpngw, isVPNGatewayDeleting, err
}
}
func resourceIBMISVPNGatewayExists(d *schema.ResourceData, meta interface{}) (bool, error) {
id := d.Id()
exists, err := vpngwExists(d, meta, id)
return exists, err
}
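// vpngwExists reports whether the VPN gateway still exists, returning false without
// an error when the API responds with a 404.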
func vpngwExists(d *schema.ResourceData, meta interface{}, id string) (bool, error) {
sess, err := vpcClient(meta)
if err != nil {
return false, err
}
getVpnGatewayOptions := &vpcv1.GetVPNGatewayOptions{
ID: &id,
}
_, response, err := sess.GetVPNGateway(getVpnGatewayOptions)
if err != nil {
if response != nil && response.StatusCode == 404 {
return false, nil
}
return false, fmt.Errorf("Error getting Vpn Gatewa: %s\n%s", err, response)
}
return true, nil
}
| [
"\"IC_ENV_TAGS\""
]
| []
| [
"IC_ENV_TAGS"
]
| [] | ["IC_ENV_TAGS"] | go | 1 | 0 | |
directlinkv1/direct_link_v1_integration_test.go | /**
* (C) Copyright IBM Corp. 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package directlinkv1_test
/*
How to run this test:
go test -v ./directlinkv1
*/
import (
"bytes"
"io/ioutil"
"os"
"strconv"
"time"
"github.com/IBM/go-sdk-core/v4/core"
"github.com/IBM/networking-go-sdk/directlinkv1"
"github.com/joho/godotenv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var configLoaded = false
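// shouldSkipTest skips the current spec when the external configuration in
// ../directlink.env could not be loaded.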
func shouldSkipTest() {
if !configLoaded {
Skip("External configuration is not available, skipping...")
}
}
var _ = Describe(`DirectLinkV1`, func() {
err := godotenv.Load("../directlink.env")
It(`Successfully loading .env file`, func() {
if err == nil {
serviceURL := os.Getenv("SERVICE_URL")
if serviceURL != "" {
configLoaded = true
}
}
if !configLoaded {
Skip("External configuration could not be loaded, skipping...")
}
})
authenticator := &core.IamAuthenticator{
ApiKey: os.Getenv("IAMAPIKEY"),
URL: "https://iam.test.cloud.ibm.com/identity/token",
}
version := time.Now().Format("2006-01-02")
serviceURL := os.Getenv("SERVICE_URL")
options := &directlinkv1.DirectLinkV1Options{
ServiceName: "DirectLinkV1_Mocking",
Authenticator: authenticator,
URL: serviceURL,
Version: &version,
}
service, err := directlinkv1.NewDirectLinkV1UsingExternalConfig(options)
It(`Successfully created DirectLinkV1 service instance`, func() {
shouldSkipTest()
Expect(err).To(BeNil())
})
Describe("Direct Link Gateways", func() {
timestamp := time.Now().Unix()
gatewayName := "GO-INT-SDK-" + strconv.FormatInt(timestamp, 10)
updatedGatewayName := "GO-INT-SDK-PATCH-" + strconv.FormatInt(timestamp, 10)
bgpAsn := int64(64999)
crossConnectRouter := "LAB-xcr01.dal09"
global := true
locationName := os.Getenv("LOCATION_NAME")
speedMbps := int64(1000)
metered := false
carrierName := "carrier1"
customerName := "customer1"
gatewayType := "dedicated"
invalidGatewayId := "000000000000000000000000000000000000"
Context("Get non existing gateway", func() {
getGatewayOptions := service.NewGetGatewayOptions(invalidGatewayId)
It(`Returns the http response with error code 404`, func() {
shouldSkipTest()
result, detailedResponse, err := service.GetGateway(getGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Gateway"))
Expect(detailedResponse.StatusCode).To(Equal(404))
})
})
Context("Create gateway", func() {
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
It("Fails when Invalid BGP is provided", func() {
shouldSkipTest()
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(65500, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("BGP AS Number is invalid."))
Expect(detailedResponse.StatusCode).To(Equal(400))
})
It("Fails when invalid speed_mbps is provided", func() {
shouldSkipTest()
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, 10000000000, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Location with provided 'linkSpeed' and 'OfferingType'."))
Expect(detailedResponse.StatusCode).To(Equal(400))
})
It("Fails when invalid locations is provided", func() {
shouldSkipTest()
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, "InvalidCity")
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Location with provided 'linkSpeed' and 'OfferingType'."))
Expect(detailedResponse.StatusCode).To(Equal(400))
})
It("Successfully Creates a gateway", func() {
shouldSkipTest()
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
os.Setenv("GATEWAY_ID", *result.ID)
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(global))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
It("Successfully fetches the created Gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
getGatewayOptions := service.NewGetGatewayOptions(gatewayId)
result, detailedResponse, err := service.GetGateway(getGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(gatewayId))
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(global))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
It("Throws an Error when creating a gateway with same name", func() {
shouldSkipTest()
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("A gateway with the same name already exists"))
Expect(detailedResponse.StatusCode).To(Equal(409))
})
})
Context("Successfully fetch the gateways list", func() {
listGatewaysOptions := service.NewListGatewaysOptions()
It(`Successfully list all gateways`, func() {
shouldSkipTest()
result, detailedResponse, err := service.ListGateways(listGatewaysOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
gateways := result.Gateways
Expect(len(gateways)).Should(BeNumerically(">", 0))
found := false
// find the created gateway and verify the attributes
gatewayId := os.Getenv("GATEWAY_ID")
for _, gw := range gateways {
if *gw.ID == gatewayId {
found = true
Expect(*gw.Name).To(Equal(gatewayName))
Expect(*gw.BgpAsn).To(Equal(bgpAsn))
Expect(*gw.Global).To(Equal(global))
Expect(*gw.Metered).To(Equal(metered))
Expect(*gw.SpeedMbps).To(Equal(speedMbps))
Expect(*gw.Type).To(Equal(gatewayType))
Expect(*gw.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*gw.LocationName).To(Equal(locationName))
Expect(*gw.LocationDisplayName).NotTo(Equal(""))
Expect(*gw.BgpCerCidr).NotTo(BeEmpty())
Expect(*gw.BgpIbmCidr).NotTo(Equal(""))
Expect(*gw.BgpIbmAsn).NotTo(Equal(""))
Expect(*gw.BgpStatus).To(Equal("idle"))
Expect(*gw.CreatedAt).NotTo(Equal(""))
Expect(*gw.Crn).To(HavePrefix("crn:v1"))
Expect(*gw.LinkStatus).To(Equal("down"))
Expect(*gw.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*gw.ResourceGroup.ID).NotTo(Equal(""))
break
}
}
// expect the created gateway to have been found. If not found, throw an error
Expect(found).To(Equal(true))
})
})
Context("Fail update Gateway", func() {
It("Fails if an invalid GatewayID is provided", func() {
shouldSkipTest()
patchGatewayOptions := service.NewUpdateGatewayOptions(invalidGatewayId).SetOperationalStatus("loa_accepted")
result, detailedResponse, err := service.UpdateGateway(patchGatewayOptions)
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Gateway"))
Expect(detailedResponse.StatusCode).To(Equal(404))
})
It("Successfully Updates the Gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
patchGatewayOptions := service.NewUpdateGatewayOptions(gatewayId)
result, detailedResponse, err := service.UpdateGateway(patchGatewayOptions.SetGlobal(false).SetSpeedMbps(int64(1000)).SetName(updatedGatewayName))
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(gatewayId))
Expect(*result.Name).To(Equal(updatedGatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(false))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
It("Successfully fetches the updated Gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
getGatewayOptions := service.NewGetGatewayOptions(gatewayId)
result, detailedResponse, err := service.GetGateway(getGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(gatewayId))
Expect(*result.Name).To(Equal(updatedGatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(false))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
})
Context("Delete a gateway", func() {
It("Fails if an invalid GatewayID is provided", func() {
shouldSkipTest()
deleteGatewayOptions := service.NewDeleteGatewayOptions(invalidGatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find Gateway"))
Expect(detailedResponse.StatusCode).To(Equal(404))
})
It("Successfully deletes a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
Context("DirectLink connect gateway", func() {
// to create a connect gateway, we need to have a port. List the ports and save the id of the 1st one found
portId := ""
portLocationDisplayName := ""
portLocationName := ""
timestamp := time.Now().Unix()
It("List ports and save the id of the first port", func() {
shouldSkipTest()
listPortsOptions := service.NewListPortsOptions()
result, detailedResponse, err := service.ListPorts(listPortsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
portId = *result.Ports[0].ID
portLocationDisplayName = *result.Ports[0].LocationDisplayName
portLocationName = *result.Ports[0].LocationName
})
It("create connect gateway", func() {
shouldSkipTest()
gatewayName = "GO-INT-SDK-CONNECT-" + strconv.FormatInt(timestamp, 10)
portIdentity, _ := service.NewGatewayPortIdentity(portId)
gateway, _ := service.NewGatewayTemplateGatewayTypeConnectTemplate(bgpAsn, global, metered, gatewayName, speedMbps, "connect", portIdentity)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
// Save the gateway id for deletion
os.Setenv("GATEWAY_ID", *result.ID)
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(true))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.LocationName).To(Equal(portLocationName))
Expect(*result.LocationDisplayName).To(Equal(portLocationDisplayName))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(0))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.OperationalStatus).To(Equal("create_pending"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
Expect(*result.Type).To(Equal("connect"))
Expect(*result.Port.ID).To(Equal(portId))
Expect(*result.ProviderApiManaged).To(Equal(false))
})
It("Successfully waits for connect gateway to be provisioned state", func() {
shouldSkipTest()
getGatewayOptions := service.NewGetGatewayOptions(os.Getenv("GATEWAY_ID"))
// before a connect gateway can be deleted, it needs to have operational_status of provisioned. We need to wait for
// the new gateway to go to provisioned so we can delete it.
timer := 0
for {
// Get the current status for the gateway
result, detailedResponse, err := service.GetGateway(getGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(true))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.LocationName).To(Equal(portLocationName))
Expect(*result.LocationDisplayName).To(Equal(portLocationDisplayName))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(0))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
Expect(*result.Type).To(Equal("connect"))
Expect(*result.Port.ID).To(Equal(portId))
Expect(*result.ProviderApiManaged).To(Equal(false))
// if operational status is "provisioned" then we are done
if *result.OperationalStatus == "provisioned" {
Expect(*result.OperationalStatus).To(Equal("provisioned"))
break
}
// not provisioned yet, see if we have reached the timeout value. If so, exit with failure
if timer > 24 { // 2 min timer (24x5sec)
Expect(*result.OperationalStatus).To(Equal("provisioned")) // timed out fail if status is not provisioned
break
} else {
// Still exists, wait 5 sec
time.Sleep(time.Duration(5) * time.Second)
timer = timer + 1
}
}
})
It("Successfully deletes connect gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
// Context("DirectLink MACsec Enabled Gateway", func() {
// timestamp := time.Now().Unix()
// gatewayName := "GO-INT-SDK-MACSEC" + strconv.FormatInt(timestamp, 10)
// updatedGatewayName := "GO-INT-SDK-MACSEC-PATCH-" + strconv.FormatInt(timestamp, 10)
// bgpAsn := int64(64999)
// crossConnectRouter := "LAB-xcr01.dal09"
// global := true
// locationName := os.Getenv("LOCATION_NAME")
// speedMbps := int64(1000)
// metered := false
// carrierName := "carrier1"
// customerName := "customer1"
// gatewayType := "dedicated"
// macsecCak := os.Getenv("MACSEC_CAK")
// macsecSakExpiryTime := int64(86400)
// macsecWindowSize := int64(64)
// It("Create a macsec enabled dedicated gateway", func() {
// shouldSkipTest()
// // Construct an instance of the GatewayMacsecCak model
// gatewayMacsecCak := new(directlinkv1.GatewayMacsecConfigTemplatePrimaryCak)
// gatewayMacsecCak.Crn = core.StringPtr(macsecCak)
// // Construct an instance of the GatewayMacsecConfigTemplate model
// gatewayMacsecConfigTemplate := new(directlinkv1.GatewayMacsecConfigTemplate)
// gatewayMacsecConfigTemplate.Active = core.BoolPtr(true)
// gatewayMacsecConfigTemplate.PrimaryCak = gatewayMacsecCak
// gatewayMacsecConfigTemplate.WindowSize = core.Int64Ptr(macsecWindowSize)
// gatewayTemplate := new(directlinkv1.GatewayTemplateGatewayTypeDedicatedTemplate)
// gatewayTemplate.BgpAsn = core.Int64Ptr(bgpAsn)
// gatewayTemplate.Global = core.BoolPtr(global)
// gatewayTemplate.Metered = core.BoolPtr(metered)
// gatewayTemplate.Name = core.StringPtr(gatewayName)
// gatewayTemplate.SpeedMbps = core.Int64Ptr(int64(1000))
// gatewayTemplate.Type = core.StringPtr(gatewayType)
// gatewayTemplate.CarrierName = core.StringPtr(carrierName)
// gatewayTemplate.CrossConnectRouter = core.StringPtr(crossConnectRouter)
// gatewayTemplate.CustomerName = core.StringPtr(customerName)
// gatewayTemplate.LocationName = core.StringPtr(locationName)
// gatewayTemplate.MacsecConfig = gatewayMacsecConfigTemplate
// createGatewayOptions := service.NewCreateGatewayOptions(gatewayTemplate)
// result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
// Expect(err).To(BeNil())
// Expect(detailedResponse.StatusCode).To(Equal(201))
// os.Setenv("GATEWAY_ID", *result.ID)
// Expect(*result.Name).To(Equal(gatewayName))
// Expect(*result.BgpAsn).To(Equal(bgpAsn))
// Expect(*result.Global).To(Equal(global))
// Expect(*result.Metered).To(Equal(metered))
// Expect(*result.SpeedMbps).To(Equal(speedMbps))
// Expect(*result.Type).To(Equal(gatewayType))
// Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
// Expect(*result.LocationName).To(Equal(locationName))
// Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
// Expect(*result.MacsecConfig.Active).To(Equal(true))
// Expect(*result.MacsecConfig.PrimaryCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.SakExpiryTime).To(Equal(macsecSakExpiryTime))
// Expect(*result.MacsecConfig.WindowSize).To(Equal(macsecWindowSize))
// })
// It("Should successfully update the macsec enabled gateway", func() {
// shouldSkipTest()
// // Construct an instance of the GatewayMacsecCak model
// gatewayMacsecCak := new(directlinkv1.GatewayMacsecConfigPatchTemplateFallbackCak)
// gatewayMacsecCak.Crn = core.StringPtr(macsecCak)
// // Construct an instance of the GatewayMacsecConfigTemplate model
// gatewayMacsecConfigPatchTemplate := new(directlinkv1.GatewayMacsecConfigPatchTemplate)
// gatewayMacsecConfigPatchTemplate.FallbackCak = gatewayMacsecCak
// gatewayId := os.Getenv("GATEWAY_ID")
// patchGatewayOptions := service.NewUpdateGatewayOptions(gatewayId)
// result, detailedResponse, err := service.UpdateGateway(patchGatewayOptions.SetName(updatedGatewayName).SetMacsecConfig(gatewayMacsecConfigPatchTemplate))
// Expect(err).To(BeNil())
// Expect(detailedResponse.StatusCode).To(Equal(200))
// Expect(*result.ID).To(Equal(gatewayId))
// Expect(*result.Name).To(Equal(updatedGatewayName))
// Expect(*result.MacsecConfig.Active).To(Equal(true))
// Expect(*result.MacsecConfig.PrimaryCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.FallbackCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.SakExpiryTime).To(Equal(macsecSakExpiryTime))
// Expect(*result.MacsecConfig.WindowSize).To(Equal(macsecWindowSize))
// })
// It("Successfully waits for macsec enabled gateway to be provisioned state", func() {
// shouldSkipTest()
// getGatewayOptions := service.NewGetGatewayOptions(os.Getenv("GATEWAY_ID"))
// // before a dedicated gateway can be deleted, it needs to have operational_status of provisioned. We need to wait for
// // the new gateway to go to provisioned so we can delete it.
// timer := 0
// for {
// // Get the current status for the gateway
// result, detailedResponse, err := service.GetGateway(getGatewayOptions)
// Expect(err).To(BeNil())
// Expect(detailedResponse.StatusCode).To(Equal(200))
// Expect(*result.Name).To(Equal(updatedGatewayName))
// Expect(*result.BgpAsn).To(Equal(bgpAsn))
// Expect(*result.Global).To(Equal(true))
// Expect(*result.Metered).To(Equal(metered))
// Expect(*result.SpeedMbps).To(Equal(speedMbps))
// Expect(*result.BgpCerCidr).NotTo(BeEmpty())
// Expect(*result.BgpIbmCidr).NotTo(Equal(""))
// Expect(*result.BgpIbmAsn).NotTo(Equal(0))
// Expect(*result.BgpStatus).To(Equal("idle"))
// Expect(*result.CreatedAt).NotTo(Equal(""))
// Expect(*result.Crn).To(HavePrefix("crn:v1"))
// Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
// Expect(*result.Type).To(Equal("dedicated"))
// Expect(*result.ProviderApiManaged).To(Equal(false))
// Expect(*result.MacsecConfig.Active).To(Equal(true))
// Expect(*result.MacsecConfig.PrimaryCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.FallbackCak.Crn).To(Equal(macsecCak))
// Expect(*result.MacsecConfig.SakExpiryTime).To(Equal(macsecSakExpiryTime))
// Expect(*result.MacsecConfig.WindowSize).To(Equal(macsecWindowSize))
// // if operational status is "provisioned" then we are done
// if *result.OperationalStatus == "provisioned" {
// Expect(*result.OperationalStatus).To(Equal("provisioned"))
// break
// }
// // not provisioned yet, see if we have reached the timeout value. If so, exit with failure
// if timer > 24 { // 2 min timer (24x5sec)
// Expect(*result.OperationalStatus).To(Equal("provisioned")) // timed out fail if status is not provisioned
// break
// } else {
// // Still exists, wait 5 sec
// time.Sleep(time.Duration(5) * time.Second)
// timer = timer + 1
// }
// }
// })
// It("Successfully deletes macsec enabled gateway gateway", func() {
// shouldSkipTest()
// gatewayId := os.Getenv("GATEWAY_ID")
// deteleGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
// detailedResponse, err := service.DeleteGateway(deteleGatewayOptions)
// Expect(err).To(BeNil())
// Expect(detailedResponse.StatusCode).To(Equal(204))
// })
// })
})
Describe("Offering Types", func() {
Context("Locations", func() {
It("should fetch the locations for the type dedicated", func() {
shouldSkipTest()
listOfferingTypeLocationsOptions := service.NewListOfferingTypeLocationsOptions("dedicated")
result, detailedResponse, err := service.ListOfferingTypeLocations(listOfferingTypeLocationsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Locations)).Should(BeNumerically(">", 0))
os.Setenv("OT_DEDICATED_LOCATION_DISPLAY_NAME", *result.Locations[0].DisplayName)
os.Setenv("OT_DEDICATED_LOCATION_NAME", *result.Locations[0].Name)
Expect(*result.Locations[0].BillingLocation).NotTo(Equal(""))
Expect(*result.Locations[0].BuildingColocationOwner).NotTo(Equal(""))
Expect(*result.Locations[0].LocationType).NotTo(Equal(""))
// Expect(*result.Locations[0].Market).NotTo(Equal(""))
Expect(*result.Locations[0].MarketGeography).NotTo(Equal(""))
Expect(*result.Locations[0].Mzr).NotTo(Equal(""))
Expect(*result.Locations[0].OfferingType).To(Equal("dedicated"))
Expect(*result.Locations[0].ProvisionEnabled).NotTo(BeNil())
Expect(*result.Locations[0].VpcRegion).NotTo(Equal(""))
})
It("should fetch the locations for the type connect", func() {
shouldSkipTest()
listOfferingTypeLocationsOptions := service.NewListOfferingTypeLocationsOptions("connect")
result, detailedResponse, err := service.ListOfferingTypeLocations(listOfferingTypeLocationsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Locations)).Should(BeNumerically(">", 0))
os.Setenv("OT_CONNECT_LOCATION_DISPLAY_NAME", *result.Locations[0].DisplayName)
os.Setenv("OT_CONNECT_LOCATION_NAME", *result.Locations[0].Name)
Expect(*result.Locations[0].BillingLocation).NotTo(Equal(""))
Expect(*result.Locations[0].LocationType).NotTo(Equal(""))
// Expect(*result.Locations[0].Market).NotTo(Equal(""))
Expect(*result.Locations[0].MarketGeography).NotTo(Equal(""))
Expect(*result.Locations[0].Mzr).NotTo(Equal(""))
Expect(*result.Locations[0].OfferingType).To(Equal("connect"))
Expect(*result.Locations[0].ProvisionEnabled).NotTo(BeNil())
Expect(*result.Locations[0].VpcRegion).NotTo(Equal(""))
})
It("should return an error for invalid location type", func() {
shouldSkipTest()
listOfferingTypeLocationsOptions := service.NewListOfferingTypeLocationsOptions("RANDOM")
result, detailedResponse, err := service.ListOfferingTypeLocations(listOfferingTypeLocationsOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("offering_type_location: RANDOM"))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
})
Context("Cross Connect Routers", func() {
It("should list the location info for type dedicated and location short name", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("dedicated", os.Getenv("OT_DEDICATED_LOCATION_NAME"))
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.CrossConnectRouters)).Should(BeNumerically(">", 0))
Expect(*result.CrossConnectRouters[0].RouterName).NotTo(Equal(""))
Expect(*result.CrossConnectRouters[0].TotalConnections).Should(BeNumerically(">=", 0))
})
It("should list the location info for type dedicated and location display name", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("dedicated", os.Getenv("OT_DEDICATED_LOCATION_DISPLAY_NAME"))
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.CrossConnectRouters)).Should(BeNumerically(">", 0))
Expect(*result.CrossConnectRouters[0].RouterName).NotTo(Equal(""))
Expect(*result.CrossConnectRouters[0].TotalConnections).Should(BeNumerically(">=", 0))
})
It("should return proper error when unsupported offering type CONNECT is provided", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("connect", os.Getenv("OT_CONNECT_LOCATION_NAME"))
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("The supplied OfferingType is not supported for this call"))
Expect(detailedResponse.StatusCode).To(Equal(400))
Expect(result).To(BeNil())
})
It("should return proper error when incorrect offering type is provided", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("random", os.Getenv("OT_CONNECT_LOCATION_DISPLAY_NAME"))
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Invalid Direct Link Offering Type."))
Expect(detailedResponse.StatusCode).To(Equal(400))
Expect(result).To(BeNil())
})
It("should return proper error when incorrect location is provided", func() {
shouldSkipTest()
listOfferingTypeLocationCrossConnectRoutersOptions := service.NewListOfferingTypeLocationCrossConnectRoutersOptions("dedicated", "florida")
result, detailedResponse, err := service.ListOfferingTypeLocationCrossConnectRouters(listOfferingTypeLocationCrossConnectRoutersOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Classic Location not found: florida"))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
})
Context("Offering Speeds", func() {
It("should fetch the offering speeds for the type dedicated", func() {
shouldSkipTest()
listOfferingTypeSpeedsOptions := service.NewListOfferingTypeSpeedsOptions("dedicated")
result, detailedResponse, err := service.ListOfferingTypeSpeeds(listOfferingTypeSpeedsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Speeds)).Should(BeNumerically(">", 0))
})
It("should fetch the offering speeds for the type connect", func() {
shouldSkipTest()
listOfferingTypeSpeedsOptions := service.NewListOfferingTypeSpeedsOptions("connect")
result, detailedResponse, err := service.ListOfferingTypeSpeeds(listOfferingTypeSpeedsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Speeds)).Should(BeNumerically(">", 0))
})
It("should proper error for invalid offering type", func() {
shouldSkipTest()
listOfferingTypeSpeedsOptions := service.NewListOfferingTypeSpeedsOptions("random")
result, detailedResponse, err := service.ListOfferingTypeSpeeds(listOfferingTypeSpeedsOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Cannot find OfferingType"))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
})
})
Describe("Ports", func() {
It("should fetch the ports", func() {
shouldSkipTest()
listPortsOptions := service.NewListPortsOptions()
result, detailedResponse, err := service.ListPorts(listPortsOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(len(result.Ports)).Should(BeNumerically(">", 0))
Expect(*result.Ports[0].ID).NotTo(Equal(""))
Expect(*result.Ports[0].DirectLinkCount).Should(BeNumerically(">=", 0))
Expect(*result.Ports[0].Label).NotTo(Equal(""))
Expect(*result.Ports[0].LocationDisplayName).NotTo(Equal(""))
Expect(*result.Ports[0].LocationName).NotTo(Equal(""))
Expect(*result.Ports[0].ProviderName).NotTo(Equal(""))
Expect(len(result.Ports[0].SupportedLinkSpeeds)).Should(BeNumerically(">=", 0))
os.Setenv("PORT_ID", *result.Ports[0].ID)
os.Setenv("PORT_LOCATION_DISPLAY_NAME", *result.Ports[0].LocationDisplayName)
os.Setenv("PORT_LOCATION_NAME", *result.Ports[0].LocationName)
os.Setenv("PORT_LABEL", *result.Ports[0].Label)
})
It("should fetch the port by ID", func() {
shouldSkipTest()
portId := os.Getenv("PORT_ID")
locationDisplayName := os.Getenv("PORT_LOCATION_DISPLAY_NAME")
locationName := os.Getenv("PORT_LOCATION_NAME")
label := os.Getenv("PORT_LABEL")
getPortOptions := service.NewGetPortOptions(portId)
result, detailedResponse, err := service.GetPort(getPortOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(portId))
Expect(*result.LocationDisplayName).To(Equal(locationDisplayName))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.Label).To(Equal(label))
Expect(*result.DirectLinkCount).Should(BeNumerically(">=", 0))
Expect(*result.ProviderName).NotTo(Equal(""))
Expect(len(result.SupportedLinkSpeeds)).Should(BeNumerically(">=", 0))
})
})
Describe("Direct Link Virtual Connections", func() {
timestamp := time.Now().Unix()
gatewayName := "GO-INT-VC-SDK-" + strconv.FormatInt(timestamp, 10)
bgpAsn := int64(64999)
crossConnectRouter := "LAB-xcr01.dal09"
global := true
locationName := os.Getenv("LOCATION_NAME")
speedMbps := int64(1000)
metered := false
carrierName := "carrier1"
customerName := "customer1"
gatewayType := "dedicated"
Context("Create gateway", func() {
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
It("Successfully created a gateway", func() {
shouldSkipTest()
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
os.Setenv("GATEWAY_ID", *result.ID)
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(global))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
})
It("Successfully create a CLASSIC virtual connection", func() {
shouldSkipTest()
vcName := "GO-INT-CLASSIC-VC-SDK-" + strconv.FormatInt(timestamp, 10)
createGatewayVCOptions := service.NewCreateGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), vcName, directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Classic)
result, detailedResponse, err := service.CreateGatewayVirtualConnection(createGatewayVCOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
os.Setenv("CLASSIC_VC_ID", *result.ID)
Expect(*result.ID).NotTo(Equal(""))
Expect(*result.Name).To(Equal(vcName))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Classic))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
})
It("Successfully get a CLASSIC virtual connection", func() {
shouldSkipTest()
vcName := "GO-INT-CLASSIC-VC-SDK-" + strconv.FormatInt(timestamp, 10)
getGatewayVCOptions := service.NewGetGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), os.Getenv("CLASSIC_VC_ID"))
result, detailedResponse, err := service.GetGatewayVirtualConnection(getGatewayVCOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(os.Getenv("CLASSIC_VC_ID")))
Expect(*result.Name).To(Equal(vcName))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Classic))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
})
It("Successfully create a Gen 2 VPC virtual connection", func() {
shouldSkipTest()
vcName := "GO-INT-GEN2-VPC-VC-SDK-" + strconv.FormatInt(timestamp, 10)
vpcCrn := os.Getenv("GEN2_VPC_CRN")
createGatewayVCOptions := service.NewCreateGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), vcName, directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc)
createGatewayVCOptionsWithNetworkID := createGatewayVCOptions.SetNetworkID(vpcCrn)
result, detailedResponse, err := service.CreateGatewayVirtualConnection(createGatewayVCOptionsWithNetworkID)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
// save the id so it can be deleted later
os.Setenv("GEN2_VPC_VC_ID", *result.ID)
Expect(*result.ID).NotTo(Equal(""))
Expect(*result.Name).To(Equal(vcName))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc))
Expect(*result.NetworkID).To(Equal(vpcCrn))
})
It("Successfully get a Gen 2 VPC virtual connection", func() {
shouldSkipTest()
getGatewayVCOptions := service.NewGetGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), os.Getenv("GEN2_VPC_VC_ID"))
result, detailedResponse, err := service.GetGatewayVirtualConnection(getGatewayVCOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(os.Getenv("GEN2_VPC_VC_ID")))
Expect(*result.Name).To(Equal("GO-INT-GEN2-VPC-VC-SDK-" + strconv.FormatInt(timestamp, 10)))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc))
Expect(*result.NetworkID).To(Equal(os.Getenv("GEN2_VPC_CRN")))
})
It("Successfully list the virtual connections for a gateway", func() {
shouldSkipTest()
listVcOptions := service.NewListGatewayVirtualConnectionsOptions(os.Getenv("GATEWAY_ID"))
result, detailedResponse, err := service.ListGatewayVirtualConnections(listVcOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
vcs := result.VirtualConnections
// two VCs were created for the GW, so we should expect 2
Expect(len(vcs)).Should(BeNumerically("==", 2))
for _, vc := range vcs {
if *vc.ID == os.Getenv("GEN2_VPC_VC_ID") {
Expect(*vc.Name).To(Equal("GO-INT-GEN2-VPC-VC-SDK-" + strconv.FormatInt(timestamp, 10)))
Expect(*vc.CreatedAt).NotTo(Equal(""))
Expect(*vc.Status).To(Equal("pending"))
Expect(*vc.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc))
Expect(*vc.NetworkID).To(Equal(os.Getenv("GEN2_VPC_CRN")))
} else {
Expect(*vc.ID).To(Equal(os.Getenv("CLASSIC_VC_ID")))
Expect(*vc.Name).To(Equal("GO-INT-CLASSIC-VC-SDK-" + strconv.FormatInt(timestamp, 10)))
Expect(*vc.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Classic))
Expect(*vc.CreatedAt).NotTo(Equal(""))
Expect(*vc.Status).To(Equal("pending"))
}
}
})
It("Successfully Update a virtual connection name", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
vcId := os.Getenv("GEN2_VPC_VC_ID")
vcName := "GO-INT-GEN2-VPC-VC-PATCH-SDK-" + strconv.FormatInt(timestamp, 10)
patchGatewayOptions := service.NewUpdateGatewayVirtualConnectionOptions(gatewayId, vcId)
patchGatewayOptions = patchGatewayOptions.SetName(vcName)
result, detailedResponse, err := service.UpdateGatewayVirtualConnection(patchGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(200))
Expect(*result.ID).To(Equal(vcId))
Expect(*result.Name).To(Equal(vcName))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Status).To(Equal("pending"))
Expect(*result.Type).To(Equal(directlinkv1.CreateGatewayVirtualConnectionOptions_Type_Vpc))
Expect(*result.NetworkID).To(Equal(os.Getenv("GEN2_VPC_CRN")))
})
It("Fail to Update a virtual connection status", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
vcId := os.Getenv("GEN2_VPC_VC_ID")
patchGatewayOptions := service.NewUpdateGatewayVirtualConnectionOptions(gatewayId, vcId)
patchGatewayOptions = patchGatewayOptions.SetStatus(directlinkv1.UpdateGatewayVirtualConnectionOptions_Status_Rejected)
result, detailedResponse, err := service.UpdateGatewayVirtualConnection(patchGatewayOptions)
// The gateway owner is not allowed to change the status; the test calls the API with the status parameter to verify the request is rejected.
Expect(result).To(BeNil())
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("gateway owner can't patch vc status."))
Expect(detailedResponse.StatusCode).To(Equal(400))
})
It("Successfully delete a CLASSIC virtual connection for a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
vcId := os.Getenv("CLASSIC_VC_ID")
deleteClassicVCOptions := service.NewDeleteGatewayVirtualConnectionOptions(gatewayId, vcId)
detailedResponse, err := service.DeleteGatewayVirtualConnection(deleteClassicVCOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
It("Successfully waits for CLASSIC virtual connection to report as deleted", func() {
shouldSkipTest()
getGatewayVCOptions := service.NewGetGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), os.Getenv("CLASSIC_VC_ID"))
// VC delete might not be instantaneous. Poll the VC looking for a not found. Fail after 2 min
timer := 0
for {
// Get the current rc for the VC
_, detailedResponse, _ := service.GetGatewayVirtualConnection(getGatewayVCOptions)
// if 404 then we are done
if detailedResponse.StatusCode == 404 {
Expect(detailedResponse.StatusCode).To(Equal(404)) // response is 404, exit success
break
}
// other than 404, see if we have reached the timeout value. If so, exit with failure
if timer > 24 { // 2 min timer (24x5sec)
Expect(detailedResponse.StatusCode).To(Equal(404)) // timed out fail if code is not 404
break
} else {
// Still exists, wait 5 sec
time.Sleep(time.Duration(5) * time.Second)
timer = timer + 1
}
}
})
It("Successfully deletes GEN 2 VPC virtual connection for a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
vcId := os.Getenv("GEN2_VPC_VC_ID")
deleteVpcVcOptions := service.NewDeleteGatewayVirtualConnectionOptions(gatewayId, vcId)
detailedResponse, err := service.DeleteGatewayVirtualConnection(deleteVpcVcOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
It("Successfully waits for GEN 2 VPC virtual connection to report as deleted", func() {
shouldSkipTest()
getGatewayVCOptions := service.NewGetGatewayVirtualConnectionOptions(os.Getenv("GATEWAY_ID"), os.Getenv("GEN2_VPC_VC_ID"))
// VC delete might not be instantaneous. Poll the VC looking for a not found. Fail after 2 min
timer := 0
for {
// Get the current rc for the VC
_, detailedResponse, _ := service.GetGatewayVirtualConnection(getGatewayVCOptions)
// if 404 then we are done
if detailedResponse.StatusCode == 404 {
Expect(detailedResponse.StatusCode).To(Equal(404)) // response is 404, exit success
break
}
// other than 404, see if we have reached the timeout value. If so, exit with failure
if timer > 24 { // 2 min timer (24x5 sec)
Expect(detailedResponse.StatusCode).To(Equal(404)) // timed out fail if code is not 404
break
} else {
// Still exists, wait 5 sec
time.Sleep(time.Duration(5) * time.Second)
timer = timer + 1
}
}
})
It("Successfully deletes a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
})
Describe("LOA and Completion Notice", func() {
timestamp := time.Now().Unix()
gatewayName := "GO-INT-LOA-SDK-" + strconv.FormatInt(timestamp, 10)
bgpAsn := int64(64999)
crossConnectRouter := "LAB-xcr01.dal09"
global := true
locationName := os.Getenv("LOCATION_NAME")
speedMbps := int64(1000)
metered := false
carrierName := "carrier1"
customerName := "customer1"
gatewayType := "dedicated"
// notes about LOA and CN testing. When a GW is created, a github issue is also created by dl-rest. The issue is used for managing the LOA and CN. In normal operation,
// an LOA is added to the issue via manual GH interaction. After that occurs and the GH label changed, then CN upload is allowed. Since we do not have the ability to
// do the manual steps for integration testing, the test will only do the following
// - Issue GET LOA for a gateway. It will expect a 404 error since no one has added the LOA to the GH issue
// - PUT a completion notice to the gw. It will fail with a 412 error because the GH issue and GW status are in the wrong state due to no manual interaction
// - GET CN for a gw. It will expect a 404 since the CN could not be uploaded
//
Context("Create gateway", func() {
It("Successfully created a gateway", func() {
shouldSkipTest()
gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gateway)
result, detailedResponse, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(201))
os.Setenv("GATEWAY_ID", *result.ID)
})
It("Successfully call loa", func() {
shouldSkipTest()
listLOAOptions := service.NewListGatewayLetterOfAuthorizationOptions(os.Getenv("GATEWAY_ID"))
result, detailedResponse, err := service.ListGatewayLetterOfAuthorization(listLOAOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Please check whether the resource you are requesting exists."))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
It("Successfully call PUT completion notice", func() {
shouldSkipTest()
buffer, err := ioutil.ReadFile("completion_notice.pdf")
Expect(err).To(BeNil())
r := ioutil.NopCloser(bytes.NewReader(buffer))
createCNOptions := service.NewCreateGatewayCompletionNoticeOptions(os.Getenv("GATEWAY_ID"))
createCNOptions.SetUpload(r)
detailedResponse, err := service.CreateGatewayCompletionNotice(createCNOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Invalid gateway status to upload completion notice."))
Expect(detailedResponse.StatusCode).To(Equal(412))
})
It("Successfully call completion notice", func() {
shouldSkipTest()
listCNOptions := service.NewListGatewayCompletionNoticeOptions(os.Getenv("GATEWAY_ID"))
result, detailedResponse, err := service.ListGatewayCompletionNotice(listCNOptions)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(Equal("Please check whether the resource you are requesting exists."))
Expect(detailedResponse.StatusCode).To(Equal(404))
Expect(result).To(BeNil())
})
It("Successfully deletes a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
})
Describe("BGP MD5", func() {
timestamp := time.Now().Unix()
gatewayName := "GO-INT-MD5-SDK-" + strconv.FormatInt(timestamp, 10)
bgpAsn := int64(64999)
crossConnectRouter := "LAB-xcr01.dal09"
global := true
locationName := os.Getenv("LOCATION_NAME")
speedMbps := int64(1000)
metered := false
carrierName := "carrier1"
customerName := "customer1"
gatewayType := "dedicated"
authCrn := os.Getenv("AUTHENTICATION_KEY")
Context("Create a Gateway with Authentication Key", func() {
It("should successfully create a gateway", func() {
shouldSkipTest()
// gateway, _ := service.NewGatewayTemplateGatewayTypeDedicatedTemplate(bgpAsn, global, metered, gatewayName, speedMbps, gatewayType, carrierName, crossConnectRouter, customerName, locationName)
authenticationKey, _ := service.NewGatewayTemplateAuthenticationKey(authCrn)
gatewayTemplateModel := new(directlinkv1.GatewayTemplateGatewayTypeDedicatedTemplate)
gatewayTemplateModel.AuthenticationKey = authenticationKey
gatewayTemplateModel.BgpAsn = core.Int64Ptr(int64(64999))
gatewayTemplateModel.Global = core.BoolPtr(true)
gatewayTemplateModel.Metered = core.BoolPtr(false)
gatewayTemplateModel.Name = core.StringPtr(gatewayName)
gatewayTemplateModel.SpeedMbps = core.Int64Ptr(int64(1000))
gatewayTemplateModel.Type = core.StringPtr(gatewayType)
gatewayTemplateModel.CarrierName = core.StringPtr(carrierName)
gatewayTemplateModel.CrossConnectRouter = core.StringPtr(crossConnectRouter)
gatewayTemplateModel.CustomerName = core.StringPtr(customerName)
gatewayTemplateModel.LocationName = core.StringPtr(locationName)
createGatewayOptions := service.NewCreateGatewayOptions(gatewayTemplateModel)
result, resp, err := service.CreateGateway(createGatewayOptions)
Expect(err).To(BeNil())
Expect(resp.StatusCode).To(Equal(201))
os.Setenv("GATEWAY_ID", *result.ID)
Expect(*result.Name).To(Equal(gatewayName))
Expect(*result.AuthenticationKey.Crn).To(Equal(authCrn))
Expect(*result.BgpAsn).To(Equal(bgpAsn))
Expect(*result.Global).To(Equal(global))
Expect(*result.Metered).To(Equal(metered))
Expect(*result.SpeedMbps).To(Equal(speedMbps))
Expect(*result.Type).To(Equal(gatewayType))
Expect(*result.CrossConnectRouter).To(Equal(crossConnectRouter))
Expect(*result.LocationName).To(Equal(locationName))
Expect(*result.LocationDisplayName).NotTo(Equal(""))
Expect(*result.BgpCerCidr).NotTo(BeEmpty())
Expect(*result.BgpIbmCidr).NotTo(Equal(""))
Expect(*result.BgpIbmAsn).NotTo(Equal(""))
Expect(*result.BgpStatus).To(Equal("idle"))
Expect(*result.CreatedAt).NotTo(Equal(""))
Expect(*result.Crn).To(HavePrefix("crn:v1"))
Expect(*result.LinkStatus).To(Equal("down"))
Expect(*result.OperationalStatus).To(Equal("awaiting_loa"))
Expect(*result.ResourceGroup.ID).NotTo(Equal(""))
})
})
Context("Update the Authentication key for the gateway", func() {
It("should successfully clear the auth key", func() {
authKey, _ := service.NewGatewayPatchTemplateAuthenticationKey("")
gatewayId := os.Getenv("GATEWAY_ID")
updateGatewayOptions := service.NewUpdateGatewayOptions(gatewayId).SetAuthenticationKey(authKey)
res, resp, err := service.UpdateGateway(updateGatewayOptions)
Expect(err).To(BeNil())
Expect(resp.StatusCode).To(Equal(200))
Expect(*res.ID).To(Equal(gatewayId))
Expect(res.AuthenticationKey).To(BeNil())
Expect(*res.Name).To(Equal(gatewayName))
})
})
Context("Delete a gateway", func() {
It("Successfully deletes a gateway", func() {
shouldSkipTest()
gatewayId := os.Getenv("GATEWAY_ID")
deleteGatewayOptions := service.NewDeleteGatewayOptions(gatewayId)
detailedResponse, err := service.DeleteGateway(deleteGatewayOptions)
Expect(err).To(BeNil())
Expect(detailedResponse.StatusCode).To(Equal(204))
})
})
})
})
| [
"\"SERVICE_URL\"",
"\"IAMAPIKEY\"",
"\"SERVICE_URL\"",
"\"LOCATION_NAME\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"LOCATION_NAME\"",
"\"MACSEC_CAK\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"OT_DEDICATED_LOCATION_NAME\"",
"\"OT_DEDICATED_LOCATION_DISPLAY_NAME\"",
"\"OT_CONNECT_LOCATION_NAME\"",
"\"OT_CONNECT_LOCATION_DISPLAY_NAME\"",
"\"PORT_ID\"",
"\"PORT_LOCATION_DISPLAY_NAME\"",
"\"PORT_LOCATION_NAME\"",
"\"PORT_LABEL\"",
"\"LOCATION_NAME\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"CLASSIC_VC_ID\"",
"\"CLASSIC_VC_ID\"",
"\"GEN2_VPC_CRN\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GEN2_VPC_VC_ID\"",
"\"GEN2_VPC_VC_ID\"",
"\"GEN2_VPC_CRN\"",
"\"GATEWAY_ID\"",
"\"GEN2_VPC_VC_ID\"",
"\"GEN2_VPC_CRN\"",
"\"CLASSIC_VC_ID\"",
"\"GATEWAY_ID\"",
"\"GEN2_VPC_VC_ID\"",
"\"GEN2_VPC_CRN\"",
"\"GATEWAY_ID\"",
"\"GEN2_VPC_VC_ID\"",
"\"GATEWAY_ID\"",
"\"CLASSIC_VC_ID\"",
"\"GATEWAY_ID\"",
"\"CLASSIC_VC_ID\"",
"\"GATEWAY_ID\"",
"\"GEN2_VPC_VC_ID\"",
"\"GATEWAY_ID\"",
"\"GEN2_VPC_VC_ID\"",
"\"GATEWAY_ID\"",
"\"LOCATION_NAME\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\"",
"\"LOCATION_NAME\"",
"\"AUTHENTICATION_KEY\"",
"\"GATEWAY_ID\"",
"\"GATEWAY_ID\""
]
| []
| [
"CLASSIC_VC_ID",
"OT_DEDICATED_LOCATION_DISPLAY_NAME",
"AUTHENTICATION_KEY",
"OT_CONNECT_LOCATION_NAME",
"GEN2_VPC_VC_ID",
"OT_DEDICATED_LOCATION_NAME",
"SERVICE_URL",
"OT_CONNECT_LOCATION_DISPLAY_NAME",
"PORT_LOCATION_NAME",
"PORT_LOCATION_DISPLAY_NAME",
"PORT_LABEL",
"MACSEC_CAK",
"IAMAPIKEY",
"PORT_ID",
"LOCATION_NAME",
"GEN2_VPC_CRN",
"GATEWAY_ID"
]
| [] | ["CLASSIC_VC_ID", "OT_DEDICATED_LOCATION_DISPLAY_NAME", "AUTHENTICATION_KEY", "OT_CONNECT_LOCATION_NAME", "GEN2_VPC_VC_ID", "OT_DEDICATED_LOCATION_NAME", "SERVICE_URL", "OT_CONNECT_LOCATION_DISPLAY_NAME", "PORT_LOCATION_NAME", "PORT_LOCATION_DISPLAY_NAME", "PORT_LABEL", "MACSEC_CAK", "IAMAPIKEY", "PORT_ID", "LOCATION_NAME", "GEN2_VPC_CRN", "GATEWAY_ID"] | go | 17 | 0 | |
spnego_gokrb5.go | //go:build !windows
// +build !windows
package spnego
import (
"net/http"
"os"
"os/user"
"strings"
"github.com/jcmturner/gokrb5/v8/client"
"github.com/jcmturner/gokrb5/v8/config"
"github.com/jcmturner/gokrb5/v8/credentials"
"github.com/jcmturner/gokrb5/v8/spnego"
)
type krb5 struct {
cfg *config.Config
cl *client.Client
}
// New constructs the OS-specific implementation of the spnego.Provider interface.
func New() Provider {
return &krb5{}
}
func (k *krb5) makeCfg() error {
if k.cfg != nil {
return nil
}
cfgPath := os.Getenv("KRB5_CONFIG")
if _, err := os.Stat(cfgPath); os.IsNotExist(err) {
cfgPath = "/etc/krb5.conf" // ToDo: Macs and Windows have different path, also some Unix may have /etc/krb5/krb5.conf
}
cfg, err := config.Load(cfgPath)
if err != nil {
return err
}
k.cfg = cfg
return nil
}
func (k *krb5) makeClient() error {
u, err := user.Current()
if err != nil {
return err
}
ccpath := "/tmp/krb5cc_" + u.Uid
ccname := os.Getenv("KRB5CCNAME")
if strings.HasPrefix(ccname, "FILE:") {
ccpath = strings.SplitN(ccname, ":", 2)[1]
}
ccache, err := credentials.LoadCCache(ccpath)
if err != nil {
return err
}
k.cl, err = client.NewFromCCache(ccache, k.cfg, client.DisablePAFXFAST(true))
return err
}
func (k *krb5) SetSPNEGOHeader(req *http.Request) error {
h, err := canonicalizeHostname(req.URL.Hostname())
if err != nil {
return err
}
header, err := k.GetSPNEGOHeader(h)
if err != nil {
return err
}
req.Header.Set(spnego.HTTPHeaderAuthRequest, header)
return nil
}
func (k *krb5) GetSPNEGOHeader(hostname string) (string, error) {
if err := k.makeCfg(); err != nil {
return "", err
}
if err := k.makeClient(); err != nil {
return "", err
}
// gokrb5's SetSPNEGOHeader requires an *http.Request, but only its header is used here
req, _ := http.NewRequest(http.MethodGet, "http://localhost", nil)
if err := spnego.SetSPNEGOHeader(k.cl, req, "HTTP/"+hostname); err != nil {
return "", err
}
return req.Header.Get(spnego.HTTPHeaderAuthRequest), nil
}
| [
"\"KRB5_CONFIG\"",
"\"KRB5CCNAME\""
]
| []
| [
"KRB5_CONFIG",
"KRB5CCNAME"
]
| [] | ["KRB5_CONFIG", "KRB5CCNAME"] | go | 2 | 0 |