filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129 or ⌀) | variableargcount (float64, 0 or ⌀) | sentence (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
test/integration/master/master_test.go
|
// +build integration,!no-etcd
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/ghodss/yaml"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
clienttypedv1 "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
)
func testPrefix(t *testing.T, prefix string) {
_, s := framework.RunAMaster(nil)
defer s.Close()
resp, err := http.Get(s.URL + prefix)
if err != nil {
t.Fatalf("unexpected error getting %s prefix: %v", prefix, err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
}
}
func TestAutoscalingPrefix(t *testing.T) {
testPrefix(t, "/apis/autoscaling/")
}
func TestBatchPrefix(t *testing.T) {
testPrefix(t, "/apis/batch/")
}
func TestAppsPrefix(t *testing.T) {
testPrefix(t, "/apis/apps/")
}
func TestExtensionsPrefix(t *testing.T) {
testPrefix(t, "/apis/extensions/")
}
func TestEmptyList(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
u := s.URL + "/api/v1/namespaces/default/pods"
resp, err := http.Get(u)
if err != nil {
t.Fatalf("unexpected error getting %s: %v", u, err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
}
defer resp.Body.Close()
data, _ := ioutil.ReadAll(resp.Body)
decodedData := map[string]interface{}{}
if err := json.Unmarshal(data, &decodedData); err != nil {
t.Logf("body: %s", string(data))
t.Fatalf("got error decoding data: %v", err)
}
if items, ok := decodedData["items"]; !ok {
t.Logf("body: %s", string(data))
t.Fatalf("missing items field in empty list (all lists should return an items field)")
} else if items == nil {
t.Logf("body: %s", string(data))
t.Fatalf("nil items field from empty list (all lists should return non-nil empty items lists)")
}
}
func TestWatchSucceedsWithoutArgs(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
resp, err := http.Get(s.URL + "/api/v1/namespaces?watch=1")
if err != nil {
t.Fatalf("unexpected error getting experimental prefix: %v", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
}
resp.Body.Close()
}
var hpaV1 string = `
{
"apiVersion": "autoscaling/v1",
"kind": "HorizontalPodAutoscaler",
"metadata": {
"name": "test-hpa",
"namespace": "default"
},
"spec": {
"scaleTargetRef": {
"kind": "ReplicationController",
"name": "test-hpa",
"namespace": "default"
},
"minReplicas": 1,
"maxReplicas": 10,
"targetCPUUtilizationPercentage": 50
}
}
`
func autoscalingPath(resource, namespace, name string) string {
return testapi.Autoscaling.ResourcePath(resource, namespace, name)
}
func batchPath(resource, namespace, name string) string {
return testapi.Batch.ResourcePath(resource, namespace, name)
}
func extensionsPath(resource, namespace, name string) string {
return testapi.Extensions.ResourcePath(resource, namespace, name)
}
func TestAutoscalingGroupBackwardCompatibility(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
transport := http.DefaultTransport
requests := []struct {
verb string
URL string
body string
expectedStatusCodes map[int]bool
expectedVersion string
}{
{"POST", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), hpaV1, integration.Code201, ""},
{"GET", autoscalingPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Autoscaling.GroupVersion().String()},
{"GET", extensionsPath("horizontalpodautoscalers", api.NamespaceDefault, ""), "", integration.Code200, testapi.Extensions.GroupVersion().String()},
}
for _, r := range requests {
bodyBytes := bytes.NewReader([]byte(r.body))
req, err := http.NewRequest(r.verb, s.URL+r.URL, bodyBytes)
if err != nil {
t.Logf("case %v", r)
t.Fatalf("unexpected error: %v", err)
}
func() {
resp, err := transport.RoundTrip(req)
if err != nil {
t.Logf("case %v", r)
t.Fatalf("unexpected error: %v", err)
}
defer resp.Body.Close()
b, _ := ioutil.ReadAll(resp.Body)
body := string(b)
if _, ok := r.expectedStatusCodes[resp.StatusCode]; !ok {
t.Logf("case %v", r)
t.Errorf("Expected status one of %v, but got %v", r.expectedStatusCodes, resp.StatusCode)
t.Errorf("Body: %v", body)
}
if !strings.Contains(body, "\"apiVersion\":\""+r.expectedVersion) {
t.Logf("case %v", r)
t.Errorf("Expected version %v, got body %v", r.expectedVersion, body)
}
}()
}
}
func TestAccept(t *testing.T) {
_, s := framework.RunAMaster(nil)
defer s.Close()
resp, err := http.Get(s.URL + "/api/")
if err != nil {
t.Fatalf("unexpected error getting api: %v", err)
}
if resp.StatusCode != http.StatusOK {
t.Fatalf("got status %v instead of 200 OK", resp.StatusCode)
}
body, _ := ioutil.ReadAll(resp.Body)
if resp.Header.Get("Content-Type") != "application/json" {
t.Errorf("unexpected content: %s", body)
}
if err := json.Unmarshal(body, &map[string]interface{}{}); err != nil {
t.Fatal(err)
}
req, err := http.NewRequest("GET", s.URL+"/api/", nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Accept", "application/yaml")
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
body, _ = ioutil.ReadAll(resp.Body)
if resp.Header.Get("Content-Type") != "application/yaml" {
t.Errorf("unexpected content: %s", body)
}
t.Logf("body: %s", body)
if err := yaml.Unmarshal(body, &map[string]interface{}{}); err != nil {
t.Fatal(err)
}
req, err = http.NewRequest("GET", s.URL+"/api/", nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Accept", "application/json, application/yaml")
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
body, _ = ioutil.ReadAll(resp.Body)
if resp.Header.Get("Content-Type") != "application/json" {
t.Errorf("unexpected content: %s", body)
}
t.Logf("body: %s", body)
if err := yaml.Unmarshal(body, &map[string]interface{}{}); err != nil {
t.Fatal(err)
}
req, err = http.NewRequest("GET", s.URL+"/api/", nil)
if err != nil {
t.Fatal(err)
}
req.Header.Set("Accept", "application") // not a valid media type
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusNotAcceptable {
t.Errorf("unexpected error from the server")
}
}
func countEndpoints(eps *api.Endpoints) int {
count := 0
for i := range eps.Subsets {
count += len(eps.Subsets[i].Addresses) * len(eps.Subsets[i].Ports)
}
return count
}
func TestMasterService(t *testing.T) {
_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{})
if err != nil {
t.Errorf("unexpected error: %v", err)
return false, nil
}
found := false
for i := range svcList.Items {
if svcList.Items[i].Name == "kubernetes" {
found = true
break
}
}
if found {
ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil {
return false, nil
}
if countEndpoints(ep) == 0 {
return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
}
return true, nil
}
return false, nil
})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
func TestServiceAlloc(t *testing.T) {
cfg := framework.NewIntegrationTestMasterConfig()
_, cidr, err := net.ParseCIDR("192.168.0.0/29")
if err != nil {
t.Fatalf("bad cidr: %v", err)
}
cfg.ServiceIPRange = *cidr
_, s := framework.RunAMaster(cfg)
defer s.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(api.GroupName).GroupVersion}})
svc := func(i int) *api.Service {
return &api.Service{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("svc-%v", i),
},
Spec: api.ServiceSpec{
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{
{Port: 80},
},
},
}
}
// Wait until the default "kubernetes" service is created.
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.Core().Services(api.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return false, err
}
return !errors.IsNotFound(err), nil
}); err != nil {
t.Fatalf("creating kubernetes service timed out")
}
// make 5 more services to take up all IPs
for i := 0; i < 5; i++ {
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(i)); err != nil {
t.Error(err)
}
}
// Make another service. It will fail because we're out of cluster IPs
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil {
if !strings.Contains(err.Error(), "range is full") {
t.Errorf("unexpected error text: %v", err)
}
} else {
svcs, err := client.Core().Services(api.NamespaceAll).List(api.ListOptions{})
if err != nil {
t.Fatalf("unexpected success, and error getting the services: %v", err)
}
allIPs := []string{}
for _, s := range svcs.Items {
allIPs = append(allIPs, s.Spec.ClusterIP)
}
t.Fatalf("unexpected creation success. The following IPs exist: %#v. It should only be possible to allocate 2 IP addresses in this cluster.\n\n%#v", allIPs, svcs)
}
// Delete one of the services to free up its cluster IP.
if err := client.Core().Services(api.NamespaceDefault).Delete(svc(1).ObjectMeta.Name, nil); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
// This time creating the extra service should succeed.
if _, err := client.Core().Services(api.NamespaceDefault).Create(svc(8)); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
}
// TestUpdateNodeObjects represents a simple version of the behavior of node checkins at steady
// state. This test allows for easy profiling of a realistic master scenario for baseline CPU
// in very large clusters. It is disabled by default - start a kube-apiserver and pass
// UPDATE_NODE_APISERVER as the host value.
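//
// A minimal sketch of one way to invoke it; the address and flags below are
// illustrative assumptions, not something this file defines:
//
//   UPDATE_NODE_APISERVER=http://127.0.0.1:8080 go test -tags integration -run TestUpdateNodeObjects ./test/integration/master/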
func TestUpdateNodeObjects(t *testing.T) {
server := os.Getenv("UPDATE_NODE_APISERVER")
if len(server) == 0 {
t.Skip("UPDATE_NODE_APISERVER is not set")
}
c := clienttypedv1.NewForConfigOrDie(&restclient.Config{
QPS: 10000,
Host: server,
ContentConfig: restclient.ContentConfig{
AcceptContentTypes: "application/vnd.kubernetes.protobuf",
ContentType: "application/vnd.kubernetes.protobuf",
},
})
nodes := 400
listers := 5
watchers := 50
iterations := 10000
for i := 0; i < nodes*6; i++ {
c.Nodes().Delete(fmt.Sprintf("node-%d", i), nil)
_, err := c.Nodes().Create(&v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("node-%d", i),
},
})
if err != nil {
t.Fatal(err)
}
}
for k := 0; k < listers; k++ {
go func(lister int) {
for i := 0; i < iterations; i++ {
_, err := c.Nodes().List(v1.ListOptions{})
if err != nil {
fmt.Printf("[list:%d] error after %d: %v\n", lister, i, err)
break
}
time.Sleep(time.Duration(lister)*10*time.Millisecond + 1500*time.Millisecond)
}
}(k)
}
for k := 0; k < watchers; k++ {
go func(lister int) {
w, err := c.Nodes().Watch(v1.ListOptions{})
if err != nil {
fmt.Printf("[watch:%d] error: %v", k, err)
return
}
i := 0
for r := range w.ResultChan() {
i++
if _, ok := r.Object.(*v1.Node); !ok {
fmt.Printf("[watch:%d] unexpected object after %d: %#v\n", lister, i, r)
}
if i%100 == 0 {
fmt.Printf("[watch:%d] iteration %d ...\n", lister, i)
}
}
fmt.Printf("[watch:%d] done\n", lister)
}(k)
}
var wg sync.WaitGroup
wg.Add(nodes)
for j := 0; j < nodes; j++ {
go func(node int) {
var lastCount int
for i := 0; i < iterations; i++ {
if i%100 == 0 {
fmt.Printf("[%d] iteration %d ...\n", node, i)
}
if i%20 == 0 {
_, err := c.Nodes().List(v1.ListOptions{})
if err != nil {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break
}
}
r, err := c.Nodes().List(v1.ListOptions{
FieldSelector: fmt.Sprintf("metadata.name=node-%d", node),
ResourceVersion: "0",
})
if err != nil {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break
}
if len(r.Items) != 1 {
fmt.Printf("[%d] error after %d: unexpected list count\n", node, i)
break
}
n, err := c.Nodes().Get(fmt.Sprintf("node-%d", node), metav1.GetOptions{})
if err != nil {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break
}
if len(n.Status.Conditions) != lastCount {
fmt.Printf("[%d] worker set %d, read %d conditions\n", node, lastCount, len(n.Status.Conditions))
break
}
previousCount := lastCount
switch {
case i%4 == 0:
lastCount = 1
n.Status.Conditions = []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "foo",
},
}
case i%4 == 1:
lastCount = 2
n.Status.Conditions = []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
Reason: "foo",
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionTrue,
Reason: "bar",
},
}
case i%4 == 2:
lastCount = 0
n.Status.Conditions = nil
}
if _, err := c.Nodes().UpdateStatus(n); err != nil {
if !errors.IsConflict(err) {
fmt.Printf("[%d] error after %d: %v\n", node, i, err)
break
}
lastCount = previousCount
}
}
wg.Done()
fmt.Printf("[%d] done\n", node)
}(j)
}
wg.Wait()
}
| ["\"UPDATE_NODE_APISERVER\""] | [] | ["UPDATE_NODE_APISERVER"] | [] | ["UPDATE_NODE_APISERVER"] | go | 1 | 0 | |
tests/test_agent.py
|
import os
import pytest
from notion_df.agent import download
NOTION_API_KEY = os.environ.get("NOTION_API_KEY")
NOTION_LARGE_DF = os.environ.get("NOTION_LARGE_DF")
NOTION_LARGE_DF_ROWS = 150
def test_nrows():
if not NOTION_LARGE_DF or not NOTION_API_KEY:
pytest.skip("API key not provided")
df = download(NOTION_LARGE_DF, api_key=NOTION_API_KEY)
assert len(df) == NOTION_LARGE_DF_ROWS
df = download(NOTION_LARGE_DF, nrows=101, api_key=NOTION_API_KEY)
assert len(df) == 101
df = download(NOTION_LARGE_DF, nrows=15, api_key=NOTION_API_KEY)
assert len(df) == 15
| [] | [] | ["NOTION_API_KEY", "NOTION_LARGE_DF"] | [] | ["NOTION_API_KEY", "NOTION_LARGE_DF"] | python | 2 | 0 | |
yzlabot/__init__.py
|
import logging
import os
import re
import i18n
import discord
from discord.ext import commands
from .help import YLHelpCommand
intents = discord.Intents.none()
intents.emojis = True
intents.guilds = True
intents.members = True
intents.voice_states = True
intents.messages = True
intents.reactions = True
YB_BOT = commands.Bot(
command_prefix=commands.when_mentioned_or("!"),
help_command=YLHelpCommand(),
case_insensitive=True,
activity=discord.Game("YZ-LABO BOT SYSTEM"),
intents=intents
)
def i18n_setup():
i18n.set('filename_format', '{locale}.{format}')
i18n.load_path.append(
os.path.join(os.path.dirname(__file__), "..", "locale"))
i18n.set("locale",
os.environ.get("YB_LANG")
if os.environ.get("YB_LANG") is not None else "jp")
i18n.set("fallback", "jp")
i18n_setup()
# noinspection SpellCheckingInspection
RYTHM_CMD = {
"play", "disconnect", "np", "aliases", "ping",
"skip", "seek", "soundcloud", "remove", "loopqueue",
"search", "stats", "loop", "donate", "shard",
"join", "lyrics", "resume", "settings", "move",
"forward", "skipto", "clear", "replay", "clean",
"pause", "removedupes", "volume", "rewind", "playtop",
"playskip", "invite", "shuffle", "queue", "leavecleanup",
"bass", "bb", "cl", "dc", "leave",
"dis", "fuckoff", "patreon", "effect", "skip",
"fs", "fwd", "save", "yoink", "links",
"summon", "fuckon", "lc", "repeat", "lq",
"queueloop", "l", "ly", "m", "mv",
"weeb", "np", "stop", "p", "ps",
"pskip", "playnow", "pn", "psotd", "psotm",
"psotw", "pt", "ptop", "purge", "q",
"rm", "rmd", "rd", "drm", "re",
"res", "continue", "rwd", "find", "setting",
"random", "st", "sad", "sc", "vol",
"skip", "next", "s"
}
@YB_BOT.event
async def on_ready():
logging.info(f"Logged in as {YB_BOT.user}")
app_info = await YB_BOT.application_info()
logging.info(f"Application ID: {app_info.id}")
if app_info.bot_public:
logging.warning(
"BOT is set to public BOT. "
"We recommend that you disable this setting.")
logging.info("YZ-LABOT is getting ready!")
@YB_BOT.command(
brief=i18n.t("command.add_guild.brief"),
description=i18n.t("command.add_guild.description")
)
@commands.is_owner()
@commands.dm_only()
async def add_guild(ctx: commands.Context):
app_info = await YB_BOT.application_info()
required_permission = discord.Permissions(
administrator=True
)
await ctx.send(discord.utils.oauth_url(
app_info.id,
permissions=required_permission))
@YB_BOT.event
async def on_command(ctx: commands.Context):
logging.info(
f"{ctx.author.name}#{ctx.author.discriminator} "
f"has sent command: {ctx.command}")
@YB_BOT.event
async def on_command_error(ctx: commands.Context, error):
if isinstance(error, commands.CommandNotFound):
cmd_re = re.fullmatch("Command \"(.+)\" is not found", str(error))
if cmd_re is not None and cmd_re.group(1) in RYTHM_CMD:
embed = discord.Embed(
title=i18n.t("error.rythm_hint.title"),
description=i18n.t("error.rythm_hint.description"),
color=discord.Colour.orange()
)
await ctx.send(i18n.t("error.unknown_command",
mention=ctx.message.author.mention),
embed=embed)
else:
await ctx.send(i18n.t("error.unknown_command",
mention=ctx.message.author.mention))
elif isinstance(error, commands.CommandInvokeError):
logging.error(f"[Internal error] {str(error)}")
try:
await ctx.send(i18n.t("error.invoke_message",
mention=ctx.message.author.mention))
except (discord.Forbidden, discord.HTTPException):
logging.error("Can't send message. ignore.")
try:
app_info = await YB_BOT.application_info()
except discord.HTTPException:
logging.error(
"Can't fetch application info. Don't send error report.")
else:
try:
embed = discord.embeds.Embed(
title=i18n.t("error.invoke_dm_title"),
description=f"```\n{error}\n```",
color=discord.Colour.dark_purple()
)
await app_info.owner.send(embed=embed)
except discord.Forbidden:
logging.error(
"Can't send error message "
"because owner's DM is not allowed. ignore.")
except discord.HTTPException:
logging.error(
"Can't send error message. ignore.")
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.send(i18n.t("error.missing_args",
mention=ctx.message.author.mention))
elif isinstance(error, commands.BadUnionArgument):
await ctx.send(i18n.t("error.not_valid_type",
mention=ctx.message.author.mention))
elif isinstance(error, commands.errors.PrivateMessageOnly):
await ctx.send(i18n.t("error.dm_only",
mention=ctx.message.author.mention))
elif isinstance(error, commands.errors.CheckFailure):
await ctx.send(i18n.t("error.check_forbidden",
mention=ctx.message.author.mention))
else:
logging.error(
f"Command has raised error: {error} ({error.__class__.__name__})")
| [] | [] | ["YB_LANG"] | [] | ["YB_LANG"] | python | 1 | 0 | |
orc8r/cloud/go/tools/swaggergen/generate/rewrite_test.go
|
/*
* Copyright 2020 The Magma Authors.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package generate_test
import (
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"testing"
"magma/orc8r/cloud/go/tools/swaggergen/generate"
"github.com/stretchr/testify/assert"
)
func TestRewriteGeneratedRefs(t *testing.T) {
runRewriteTestCase(t, "../testdata/importer2.yml", "../testdata/importer2")
runRewriteTestCase(t, "../testdata/importer.yml", "../testdata/importer")
runRewriteTestCase(t, "../testdata/base.yml", "../testdata/base")
}
func runRewriteTestCase(t *testing.T, ymlFile string, outputDir string) {
defer cleanupActualFiles(outputDir)
err := generate.GenerateModels(ymlFile, "../testdata/template.yml", os.Getenv("MAGMA_ROOT"))
assert.NoError(t, err)
err = generate.RewriteGeneratedRefs(ymlFile, os.Getenv("MAGMA_ROOT"))
assert.NoError(t, err)
goldenFiles, actualFiles := []string{}, []string{}
err = filepath.Walk(outputDir, func(path string, _ os.FileInfo, _ error) error {
if strings.HasSuffix(path, "actual.golden") {
goldenFiles = append(goldenFiles, strings.TrimSuffix(path, ".golden"))
} else if strings.HasSuffix(path, ".actual") {
actualFiles = append(actualFiles, path)
}
return nil
})
assert.NoError(t, err)
sort.Strings(goldenFiles)
sort.Strings(actualFiles)
assert.Equal(t, goldenFiles, actualFiles)
// Verify contents of actual vs golden
for _, baseFilename := range goldenFiles {
goldenFileContents, err := ioutil.ReadFile(baseFilename + ".golden")
assert.NoError(t, err)
actualFileContents, err := ioutil.ReadFile(baseFilename)
assert.NoError(t, err)
assert.Equal(t, goldenFileContents, actualFileContents)
}
}
| ["\"MAGMA_ROOT\"", "\"MAGMA_ROOT\""] | [] | ["MAGMA_ROOT"] | [] | ["MAGMA_ROOT"] | go | 1 | 0 | |
gewittergefahr/scripts/train_many_cnns_3d_gridrad.py
|
"""Trains many convolutional neural nets with native (3-D) GridRad images."""
import os
import pickle
import argparse
import traceback
from multiprocessing import Pool, Manager
import numpy
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import soundings
from gewittergefahr.gg_utils import file_system_utils
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
NUM_GPU_PER_NODE = 8
TIME_FORMAT = '%Y-%m-%d-%H%M%S'
FIRST_BATCH_NUMBER = 0
LAST_BATCH_NUMBER = int(1e12)
RADAR_HEIGHTS_M_AGL = numpy.linspace(1000, 12000, num=12, dtype=int)
SOUNDING_HEIGHTS_M_AGL = soundings.DEFAULT_HEIGHT_LEVELS_M_AGL + 0
RADAR_FIELDS_KEY = 'radar_field_names'
REFLECTIVITY_MASK_KEY = 'refl_masking_threshold_dbz'
ARGUMENT_FILES_ARG_NAME = 'argument_file_names'
ARGUMENT_FILES_HELP_STRING = (
'1-D list of paths to input files, each containing a dictionary of '
'arguments for the single-CNN script train_cnn_3d_gridrad.py. Each file '
'should be a Pickle file, containing only said dictionary.'
)
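# A minimal sketch of how one such argument file could be written. The key
# shown is RADAR_FIELDS_KEY from this script; the file name and the field
# value are illustrative assumptions:
#
#     import pickle
#     argument_dict = {'radar_field_names': ['reflectivity_dbz']}
#     with open('cnn_arguments_0.p', 'wb') as this_file_handle:
#         pickle.dump(argument_dict, this_file_handle)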
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + ARGUMENT_FILES_ARG_NAME, type=str, nargs='+', required=True,
help=ARGUMENT_FILES_HELP_STRING
)
def _write_metadata_one_cnn(model_object, argument_dict):
"""Writes metadata for one CNN to file.
:param model_object: Untrained CNN (instance of `keras.models.Model` or
`keras.models.Sequential`).
:param argument_dict: See doc for `_train_one_cnn`.
:return: metadata_dict: See doc for `cnn.write_model_metadata`.
:return: training_option_dict: Same.
"""
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import input_examples
from gewittergefahr.deep_learning import \
training_validation_io as trainval_io
from gewittergefahr.scripts import deep_learning_helper as dl_helper
# Read input args.
radar_field_names = argument_dict[RADAR_FIELDS_KEY]
sounding_field_names = argument_dict[dl_helper.SOUNDING_FIELDS_ARG_NAME]
normalization_type_string = (
argument_dict[dl_helper.NORMALIZATION_TYPE_ARG_NAME]
)
normalization_file_name = (
argument_dict[dl_helper.NORMALIZATION_FILE_ARG_NAME]
)
min_normalized_value = argument_dict[dl_helper.MIN_NORM_VALUE_ARG_NAME]
max_normalized_value = argument_dict[dl_helper.MAX_NORM_VALUE_ARG_NAME]
target_name = argument_dict[dl_helper.TARGET_NAME_ARG_NAME]
shuffle_target = bool(argument_dict[dl_helper.SHUFFLE_TARGET_ARG_NAME])
downsampling_classes = numpy.array(
argument_dict[dl_helper.DOWNSAMPLING_CLASSES_ARG_NAME],
dtype=int
)
downsampling_fractions = numpy.array(
argument_dict[dl_helper.DOWNSAMPLING_FRACTIONS_ARG_NAME],
dtype=float
)
monitor_string = argument_dict[dl_helper.MONITOR_ARG_NAME]
weight_loss_function = bool(argument_dict[dl_helper.WEIGHT_LOSS_ARG_NAME])
refl_masking_threshold_dbz = argument_dict[REFLECTIVITY_MASK_KEY]
x_translations_pixels = numpy.array(
argument_dict[dl_helper.X_TRANSLATIONS_ARG_NAME], dtype=int
)
y_translations_pixels = numpy.array(
argument_dict[dl_helper.Y_TRANSLATIONS_ARG_NAME], dtype=int
)
ccw_rotation_angles_deg = numpy.array(
argument_dict[dl_helper.ROTATION_ANGLES_ARG_NAME], dtype=float
)
noise_standard_deviation = argument_dict[dl_helper.NOISE_STDEV_ARG_NAME]
num_noisings = argument_dict[dl_helper.NUM_NOISINGS_ARG_NAME]
flip_in_x = bool(argument_dict[dl_helper.FLIP_X_ARG_NAME])
flip_in_y = bool(argument_dict[dl_helper.FLIP_Y_ARG_NAME])
top_training_dir_name = argument_dict[dl_helper.TRAINING_DIR_ARG_NAME]
first_training_time_string = (
argument_dict[dl_helper.FIRST_TRAINING_TIME_ARG_NAME]
)
last_training_time_string = (
argument_dict[dl_helper.LAST_TRAINING_TIME_ARG_NAME]
)
num_examples_per_train_batch = (
argument_dict[dl_helper.NUM_EX_PER_TRAIN_ARG_NAME]
)
top_validation_dir_name = argument_dict[dl_helper.VALIDATION_DIR_ARG_NAME]
first_validation_time_string = (
argument_dict[dl_helper.FIRST_VALIDATION_TIME_ARG_NAME]
)
last_validation_time_string = (
argument_dict[dl_helper.LAST_VALIDATION_TIME_ARG_NAME]
)
num_examples_per_validn_batch = (
argument_dict[dl_helper.NUM_EX_PER_VALIDN_ARG_NAME]
)
num_epochs = argument_dict[dl_helper.NUM_EPOCHS_ARG_NAME]
num_training_batches_per_epoch = (
argument_dict[dl_helper.NUM_TRAINING_BATCHES_ARG_NAME]
)
num_validation_batches_per_epoch = (
argument_dict[dl_helper.NUM_VALIDATION_BATCHES_ARG_NAME]
)
output_dir_name = argument_dict[dl_helper.OUTPUT_DIR_ARG_NAME]
# Process input args.
first_training_time_unix_sec = time_conversion.string_to_unix_sec(
first_training_time_string, TIME_FORMAT
)
last_training_time_unix_sec = time_conversion.string_to_unix_sec(
last_training_time_string, TIME_FORMAT
)
first_validation_time_unix_sec = time_conversion.string_to_unix_sec(
first_validation_time_string, TIME_FORMAT
)
last_validation_time_unix_sec = time_conversion.string_to_unix_sec(
last_validation_time_string, TIME_FORMAT
)
if sounding_field_names[0] in ['', 'None']:
sounding_field_names = None
if len(downsampling_classes) > 1:
downsampling_dict = dict(list(zip(
downsampling_classes, downsampling_fractions
)))
else:
downsampling_dict = None
translate_flag = (
len(x_translations_pixels) > 1
or x_translations_pixels[0] != 0 or y_translations_pixels[0] != 0
)
if not translate_flag:
x_translations_pixels = None
y_translations_pixels = None
if len(ccw_rotation_angles_deg) == 1 and ccw_rotation_angles_deg[0] == 0:
ccw_rotation_angles_deg = None
if num_noisings <= 0:
num_noisings = 0
noise_standard_deviation = None
if refl_masking_threshold_dbz <= 0:
refl_masking_threshold_dbz = None
# Find training and validation files.
training_file_names = input_examples.find_many_example_files(
top_directory_name=top_training_dir_name, shuffled=True,
first_batch_number=FIRST_BATCH_NUMBER,
last_batch_number=LAST_BATCH_NUMBER,
raise_error_if_any_missing=False
)
validation_file_names = input_examples.find_many_example_files(
top_directory_name=top_validation_dir_name, shuffled=True,
first_batch_number=FIRST_BATCH_NUMBER,
last_batch_number=LAST_BATCH_NUMBER,
raise_error_if_any_missing=False
)
# Write metadata.
metadata_dict = {
cnn.NUM_EPOCHS_KEY: num_epochs,
cnn.NUM_TRAINING_BATCHES_KEY: num_training_batches_per_epoch,
cnn.NUM_VALIDATION_BATCHES_KEY: num_validation_batches_per_epoch,
cnn.MONITOR_STRING_KEY: monitor_string,
cnn.WEIGHT_LOSS_FUNCTION_KEY: weight_loss_function,
cnn.CONV_2D3D_KEY: False,
cnn.VALIDATION_FILES_KEY: validation_file_names,
cnn.FIRST_VALIDN_TIME_KEY: first_validation_time_unix_sec,
cnn.LAST_VALIDN_TIME_KEY: last_validation_time_unix_sec,
cnn.NUM_EX_PER_VALIDN_BATCH_KEY: num_examples_per_validn_batch
}
input_tensor = model_object.input
if isinstance(input_tensor, list):
input_tensor = input_tensor[0]
num_grid_rows = input_tensor.get_shape().as_list()[1]
num_grid_columns = input_tensor.get_shape().as_list()[2]
training_option_dict = {
trainval_io.EXAMPLE_FILES_KEY: training_file_names,
trainval_io.TARGET_NAME_KEY: target_name,
trainval_io.SHUFFLE_TARGET_KEY: shuffle_target,
trainval_io.FIRST_STORM_TIME_KEY: first_training_time_unix_sec,
trainval_io.LAST_STORM_TIME_KEY: last_training_time_unix_sec,
trainval_io.NUM_EXAMPLES_PER_BATCH_KEY: num_examples_per_train_batch,
trainval_io.RADAR_FIELDS_KEY: radar_field_names,
trainval_io.RADAR_HEIGHTS_KEY: RADAR_HEIGHTS_M_AGL,
trainval_io.SOUNDING_FIELDS_KEY: sounding_field_names,
trainval_io.SOUNDING_HEIGHTS_KEY: SOUNDING_HEIGHTS_M_AGL,
trainval_io.NUM_ROWS_KEY: num_grid_rows,
trainval_io.NUM_COLUMNS_KEY: num_grid_columns,
trainval_io.NORMALIZATION_TYPE_KEY: normalization_type_string,
trainval_io.NORMALIZATION_FILE_KEY: normalization_file_name,
trainval_io.MIN_NORMALIZED_VALUE_KEY: min_normalized_value,
trainval_io.MAX_NORMALIZED_VALUE_KEY: max_normalized_value,
trainval_io.BINARIZE_TARGET_KEY: False,
trainval_io.SAMPLING_FRACTIONS_KEY: downsampling_dict,
trainval_io.LOOP_ONCE_KEY: False,
trainval_io.REFLECTIVITY_MASK_KEY: refl_masking_threshold_dbz,
trainval_io.X_TRANSLATIONS_KEY: x_translations_pixels,
trainval_io.Y_TRANSLATIONS_KEY: y_translations_pixels,
trainval_io.ROTATION_ANGLES_KEY: ccw_rotation_angles_deg,
trainval_io.NOISE_STDEV_KEY: noise_standard_deviation,
trainval_io.NUM_NOISINGS_KEY: num_noisings,
trainval_io.FLIP_X_KEY: flip_in_x,
trainval_io.FLIP_Y_KEY: flip_in_y
}
file_system_utils.mkdir_recursive_if_necessary(
directory_name=output_dir_name
)
metafile_name = '{0:s}/model_metadata.p'.format(output_dir_name)
print('Writing metadata to: "{0:s}"...'.format(metafile_name))
cnn.write_model_metadata(
pickle_file_name=metafile_name, metadata_dict=metadata_dict,
training_option_dict=training_option_dict
)
return metadata_dict, training_option_dict
def _train_one_cnn(gpu_queue, argument_dict):
"""Trains single CNN with 3-D GridRad data.
:param gpu_queue: GPU queue (instance of `multiprocessing.Manager.Queue`).
:param argument_dict: Dictionary of CNN arguments, where each key is an
input arg to the script train_cnn_3d_gridrad.py.
"""
import keras
from keras import backend as K
import tensorflow
from gewittergefahr.deep_learning import cnn
from gewittergefahr.deep_learning import cnn_setup
from gewittergefahr.scripts import deep_learning_helper as dl_helper
gpu_index = -1
try:
# Deal with GPU business.
gpu_index = int(gpu_queue.get())
os.environ['CUDA_VISIBLE_DEVICES'] = '{0:d}'.format(gpu_index)
session_object = tensorflow.Session(
config=tensorflow.ConfigProto(
intra_op_parallelism_threads=7, inter_op_parallelism_threads=7,
allow_soft_placement=False, log_device_placement=False,
gpu_options=tensorflow.GPUOptions(allow_growth=True)
)
)
K.set_session(session_object)
# Read untrained model.
untrained_model_file_name = (
argument_dict[dl_helper.INPUT_MODEL_FILE_ARG_NAME]
)
with tensorflow.device('/gpu:0'):
print('Reading untrained model from: "{0:s}"...'.format(
untrained_model_file_name
))
model_object = cnn.read_model(untrained_model_file_name)
model_object.compile(
loss=keras.losses.binary_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=cnn_setup.DEFAULT_METRIC_FUNCTION_LIST
)
print(SEPARATOR_STRING)
model_object.summary()
print(SEPARATOR_STRING)
# Write metadata.
metadata_dict, training_option_dict = _write_metadata_one_cnn(
model_object=model_object, argument_dict=argument_dict
)
print('Training CNN on GPU {0:d}...'.format(gpu_index))
print(SEPARATOR_STRING)
# Train CNN.
output_dir_name = argument_dict[dl_helper.OUTPUT_DIR_ARG_NAME]
output_model_file_name = '{0:s}/model.h5'.format(output_dir_name)
history_file_name = '{0:s}/model_history.csv'.format(output_dir_name)
tensorboard_dir_name = '{0:s}/tensorboard'.format(output_dir_name)
cnn.train_cnn_2d_or_3d(
model_object=model_object, model_file_name=output_model_file_name,
history_file_name=history_file_name,
tensorboard_dir_name=tensorboard_dir_name,
num_epochs=metadata_dict[cnn.NUM_EPOCHS_KEY],
num_training_batches_per_epoch=
metadata_dict[cnn.NUM_TRAINING_BATCHES_KEY],
training_option_dict=training_option_dict,
monitor_string=metadata_dict[cnn.MONITOR_STRING_KEY],
weight_loss_function=metadata_dict[cnn.WEIGHT_LOSS_FUNCTION_KEY],
num_validation_batches_per_epoch=
metadata_dict[cnn.NUM_VALIDATION_BATCHES_KEY],
validation_file_names=metadata_dict[cnn.VALIDATION_FILES_KEY],
first_validn_time_unix_sec=metadata_dict[cnn.FIRST_VALIDN_TIME_KEY],
last_validn_time_unix_sec=metadata_dict[cnn.LAST_VALIDN_TIME_KEY],
num_examples_per_validn_batch=
metadata_dict[cnn.NUM_EX_PER_VALIDN_BATCH_KEY]
)
session_object.close()
del session_object
gpu_queue.put(gpu_index)
except Exception as this_exception:
if gpu_index >= 0:
gpu_queue.put(gpu_index)
print(traceback.format_exc())
raise this_exception
def _run(argument_file_names):
"""Trains many convolutional neural nets with native (3-D) GridRad images.
This is effectively the main method.
:param argument_file_names: See documentation at top of file.
"""
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join([
'{0:d}'.format(j) for j in range(NUM_GPU_PER_NODE)
])
gpu_manager = Manager()
gpu_queue = gpu_manager.Queue()
gpu_pool = Pool(NUM_GPU_PER_NODE, maxtasksperchild=1)
for j in range(NUM_GPU_PER_NODE):
gpu_queue.put(j)
for this_arg_file_name in argument_file_names:
print('Reading single-CNN input args from: "{0:s}"...'.format(
this_arg_file_name
))
this_file_handle = open(this_arg_file_name, 'rb')
this_argument_dict = pickle.load(this_file_handle)
this_file_handle.close()
gpu_pool.apply_async(
func=_train_one_cnn, args=(gpu_queue, this_argument_dict)
)
gpu_pool.close()
gpu_pool.join()
del gpu_pool
del gpu_queue
del gpu_manager
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
argument_file_names=getattr(INPUT_ARG_OBJECT, ARGUMENT_FILES_ARG_NAME)
)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pkg/provider/terraform/instance/main.go
|
package main
import (
"os"
"os/exec"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/infrakit/pkg/cli"
instance_plugin "github.com/docker/infrakit/pkg/rpc/instance"
"github.com/spf13/cobra"
)
func mustHaveTerraform() {
// check if terraform exists
if _, err := exec.LookPath("terraform"); err != nil {
log.Error("Cannot find terraform. Please install at https://www.terraform.io/downloads.html")
os.Exit(1)
}
}
func getDir() string {
dir := os.Getenv("INFRAKIT_INSTANCE_TERRAFORM_DIR")
if dir != "" {
return dir
}
return os.TempDir()
}
func main() {
cmd := &cobra.Command{
Use: os.Args[0],
Short: "Terraform instance plugin",
}
name := cmd.Flags().String("name", "instance-terraform", "Plugin name to advertise for discovery")
logLevel := cmd.Flags().Int("log", cli.DefaultLogLevel, "Logging level. 0 is least verbose. Max is 5")
dir := cmd.Flags().String("dir", getDir(), "Dir for storing plan files")
pollInterval := cmd.Flags().Duration("poll-interval", 30*time.Second, "Terraform polling interval")
standalone := cmd.Flags().Bool("standalone", false, "Set if running standalone, disables manager leadership verification")
cmd.Run = func(c *cobra.Command, args []string) {
mustHaveTerraform()
cli.SetLogLevel(*logLevel)
cli.RunPlugin(*name, instance_plugin.PluginServer(NewTerraformInstancePlugin(*dir, *pollInterval, *standalone)))
}
cmd.AddCommand(cli.VersionCommand())
err := cmd.Execute()
if err != nil {
log.Error(err)
os.Exit(1)
}
}
| ["\"INFRAKIT_INSTANCE_TERRAFORM_DIR\""] | [] | ["INFRAKIT_INSTANCE_TERRAFORM_DIR"] | [] | ["INFRAKIT_INSTANCE_TERRAFORM_DIR"] | go | 1 | 0 | |
pipenv/environment.py
|
import contextlib
import importlib
import itertools
import json
import operator
import os
import site
import sys
from pathlib import Path
from sysconfig import get_paths, get_python_version
import pkg_resources
import pipenv
from pipenv.environments import is_type_checking
from pipenv.utils import make_posix, normalize_path, subprocess_run
from pipenv.vendor import vistir
from pipenv.vendor.cached_property import cached_property
from pipenv.vendor.packaging.utils import canonicalize_name
if is_type_checking():
from types import ModuleType
from typing import (
ContextManager, Dict, Generator, List, Optional, Set, Union
)
import pip_shims.shims
import tomlkit
from pipenv.project import Project, TPipfile, TSource
from pipenv.vendor.packaging.version import Version
BASE_WORKING_SET = pkg_resources.WorkingSet(sys.path)
# TODO: Unittests for this class
class Environment:
def __init__(
self,
prefix=None, # type: Optional[str]
python=None, # type: Optional[str]
is_venv=False, # type: bool
base_working_set=None, # type: pkg_resources.WorkingSet
pipfile=None, # type: Optional[Union[tomlkit.toml_document.TOMLDocument, TPipfile]]
sources=None, # type: Optional[List[TSource]]
project=None # type: Optional[Project]
):
super().__init__()
self._modules = {'pkg_resources': pkg_resources, 'pipenv': pipenv}
self.base_working_set = base_working_set if base_working_set else BASE_WORKING_SET
prefix = normalize_path(prefix)
self._python = None
if python is not None:
self._python = Path(python).absolute().as_posix()
self.is_venv = is_venv or prefix != normalize_path(sys.prefix)
if not sources:
sources = []
self.project = project
if project and not sources:
sources = project.sources
self.sources = sources
if project and not pipfile:
pipfile = project.parsed_pipfile
self.pipfile = pipfile
self.extra_dists = []
prefix = prefix if prefix else sys.prefix
self.prefix = Path(prefix)
self._base_paths = {}
if self.is_venv:
self._base_paths = self.get_paths()
self.sys_paths = get_paths()
def safe_import(self, name):
# type: (str) -> ModuleType
"""Helper utility for reimporting previously imported modules while inside the env"""
module = None
if name not in self._modules:
self._modules[name] = importlib.import_module(name)
module = self._modules[name]
if not module:
dist = next(iter(
dist for dist in self.base_working_set if dist.project_name == name
), None)
if dist:
dist.activate()
module = importlib.import_module(name)
return module
@classmethod
def resolve_dist(cls, dist, working_set):
# type: (pkg_resources.Distribution, pkg_resources.WorkingSet) -> Set[pkg_resources.Distribution]
"""Given a local distribution and a working set, returns all dependencies from the set.
:param dist: A single distribution to find the dependencies of
:type dist: :class:`pkg_resources.Distribution`
:param working_set: A working set to search for all packages
:type working_set: :class:`pkg_resources.WorkingSet`
:return: A set of distributions which the package depends on, including the package
:rtype: set(:class:`pkg_resources.Distribution`)
"""
deps = set()
deps.add(dist)
try:
reqs = dist.requires()
# KeyError = limited metadata can be found
except (KeyError, AttributeError, OSError): # The METADATA file can't be found
return deps
for req in reqs:
try:
dist = working_set.find(req)
except pkg_resources.VersionConflict:
# https://github.com/pypa/pipenv/issues/4549
# The requirement is already present with incompatible version.
continue
deps |= cls.resolve_dist(dist, working_set)
return deps
def extend_dists(self, dist):
# type: (pkg_resources.Distribution) -> None
extras = self.resolve_dist(dist, self.base_working_set)
self.extra_dists.append(dist)
if extras:
self.extra_dists.extend(extras)
def add_dist(self, dist_name):
# type: (str) -> None
dist = pkg_resources.get_distribution(pkg_resources.Requirement(dist_name))
self.extend_dists(dist)
@cached_property
def python_version(self):
# type: () -> str
with self.activated():
sysconfig = self.safe_import("sysconfig")
py_version = sysconfig.get_python_version()
return py_version
def find_libdir(self):
# type: () -> Optional[Path]
libdir = self.prefix / "lib"
return next(iter(list(libdir.iterdir())), None)
@property
def python_info(self):
# type: () -> Dict[str, str]
include_dir = self.prefix / "include"
if not os.path.exists(include_dir):
include_dirs = self.get_include_path()
if include_dirs:
include_path = include_dirs.get("include", include_dirs.get("platinclude"))
if not include_path:
return {}
include_dir = Path(include_path)
python_path = next(iter(list(include_dir.iterdir())), None)
if python_path and python_path.name.startswith("python"):
python_version = python_path.name.replace("python", "")
py_version_short, abiflags = python_version[:3], python_version[3:]
return {"py_version_short": py_version_short, "abiflags": abiflags}
return {}
def _replace_parent_version(self, path, replace_version):
# type: (str, str) -> str
if not os.path.exists(path):
base, leaf = os.path.split(path)
base, parent = os.path.split(base)
leaf = os.path.join(parent, leaf).replace(
replace_version, self.python_info.get("py_version_short", get_python_version())
)
return os.path.join(base, leaf)
return path
@cached_property
def base_paths(self):
# type: () -> Dict[str, str]
"""
Returns the context appropriate paths for the environment.
:return: A dictionary of environment specific paths to be used for installation operations
:rtype: dict
.. note:: The implementation of this is borrowed from a combination of pip and
virtualenv and is likely to change at some point in the future.
>>> from pipenv.core import project
>>> from pipenv.environment import Environment
>>> env = Environment(prefix=project.virtualenv_location, is_venv=True, sources=project.sources)
>>> import pprint
>>> pprint.pprint(env.base_paths)
{'PATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin::/bin:/usr/bin',
'PYTHONPATH': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'data': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'include': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'libdir': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platinclude': '/home/hawk/.pyenv/versions/3.7.1/include/python3.7m',
'platlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'platstdlib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7',
'prefix': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW',
'purelib': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/lib/python3.7/site-packages',
'scripts': '/home/hawk/.virtualenvs/pipenv-MfOPs1lW/bin',
'stdlib': '/home/hawk/.pyenv/versions/3.7.1/lib/python3.7'}
"""
prefix = make_posix(self.prefix.as_posix())
paths = {}
if self._base_paths:
paths = self._base_paths.copy()
else:
try:
paths = self.get_paths()
except Exception:
install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix'
paths = get_paths(install_scheme, vars={
'base': prefix,
'platbase': prefix,
})
current_version = get_python_version()
try:
for k in list(paths.keys()):
if not os.path.exists(paths[k]):
paths[k] = self._replace_parent_version(paths[k], current_version)
except OSError:
# Sometimes virtualenvs are made using virtualenv interpreters and there is no
# include directory, which will cause this approach to fail. This failsafe
# will make sure we fall back to the shell execution to find the real include path
paths = self.get_include_path()
paths.update(self.get_lib_paths())
paths["scripts"] = self.script_basedir
if not paths:
install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix'
paths = get_paths(install_scheme, vars={
'base': prefix,
'platbase': prefix,
})
if not os.path.exists(paths["purelib"]) and not os.path.exists(paths["platlib"]):
lib_paths = self.get_lib_paths()
paths.update(lib_paths)
paths["PATH"] = paths["scripts"] + os.pathsep + os.defpath
if "prefix" not in paths:
paths["prefix"] = prefix
purelib = paths["purelib"] = make_posix(paths["purelib"])
platlib = paths["platlib"] = make_posix(paths["platlib"])
if purelib == platlib:
lib_dirs = purelib
else:
lib_dirs = purelib + os.pathsep + platlib
paths["libdir"] = purelib
paths['PYTHONPATH'] = os.pathsep.join(["", ".", lib_dirs])
paths["libdirs"] = lib_dirs
return paths
@cached_property
def script_basedir(self):
# type: () -> str
"""Path to the environment scripts dir"""
prefix = make_posix(self.prefix.as_posix())
install_scheme = 'nt' if (os.name == 'nt') else 'posix_prefix'
paths = get_paths(install_scheme, vars={
'base': prefix,
'platbase': prefix,
})
return paths["scripts"]
@property
def python(self):
# type: () -> str
"""Path to the environment python"""
if self._python is not None:
return self._python
if os.name == "nt" and not self.is_venv:
py = Path(self.prefix).joinpath("python").absolute().as_posix()
else:
py = Path(self.script_basedir).joinpath("python").absolute().as_posix()
if not py:
py = Path(sys.executable).as_posix()
self._python = py
return py
@cached_property
def sys_path(self):
# type: () -> List[str]
"""
The system path inside the environment
:return: The :data:`sys.path` from the environment
:rtype: list
"""
from .vendor.vistir.compat import JSONDecodeError
current_executable = Path(sys.executable).as_posix()
if not self.python or self.python == current_executable:
return sys.path
elif any([sys.prefix == self.prefix, not self.is_venv]):
return sys.path
cmd_args = [self.python, "-c", "import json, sys; print(json.dumps(sys.path))"]
path, _ = vistir.misc.run(cmd_args, return_object=False, nospin=True, block=True, combine_stderr=False, write_to_stdout=False)
try:
path = json.loads(path.strip())
except JSONDecodeError:
path = sys.path
return path
def build_command(self, python_lib=False, python_inc=False, scripts=False, py_version=False):
# type: (bool, bool, bool, bool) -> str
"""Build the text for running a command in the given environment
:param python_lib: Whether to include the python lib dir commands, defaults to False
:type python_lib: bool, optional
:param python_inc: Whether to include the python include dir commands, defaults to False
:type python_inc: bool, optional
:param scripts: Whether to include the scripts directory, defaults to False
:type scripts: bool, optional
:param py_version: Whether to include the python version info, defaults to False
:type py_version: bool, optional
:return: A string representing the command to run
"""
pylib_lines = []
pyinc_lines = []
py_command = (
"import sysconfig, distutils.sysconfig, io, json, sys; paths = {{"
"%s }}; value = u'{{0}}'.format(json.dumps(paths));"
"fh = io.open('{0}', 'w'); fh.write(value); fh.close()"
)
distutils_line = "distutils.sysconfig.get_python_{0}(plat_specific={1})"
sysconfig_line = "sysconfig.get_path('{0}')"
if python_lib:
for key, var, val in (("pure", "lib", "0"), ("plat", "lib", "1")):
dist_prefix = f"{key}lib"
# XXX: We need to get 'stdlib' or 'platstdlib'
sys_prefix = "{}stdlib".format("" if key == "pure" else key)
pylib_lines.append(f"u'{dist_prefix}': u'{{{{0}}}}'.format({distutils_line.format(var, val)})")
pylib_lines.append(f"u'{sys_prefix}': u'{{{{0}}}}'.format({sysconfig_line.format(sys_prefix)})")
if python_inc:
for key, var, val in (("include", "inc", "0"), ("platinclude", "inc", "1")):
pylib_lines.append(f"u'{key}': u'{{{{0}}}}'.format({distutils_line.format(var, val)})")
lines = pylib_lines + pyinc_lines
if scripts:
lines.append("u'scripts': u'{{0}}'.format(%s)" % sysconfig_line.format("scripts"))
if py_version:
lines.append("u'py_version_short': u'{{0}}'.format(distutils.sysconfig.get_python_version()),")
lines_as_str = ",".join(lines)
py_command = py_command % lines_as_str
return py_command
def get_paths(self):
# type: () -> Optional[Dict[str, str]]
"""
Get the paths for the environment by running a subcommand
:return: The python paths for the environment
:rtype: Dict[str, str]
"""
tmpfile = vistir.path.create_tracked_tempfile(suffix=".json")
tmpfile.close()
tmpfile_path = make_posix(tmpfile.name)
py_command = self.build_command(python_lib=True, python_inc=True, scripts=True, py_version=True)
command = [self.python, "-c", py_command.format(tmpfile_path)]
c = subprocess_run(command)
if c.returncode == 0:
paths = {}
with open(tmpfile_path, "r", encoding="utf-8") as fh:
paths = json.load(fh)
if "purelib" in paths:
paths["libdir"] = paths["purelib"] = make_posix(paths["purelib"])
for key in ("platlib", "scripts", "platstdlib", "stdlib", "include", "platinclude"):
if key in paths:
paths[key] = make_posix(paths[key])
return paths
else:
vistir.misc.echo(f"Failed to load paths: {c.stderr}", fg="yellow")
vistir.misc.echo(f"Output: {c.stdout}", fg="yellow")
return None
def get_lib_paths(self):
# type: () -> Dict[str, str]
"""Get the include path for the environment
:return: The python include path for the environment
:rtype: Dict[str, str]
"""
tmpfile = vistir.path.create_tracked_tempfile(suffix=".json")
tmpfile.close()
tmpfile_path = make_posix(tmpfile.name)
py_command = self.build_command(python_lib=True)
command = [self.python, "-c", py_command.format(tmpfile_path)]
c = subprocess_run(command)
paths = None
if c.returncode == 0:
paths = {}
with open(tmpfile_path, "r", encoding="utf-8") as fh:
paths = json.load(fh)
if "purelib" in paths:
paths["libdir"] = paths["purelib"] = make_posix(paths["purelib"])
for key in ("platlib", "platstdlib", "stdlib"):
if key in paths:
paths[key] = make_posix(paths[key])
return paths
else:
vistir.misc.echo(f"Failed to load paths: {c.stderr}", fg="yellow")
vistir.misc.echo(f"Output: {c.stdout}", fg="yellow")
if not paths:
if not self.prefix.joinpath("lib").exists():
return {}
stdlib_path = next(iter([
p for p in self.prefix.joinpath("lib").iterdir()
if p.name.startswith("python")
]), None)
lib_path = None
if stdlib_path:
lib_path = next(iter([
p.as_posix() for p in stdlib_path.iterdir()
if p.name == "site-packages"
]))
paths = {"stdlib": stdlib_path.as_posix()}
if lib_path:
paths["purelib"] = lib_path
return paths
return {}
def get_include_path(self):
# type: () -> Optional[Dict[str, str]]
"""Get the include path for the environment
:return: The python include path for the environment
:rtype: Dict[str, str]
"""
tmpfile = vistir.path.create_tracked_tempfile(suffix=".json")
tmpfile.close()
tmpfile_path = make_posix(tmpfile.name)
py_command = (
"import distutils.sysconfig, io, json, sys; paths = {{u'include': "
"u'{{0}}'.format(distutils.sysconfig.get_python_inc(plat_specific=0)), "
"u'platinclude': u'{{0}}'.format(distutils.sysconfig.get_python_inc("
"plat_specific=1)) }}; value = u'{{0}}'.format(json.dumps(paths));"
"fh = io.open('{0}', 'w'); fh.write(value); fh.close()"
)
command = [self.python, "-c", py_command.format(tmpfile_path)]
c = subprocess_run(command)
if c.returncode == 0:
paths = []
with open(tmpfile_path, "r", encoding="utf-8") as fh:
paths = json.load(fh)
for key in ("include", "platinclude"):
if key in paths:
paths[key] = make_posix(paths[key])
return paths
else:
vistir.misc.echo(f"Failed to load paths: {c.stderr}", fg="yellow")
vistir.misc.echo(f"Output: {c.stdout}", fg="yellow")
return None
@cached_property
def sys_prefix(self):
# type: () -> str
"""
The prefix run inside the context of the environment
:return: The python prefix inside the environment
:rtype: :data:`sys.prefix`
"""
command = [self.python, "-c", "import sys; print(sys.prefix)"]
c = subprocess_run(command)
sys_prefix = Path(c.stdout.strip()).as_posix()
return sys_prefix
@cached_property
def paths(self):
# type: () -> Dict[str, str]
paths = {}
with vistir.contextmanagers.temp_environ(), vistir.contextmanagers.temp_path():
os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8")
os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1")
paths = self.base_paths
os.environ["PATH"] = paths["PATH"]
os.environ["PYTHONPATH"] = paths["PYTHONPATH"]
if "headers" not in paths:
paths["headers"] = paths["include"]
return paths
@property
def scripts_dir(self):
# type: () -> str
return self.paths["scripts"]
@property
def libdir(self):
# type: () -> str
purelib = self.paths.get("purelib", None)
if purelib and os.path.exists(purelib):
return "purelib", purelib
return "platlib", self.paths["platlib"]
@property
def pip_version(self):
# type: () -> Version
"""
Get the pip version in the environment. Useful for knowing which args we can use
when installing.
"""
from .vendor.packaging.version import parse as parse_version
pip = next(iter(
pkg for pkg in self.get_installed_packages() if pkg.key == "pip"
), None)
if pip is not None:
return parse_version(pip.version)
return parse_version("20.2")
def expand_egg_links(self):
# type: () -> None
"""
Expand paths specified in egg-link files to prevent pip errors during
reinstall
"""
prefixes = [
Path(prefix)
for prefix in self.base_paths["libdirs"].split(os.pathsep)
if vistir.path.is_in_path(prefix, self.prefix.as_posix())
]
for loc in prefixes:
if not loc.exists():
continue
for pth in loc.iterdir():
if not pth.suffix == ".egg-link":
continue
contents = [
vistir.path.normalize_path(line.strip())
for line in pth.read_text().splitlines()
]
pth.write_text("\n".join(contents))
def get_distributions(self):
# type: () -> Generator[pkg_resources.Distribution, None, None]
"""
Retrieves the distributions installed on the library path of the environment
:return: A set of distributions found on the library path
:rtype: iterator
"""
pip_target_dir = os.environ.get('PIP_TARGET')
libdirs = [pip_target_dir] if pip_target_dir else self.base_paths["libdirs"].split(os.pathsep)
dists = (pkg_resources.find_distributions(libdir) for libdir in libdirs)
yield from itertools.chain.from_iterable(dists)
def find_egg(self, egg_dist):
# type: (pkg_resources.Distribution) -> str
"""Find an egg by name in the given environment"""
site_packages = self.libdir[1]
search_filename = f"{egg_dist.project_name}.egg-link"
try:
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
search_locations = [site_packages, user_site]
for site_directory in search_locations:
egg = os.path.join(site_directory, search_filename)
if os.path.isfile(egg):
return egg
def locate_dist(self, dist):
# type: (pkg_resources.Distribution) -> str
"""Given a distribution, try to find a corresponding egg link first.
If the egg-link doesn't exist, return the supplied distribution."""
location = self.find_egg(dist)
return location or dist.location
def dist_is_in_project(self, dist):
# type: (pkg_resources.Distribution) -> bool
"""Determine whether the supplied distribution is in the environment."""
from .environments import normalize_pipfile_path as _normalized
prefixes = [
_normalized(prefix) for prefix in self.base_paths["libdirs"].split(os.pathsep)
if _normalized(prefix).startswith(_normalized(self.prefix.as_posix()))
]
location = self.locate_dist(dist)
if not location:
return False
location = _normalized(make_posix(location))
return any(location.startswith(prefix) for prefix in prefixes)
def get_installed_packages(self):
# type: () -> List[pkg_resources.Distribution]
"""Returns all of the installed packages in a given environment"""
workingset = self.get_working_set()
packages = [
pkg for pkg in workingset
if self.dist_is_in_project(pkg) and pkg.key != "python"
]
return packages
@contextlib.contextmanager
def get_finder(self, pre=False):
# type: (bool) -> ContextManager[pip_shims.shims.PackageFinder]
from .vendor.pip_shims.shims import InstallCommand, get_package_finder
pip_command = InstallCommand()
pip_args = self._modules["pipenv"].utils.prepare_pip_source_args(self.sources)
pip_options, _ = pip_command.parser.parse_args(pip_args)
pip_options.cache_dir = self.project.s.PIPENV_CACHE_DIR
pip_options.pre = self.pipfile.get("pre", pre)
with pip_command._build_session(pip_options) as session:
finder = get_package_finder(install_cmd=pip_command, options=pip_options, session=session)
yield finder
def get_package_info(self, pre=False):
# type: (bool) -> Generator[pkg_resources.Distribution, None, None]
from .vendor.pip_shims.shims import parse_version, pip_version
dependency_links = []
packages = self.get_installed_packages()
# This code is borrowed from pip's current implementation
if parse_version(pip_version) < parse_version("19.0"):
for dist in packages:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt')
)
with self.get_finder() as finder:
if parse_version(pip_version) < parse_version("19.0"):
finder.add_dependency_links(dependency_links)
for dist in packages:
typ = 'unknown'
all_candidates = finder.find_all_candidates(dist.key)
if not self.pipfile.get("pre", finder.allow_all_prereleases):
# Remove prereleases
all_candidates = [
candidate for candidate in all_candidates
if not candidate.version.is_prerelease
]
if not all_candidates:
continue
candidate_evaluator = finder.make_candidate_evaluator(project_name=dist.key)
best_candidate_result = candidate_evaluator.compute_best_candidate(all_candidates)
remote_version = best_candidate_result.best_candidate.version
if best_candidate_result.best_candidate.link.is_wheel:
typ = 'wheel'
else:
typ = 'sdist'
# This is dirty but makes the rest of the code much cleaner
dist.latest_version = remote_version
dist.latest_filetype = typ
yield dist
def get_outdated_packages(self, pre=False):
# type: (bool) -> List[pkg_resources.Distribution]
return [
pkg for pkg in self.get_package_info(pre=pre)
if pkg.latest_version._key > pkg.parsed_version._key
]
@classmethod
def _get_requirements_for_package(cls, node, key_tree, parent=None, chain=None):
if chain is None:
chain = [node.project_name]
d = node.as_dict()
if parent:
d['required_version'] = node.version_spec if node.version_spec else 'Any'
else:
d['required_version'] = d['installed_version']
get_children = lambda n: key_tree.get(n.key, []) # noqa
d['dependencies'] = [
cls._get_requirements_for_package(c, key_tree, parent=node,
chain=chain+[c.project_name])
for c in get_children(node)
if c.project_name not in chain
]
return d
def get_package_requirements(self, pkg=None):
from .vendor.pipdeptree import PackageDAG, flatten
packages = self.get_installed_packages()
if pkg:
packages = [p for p in packages if p.key == pkg]
tree = PackageDAG.from_pkgs(packages).sort()
branch_keys = {r.key for r in flatten(tree.values())}
if pkg is not None:
nodes = [p for p in tree.keys() if p.key == pkg]
else:
nodes = [p for p in tree.keys() if p.key not in branch_keys]
key_tree = {k.key: v for k, v in tree.items()}
return [self._get_requirements_for_package(p, key_tree) for p in nodes]
@classmethod
def reverse_dependency(cls, node):
new_node = {
"package_name": node["package_name"],
"installed_version": node["installed_version"],
"required_version": node["required_version"]
}
for dependency in node.get("dependencies", []):
for dep in cls.reverse_dependency(dependency):
new_dep = dep.copy()
new_dep["parent"] = (node["package_name"], node["installed_version"])
yield new_dep
yield new_node
def reverse_dependencies(self):
from vistir.misc import chunked, unnest
rdeps = {}
for req in self.get_package_requirements():
for d in self.reverse_dependency(req):
parents = None
name = d["package_name"]
pkg = {
name: {
"installed": d["installed_version"],
"required": d["required_version"]
}
}
parents = tuple(d.get("parent", ()))
pkg[name]["parents"] = parents
if rdeps.get(name):
if not (rdeps[name].get("required") or rdeps[name].get("installed")):
rdeps[name].update(pkg[name])
rdeps[name]["parents"] = rdeps[name].get("parents", ()) + parents
else:
rdeps[name] = pkg[name]
for k in list(rdeps.keys()):
entry = rdeps[k]
if entry.get("parents"):
rdeps[k]["parents"] = {
p for p, version in chunked(2, unnest(entry["parents"]))
}
return rdeps
def get_working_set(self):
"""Retrieve the working set of installed packages for the environment.
:return: The working set for the environment
:rtype: :class:`pkg_resources.WorkingSet`
"""
working_set = pkg_resources.WorkingSet(self.sys_path)
return working_set
def is_installed(self, pkgname):
"""Given a package name, returns whether it is installed in the environment
:param str pkgname: The name of a package
:return: Whether the supplied package is installed in the environment
:rtype: bool
"""
return any(d for d in self.get_distributions() if d.project_name == pkgname)
def is_satisfied(self, req):
match = next(
iter(
d for d in self.get_distributions()
if canonicalize_name(d.project_name) == req.normalized_name
), None
)
if match is not None:
if req.editable and req.line_instance.is_local and self.find_egg(match):
requested_path = req.line_instance.path
return requested_path and vistir.compat.samefile(requested_path, match.location)
elif match.has_metadata("direct_url.json"):
direct_url_metadata = json.loads(match.get_metadata("direct_url.json"))
commit_id = direct_url_metadata.get("vcs_info", {}).get("commit_id", "")
vcs_type = direct_url_metadata.get("vcs_info", {}).get("vcs", "")
_, pipfile_part = req.as_pipfile().popitem()
return (
vcs_type == req.vcs and commit_id == req.commit_hash
and direct_url_metadata["url"] == pipfile_part[req.vcs]
)
elif req.is_vcs or req.is_file_or_url:
return False
elif req.line_instance.specifiers is not None:
return req.line_instance.specifiers.contains(
match.version, prereleases=True
)
return True
return False
def run(self, cmd, cwd=os.curdir):
"""Run a command with :class:`~subprocess.Popen` in the context of the environment
:param cmd: A command to run in the environment
:type cmd: str or list
:param str cwd: The working directory in which to execute the command, defaults to :data:`os.curdir`
:return: A finished command object
:rtype: :class:`~subprocess.Popen`
"""
c = None
with self.activated():
script = vistir.cmdparse.Script.parse(cmd)
c = vistir.misc.run(script._parts, return_object=True, nospin=True, cwd=cwd, write_to_stdout=False)
return c
def run_py(self, cmd, cwd=os.curdir):
"""Run a python command in the environment context.
:param cmd: A command to run in the environment - runs with `python -c`
:type cmd: str or list
:param str cwd: The working directory in which to execute the command, defaults to :data:`os.curdir`
:return: A finished command object
:rtype: :class:`~subprocess.Popen`
"""
c = None
if isinstance(cmd, str):
script = vistir.cmdparse.Script.parse(f"{self.python} -c {cmd}")
else:
script = vistir.cmdparse.Script.parse([self.python, "-c"] + list(cmd))
with self.activated():
c = vistir.misc.run(script._parts, return_object=True, nospin=True, cwd=cwd, write_to_stdout=False)
return c
def run_activate_this(self):
"""Runs the environment's inline activation script"""
if self.is_venv:
activate_this = os.path.join(self.scripts_dir, "activate_this.py")
if not os.path.isfile(activate_this):
raise OSError(f"No such file: {activate_this!s}")
with open(activate_this) as f:
code = compile(f.read(), activate_this, "exec")
exec(code, dict(__file__=activate_this))
@contextlib.contextmanager
def activated(self, include_extras=True, extra_dists=None):
"""Helper context manager to activate the environment.
This context manager will set the following variables for the duration
of its activation:
* sys.prefix
* sys.path
* os.environ["VIRTUAL_ENV"]
* os.environ["PATH"]
In addition, it will make any distributions passed into `extra_dists` available
on `sys.path` while inside the context manager, as well as making `passa` itself
available.
The environment's `prefix` as well as `scripts_dir` properties are both prepended
to `os.environ["PATH"]` to ensure that calls to `~Environment.run()` use the
environment's path preferentially.
"""
if not extra_dists:
extra_dists = []
original_path = sys.path
original_prefix = sys.prefix
parent_path = Path(__file__).absolute().parent
vendor_dir = parent_path.joinpath("vendor").as_posix()
patched_dir = parent_path.joinpath("patched").as_posix()
parent_path = parent_path.as_posix()
self.add_dist("pip")
prefix = self.prefix.as_posix()
with vistir.contextmanagers.temp_environ(), vistir.contextmanagers.temp_path():
os.environ["PATH"] = os.pathsep.join([
vistir.compat.fs_str(self.script_basedir),
vistir.compat.fs_str(self.prefix.as_posix()),
os.environ.get("PATH", "")
])
os.environ["PYTHONIOENCODING"] = vistir.compat.fs_str("utf-8")
os.environ["PYTHONDONTWRITEBYTECODE"] = vistir.compat.fs_str("1")
if self.is_venv:
os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
os.environ["VIRTUAL_ENV"] = vistir.compat.fs_str(prefix)
else:
if not self.project.s.PIPENV_USE_SYSTEM and not os.environ.get("VIRTUAL_ENV"):
os.environ["PYTHONPATH"] = self.base_paths["PYTHONPATH"]
os.environ.pop("PYTHONHOME", None)
sys.path = self.sys_path
sys.prefix = self.sys_prefix
site.addsitedir(self.base_paths["purelib"])
pip = self.safe_import("pip") # noqa
pip_vendor = self.safe_import("pip._vendor")
pep517_dir = os.path.join(os.path.dirname(pip_vendor.__file__), "pep517")
site.addsitedir(pep517_dir)
os.environ["PYTHONPATH"] = os.pathsep.join([
os.environ.get("PYTHONPATH", self.base_paths["PYTHONPATH"]), pep517_dir
])
if include_extras:
site.addsitedir(parent_path)
sys.path.extend([parent_path, patched_dir, vendor_dir])
extra_dists = list(self.extra_dists) + extra_dists
for extra_dist in extra_dists:
if extra_dist not in self.get_working_set():
extra_dist.activate(self.sys_path)
try:
yield
finally:
sys.path = original_path
sys.prefix = original_prefix
@cached_property
def finders(self):
from pipenv.vendor.pythonfinder import Finder
finders = [
Finder(path=self.base_paths["scripts"], global_search=gs, system=False)
for gs in (False, True)
]
return finders
@property
def finder(self):
return next(iter(self.finders), None)
def which(self, search, as_path=True):
find = operator.methodcaller("which", search)
result = next(iter(filter(None, (find(finder) for finder in self.finders))), None)
if not result:
result = self._which(search)
else:
if as_path:
result = str(result.path)
return result
def get_install_args(self, editable=False, setup_path=None):
install_arg = "install" if not editable else "develop"
install_keys = ["headers", "purelib", "platlib", "scripts", "data"]
install_args = [
self.environment.python, "-u", "-c", SETUPTOOLS_SHIM % setup_path,
install_arg, "--single-version-externally-managed", "--no-deps",
"--prefix={}".format(self.base_paths["prefix"]), "--no-warn-script-location"
]
for key in install_keys:
install_args.append(
f"--install-{key}={self.base_paths[key]}"
)
return install_args
def install(self, requirements):
if not isinstance(requirements, (tuple, list)):
requirements = [requirements]
with self.get_finder() as finder:
args = []
for format_control in ('no_binary', 'only_binary'):
formats = getattr(finder.format_control, format_control)
args.extend(('--' + format_control.replace('_', '-'),
','.join(sorted(formats or {':none:'}))))
if finder.index_urls:
args.extend(['-i', finder.index_urls[0]])
for extra_index in finder.index_urls[1:]:
args.extend(['--extra-index-url', extra_index])
else:
args.append('--no-index')
for link in finder.find_links:
args.extend(['--find-links', link])
for _, host, _ in finder.secure_origins:
args.extend(['--trusted-host', host])
if finder.allow_all_prereleases:
args.append('--pre')
if finder.process_dependency_links:
args.append('--process-dependency-links')
args.append('--')
args.extend(requirements)
out, _ = vistir.misc.run(args, return_object=False, nospin=True, block=True,
combine_stderr=False)
@contextlib.contextmanager
def uninstall(self, pkgname, *args, **kwargs):
"""A context manager which allows uninstallation of packages from the environment
:param str pkgname: The name of a package to uninstall
>>> env = Environment("/path/to/env/root")
>>> with env.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller:
cleaned = uninstaller.paths
>>> if cleaned:
print("uninstalled packages: %s" % cleaned)
"""
auto_confirm = kwargs.pop("auto_confirm", True)
verbose = kwargs.pop("verbose", False)
with self.activated():
monkey_patch = next(iter(
dist for dist in self.base_working_set
if dist.project_name == "recursive-monkey-patch"
), None)
if monkey_patch:
monkey_patch.activate()
pip_shims = self.safe_import("pip_shims")
pathset_base = pip_shims.UninstallPathSet
pathset_base._permitted = PatchedUninstaller._permitted
dist = next(
iter(d for d in self.get_working_set() if d.project_name == pkgname),
None
)
pathset = pathset_base.from_dist(dist)
if pathset is not None:
pathset.remove(auto_confirm=auto_confirm, verbose=verbose)
try:
yield pathset
except Exception:
if pathset is not None:
pathset.rollback()
else:
if pathset is not None:
pathset.commit()
if pathset is None:
return
class PatchedUninstaller:
def _permitted(self, path):
return True
SETUPTOOLS_SHIM = (
"import setuptools, tokenize;__file__=%r;"
"f=getattr(tokenize, 'open', open)(__file__);"
"code=f.read().replace('\\r\\n', '\\n');"
"f.close();"
"exec(compile(code, __file__, 'exec'))"
)
|
[] |
[] |
[
"PYTHONIOENCODING",
"PIP_TARGET",
"PYTHONDONTWRITEBYTECODE",
"VIRTUAL_ENV",
"PATH",
"PYTHONPATH"
] |
[]
|
["PYTHONIOENCODING", "PIP_TARGET", "PYTHONDONTWRITEBYTECODE", "VIRTUAL_ENV", "PATH", "PYTHONPATH"]
|
python
| 6 | 0 | |
src/scene_server/admin_server/synchronizer/handler/host.go
|
/*
* Tencent is pleased to support the open source community by making 蓝鲸 available.
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package handler
import (
"context"
"fmt"
"configcenter/src/auth/extensions"
authmeta "configcenter/src/auth/meta"
"configcenter/src/common/blog"
"configcenter/src/scene_server/admin_server/synchronizer/meta"
"configcenter/src/scene_server/admin_server/synchronizer/utils"
)
// HandleHostSync syncs the hosts of one business
func (ih *IAMHandler) HandleHostSync(task *meta.WorkRequest) error {
businessSimplify := task.Data.(extensions.BusinessSimplify)
header := utils.NewAPIHeaderByBusiness(&businessSimplify)
// step1 get instances by business from core service
bizID := businessSimplify.BKAppIDField
hosts, err := ih.authManager.CollectHostByBusinessID(context.Background(), *header, bizID)
if err != nil {
blog.Errorf("get host by business %d failed, err: %+v", businessSimplify.BKAppIDField, err)
return err
}
resources, err := ih.authManager.MakeResourcesByHosts(context.Background(), *header, authmeta.EmptyAction, hosts...)
if err != nil {
blog.Errorf("make host resources failed, bizID: %d, err: %+v", businessSimplify.BKAppIDField, err)
return err
}
// step2 get host by business from iam
rs := &authmeta.ResourceAttribute{
Basic: authmeta.Basic{
Type: authmeta.HostInstance,
},
BusinessID: bizID,
}
taskName := fmt.Sprintf("sync host for business: %d", businessSimplify.BKAppIDField)
iamIDPrefix := ""
skipDeregister := false
return ih.diffAndSync(taskName, rs, iamIDPrefix, resources, skipDeregister)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
plugin/viperplg/viper_test.go
|
package viperplg_test
import (
"fmt"
"os"
"os/exec"
"testing"
"time"
"github.com/freecloudio/server/plugin/viperplg"
"github.com/stretchr/testify/assert"
)
func TestSetCorrectArgsAndRead(t *testing.T) {
oldArgs := os.Args
defer func() { os.Args = oldArgs }()
sessionTokenLength := 4444
sessionExpiration := 1234
newArgs := os.Args[1:2]
newArgs = append(newArgs, fmt.Sprintf("--auth.session.token.length=%v", sessionTokenLength))
newArgs = append(newArgs, fmt.Sprintf("--auth.session.expiration=%v", sessionExpiration))
os.Args = newArgs
cfg := viperplg.InitViperConfig()
assert.Equal(t, sessionTokenLength, cfg.GetSessionTokenLength(), "Expect given token length to match parsed one")
assert.Equal(t, time.Duration(sessionExpiration)*time.Hour, cfg.GetSessionExpirationDuration(), "Expect given token expiration to match parsed one")
assert.Equal(t, time.Hour, cfg.GetSessionCleanupInterval(), "Expect not set config to have default")
}
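// TestSetIncorrectArgs checks that an unparseable flag value terminates the
// process. It uses the common re-exec pattern: the test re-runs its own binary
// with CALL_CONFIG=1 so that the expected exit (asserted to be code 2) from the
// failed flag parsing happens in a child process instead of killing the test run.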
func TestSetIncorrectArgs(t *testing.T) {
if os.Getenv("CALL_CONFIG") == "1" {
viperplg.InitViperConfig()
return
}
cmd := exec.Command(os.Args[0], "-test.run=TestSetIncorrectArgs", "--auth.session.token.length=WRONG")
cmd.Env = append(os.Environ(), "CALL_CONFIG=1")
err := cmd.Run()
if e, ok := err.(*exec.ExitError); ok && !e.Success() {
assert.Equal(t, 2, e.ExitCode(), "Expect exit code for wrong input")
return
}
t.Fatalf("Test ran with err %v, want exit status 2", err)
}
|
[
"\"CALL_CONFIG\""
] |
[] |
[
"CALL_CONFIG"
] |
[]
|
["CALL_CONFIG"]
|
go
| 1 | 0 | |
mmdet/utils/contextmanagers.py
|
# coding: utf-8
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
name='',
sleep_interval=0.05,
streams: List[torch.cuda.Stream] = None):
"""
Async context manager that waits for work to complete on
given CUDA streams.
"""
if not torch.cuda.is_available():
yield
return
stream_before_context_switch = torch.cuda.current_stream()
if not streams:
streams = [stream_before_context_switch]
else:
streams = [s if s else stream_before_context_switch for s in streams]
end_events = [
torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
]
if DEBUG_COMPLETED_TIME:
start = torch.cuda.Event(enable_timing=True)
stream_before_context_switch.record_event(start)
cpu_start = time.monotonic()
logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
grad_enabled_before = torch.is_grad_enabled()
try:
yield
finally:
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_end = time.monotonic()
for i, stream in enumerate(streams):
event = end_events[i]
stream.record_event(event)
grad_enabled_after = torch.is_grad_enabled()
# observed change of torch.is_grad_enabled() during concurrent run of
# async_test_bboxes code
assert (grad_enabled_before == grad_enabled_after
), 'Unexpected is_grad_enabled() value change'
are_done = [e.query() for e in end_events]
logger.debug('%s %s completed: %s streams: %s', trace_name, name,
are_done, streams)
with torch.cuda.stream(stream_before_context_switch):
while not all(are_done):
await asyncio.sleep(sleep_interval)
are_done = [e.query() for e in end_events]
logger.debug(
'%s %s completed: %s streams: %s',
trace_name,
name,
are_done,
streams,
)
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_time = (cpu_end - cpu_start) * 1000
stream_times_ms = ''
for i, stream in enumerate(streams):
elapsed_time = start.elapsed_time(end_events[i])
stream_times_ms += ' {} {:.2f} ms'.format(stream, elapsed_time)
logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
stream_times_ms)
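# Illustrative use of completed() (an assumption, not part of the original module);
# it must be awaited inside a coroutine:
#   async with completed('detector', 'forward', streams=[torch.cuda.current_stream()]):
#       result = model(img)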
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
trace_name='concurrent',
name='stream'):
"""Run code concurrently in different streams.
:param streamqueue: asyncio.Queue instance.
Queue tasks define the pool of streams used for concurrent execution.
"""
if not torch.cuda.is_available():
yield
return
initial_stream = torch.cuda.current_stream()
with torch.cuda.stream(initial_stream):
stream = await streamqueue.get()
assert isinstance(stream, torch.cuda.Stream)
try:
with torch.cuda.stream(stream):
logger.debug('%s %s is starting, stream: %s', trace_name, name,
stream)
yield
current = torch.cuda.current_stream()
assert current == stream
logger.debug('%s %s has finished, stream: %s', trace_name,
name, stream)
finally:
streamqueue.task_done()
streamqueue.put_nowait(stream)
|
[] |
[] |
[
"DEBUG_COMPLETED_TIME"
] |
[]
|
["DEBUG_COMPLETED_TIME"]
|
python
| 1 | 0 | |
appdata_test.go
|
// Copyright (c) 2013-2014 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package btcutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
"github.com/pefish/btcutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := btcutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
|
[
"\"LOCALAPPDATA\"",
"\"APPDATA\""
] |
[] |
[
"APPDATA",
"LOCALAPPDATA"
] |
[]
|
["APPDATA", "LOCALAPPDATA"]
|
go
| 2 | 0 | |
scrape.go
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/remeh/mehtadata/db"
"github.com/remeh/mehtadata/model"
"github.com/remeh/mehtadata/scraper"
"github.com/remeh/mehtadata/thegamesdb"
)
// Scraping launches the scraping for the platform configured through environment variables.
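// Illustrative invocation (assumed values, not from the original repository):
//   PLATFORM=nes PLATFORM_ID=7 DIR=/roms/nes EXTS=nes,zip OUTPUT=/tmp/out mehtadata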
func Scraping(flags Flags) (int, error) {
// read the configuration from environment variables and
// check that all mandatory values are available
// ----------------------
// mandatory
platformName := os.Getenv("PLATFORM")
p := os.Getenv("PLATFORM_ID")
output := os.Getenv("OUTPUT")
dir := os.Getenv("DIR")
exts := os.Getenv("EXTS")
w := os.Getenv("WIDTH")
if !StringsHasContent(platformName, p, output, exts) {
fmt.Println(`Missing parameter.
Mandatory:
PLATFORM : name of the platform in TheGamesDB to find data. Use -platforms for a list
PLATFORM_ID : id of the platform to scrape for
DIR : directory containing the executables
EXTS : extensions of the executable files, separated with a comma.
OUTPUT : output directory for images, etc.
Optional:
WIDTH : max width for the downloaded content (default: 768)
`)
}
// parse parameters
// ----------------------
if len(output) > 0 && string(output[len(output)-1]) != "/" {
output = output + "/"
}
if len(dir) > 0 && string(dir[len(dir)-1]) != "/" {
dir = dir + "/"
}
var err error
platformId := -1
width := 768
if platformId, err = strconv.Atoi(p); err != nil {
fmt.Errorf("Bad platform value.")
os.Exit(-1)
}
if len(w) > 0 {
if width, err = strconv.Atoi(w); err != nil {
fmt.Errorf("Unparseable width value.")
os.Exit(-1)
}
}
fmt.Printf("Launch scraping %d %d", platformId, width)
return scrape(flags, platformName, platformId, dir, exts, output, width)
}
func scrape(flags Flags, platformName string, platformId int, dir, extensions, output string, width int) (int, error) {
// Extensions array
split := strings.Split(extensions, ",")
exts := make([]string, len(split))
for i, v := range split {
exts[i] = strings.Trim(v, " ")
}
// Platforms array
split = strings.Split(platformName, ",")
platforms := make([]string, len(split))
for i, v := range split {
platforms[i] = strings.Trim(v, " ")
}
// look for files to proceed in the given directory if any
var filenames []string
if len(dir) > 0 {
filenames = lookForFiles(dir, exts)
} else {
if len(flag.Args()) == 0 {
fmt.Println("You should either use the DIR environment variable to provide a directory or provide filepath when calling mehtadata.")
os.Exit(1)
}
filenames = flag.Args()
}
gamesinfo := model.NewGamesinfo()
client := thegamesdb.NewClient()
for _, filename := range filenames {
gameinfo, err := client.Find(filename, platforms, dir, output, uint(width))
if err != nil {
log.Println("[err] Unable to find info for the game:", filename)
log.Println(err)
continue
}
// game scraped.
if len(gameinfo.Title) > 0 {
fmt.Printf("For '%s', scraped : '%s' on '%s'\n", filename, gameinfo.Title, gameinfo.Platform)
} else {
scraper.FillDefaults(dir, filename, &gameinfo)
fmt.Printf("Nothing found for '%s'\n", filename)
}
gamesinfo.AddGame(gameinfo)
}
// writes executables info
db.WriteDatabase(flags.DestSqlite, platformId, gamesinfo)
return 0, nil
}
func lookForFiles(directory string, extensions []string) []string {
results := make([]string, 0)
// list files in the directory
fileinfos, err := ioutil.ReadDir(directory)
if err != nil {
return results
}
// for every files existing in the directory
for _, fileinfo := range fileinfos {
// don't mind of directories and check that the extension is valid for this scrape session.
name := fileinfo.Name()
if !fileinfo.IsDir() {
// Check extensions
extension := strings.ToLower(filepath.Ext(name))
for _, e := range extensions {
if extension == strings.ToLower(e) {
results = append(results, name)
break
}
}
}
}
return results
}
|
[
"\"PLATFORM\"",
"\"PLATFORM_ID\"",
"\"OUTPUT\"",
"\"DIR\"",
"\"EXTS\"",
"\"WIDTH\""
] |
[] |
[
"OUTPUT",
"PLATFORM",
"PLATFORM_ID",
"DIR",
"EXTS",
"WIDTH"
] |
[]
|
["OUTPUT", "PLATFORM", "PLATFORM_ID", "DIR", "EXTS", "WIDTH"]
|
go
| 6 | 0 | |
cook/core/misc.py
|
import glob as _glob
import hashlib
import os
import platform
import random as _random
import struct
import subprocess
import sys
import threading
from . import log
system = platform.system()
linux = system == 'Linux'
windows = system == 'Windows'
mac = system == 'Darwin'
if windows:
NEW_PROCESS_GROUP = dict(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
NEW_PROCESS_GROUP = dict(preexec_fn=os.setpgrp)
class Marked(str):
pass
def cache(func):
"""Thread-safe caching."""
lock = threading.Lock()
results = {}
def wrapper(*args, **kwargs):
identifier = checksum(args, kwargs)
if identifier in results:
return results[identifier]
with lock:
if identifier in results:
return results[identifier]
result = func(*args, **kwargs)
results[identifier] = result
return result
return wrapper
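# Illustrative use of the cache decorator (assumed example, not part of the module):
#   @cache
#   def load_config(path, strict=True):
#       ...  # expensive work
# Repeated calls with the same arguments return the memoized result; the key is
# built with checksum(), so arguments must be of the types checksum() supports.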
def checksum(*objects):
"""Calculate an MD5-checksum (128 bits) of the given object.
The types of the object and its elements (in case of a container)
may only consist of:
- int
- None
- bool
- str
- bytes, bytearray, ...
- dict, ...
- set, frozenset, ...
- list, tuple, generator, ...
A TypeError will be raised if an unsupported type is encountered.
Please note that cyclic containers exceed the maximum recursion
depth, which raises a RuntimeError.
The builtin hash-function cannot be used, since it is randomized
for strings, bytes and datetime objects and sometimes even slower
when trying to get a byte representation.
"""
hasher = hashlib.md5()
_checksum(hasher, objects)
return hasher.hexdigest()
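# Illustrative checksum() call (assumed values, not part of the module):
#   checksum({'a': [1, 2.0, None]}, ('b', b'raw'))   # -> 32-character hex digest
# Unsupported types (e.g. instances of custom classes) raise a TypeError.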
def _checksum(hasher, obj):
if isinstance(obj, str):
hasher.update(b'\x00' + obj.encode())
elif isinstance(obj, bool):
hasher.update(b'\x01' if obj else b'\x02')
elif isinstance(obj, int):
if obj >= 0:
hasher.update(b'\x03')
else:
hasher.update(b'\x04')
obj *= -1
while obj > 255:
hasher.update(bytes([obj % 256]))
obj >>= 8
hasher.update(bytes([obj]))
elif isinstance(obj, float):
hasher.update(b'\x05')
hasher.update(struct.pack('d', obj))
elif obj is None:
hasher.update(b'\x06')
elif isinstance(obj, bytes) or isinstance(obj, bytearray):
hasher.update(b'\x07')
hasher.update(obj)
elif isinstance(obj, dict):
hasher.update(b'\x08')
_checksum(hasher, sorted(obj.items()))
elif isinstance(obj, set) or isinstance(obj, frozenset):
hasher.update(b'\x09')
_checksum(hasher, sorted(obj))
elif isinstance(obj, list) or isinstance(obj, tuple):
hasher.update(b'\x0a')
for element in obj:
hasher.update(b'\x0b')
_checksum(hasher, element)
else:
raise TypeError('Unsupported object type "{}".'.format(type(obj)))
def which(file, env=os.environ):
"""Tries to find the exact path for a given filename.
Returns None if no file was found.
"""
if file is None:
return None
for path in env.get('PATH', '').split(os.pathsep):
if path:
result = os.path.join(path, file)
if os.path.exists(result):
return os.path.realpath(result)
return None
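# Illustrative use of which() (assumed paths, not part of the module):
#   which('gcc')                                      # e.g. '/usr/bin/gcc', or None if absent
#   which('gcc', env={'PATH': '/opt/toolchain/bin'})  # search a custom PATH only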
if sys.version_info >= (3, 5):
def glob(pathname):
from . import loader
return absolute(filter(os.path.isfile, _glob.iglob(
loader.resolve(pathname), recursive=True)))
else:
def glob(pathname):
from . import loader
return absolute(filter(os.path.isfile, _iglob(
loader.resolve(pathname))))
def _iglob(pathname):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
Note: The recursive glob was introduced in Python 3.5. This is more
or less a straight back-port in order to support older versions.
"""
dirname, basename = os.path.split(pathname)
if not _glob.has_magic(pathname):
if basename:
if os.path.lexists(pathname):
yield pathname
else:
raise FileNotFoundError
else:
if os.path.isdir(dirname):
yield pathname
else:
raise NotADirectoryError
return
if not dirname:
if basename == '**':
for name in _glob2(dirname, basename):
yield name
else:
for name in _glob.glob1(dirname, basename):
yield name
return
if dirname != pathname and _glob.has_magic(dirname):
dirs = _iglob(dirname)
else:
dirs = [dirname]
if _glob.has_magic(basename):
if basename == '**':
glob_in_dir = _glob2
else:
glob_in_dir = _glob.glob1
else:
glob_in_dir = _glob.glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
def _glob2(dirname, pattern):
if dirname:
yield pattern[:0]
for name in _rlistdir(dirname):
yield name
def _rlistdir(dirname):
if not dirname:
dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return
for x in names:
if not _glob._ishidden(x):
yield x
path = os.path.join(dirname, x) if dirname else x
for y in _rlistdir(path):
yield os.path.join(x, y)
def absolute(path_or_paths):
"""..."""
if path_or_paths is None:
return None
elif isinstance(path_or_paths, str):
return os.path.abspath(path_or_paths)
else:
return list(map(os.path.abspath, path_or_paths))
def relative(path_or_paths):
"""..."""
if path_or_paths is None:
return None
elif isinstance(path_or_paths, str):
return os.path.normpath(os.path.relpath(path_or_paths))
else:
return list(map(os.path.normpath, map(os.path.relpath, path_or_paths)))
def random(suffix=''):
return ''.join(_random.choice('abcdefghijklmnopqrstuvwxyz1234567890')
for _ in range(16)) + suffix
def extension(path):
return os.path.splitext(os.path.basename(path))[1][1:]
# TODO: Change name or remove / alternative.
def base_no_ext(path):
return os.path.splitext(os.path.basename(path))[0]
def is_inside(path, directory):
path = os.path.normpath(os.path.abspath(path)).split(os.sep)
directory = os.path.normpath(os.path.abspath(directory)).split(os.sep)
# A shorter path cannot be inside the directory; bail out early instead of
# letting the loop below raise an IndexError.
if len(path) < len(directory):
return False
for n, component in enumerate(directory):
if path[n] != component:
return False
return True
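# Illustrative behaviour of is_inside() (assumed paths, not part of the module):
#   is_inside('/usr/local/bin/tool', '/usr/local')  # True
#   is_inside('/etc/passwd', '/usr/local')          # False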
class CallError(Exception):
def __init__(self, returned, command, output=None):
self.returned = returned
self.command = command
self.scommand = subprocess.list2cmdline(command)
self.output = output
def __str__(self):
cmdline = subprocess.list2cmdline(self.command)
return 'Command "{}" returned {}'.format(
cmdline, self.returned)
def call(command, cwd=None, env=None, timeout=None):
log.debug('CALL {}'.format(subprocess.list2cmdline(command)))
if env is None:
env = os.environ
try:
output = subprocess.check_output(
command, stderr=subprocess.STDOUT, env=env, cwd=cwd,
stdin=subprocess.DEVNULL, timeout=timeout,
**NEW_PROCESS_GROUP
)
return output.decode(errors='ignore')
except subprocess.CalledProcessError as e:
output = e.output.decode(errors='ignore')
raise CallError(e.returncode, e.cmd, output) from None
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
kuma/install.go
|
package kuma
import (
"archive/tar"
"bytes"
"compress/gzip"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path"
"runtime"
"github.com/layer5io/meshery-adapter-library/adapter"
"github.com/layer5io/meshery-adapter-library/status"
"github.com/layer5io/meshery-kuma/internal/config"
mesherykube "github.com/layer5io/meshkit/utils/kubernetes"
)
func (kuma *Kuma) installKuma(del bool, namespace string, version string) (string, error) {
st := status.Installing
if del {
st = status.Removing
}
err := kuma.Config.GetObject(adapter.MeshSpecKey, kuma)
if err != nil {
return st, ErrMeshConfig(err)
}
manifest, err := kuma.fetchManifest(version)
if err != nil {
kuma.Log.Error(ErrInstallKuma(err))
return st, ErrInstallKuma(err)
}
err = kuma.applyManifest(del, namespace, []byte(manifest))
if err != nil {
kuma.Log.Error(ErrInstallKuma(err))
return st, ErrInstallKuma(err)
}
if del {
return status.Removed, nil
}
return status.Installed, nil
}
func (kuma *Kuma) fetchManifest(version string) (string, error) {
var (
out bytes.Buffer
er bytes.Buffer
)
Executable, err := kuma.getExecutable(version)
if err != nil {
return "", ErrFetchManifest(err, err.Error())
}
// We need variable executable hence
// #nosec
command := exec.Command(Executable, "install", "control-plane")
command.Stdout = &out
command.Stderr = &er
err = command.Run()
if err != nil {
kuma.Log.Info(out.String())
return "", ErrFetchManifest(err, er.String())
}
return out.String(), nil
}
func (kuma *Kuma) applyManifest(del bool, namespace string, contents []byte) error {
err := kuma.MesheryKubeclient.ApplyManifest(contents, mesherykube.ApplyOptions{
Namespace: namespace,
Update: true,
Delete: del,
})
if err != nil {
return err
}
return nil
}
// getExecutable looks for the executable in
// 1. $PATH
// 2. Root config path
//
// If it doesn't find the executable in the path then it proceeds
// to download the binary from github releases and installs it
// in the root config path
func (kuma *Kuma) getExecutable(release string) (string, error) {
const binaryName = "kumactl"
alternateBinaryName := "kumactl-" + release
// Look for the executable in the path
kuma.Log.Info("Looking for kuma in the path...")
executable, err := exec.LookPath(binaryName)
if err == nil {
return executable, nil
}
executable, err = exec.LookPath(alternateBinaryName)
if err == nil {
return executable, nil
}
// Look for config in the root path
binPath := path.Join(config.RootPath(), "bin")
kuma.Log.Info("Looking for kuma in", binPath, "...")
executable = path.Join(binPath, alternateBinaryName)
if _, err := os.Stat(executable); err == nil {
return executable, nil
}
// Proceed to download the binary in the config root path
kuma.Log.Info("kuma not found in the path, downloading...")
res, err := downloadBinary(os.Getenv("DISTRO"), runtime.GOARCH, release)
if err != nil {
return "", ErrGetKumactl(err)
}
// Install the binary
kuma.Log.Info("Installing...")
if err = installBinary(path.Join(binPath, alternateBinaryName), runtime.GOOS, res); err != nil {
return "", ErrGetKumactl(err)
}
// Move binary to the right location
err = os.Rename(path.Join(binPath, alternateBinaryName, "kuma-"+release, "bin", "kumactl"), path.Join(binPath, "kumactl"))
if err != nil {
return "", ErrGetKumactl(err)
}
// Cleanup
kuma.Log.Info("Cleaning up...")
if err = os.RemoveAll(path.Join(binPath, alternateBinaryName)); err != nil {
return "", ErrGetKumactl(err)
}
if err = os.Rename(path.Join(binPath, "kumactl"), path.Join(binPath, alternateBinaryName)); err != nil {
return "", ErrGetKumactl(err)
}
// Set permissions
// Permission has to be +x to be able to run the binary
// #nosec
if err = os.Chmod(path.Join(binPath, alternateBinaryName), 0750); err != nil {
return "", ErrGetKumactl(err)
}
kuma.Log.Info("Done")
return path.Join(binPath, alternateBinaryName), nil
}
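// downloadBinary fetches the kumactl release archive for the given platform and
// architecture. Illustrative URL for release "1.0.0" on "darwin"/"amd64"
// (assumed values): https://kong.bintray.com/kuma/kuma-1.0.0-darwin-amd64.tar.gz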
func downloadBinary(platform, arch, release string) (*http.Response, error) {
var url = fmt.Sprintf("https://kong.bintray.com/kuma/kuma-%s-%s-%s.tar.gz", release, platform, arch)
// We need variable url hence
// #nosec
resp, err := http.Get(url)
if err != nil {
return nil, ErrDownloadBinary(err)
}
if resp.StatusCode == http.StatusNotFound {
return nil, ErrDownloadBinary(fmt.Errorf("binary not found, possibly the operating system is not supported"))
}
if resp.StatusCode != http.StatusOK {
return nil, ErrDownloadBinary(fmt.Errorf("bad status: %s", resp.Status))
}
return resp, nil
}
func installBinary(location, platform string, res *http.Response) error {
// Close the response body
defer func() {
if err := res.Body.Close(); err != nil {
fmt.Println(err)
}
}()
err := os.MkdirAll(location, 0750)
if err != nil {
return err
}
switch platform {
case "darwin":
fallthrough
case "linux":
uncompressedStream, err := gzip.NewReader(res.Body)
if err != nil {
return err
}
tarReader := tar.NewReader(uncompressedStream)
for {
header, err := tarReader.Next()
if err == io.EOF {
break
}
if err != nil {
return ErrInstallBinary(err)
}
switch header.Typeflag {
case tar.TypeDir:
// File traversal is required to store the binary at the right place
// #nosec
if err := os.MkdirAll(path.Join(location, header.Name), 0750); err != nil {
return ErrInstallBinary(err)
}
case tar.TypeReg:
// File traversal is required to store the binary at the right place
// #nosec
outFile, err := os.Create(path.Join(location, header.Name))
if err != nil {
return ErrInstallBinary(err)
}
// Trust kuma tar
// #nosec
if _, err := io.Copy(outFile, tarReader); err != nil {
return ErrInstallBinary(err)
}
if err = outFile.Close(); err != nil {
return ErrInstallBinary(err)
}
default:
return ErrInstallBinary(err)
}
}
case "windows":
}
return nil
}
|
[
"\"DISTRO\""
] |
[] |
[
"DISTRO"
] |
[]
|
["DISTRO"]
|
go
| 1 | 0 | |
src/pmdb/pmdb/wsgi.py
|
"""
WSGI config for pmdb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pmdb.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
git_config.py
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle
import os
import re
import subprocess
import sys
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import time
import urllib2
from signal import SIGTERM
from urllib2 import urlopen, HTTPError
from error import GitError, UploadError
from trace import Trace
from git_command import GitCommand
from git_command import ssh_sock
from git_command import terminate_ssh_clients
R_HEADS = 'refs/heads/'
R_TAGS = 'refs/tags/'
ID_RE = re.compile('^[0-9a-f]{40}$')
REVIEW_CACHE = dict()
def IsId(rev):
return ID_RE.match(rev)
def _key(name):
parts = name.split('.')
if len(parts) < 2:
return name.lower()
parts[ 0] = parts[ 0].lower()
parts[-1] = parts[-1].lower()
return '.'.join(parts)
class GitConfig(object):
_ForUser = None
@classmethod
def ForUser(cls):
if cls._ForUser is None:
cls._ForUser = cls(file = os.path.expanduser('~/.gitconfig'))
return cls._ForUser
@classmethod
def ForRepository(cls, gitdir, defaults=None):
return cls(file = os.path.join(gitdir, 'config'),
defaults = defaults)
def __init__(self, file, defaults=None, pickleFile=None):
self.file = file
self.defaults = defaults
self._cache_dict = None
self._section_dict = None
self._remotes = {}
self._branches = {}
if pickleFile is None:
self._pickle = os.path.join(
os.path.dirname(self.file),
'.repopickle_' + os.path.basename(self.file))
else:
self._pickle = pickleFile
def ClearCache(self):
if os.path.exists(self._pickle):
os.remove(self._pickle)
self._cache_dict = None
self._section_dict = None
self._remotes = {}
self._branches = {}
def Has(self, name, include_defaults = True):
"""Return true if this configuration file has the key.
"""
if _key(name) in self._cache:
return True
if include_defaults and self.defaults:
return self.defaults.Has(name, include_defaults = True)
return False
def GetBoolean(self, name):
"""Returns a boolean from the configuration file.
None : The value was not defined, or is not a boolean.
True : The value was set to true or yes.
False: The value was set to false or no.
"""
v = self.GetString(name)
if v is None:
return None
v = v.lower()
if v in ('true', 'yes'):
return True
if v in ('false', 'no'):
return False
return None
def GetString(self, name, all=False):
"""Get the first value for a key, or None if it is not defined.
This configuration file is used first, if the key is not
defined or all = True then the defaults are also searched.
"""
try:
v = self._cache[_key(name)]
except KeyError:
if self.defaults:
return self.defaults.GetString(name, all = all)
v = []
if not all:
if v:
return v[0]
return None
r = []
r.extend(v)
if self.defaults:
r.extend(self.defaults.GetString(name, all = True))
return r
def SetString(self, name, value):
"""Set the value(s) for a key.
Only this configuration file is modified.
The supplied value should be either a string,
or a list of strings (to store multiple values).
"""
key = _key(name)
try:
old = self._cache[key]
except KeyError:
old = []
if value is None:
if old:
del self._cache[key]
self._do('--unset-all', name)
elif isinstance(value, list):
if len(value) == 0:
self.SetString(name, None)
elif len(value) == 1:
self.SetString(name, value[0])
elif old != value:
self._cache[key] = list(value)
self._do('--replace-all', name, value[0])
for i in xrange(1, len(value)):
self._do('--add', name, value[i])
elif len(old) != 1 or old[0] != value:
self._cache[key] = [value]
self._do('--replace-all', name, value)
def GetRemote(self, name):
"""Get the remote.$name.* configuration values as an object.
"""
try:
r = self._remotes[name]
except KeyError:
r = Remote(self, name)
self._remotes[r.name] = r
return r
def GetBranch(self, name):
"""Get the branch.$name.* configuration values as an object.
"""
try:
b = self._branches[name]
except KeyError:
b = Branch(self, name)
self._branches[b.name] = b
return b
def GetSubSections(self, section):
"""List all subsection names matching $section.*.*
"""
return self._sections.get(section, set())
def HasSection(self, section, subsection = ''):
"""Does at least one key in section.subsection exist?
"""
try:
return subsection in self._sections[section]
except KeyError:
return False
@property
def _sections(self):
d = self._section_dict
if d is None:
d = {}
for name in self._cache.keys():
p = name.split('.')
if 2 == len(p):
section = p[0]
subsect = ''
else:
section = p[0]
subsect = '.'.join(p[1:-1])
if section not in d:
d[section] = set()
d[section].add(subsect)
self._section_dict = d
return d
@property
def _cache(self):
if self._cache_dict is None:
self._cache_dict = self._Read()
return self._cache_dict
def _Read(self):
d = self._ReadPickle()
if d is None:
d = self._ReadGit()
self._SavePickle(d)
return d
def _ReadPickle(self):
try:
if os.path.getmtime(self._pickle) \
<= os.path.getmtime(self.file):
os.remove(self._pickle)
return None
except OSError:
return None
try:
Trace(': unpickle %s', self.file)
fd = open(self._pickle, 'rb')
try:
return cPickle.load(fd)
finally:
fd.close()
except EOFError:
os.remove(self._pickle)
return None
except IOError:
os.remove(self._pickle)
return None
except cPickle.PickleError:
os.remove(self._pickle)
return None
def _SavePickle(self, cache):
try:
fd = open(self._pickle, 'wb')
try:
cPickle.dump(cache, fd, cPickle.HIGHEST_PROTOCOL)
finally:
fd.close()
except IOError:
if os.path.exists(self._pickle):
os.remove(self._pickle)
except cPickle.PickleError:
if os.path.exists(self._pickle):
os.remove(self._pickle)
def _ReadGit(self):
"""
Read configuration data from git.
This internal method populates the GitConfig cache.
"""
c = {}
d = self._do('--null', '--list')
if d is None:
return c
for line in d.rstrip('\0').split('\0'):
if '\n' in line:
key, val = line.split('\n', 1)
else:
key = line
val = None
if key in c:
c[key].append(val)
else:
c[key] = [val]
return c
def _do(self, *args):
command = ['config', '--file', self.file]
command.extend(args)
p = GitCommand(None,
command,
capture_stdout = True,
capture_stderr = True)
if p.Wait() == 0:
return p.stdout
else:
GitError('git config %s: %s' % (str(args), p.stderr))
class RefSpec(object):
"""A Git refspec line, split into its components:
forced: True if the line starts with '+'
src: Left side of the line
dst: Right side of the line
"""
@classmethod
def FromString(cls, rs):
lhs, rhs = rs.split(':', 2)
if lhs.startswith('+'):
lhs = lhs[1:]
forced = True
else:
forced = False
return cls(forced, lhs, rhs)
def __init__(self, forced, lhs, rhs):
self.forced = forced
self.src = lhs
self.dst = rhs
def SourceMatches(self, rev):
if self.src:
if rev == self.src:
return True
if self.src.endswith('/*') and rev.startswith(self.src[:-1]):
return True
return False
def DestMatches(self, ref):
if self.dst:
if ref == self.dst:
return True
if self.dst.endswith('/*') and ref.startswith(self.dst[:-1]):
return True
return False
def MapSource(self, rev):
if self.src.endswith('/*'):
return self.dst[:-1] + rev[len(self.src) - 1:]
return self.dst
def __str__(self):
s = ''
if self.forced:
s += '+'
if self.src:
s += self.src
if self.dst:
s += ':'
s += self.dst
return s
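# Illustrative RefSpec usage (assumed values, not part of the original file):
#   spec = RefSpec.FromString('+refs/heads/*:refs/remotes/origin/*')
#   spec.forced                              # True
#   spec.SourceMatches('refs/heads/master')  # True
#   spec.MapSource('refs/heads/master')      # 'refs/remotes/origin/master'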
_master_processes = []
_master_keys = set()
_ssh_master = True
_master_keys_lock = None
def init_ssh():
"""Should be called once at the start of repo to init ssh master handling.
At the moment, all we do is to create our lock.
"""
global _master_keys_lock
assert _master_keys_lock is None, "Should only call init_ssh once"
_master_keys_lock = _threading.Lock()
def _open_ssh(host, port=None):
global _ssh_master
# Acquire the lock. This is needed to prevent opening multiple masters for
# the same host when we're running "repo sync -jN" (for N > 1) _and_ the
# manifest <remote fetch="ssh://xyz"> specifies a different host from the
# one that was passed to repo init.
_master_keys_lock.acquire()
try:
# Check to see whether we already think that the master is running; if we
# think it's already running, return right away.
if port is not None:
key = '%s:%s' % (host, port)
else:
key = host
if key in _master_keys:
return True
if not _ssh_master \
or 'GIT_SSH' in os.environ \
or sys.platform in ('win32', 'cygwin'):
# failed earlier, or cygwin ssh can't do this
#
return False
# We will make two calls to ssh; this is the common part of both calls.
command_base = ['ssh',
'-o','ControlPath %s' % ssh_sock(),
host]
if port is not None:
command_base[1:1] = ['-p',str(port)]
# Since the key wasn't in _master_keys, we think that master isn't running.
# ...but before actually starting a master, we'll double-check. This can
# be important because we can't tell that 'git@myhost.com' is the same
# as 'myhost.com' where "User git" is set up in the user's ~/.ssh/config file.
check_command = command_base + ['-O','check']
try:
Trace(': %s', ' '.join(check_command))
check_process = subprocess.Popen(check_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
check_process.communicate() # read output, but ignore it...
isnt_running = check_process.wait()
if not isnt_running:
# Our double-check found that the master _was_ in fact running. Add to
# the list of keys.
_master_keys.add(key)
return True
except Exception:
# Ignore exceptions. We will fall back to the normal command and print
# to the log there.
pass
command = command_base[:1] + \
['-M', '-N'] + \
command_base[1:]
try:
Trace(': %s', ' '.join(command))
p = subprocess.Popen(command)
except Exception, e:
_ssh_master = False
print >>sys.stderr, \
'\nwarn: cannot enable ssh control master for %s:%s\n%s' \
% (host,port, str(e))
return False
_master_processes.append(p)
_master_keys.add(key)
time.sleep(1)
return True
finally:
_master_keys_lock.release()
def close_ssh():
global _master_keys_lock
terminate_ssh_clients()
for p in _master_processes:
try:
os.kill(p.pid, SIGTERM)
p.wait()
except OSError:
pass
del _master_processes[:]
_master_keys.clear()
d = ssh_sock(create=False)
if d:
try:
os.rmdir(os.path.dirname(d))
except OSError:
pass
# We're done with the lock, so we can delete it.
_master_keys_lock = None
URI_SCP = re.compile(r'^([^@:]*@?[^:/]{1,}):')
URI_ALL = re.compile(r'^([a-z][a-z+]*)://([^@/]*@?[^/]*)/')
def _preconnect(url):
m = URI_ALL.match(url)
if m:
scheme = m.group(1)
host = m.group(2)
if ':' in host:
host, port = host.split(':')
else:
port = None
if scheme in ('ssh', 'git+ssh', 'ssh+git'):
return _open_ssh(host, port)
return False
m = URI_SCP.match(url)
if m:
host = m.group(1)
return _open_ssh(host)
return False
class Remote(object):
"""Configuration options related to a remote.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.url = self._Get('url')
self.review = self._Get('review')
self.projectname = self._Get('projectname')
self.fetch = map(lambda x: RefSpec.FromString(x),
self._Get('fetch', all=True))
self._review_protocol = None
def _InsteadOf(self):
globCfg = GitConfig.ForUser()
urlList = globCfg.GetSubSections('url')
longest = ""
longestUrl = ""
for url in urlList:
key = "url." + url + ".insteadOf"
insteadOfList = globCfg.GetString(key, all=True)
for insteadOf in insteadOfList:
if self.url.startswith(insteadOf) \
and len(insteadOf) > len(longest):
longest = insteadOf
longestUrl = url
if len(longest) == 0:
return self.url
return self.url.replace(longest, longestUrl, 1)
def PreConnectFetch(self):
connectionUrl = self._InsteadOf()
return _preconnect(connectionUrl)
@property
def ReviewProtocol(self):
if self._review_protocol is None:
if self.review is None:
return None
u = self.review
if not u.startswith('http:') and not u.startswith('https:'):
u = 'http://%s' % u
if u.endswith('/Gerrit'):
u = u[:len(u) - len('/Gerrit')]
if not u.endswith('/ssh_info'):
if not u.endswith('/'):
u += '/'
u += 'ssh_info'
if u in REVIEW_CACHE:
info = REVIEW_CACHE[u]
self._review_protocol = info[0]
self._review_host = info[1]
self._review_port = info[2]
else:
try:
info = urlopen(u).read()
if info == 'NOT_AVAILABLE':
raise UploadError('%s: SSH disabled' % self.review)
if '<' in info:
# Assume the server gave us some sort of HTML
# response back, like maybe a login page.
#
raise UploadError('%s: Cannot parse response' % u)
self._review_protocol = 'ssh'
self._review_host = info.split(" ")[0]
self._review_port = info.split(" ")[1]
except urllib2.URLError, e:
raise UploadError('%s: %s' % (self.review, e.reason[1]))
except HTTPError, e:
if e.code == 404:
self._review_protocol = 'http-post'
self._review_host = None
self._review_port = None
else:
raise UploadError('Upload over ssh unavailable')
REVIEW_CACHE[u] = (
self._review_protocol,
self._review_host,
self._review_port)
return self._review_protocol
def SshReviewUrl(self, userEmail):
if self.ReviewProtocol != 'ssh':
return None
username = self._config.GetString('review.%s.username' % self.review)
if username is None:
username = userEmail.split("@")[0]
return 'ssh://%s@%s:%s/%s' % (
username,
self._review_host,
self._review_port,
self.projectname)
def ToLocal(self, rev):
"""Convert a remote revision string to something we have locally.
"""
if IsId(rev):
return rev
if rev.startswith(R_TAGS):
return rev
if not rev.startswith('refs/'):
rev = R_HEADS + rev
for spec in self.fetch:
if spec.SourceMatches(rev):
return spec.MapSource(rev)
raise GitError('remote %s does not have %s' % (self.name, rev))
def WritesTo(self, ref):
"""True if the remote stores to the tracking ref.
"""
for spec in self.fetch:
if spec.DestMatches(ref):
return True
return False
def ResetFetch(self, mirror=False):
"""Set the fetch refspec to its default value.
"""
if mirror:
dst = 'refs/heads/*'
else:
dst = 'refs/remotes/%s/*' % self.name
self.fetch = [RefSpec(True, 'refs/heads/*', dst)]
def Save(self):
"""Save this remote to the configuration.
"""
self._Set('url', self.url)
self._Set('review', self.review)
self._Set('projectname', self.projectname)
self._Set('fetch', map(lambda x: str(x), self.fetch))
def _Set(self, key, value):
key = 'remote.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all=False):
key = 'remote.%s.%s' % (self.name, key)
return self._config.GetString(key, all = all)
class Branch(object):
"""Configuration options related to a single branch.
"""
def __init__(self, config, name):
self._config = config
self.name = name
self.merge = self._Get('merge')
r = self._Get('remote')
if r:
self.remote = self._config.GetRemote(r)
else:
self.remote = None
@property
def LocalMerge(self):
"""Convert the merge spec to a local name.
"""
if self.remote and self.merge:
return self.remote.ToLocal(self.merge)
return None
def Save(self):
"""Save this branch back into the configuration.
"""
if self._config.HasSection('branch', self.name):
if self.remote:
self._Set('remote', self.remote.name)
else:
self._Set('remote', None)
self._Set('merge', self.merge)
else:
fd = open(self._config.file, 'ab')
try:
fd.write('[branch "%s"]\n' % self.name)
if self.remote:
fd.write('\tremote = %s\n' % self.remote.name)
if self.merge:
fd.write('\tmerge = %s\n' % self.merge)
finally:
fd.close()
def _Set(self, key, value):
key = 'branch.%s.%s' % (self.name, key)
return self._config.SetString(key, value)
def _Get(self, key, all=False):
key = 'branch.%s.%s' % (self.name, key)
return self._config.GetString(key, all = all)
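# Illustrative sketch only (not part of the original file): RefSpec is the same
# class used for Remote.fetch above; this mirrors what Remote.ToLocal() does for
# a branch name once it has been expanded to 'refs/heads/...'.
def _example_to_local_mapping():
  spec = RefSpec(True, 'refs/heads/*', 'refs/remotes/origin/*')
  if spec.SourceMatches('refs/heads/master'):
    return spec.MapSource('refs/heads/master')  # -> 'refs/remotes/origin/master'
  return None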
| [] | [] | [] | [] | [] | python | 0 | 0 | |
2017/assignments/chatbot/chatbot.py
|
""" A neural chatbot using sequence to sequence model with
attentional decoder.
This is based on Google Translate Tensorflow model
https://github.com/tensorflow/models/blob/master/tutorials/rnn/translate/
Sequence to sequence model by Cho et al.(2014)
Created by Chip Huyen as the starter code for assignment 3,
class CS 20SI: "TensorFlow for Deep Learning Research"
cs20si.stanford.edu
This file contains the code to run the model.
See readme.md for instruction on how to run the starter code.
"""
from __future__ import division
from __future__ import print_function
import argparse
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import random
import sys
import time
import numpy as np
import tensorflow as tf
from model import ChatBotModel
import config
import data
def _get_random_bucket(train_buckets_scale):
""" Get a random bucket from which to choose a training sample """
rand = random.random()
return min([i for i in range(len(train_buckets_scale))
if train_buckets_scale[i] > rand])
def _assert_lengths(encoder_size, decoder_size, encoder_inputs, decoder_inputs, decoder_masks):
""" Assert that the encoder inputs, decoder inputs, and decoder masks are
of the expected lengths """
if len(encoder_inputs) != encoder_size:
raise ValueError("Encoder length must be equal to the one in bucket,"
" %d != %d." % (len(encoder_inputs), encoder_size))
if len(decoder_inputs) != decoder_size:
raise ValueError("Decoder length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_inputs), decoder_size))
if len(decoder_masks) != decoder_size:
raise ValueError("Weights length must be equal to the one in bucket,"
" %d != %d." % (len(decoder_masks), decoder_size))
def run_step(sess, model, encoder_inputs, decoder_inputs, decoder_masks, bucket_id, forward_only):
""" Run one step in training.
@forward_only: boolean value to decide whether a backward path should be created
forward_only is set to True when you just want to evaluate on the test set,
    or when you want the bot to be in chat mode. """
encoder_size, decoder_size = config.BUCKETS[bucket_id]
_assert_lengths(encoder_size, decoder_size, encoder_inputs, decoder_inputs, decoder_masks)
# input feed: encoder inputs, decoder inputs, target_weights, as provided.
input_feed = {}
for step in range(encoder_size):
input_feed[model.encoder_inputs[step].name] = encoder_inputs[step]
for step in range(decoder_size):
input_feed[model.decoder_inputs[step].name] = decoder_inputs[step]
input_feed[model.decoder_masks[step].name] = decoder_masks[step]
last_target = model.decoder_inputs[decoder_size].name
input_feed[last_target] = np.zeros([model.batch_size], dtype=np.int32)
# output feed: depends on whether we do a backward step or not.
if not forward_only:
output_feed = [model.train_ops[bucket_id], # update op that does SGD.
model.gradient_norms[bucket_id], # gradient norm.
model.losses[bucket_id]] # loss for this batch.
else:
output_feed = [model.losses[bucket_id]] # loss for this batch.
for step in range(decoder_size): # output logits.
output_feed.append(model.outputs[bucket_id][step])
outputs = sess.run(output_feed, input_feed)
if not forward_only:
return outputs[1], outputs[2], None # Gradient norm, loss, no outputs.
else:
return None, outputs[0], outputs[1:] # No gradient norm, loss, outputs.
def _get_buckets():
""" Load the dataset into buckets based on their lengths.
    train_buckets_scale is the list of cumulative proportions that'll help us
choose a random bucket later on.
"""
test_buckets = data.load_data('test_ids.enc', 'test_ids.dec')
data_buckets = data.load_data('train_ids.enc', 'train_ids.dec')
train_bucket_sizes = [len(data_buckets[b]) for b in range(len(config.BUCKETS))]
print("Number of samples in each bucket:\n", train_bucket_sizes)
train_total_size = sum(train_bucket_sizes)
# list of increasing numbers from 0 to 1 that we'll use to select a bucket.
train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
for i in range(len(train_bucket_sizes))]
print("Bucket scale:\n", train_buckets_scale)
return test_buckets, data_buckets, train_buckets_scale
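# Illustrative sketch (not part of the starter code): with three hypothetical
# buckets holding 50, 30 and 20 samples, the cumulative scale computed above is
# [0.5, 0.8, 1.0], so _get_random_bucket() picks bucket 0 with probability 0.5,
# bucket 1 with probability 0.3 and bucket 2 with probability 0.2.
def _example_bucket_scale():
    sizes = [50, 30, 20]
    total = sum(sizes)
    return [sum(sizes[:i + 1]) / total for i in range(len(sizes))]  # [0.5, 0.8, 1.0]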
def _get_skip_step(iteration):
""" How many steps should the model train before it saves all the weights. """
if iteration < 100:
return 30
return 100
def _check_restore_parameters(sess, saver):
""" Restore the previously trained parameters if there are any. """
ckpt = tf.train.get_checkpoint_state(os.path.dirname(config.CPT_PATH + '/checkpoint'))
if ckpt and ckpt.model_checkpoint_path:
print("Loading parameters for the Chatbot")
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print("Initializing fresh parameters for the Chatbot")
def _eval_test_set(sess, model, test_buckets):
""" Evaluate on the test set. """
for bucket_id in range(len(config.BUCKETS)):
if len(test_buckets[bucket_id]) == 0:
print(" Test: empty bucket %d" % (bucket_id))
continue
start = time.time()
encoder_inputs, decoder_inputs, decoder_masks = data.get_batch(test_buckets[bucket_id],
bucket_id,
batch_size=config.BATCH_SIZE)
_, step_loss, _ = run_step(sess, model, encoder_inputs, decoder_inputs,
decoder_masks, bucket_id, True)
print('Test bucket {}: loss {}, time {}'.format(bucket_id, step_loss, time.time() - start))
def train():
""" Train the bot """
test_buckets, data_buckets, train_buckets_scale = _get_buckets()
    # in train mode, we need to create the backward path, so forward_only is False
model = ChatBotModel(False, config.BATCH_SIZE)
model.build_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
print('Running session')
sess.run(tf.global_variables_initializer())
_check_restore_parameters(sess, saver)
iteration = model.global_step.eval()
total_loss = 0
while True:
skip_step = _get_skip_step(iteration)
bucket_id = _get_random_bucket(train_buckets_scale)
encoder_inputs, decoder_inputs, decoder_masks = data.get_batch(data_buckets[bucket_id],
bucket_id,
batch_size=config.BATCH_SIZE)
start = time.time()
_, step_loss, _ = run_step(sess, model, encoder_inputs, decoder_inputs, decoder_masks, bucket_id, False)
total_loss += step_loss
iteration += 1
if iteration % skip_step == 0:
print('Iter {}: loss {}, time {}'.format(iteration, total_loss/skip_step, time.time() - start))
start = time.time()
total_loss = 0
saver.save(sess, os.path.join(config.CPT_PATH, 'chatbot'), global_step=model.global_step)
if iteration % (10 * skip_step) == 0:
# Run evals on development set and print their loss
_eval_test_set(sess, model, test_buckets)
start = time.time()
sys.stdout.flush()
def _get_user_input():
""" Get user's input, which will be transformed into encoder input later """
print("> ", end="")
sys.stdout.flush()
return sys.stdin.readline()
def _find_right_bucket(length):
""" Find the proper bucket for an encoder input based on its length """
return min([b for b in range(len(config.BUCKETS))
if config.BUCKETS[b][0] >= length])
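# Illustrative sketch (not part of the starter code): with hypothetical buckets
# [(8, 10), (12, 14), (16, 19)], a 9-token input maps to bucket 1 because 12 is
# the smallest encoder size that can hold it.
def _example_find_right_bucket():
    buckets = [(8, 10), (12, 14), (16, 19)]  # (encoder_size, decoder_size)
    length = 9
    return min(b for b in range(len(buckets)) if buckets[b][0] >= length)  # -> 1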
def _construct_response(output_logits, inv_dec_vocab):
""" Construct a response to the user's encoder input.
@output_logits: the outputs from sequence to sequence wrapper.
output_logits is decoder_size np array, each of dim 1 x DEC_VOCAB
This is a greedy decoder - outputs are just argmaxes of output_logits.
"""
print(output_logits[0])
outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
# If there is an EOS symbol in outputs, cut them at that point.
if config.EOS_ID in outputs:
outputs = outputs[:outputs.index(config.EOS_ID)]
# Print out sentence corresponding to outputs.
return " ".join([tf.compat.as_str(inv_dec_vocab[output]) for output in outputs])
def chat():
""" in test mode, we don't to create the backward path
"""
_, enc_vocab = data.load_vocab(os.path.join(config.PROCESSED_PATH, 'vocab.enc'))
inv_dec_vocab, _ = data.load_vocab(os.path.join(config.PROCESSED_PATH, 'vocab.dec'))
model = ChatBotModel(True, batch_size=1)
model.build_graph()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
_check_restore_parameters(sess, saver)
output_file = open(os.path.join(config.PROCESSED_PATH, config.OUTPUT_FILE), 'a+')
# Decode from standard input.
max_length = config.BUCKETS[-1][0]
print('Welcome to TensorBro. Say something. Enter to exit. Max length is', max_length)
while True:
line = _get_user_input()
if len(line) > 0 and line[-1] == '\n':
line = line[:-1]
if line == '':
break
output_file.write('HUMAN ++++ ' + line + '\n')
# Get token-ids for the input sentence.
token_ids = data.sentence2id(enc_vocab, str(line))
if (len(token_ids) > max_length):
print('Max length I can handle is:', max_length)
line = _get_user_input()
continue
# Which bucket does it belong to?
bucket_id = _find_right_bucket(len(token_ids))
# Get a 1-element batch to feed the sentence to the model.
encoder_inputs, decoder_inputs, decoder_masks = data.get_batch([(token_ids, [])],
bucket_id,
batch_size=1)
# Get output logits for the sentence.
_, _, output_logits = run_step(sess, model, encoder_inputs, decoder_inputs,
decoder_masks, bucket_id, True)
response = _construct_response(output_logits, inv_dec_vocab)
print(response)
output_file.write('BOT ++++ ' + response + '\n')
output_file.write('=============================================\n')
output_file.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices={'train', 'chat'},
default='train', help="mode. if not specified, it's in the train mode")
args = parser.parse_args()
if not os.path.isdir(config.PROCESSED_PATH):
data.prepare_raw_data()
data.process_data()
print('Data ready!')
# create checkpoints folder if there isn't one already
data.make_dir(config.CPT_PATH)
if args.mode == 'train':
train()
elif args.mode == 'chat':
chat()
if __name__ == '__main__':
main()
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
pkg/rootless/rootless_linux.go
|
// +build linux,cgo
package rootless
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
gosignal "os/signal"
"os/user"
"runtime"
"strconv"
"sync"
"unsafe"
"github.com/containers/podman/v3/pkg/errorhandling"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/unshare"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
/*
#cgo remoteclient CFLAGS: -Wall -Werror -DDISABLE_JOIN_SHORTCUT
#include <stdlib.h>
#include <sys/types.h>
extern uid_t rootless_uid();
extern uid_t rootless_gid();
extern int reexec_in_user_namespace(int ready, char *pause_pid_file_path, char *file_to_read, int fd);
extern int reexec_in_user_namespace_wait(int pid, int options);
extern int reexec_userns_join(int pid, char *pause_pid_file_path);
extern int is_fd_inherited(int fd);
*/
import "C"
const (
numSig = 65 // max number of signals
)
func runInUser() error {
return os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done")
}
var (
isRootlessOnce sync.Once
isRootless bool
)
// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
isRootlessOnce.Do(func() {
rootlessUIDInit := int(C.rootless_uid())
rootlessGIDInit := int(C.rootless_gid())
if rootlessUIDInit != 0 {
// This happens if we joined the user+mount namespace as part of
if err := os.Setenv("_CONTAINERS_USERNS_CONFIGURED", "done"); err != nil {
logrus.Errorf("failed to set environment variable %s as %s", "_CONTAINERS_USERNS_CONFIGURED", "done")
}
if err := os.Setenv("_CONTAINERS_ROOTLESS_UID", fmt.Sprintf("%d", rootlessUIDInit)); err != nil {
logrus.Errorf("failed to set environment variable %s as %d", "_CONTAINERS_ROOTLESS_UID", rootlessUIDInit)
}
if err := os.Setenv("_CONTAINERS_ROOTLESS_GID", fmt.Sprintf("%d", rootlessGIDInit)); err != nil {
logrus.Errorf("failed to set environment variable %s as %d", "_CONTAINERS_ROOTLESS_GID", rootlessGIDInit)
}
}
isRootless = os.Geteuid() != 0 || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != ""
if !isRootless {
hasCapSysAdmin, err := unshare.HasCapSysAdmin()
if err != nil {
logrus.Warnf("failed to read CAP_SYS_ADMIN presence for the current process")
}
if err == nil && !hasCapSysAdmin {
isRootless = true
}
}
})
return isRootless
}
// GetRootlessUID returns the UID of the user in the parent userNS
func GetRootlessUID() int {
uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
if uidEnv != "" {
u, _ := strconv.Atoi(uidEnv)
return u
}
return os.Geteuid()
}
// GetRootlessGID returns the GID of the user in the parent userNS
func GetRootlessGID() int {
gidEnv := os.Getenv("_CONTAINERS_ROOTLESS_GID")
if gidEnv != "" {
u, _ := strconv.Atoi(gidEnv)
return u
}
/* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. */
uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
if uidEnv != "" {
u, _ := strconv.Atoi(uidEnv)
return u
}
return os.Getegid()
}
func tryMappingTool(uid bool, pid int, hostID int, mappings []idtools.IDMap) error {
var tool = "newuidmap"
if !uid {
tool = "newgidmap"
}
path, err := exec.LookPath(tool)
if err != nil {
return errors.Wrapf(err, "command required for rootless mode with multiple IDs")
}
appendTriplet := func(l []string, a, b, c int) []string {
return append(l, strconv.Itoa(a), strconv.Itoa(b), strconv.Itoa(c))
}
args := []string{path, fmt.Sprintf("%d", pid)}
args = appendTriplet(args, 0, hostID, 1)
for _, i := range mappings {
if hostID >= i.HostID && hostID < i.HostID+i.Size {
what := "UID"
where := "/etc/subuid"
if !uid {
what = "GID"
where = "/etc/subgid"
}
return errors.Errorf("invalid configuration: the specified mapping %d:%d in %q includes the user %s", i.HostID, i.Size, where, what)
}
args = appendTriplet(args, i.ContainerID+1, i.HostID, i.Size)
}
cmd := exec.Cmd{
Path: path,
Args: args,
}
if output, err := cmd.CombinedOutput(); err != nil {
logrus.Debugf("error from %s: %s", tool, output)
return errors.Wrapf(err, "cannot setup namespace using %s", tool)
}
return nil
}
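// exampleNewUIDMapArgs is an illustrative sketch only (not part of the original
// file): it shows the argv that tryMappingTool builds for newuidmap when the
// unprivileged user owns host UID 1000 and /etc/subuid grants the hypothetical
// subordinate range 100000:65536, i.e. "newuidmap <pid> 0 1000 1 1 100000 65536".
func exampleNewUIDMapArgs(pid int) []string {
	appendTriplet := func(l []string, a, b, c int) []string {
		return append(l, strconv.Itoa(a), strconv.Itoa(b), strconv.Itoa(c))
	}
	args := []string{"newuidmap", strconv.Itoa(pid)}
	args = appendTriplet(args, 0, 1000, 1)       // container UID 0 -> host UID of the user
	args = appendTriplet(args, 1, 100000, 65536) // container UIDs 1.. -> subordinate range
	return args
}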
// joinUserAndMountNS re-execs podman in a new userNS and joins the user and mount
// namespace of the specified PID without looking up its parent. Useful to join the
// conmon process directly.
func joinUserAndMountNS(pid uint, pausePid string) (bool, int, error) {
hasCapSysAdmin, err := unshare.HasCapSysAdmin()
if err != nil {
return false, 0, err
}
if hasCapSysAdmin || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" {
return false, 0, nil
}
cPausePid := C.CString(pausePid)
defer C.free(unsafe.Pointer(cPausePid))
pidC := C.reexec_userns_join(C.int(pid), cPausePid)
if int(pidC) < 0 {
return false, -1, errors.Errorf("cannot re-exec process")
}
ret := C.reexec_in_user_namespace_wait(pidC, 0)
if ret < 0 {
return false, -1, errors.New("error waiting for the re-exec process")
}
return true, int(ret), nil
}
// GetConfiguredMappings returns the additional IDs configured for the current user.
func GetConfiguredMappings() ([]idtools.IDMap, []idtools.IDMap, error) {
var uids, gids []idtools.IDMap
username := os.Getenv("USER")
if username == "" {
var id string
if os.Geteuid() == 0 {
id = strconv.Itoa(GetRootlessUID())
} else {
id = strconv.Itoa(os.Geteuid())
}
userID, err := user.LookupId(id)
if err == nil {
username = userID.Username
}
}
mappings, err := idtools.NewIDMappings(username, username)
if err != nil {
logLevel := logrus.ErrorLevel
if os.Geteuid() == 0 && GetRootlessUID() == 0 {
logLevel = logrus.DebugLevel
}
logrus.StandardLogger().Logf(logLevel, "cannot find UID/GID for user %s: %v - check rootless mode in man pages.", username, err)
} else {
uids = mappings.UIDs()
gids = mappings.GIDs()
}
return uids, gids, nil
}
func copyMappings(from, to string) error {
content, err := ioutil.ReadFile(from)
if err != nil {
return err
}
// Both runc and crun check whether the current process is in a user namespace
// by looking up 4294967295 in /proc/self/uid_map. If the mappings would be
// copied as they are, the check in the OCI runtimes would fail. So just split
// it in two different ranges.
if bytes.Contains(content, []byte("4294967295")) {
content = []byte("0 0 1\n1 1 4294967294\n")
}
return ioutil.WriteFile(to, content, 0600)
}
func becomeRootInUserNS(pausePid, fileToRead string, fileOutput *os.File) (_ bool, _ int, retErr error) {
hasCapSysAdmin, err := unshare.HasCapSysAdmin()
if err != nil {
return false, 0, err
}
if hasCapSysAdmin || os.Getenv("_CONTAINERS_USERNS_CONFIGURED") != "" {
if os.Getenv("_CONTAINERS_USERNS_CONFIGURED") == "init" {
return false, 0, runInUser()
}
return false, 0, nil
}
cPausePid := C.CString(pausePid)
defer C.free(unsafe.Pointer(cPausePid))
cFileToRead := C.CString(fileToRead)
defer C.free(unsafe.Pointer(cFileToRead))
var fileOutputFD C.int
if fileOutput != nil {
fileOutputFD = C.int(fileOutput.Fd())
}
runtime.LockOSThread()
defer runtime.UnlockOSThread()
fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_DGRAM, 0)
if err != nil {
return false, -1, err
}
r, w := os.NewFile(uintptr(fds[0]), "sync host"), os.NewFile(uintptr(fds[1]), "sync child")
var pid int
defer errorhandling.CloseQuiet(r)
defer errorhandling.CloseQuiet(w)
defer func() {
toWrite := []byte("0")
if retErr != nil {
toWrite = []byte("1")
}
if _, err := w.Write(toWrite); err != nil {
logrus.Errorf("failed to write byte 0: %q", err)
}
if retErr != nil && pid > 0 {
if err := unix.Kill(pid, unix.SIGKILL); err != nil {
if err != unix.ESRCH {
logrus.Errorf("failed to cleanup process %d: %v", pid, err)
}
}
C.reexec_in_user_namespace_wait(C.int(pid), 0)
}
}()
pidC := C.reexec_in_user_namespace(C.int(r.Fd()), cPausePid, cFileToRead, fileOutputFD)
pid = int(pidC)
if pid < 0 {
return false, -1, errors.Errorf("cannot re-exec process")
}
uids, gids, err := GetConfiguredMappings()
if err != nil {
return false, -1, err
}
uidMap := fmt.Sprintf("/proc/%d/uid_map", pid)
gidMap := fmt.Sprintf("/proc/%d/gid_map", pid)
uidsMapped := false
if err := copyMappings("/proc/self/uid_map", uidMap); err == nil {
uidsMapped = true
}
if uids != nil && !uidsMapped {
err := tryMappingTool(true, pid, os.Geteuid(), uids)
// If some mappings were specified, do not ignore the error
if err != nil && len(uids) > 0 {
return false, -1, err
}
uidsMapped = err == nil
}
if !uidsMapped {
logrus.Warnf("using rootless single mapping into the namespace. This might break some images. Check /etc/subuid and /etc/subgid for adding sub*ids")
setgroups := fmt.Sprintf("/proc/%d/setgroups", pid)
err = ioutil.WriteFile(setgroups, []byte("deny\n"), 0666)
if err != nil {
return false, -1, errors.Wrapf(err, "cannot write setgroups file")
}
logrus.Debugf("write setgroups file exited with 0")
err = ioutil.WriteFile(uidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Geteuid())), 0666)
if err != nil {
return false, -1, errors.Wrapf(err, "cannot write uid_map")
}
logrus.Debugf("write uid_map exited with 0")
}
gidsMapped := false
if err := copyMappings("/proc/self/gid_map", gidMap); err == nil {
gidsMapped = true
}
if gids != nil && !gidsMapped {
err := tryMappingTool(false, pid, os.Getegid(), gids)
// If some mappings were specified, do not ignore the error
if err != nil && len(gids) > 0 {
return false, -1, err
}
gidsMapped = err == nil
}
if !gidsMapped {
err = ioutil.WriteFile(gidMap, []byte(fmt.Sprintf("%d %d 1\n", 0, os.Getegid())), 0666)
if err != nil {
return false, -1, errors.Wrapf(err, "cannot write gid_map")
}
}
_, err = w.Write([]byte("0"))
if err != nil {
return false, -1, errors.Wrapf(err, "write to sync pipe")
}
b := make([]byte, 1)
_, err = w.Read(b)
if err != nil {
return false, -1, errors.Wrapf(err, "read from sync pipe")
}
if fileOutput != nil {
ret := C.reexec_in_user_namespace_wait(pidC, 0)
if ret < 0 {
return false, -1, errors.New("error waiting for the re-exec process")
}
return true, 0, nil
}
if b[0] == '2' {
// We have lost the race for writing the PID file, as probably another
// process created a namespace and wrote the PID.
// Try to join it.
data, err := ioutil.ReadFile(pausePid)
if err == nil {
pid, err := strconv.ParseUint(string(data), 10, 0)
if err == nil {
return joinUserAndMountNS(uint(pid), "")
}
}
return false, -1, errors.Wrapf(err, "error setting up the process")
}
if b[0] != '0' {
return false, -1, errors.Wrapf(err, "error setting up the process")
}
c := make(chan os.Signal, 1)
signals := []os.Signal{}
for sig := 0; sig < numSig; sig++ {
if sig == int(unix.SIGTSTP) {
continue
}
signals = append(signals, unix.Signal(sig))
}
gosignal.Notify(c, signals...)
defer gosignal.Reset()
go func() {
for s := range c {
if s == unix.SIGCHLD || s == unix.SIGPIPE {
continue
}
if err := unix.Kill(int(pidC), s.(unix.Signal)); err != nil {
if err != unix.ESRCH {
logrus.Errorf("failed to propagate signal to child process %d: %v", int(pidC), err)
}
}
}
}()
ret := C.reexec_in_user_namespace_wait(pidC, 0)
if ret < 0 {
return false, -1, errors.New("error waiting for the re-exec process")
}
return true, int(ret), nil
}
// BecomeRootInUserNS re-execs podman in a new userNS. It returns whether podman was re-executed
// into a new user namespace and the return code from the re-executed podman process.
// If podman was re-executed the caller needs to propagate the error code returned by the child
// process.
func BecomeRootInUserNS(pausePid string) (bool, int, error) {
return becomeRootInUserNS(pausePid, "", nil)
}
// TryJoinFromFilePaths attempts to join the namespaces of the pid files in paths.
// This is useful when there are already running containers and we
// don't have a pause process yet. We can use the paths to the conmon
// processes to attempt joining their namespaces.
// If needNewNamespace is set, the file is read from a temporary user
// namespace, this is useful for containers that are running with a
// different uidmap and the unprivileged user has no way to read the
// file owned by the root in the container.
func TryJoinFromFilePaths(pausePidPath string, needNewNamespace bool, paths []string) (bool, int, error) {
if len(paths) == 0 {
return BecomeRootInUserNS(pausePidPath)
}
var lastErr error
var pausePid int
foundProcess := false
for _, path := range paths {
if !needNewNamespace {
data, err := ioutil.ReadFile(path)
if err != nil {
lastErr = err
continue
}
pausePid, err = strconv.Atoi(string(data))
if err != nil {
lastErr = errors.Wrapf(err, "cannot parse file %s", path)
continue
}
lastErr = nil
break
} else {
r, w, err := os.Pipe()
if err != nil {
lastErr = err
continue
}
defer errorhandling.CloseQuiet(r)
if _, _, err := becomeRootInUserNS("", path, w); err != nil {
w.Close()
lastErr = err
continue
}
if err := w.Close(); err != nil {
return false, 0, err
}
defer func() {
C.reexec_in_user_namespace_wait(-1, 0)
}()
b := make([]byte, 32)
n, err := r.Read(b)
if err != nil {
lastErr = errors.Wrapf(err, "cannot read %s\n", path)
continue
}
pausePid, err = strconv.Atoi(string(b[:n]))
if err == nil && unix.Kill(pausePid, 0) == nil {
foundProcess = true
lastErr = nil
break
}
}
}
if !foundProcess && pausePidPath != "" {
return BecomeRootInUserNS(pausePidPath)
}
if lastErr != nil {
return false, 0, lastErr
}
return joinUserAndMountNS(uint(pausePid), pausePidPath)
}
// ReadMappingsProc parses and returns the ID mappings at the specified path.
func ReadMappingsProc(path string) ([]idtools.IDMap, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
mappings := []idtools.IDMap{}
buf := bufio.NewReader(file)
for {
line, _, err := buf.ReadLine()
if err != nil {
if err == io.EOF {
return mappings, nil
}
return nil, errors.Wrapf(err, "cannot read line from %s", path)
}
if line == nil {
return mappings, nil
}
containerID, hostID, size := 0, 0, 0
if _, err := fmt.Sscanf(string(line), "%d %d %d", &containerID, &hostID, &size); err != nil {
return nil, errors.Wrapf(err, "cannot parse %s", string(line))
}
mappings = append(mappings, idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size})
}
}
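// exampleParseMappingLine is an illustrative sketch only (not part of the
// original file): it parses a single /proc/<pid>/uid_map style line exactly the
// way ReadMappingsProc does, so the hypothetical line "1 100000 65536" becomes
// idtools.IDMap{ContainerID: 1, HostID: 100000, Size: 65536}.
func exampleParseMappingLine(line string) (idtools.IDMap, error) {
	containerID, hostID, size := 0, 0, 0
	if _, err := fmt.Sscanf(line, "%d %d %d", &containerID, &hostID, &size); err != nil {
		return idtools.IDMap{}, err
	}
	return idtools.IDMap{ContainerID: containerID, HostID: hostID, Size: size}, nil
}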
func matches(id int, configuredIDs []idtools.IDMap, currentIDs []idtools.IDMap) bool {
// The first mapping is the host user, handle it separately.
if currentIDs[0].HostID != id || currentIDs[0].Size != 1 {
return false
}
currentIDs = currentIDs[1:]
if len(currentIDs) != len(configuredIDs) {
return false
}
// It is fine to iterate sequentially as both slices are sorted.
for i := range currentIDs {
if currentIDs[i].HostID != configuredIDs[i].HostID {
return false
}
if currentIDs[i].Size != configuredIDs[i].Size {
return false
}
}
return true
}
// ConfigurationMatches checks whether the additional uids/gids configured for the user
// match the current user namespace.
func ConfigurationMatches() (bool, error) {
if !IsRootless() || os.Geteuid() != 0 {
return true, nil
}
uids, gids, err := GetConfiguredMappings()
if err != nil {
return false, err
}
currentUIDs, err := ReadMappingsProc("/proc/self/uid_map")
if err != nil {
return false, err
}
if !matches(GetRootlessUID(), uids, currentUIDs) {
return false, err
}
currentGIDs, err := ReadMappingsProc("/proc/self/gid_map")
if err != nil {
return false, err
}
return matches(GetRootlessGID(), gids, currentGIDs), nil
}
// IsFdInherited checks whether the fd is opened and valid to use
func IsFdInherited(fd int) bool {
return int(C.is_fd_inherited(C.int(fd))) > 0
}
| ["\"_CONTAINERS_USERNS_CONFIGURED\"", "\"_CONTAINERS_ROOTLESS_UID\"", "\"_CONTAINERS_ROOTLESS_GID\"", "\"_CONTAINERS_ROOTLESS_UID\"", "\"_CONTAINERS_USERNS_CONFIGURED\"", "\"USER\"", "\"_CONTAINERS_USERNS_CONFIGURED\"", "\"_CONTAINERS_USERNS_CONFIGURED\""] | [] | ["_CONTAINERS_ROOTLESS_UID", "_CONTAINERS_USERNS_CONFIGURED", "USER", "_CONTAINERS_ROOTLESS_GID"] | [] | ["_CONTAINERS_ROOTLESS_UID", "_CONTAINERS_USERNS_CONFIGURED", "USER", "_CONTAINERS_ROOTLESS_GID"] | go | 4 | 0 | |
integration_files/SweRV_EH1/google_riscv_dv/lm_run.py
|
"""
Copyright 2019 Google LLC
Copyright 2020 Lampro Mellon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Regression script for RISC-V random instruction generator
"""
import argparse
import os
import random
import re
import sys
import logging
from scripts.lib import *
from scripts.spike_log_to_trace_csv import *
from scripts.ovpsim_log_to_trace_csv import *
from scripts.whisper_log_trace_csv import *
from scripts.sail_log_to_trace_csv import *
from scripts.instr_trace_compare import *
from types import SimpleNamespace
from pathlib import Path
LOGGER = logging.getLogger()
class SeedGen:
'''An object that will generate a pseudo-random seed for test iterations'''
def __init__(self, start_seed, fixed_seed, seed_yaml):
# These checks are performed with proper error messages at argument parsing
# time, but it can't hurt to do a belt-and-braces check here too.
assert fixed_seed is None or start_seed is None
self.fixed_seed = fixed_seed
self.start_seed = start_seed
self.rerun_seed = {} if seed_yaml is None else read_yaml(seed_yaml)
def get(self, test_id, test_iter):
'''Get the seed to use for the given test and iteration'''
if test_id in self.rerun_seed:
# Note that test_id includes the iteration index (well, the batch index,
# at any rate), so this makes sense even if test_iter > 0.
return self.rerun_seed[test_id]
if self.fixed_seed is not None:
# Checked at argument parsing time
assert test_iter == 0
return self.fixed_seed
if self.start_seed is not None:
return self.start_seed + test_iter
# If the user didn't specify seeds in some way, we generate a random seed
# every time
return random.getrandbits(31)
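# Illustrative sketch only (not part of the original script): SeedGen.get()
# prefers a seed recorded for the exact test_id in --seed_yaml, then a fixed
# --seed, then --start_seed + iteration, and otherwise draws a random 31-bit
# value. The test name below is hypothetical.
def _example_seed_selection():
  seed_gen = SeedGen(start_seed=100, fixed_seed=None, seed_yaml=None)
  return seed_gen.get('riscv_arithmetic_basic_test_0', 2)  # -> 102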
def get_generator_cmd(simulator, simulator_yaml, cov, exp, debug_cmd):
""" Setup the compile and simulation command for the generator
Args:
simulator : RTL simulator used to run instruction generator
simulator_yaml : RTL simulator configuration file in YAML format
cov : Enable functional coverage
exp : Use experimental version
debug_cmd : Produce the debug cmd log without running
Returns:
compile_cmd : RTL simulator command to compile the instruction generator
sim_cmd : RTL simulator command to run the instruction generator
"""
logging.info("Processing simulator setup file : %s" % simulator_yaml)
yaml_data = read_yaml(simulator_yaml)
# Search for matched simulator
for entry in yaml_data:
if entry['tool'] == simulator:
logging.info("Found matching simulator: %s" % entry['tool'])
compile_spec = entry['compile']
compile_cmd = compile_spec['cmd']
for i in range(len(compile_cmd)):
if ('cov_opts' in compile_spec) and cov:
compile_cmd[i] = re.sub('<cov_opts>', compile_spec['cov_opts'].rstrip(), compile_cmd[i])
else:
compile_cmd[i] = re.sub('<cov_opts>', '', compile_cmd[i])
if exp:
compile_cmd[i] += " +define+EXPERIMENTAL "
sim_cmd = entry['sim']['cmd']
if ('cov_opts' in entry['sim']) and cov:
sim_cmd = re.sub('<cov_opts>', entry['sim']['cov_opts'].rstrip(), sim_cmd)
else:
sim_cmd = re.sub('<cov_opts>', '', sim_cmd)
if 'env_var' in entry:
for env_var in entry['env_var'].split(','):
for i in range(len(compile_cmd)):
compile_cmd[i] = re.sub("<"+env_var+">", get_env_var(env_var, debug_cmd = debug_cmd),
compile_cmd[i])
sim_cmd = re.sub("<"+env_var+">", get_env_var(env_var, debug_cmd = debug_cmd), sim_cmd)
return compile_cmd, sim_cmd
logging.error("Cannot find RTL simulator %0s" % simulator)
sys.exit(RET_FAIL)
def parse_iss_yaml(iss, iss_yaml, isa, setting_dir, debug_cmd):
"""Parse ISS YAML to get the simulation command
Args:
iss : target ISS used to look up in ISS YAML
iss_yaml : ISS configuration file in YAML format
isa : ISA variant passed to the ISS
setting_dir : Generator setting directory
debug_cmd : Produce the debug cmd log without running
Returns:
cmd : ISS run command
"""
logging.info("Processing ISS setup file : %s" % iss_yaml)
yaml_data = read_yaml(iss_yaml)
# Search for matched ISS
for entry in yaml_data:
if entry['iss'] == iss:
logging.info("Found matching ISS: %s" % entry['iss'])
cmd = entry['cmd'].rstrip()
cmd = re.sub("\<path_var\>", get_env_var(entry['path_var'], debug_cmd = debug_cmd), cmd)
m = re.search(r"rv(?P<xlen>[0-9]+?)(?P<variant>[a-z]+?)$", isa)
if m:
cmd = re.sub("\<xlen\>", m.group('xlen'), cmd)
else:
logging.error("Illegal ISA %0s" % isa)
if iss == "ovpsim":
cmd = re.sub("\<cfg_path\>", setting_dir, cmd)
elif iss == "whisper":
if m:
# TODO: Support u/s mode
variant = re.sub('g', 'imafd', m.group('variant'))
cmd = re.sub("\<variant\>", variant, cmd)
else:
cmd = re.sub("\<variant\>", isa, cmd)
return cmd
logging.error("Cannot find ISS %0s" % iss)
sys.exit(RET_FAIL)
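# Illustrative sketch only (not part of the original script): the regular
# expression used in parse_iss_yaml() splits an ISA string such as "rv32imc"
# into its XLEN and extension variant parts.
def _example_parse_isa(isa="rv32imc"):
  m = re.search(r"rv(?P<xlen>[0-9]+?)(?P<variant>[a-z]+?)$", isa)
  return (m.group('xlen'), m.group('variant')) if m else None  # -> ('32', 'imc')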
def get_iss_cmd(base_cmd, elf, log):
"""Get the ISS simulation command
Args:
base_cmd : Original command template
    elf : ELF file to run ISS simulation
log : ISS simulation log name
Returns:
cmd : Command for ISS simulation
"""
cmd = re.sub("\<elf\>", elf, base_cmd)
cmd += (" &> %s" % log)
return cmd
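# Illustrative sketch only (not part of the original script): shows how
# get_iss_cmd() fills the <elf> placeholder of a command template and appends
# the log redirection. The template and paths are hypothetical.
def _example_get_iss_cmd():
  base_cmd = 'spike --isa=rv32imc <elf>'
  return get_iss_cmd(base_cmd, 'out/asm_tests/test_0.o', 'out/spike_sim/test_0.log')
  # -> 'spike --isa=rv32imc out/asm_tests/test_0.o &> out/spike_sim/test_0.log'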
def do_compile(compile_cmd, test_list, core_setting_dir, cwd, ext_dir,
cmp_opts, output_dir, debug_cmd, lsf_cmd):
"""Compile the instruction generator
Args:
compile_cmd : Compile command for the generator
test_list : List of assembly programs to be compiled
core_setting_dir : Path for riscv_core_setting.sv
cwd : Filesystem path to RISCV-DV repo
ext_dir : User extension directory
    cmp_opts : Compile options for the generator
output_dir : Output directory of the ELF files
debug_cmd : Produce the debug cmd log without running
lsf_cmd : LSF command used to run the instruction generator
"""
if (not((len(test_list) == 1) and (test_list[0]['test'] == 'riscv_csr_test'))):
logging.info("Building RISC-V instruction generator")
for cmd in compile_cmd:
cmd = re.sub("<out>", os.path.abspath(output_dir), cmd)
cmd = re.sub("<setting>", core_setting_dir, cmd)
if ext_dir == "":
cmd = re.sub("<user_extension>", "<cwd>/user_extension", cmd)
else:
cmd = re.sub("<user_extension>", ext_dir, cmd)
cmd = re.sub("<cwd>", cwd, cmd)
cmd = re.sub("<cmp_opts>", cmp_opts, cmd)
if lsf_cmd:
cmd = lsf_cmd + " " + cmd
run_parallel_cmd([cmd], debug_cmd = debug_cmd)
else:
logging.debug("Compile command: %s" % cmd)
run_cmd(cmd, debug_cmd = debug_cmd)
def run_csr_test(cmd_list, cwd, csr_file, isa, iterations, lsf_cmd,
end_signature_addr, timeout_s, output_dir, debug_cmd):
"""Run CSR test
It calls a separate python script to generate directed CSR test code,
located at scripts/gen_csr_test.py.
"""
cmd = "python3 " + cwd + "/scripts/gen_csr_test.py" + \
(" --csr_file %s" % csr_file) + \
(" --xlen %s" % re.search(r"(?P<xlen>[0-9]+)", isa).group("xlen")) + \
(" --iterations %i" % iterations) + \
(" --out %s/asm_tests" % output_dir) + \
(" --end_signature_addr %s" % end_signature_addr)
if lsf_cmd:
cmd_list.append(cmd)
else:
run_cmd(cmd, timeout_s, debug_cmd = debug_cmd)
def do_simulate(sim_cmd, test_list, cwd, sim_opts, seed_gen, csr_file,
isa, end_signature_addr, lsf_cmd, timeout_s, log_suffix,
batch_size, output_dir, verbose, check_return_code, debug_cmd):
"""Run the instruction generator
Args:
sim_cmd : Simulate command for the generator
test_list : List of assembly programs to be compiled
cwd : Filesystem path to RISCV-DV repo
sim_opts : Simulation options for the generator
seed_gen : A SeedGen seed generator
csr_file : YAML file containing description of all CSRs
isa : Processor supported ISA subset
end_signature_addr : Address that tests will write pass/fail signature to at end of test
lsf_cmd : LSF command used to run the instruction generator
timeout_s : Timeout limit in seconds
log_suffix : Simulation log file name suffix
batch_size : Number of tests to generate per run
output_dir : Output directory of the ELF files
check_return_code : Check return code of the command
debug_cmd : Produce the debug cmd log without running
"""
cmd_list = []
sim_cmd = re.sub("<out>", os.path.abspath(output_dir), sim_cmd)
sim_cmd = re.sub("<cwd>", cwd, sim_cmd)
sim_cmd = re.sub("<sim_opts>", sim_opts, sim_cmd)
logging.info("Running RISC-V instruction generator")
sim_seed = {}
for test in test_list:
iterations = test['iterations']
logging.info("Generating %d %s" % (iterations, test['test']))
if iterations > 0:
# Running a CSR test
if test['test'] == 'riscv_csr_test':
run_csr_test(cmd_list, cwd, csr_file, isa, iterations, lsf_cmd,
end_signature_addr, timeout_s, output_dir, debug_cmd)
else:
batch_cnt = 1
if batch_size > 0:
batch_cnt = int((iterations + batch_size - 1) / batch_size);
logging.info("Running %s with %0d batches" % (test['test'], batch_cnt))
for i in range(0, batch_cnt):
test_id = '%0s_%0d' % (test['test'], i)
rand_seed = seed_gen.get(test_id, i * batch_cnt)
if i < batch_cnt - 1:
test_cnt = batch_size
else:
test_cnt = iterations - i * batch_size;
cmd = lsf_cmd + " " + sim_cmd.rstrip() + \
(" +UVM_TESTNAME=%s " % test['gen_test']) + \
(" +num_of_tests=%i " % test_cnt) + \
(" +start_idx=%d " % (i*batch_size)) + \
(" +asm_file_name=%s/asm_tests/%s " % (output_dir, test['test'])) + \
(" -l %s/sim_%s_%d%s.log " % (output_dir, test['test'], i, log_suffix))
if verbose:
cmd += "+UVM_VERBOSITY=UVM_HIGH "
cmd = re.sub("<seed>", str(rand_seed), cmd)
cmd = re.sub("<test_id>", test_id, cmd)
sim_seed[test_id] = str(rand_seed)
if "gen_opts" in test:
cmd += test['gen_opts']
if not re.search("c", isa):
cmd += "+disable_compressed_instr=1 ";
if lsf_cmd:
cmd_list.append(cmd)
else:
logging.info("Running %s, batch %0d/%0d, test_cnt:%0d" %
(test['test'], i+1, batch_cnt, test_cnt))
run_cmd(cmd, timeout_s, check_return_code = check_return_code, debug_cmd = debug_cmd)
if sim_seed:
with open(('%s/seed.yaml' % os.path.abspath(output_dir)) , 'w') as outfile:
yaml.dump(sim_seed, outfile, default_flow_style=False)
if lsf_cmd:
run_parallel_cmd(cmd_list, timeout_s, check_return_code = check_return_code,
debug_cmd = debug_cmd)
def gen(test_list, argv, output_dir, cwd):
"""Run the instruction generator
Args:
test_list : List of assembly programs to be compiled
argv : Configuration arguments
output_dir : Output directory of the ELF files
cwd : Filesystem path to RISCV-DV repo
"""
check_return_code = True
if argv.simulator == "ius":
    # Incisive returns a non-zero return code even when the test passes
check_return_code = False
logging.debug("Disable return_code checking for %s" % argv.simulator)
# Mutually exclusive options between compile_only and sim_only
if argv.co and argv.so:
logging.error("argument -co is not allowed with argument -so")
return
if ((argv.co == 0) and (len(test_list) == 0)):
return
# Setup the compile and simulation command for the generator
compile_cmd = []
sim_cmd = ""
compile_cmd, sim_cmd = get_generator_cmd(argv.simulator, argv.simulator_yaml, argv.cov,
argv.exp, argv.debug);
# Compile the instruction generator
if not argv.so:
do_compile(compile_cmd, test_list, argv.core_setting_dir, cwd, argv.user_extension_dir,
argv.cmp_opts, output_dir, argv.debug, argv.lsf_cmd)
# Run the instruction generator
if not argv.co:
seed_gen = SeedGen(argv.start_seed, argv.seed, argv.seed_yaml)
do_simulate(sim_cmd, test_list, cwd, argv.sim_opts, seed_gen, argv.csr_yaml,
argv.isa, argv.end_signature_addr, argv.lsf_cmd, argv.gen_timeout, argv.log_suffix,
argv.batch_size, output_dir, argv.verbose, check_return_code, argv.debug)
def gcc_compile(test_list, output_dir, isa, mabi, opts, gcc_user_extension_path, linker_path, debug_cmd):
"""Use riscv gcc toolchain to compile the assembly program
Args:
test_list : List of assembly programs to be compiled
output_dir : Output directory of the ELF files
isa : ISA variant passed to GCC
mabi : MABI variant passed to GCC
gcc_user_extension_path : Path to user extension directory containing defines files for tests etc
linker_path : Path to linker file to be used in compilation
debug_cmd : Produce the debug cmd log without running
"""
cwd = os.path.dirname(os.path.realpath(__file__))
for test in test_list:
for i in range(0, test['iterations']):
if 'no_gcc' in test and test['no_gcc'] == 1:
continue
prefix = ("%s/asm_tests/%s_%d" % (output_dir, test['test'], i))
asm = prefix + ".S"
elf = prefix + ".o"
exe = prefix + ".exe"
binary = prefix + ".bin"
dump = prefix + ".dump"
program_hex = ("%s/asm_tests/program_%s_%d.hex" % (output_dir, test['test'], i))
test_isa = isa
if not os.path.isfile(asm) and not debug_cmd:
logging.error("Cannot find assembly test: %s\n", asm)
sys.exit(RET_FAIL)
test_name = test.get('test')
cust_linker_path = "riscv_dv_extension/linker_scripts/%s.ld" % test_name
if os.path.exists(cust_linker_path):
linker_path = cust_linker_path
cmd = ("%s -static -mcmodel=medany -fvisibility=hidden -nostdlib -nostartfiles %s \
-I%s -T%s %s -o %s" % \
(get_env_var("RISCV_GCC", debug_cmd = debug_cmd), asm, gcc_user_extension_path, linker_path, opts, elf))
if 'gcc_opts' in test:
cmd += test['gcc_opts']
if 'gen_opts' in test:
# Disable compressed instruction
if re.search('disable_compressed_instr', test['gen_opts']):
test_isa = re.sub("c", "", test_isa)
# If march/mabi is not defined in the test gcc_opts, use the default
# setting from the command line.
if not re.search('march', cmd):
cmd += (" -march=%s" % test_isa)
if not re.search('mabi', cmd):
cmd += (" -mabi=%s" % mabi)
logging.info("Compiling %s" % asm)
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
# Convert the ELF to plain binary and hex
# Generating Binary
cmd = ("%s -O binary %s %s" % (get_env_var("RISCV_OBJCOPY", debug_cmd = debug_cmd), elf, binary))
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
# Generating .exe
if os.path.exists(cust_linker_path):
linker_path = cust_linker_path
cmd = ("%s -m elf32lriscv --discard-none -T%s -o %s %s" % (get_env_var("RISCV_LD", debug_cmd = debug_cmd), linker_path, exe, elf))
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
# Generating disassembly dump
cmd = ('%s --disassemble-all --disassemble-zeroes -M no-aliases --section=.text --section=.data %s > %s' % (get_env_var("RISCV_OBJDUMP", debug_cmd = debug_cmd), elf, dump))
os.system(cmd)
# Generating Program.hex
cmd = ('%s -O verilog %s %s' % (get_env_var("RISCV_OBJCOPY", debug_cmd = debug_cmd), exe, program_hex))
os.system(cmd)
def run_assembly(asm_test, iss_yaml, isa, mabi, gcc_opts, iss_opts, output_dir,
gcc_user_extension_path, linker_path, setting_dir, debug_cmd):
"""Run a directed assembly test with ISS
Args:
asm_test : Assembly test file
iss_yaml : ISS configuration file in YAML format
isa : ISA variant passed to the ISS
mabi : MABI variant passed to GCC
gcc_opts : User-defined options for GCC compilation
iss_opts : Instruction set simulators
output_dir : Output directory of compiled test files
gcc_user_extension_path : Path to user extension directory containing defines files for tests etc
linker_path : Path to linker file to be used in compilation
setting_dir : Generator setting directory
debug_cmd : Produce the debug cmd log without running
"""
if not (asm_test.endswith(".S") or asm_test.endswith(".s")):
logging.error("%s is not an assembly .S or .s file" % asm_test)
return
cwd = os.path.dirname(os.path.realpath(__file__))
asm_test = os.path.expanduser(asm_test)
# TODO (Haroon): Enable it after setting up spike simulation for directed asm tests
# report = ("%s/iss_regr.log" % output_dir).rstrip()
asm = re.sub(r"^.*\/", "", asm_test)
name_asm = asm.split(".")
test = name_asm[0]
if asm_test.endswith(".s"):
asm = re.sub(r"\.s$", "", asm)
else:
asm = re.sub(r"\.S$", "", asm)
prefix = ("%s/directed_asm_tests/%s" % (output_dir, asm))
elf = prefix + ".o"
cpp_s = prefix + ".cpp.s"
exe = prefix + ".exe"
binary = prefix + ".bin"
dump = prefix + ".dump"
program_hex = ("%s/directed_asm_tests/program_%s.hex" % (output_dir, asm))
# TODO (Haroon): Enable it after setting up spike simulation for directed asm tests
# iss_list = iss_opts.split(",")
run_cmd("mkdir -p %s/directed_asm_tests" % output_dir)
logging.info("Compiling assembly test : %s" % asm_test)
cust_linker_path = Path("directed_tests/asm/%s.ld" % test)
# gcc compilation
#logging.info("Generating elf")
if asm_test.endswith(".S"):
if os.path.exists(cust_linker_path):
linker_path = cust_linker_path
cmd = ("%s -static -mcmodel=medany -fvisibility=hidden -nostdlib -nostartfiles %s -I%s -T%s %s -o %s " % (get_env_var("RISCV_GCC", debug_cmd = debug_cmd), asm_test, gcc_user_extension_path, linker_path, gcc_opts, elf))
cmd += (" -march=%s" % isa)
cmd += (" -mabi=%s" % mabi)
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
elif asm_test.endswith(".s"):
#TODO (Najeeb do linked elf gen for spike)
cmd = ("%s/bin/riscv64-unknown-elf-cpp -I%s %s > %s" % (get_env_var("RISCV_TOOLCHAIN", debug_cmd = debug_cmd), gcc_user_extension_path, asm_test, cpp_s))
os.system(cmd)
cmd = ("%s/bin/riscv64-unknown-elf-as -march=%s %s -o %s" % (get_env_var("RISCV_TOOLCHAIN", debug_cmd = debug_cmd), isa, cpp_s, elf))
os.system(cmd)
# Convert the ELF to plain binary and hex
# Generating Binary
cmd = ("%s -O binary %s %s" % (get_env_var("RISCV_OBJCOPY", debug_cmd = debug_cmd), elf, binary))
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
# Generating .exe
if os.path.exists(cust_linker_path):
linker_path = cust_linker_path
cmd = ("%s -m elf32lriscv --discard-none -T%s -o %s %s" % (get_env_var("RISCV_LD", debug_cmd = debug_cmd), linker_path, exe, elf))
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
# Generating disassembly dump
cmd = ('%s --disassemble-all --disassemble-zeroes -M no-aliases --section=.text --section=.data %s > %s' % (get_env_var("RISCV_OBJDUMP", debug_cmd = debug_cmd), elf, dump))
os.system(cmd)
# Generating Program.hex
cmd = ('%s -O verilog %s %s' % (get_env_var("RISCV_OBJCOPY", debug_cmd = debug_cmd), exe, program_hex))
os.system(cmd)
# TODO (Haroon): Setup spike simulation for directed asm tests
'''
log_list = []
# ISS simulation
for iss in iss_list:
run_cmd("mkdir -p %s/%s_sim" % (output_dir, iss))
log = ("%s/%s_sim/%s.log" % (output_dir, iss, asm))
log_list.append(log)
base_cmd = parse_iss_yaml(iss, iss_yaml, isa, setting_dir, debug_cmd)
logging.info("[%0s] Running ISS simulation: %s" % (iss, elf))
cmd = get_iss_cmd(base_cmd, elf, log)
run_cmd(cmd, 10, debug_cmd = debug_cmd)
logging.info("[%0s] Running ISS simulation: %s ...done" % (iss, elf))
if len(iss_list) == 2:
compare_iss_log(iss_list, log_list, report)'''
def run_assembly_from_dir(asm_test_dir, iss_yaml, isa, mabi, gcc_opts, iss,
output_dir, gcc_user_extension_path, linker_path, setting_dir, debug_cmd):
"""Run a directed assembly test from a directory with spike
Args:
asm_test_dir : Assembly test file directory
iss_yaml : ISS configuration file in YAML format
isa : ISA variant passed to the ISS
mabi : MABI variant passed to GCC
gcc_opts : User-defined options for GCC compilation
iss : Instruction set simulators
output_dir : Output directory of compiled test files
gcc_user_extension_path : Path to user extension directory containing defines files for tests etc
linker_path : Path to linker file to be used in compilation
setting_dir : Generator setting directory
debug_cmd : Produce the debug cmd log without running
"""
# TODO (Haroon): Make it work for both assembly extensions (.s and .S)
result = run_cmd("find %s -name \"*.S\" -o -name \"*.s\"" % asm_test_dir)
  if result:
    asm_list = result.splitlines()
    logging.info("Found %0d assembly tests under %s" %
                 (len(asm_list), asm_test_dir))
    for asm_file in asm_list:
      # Derive the test name from each assembly file so a matching custom
      # linker script (directed_tests/asm/<test>.ld) can be picked up.
      test_name = os.path.splitext(os.path.basename(asm_file))[0]
      cust_linker_path = Path("directed_tests/asm/%s.ld" % test_name)
      if os.path.exists(cust_linker_path):
        linker_path = cust_linker_path
      run_assembly(asm_file, iss_yaml, isa, mabi, gcc_opts, iss, output_dir,
                   gcc_user_extension_path, linker_path, setting_dir, debug_cmd)
# TODO (Haroon): Enable it after setting up spike simulation for directed asm tests
# if "," in iss:
# report = ("%s/iss_regr.log" % output_dir).rstrip()
# save_regr_report(report)
else:
logging.error("No assembly test(*.S or *.s) found under %s" % asm_test_dir)
def run_c(c_test, iss_yaml, isa, mabi, gcc_opts, iss_opts, output_dir,
gcc_user_extension_path, linker_path, setting_dir, debug_cmd):
"""Run a directed c test with ISS
Args:
c_test : C test file
iss_yaml : ISS configuration file in YAML format
isa : ISA variant passed to the ISS
mabi : MABI variant passed to GCC
gcc_opts : User-defined options for GCC compilation
iss_opts : Instruction set simulators
output_dir : Output directory of compiled test files
gcc_user_extension_path : Path to user extension directory containing defines files for tests etc
linker_path : Path to linker file to be used in compilation
setting_dir : Generator setting directory
debug_cmd : Produce the debug cmd log without running
"""
if not c_test.endswith(".c"):
logging.error("%s is not a .c file" % c_test)
return
cwd = os.path.dirname(os.path.realpath(__file__))
c_test = os.path.expanduser(c_test)
# TODO (Haroon): Enable it after setting up spike simulation for directed c tests
# report = ("%s/iss_regr.log" % output_dir).rstrip()
c = re.sub(r"^.*\/", "", c_test)
c = re.sub(r"\.c$", "", c)
name_c = c.split(".")
test = name_c[0]
prefix = ("%s/directed_c_tests/%s" % (output_dir, c))
elf = prefix + ".o"
binary = prefix + ".bin"
exe = prefix + ".exe"
dump = prefix + ".dump"
program_hex = ("%s/directed_c_tests/program_%s.hex" % (output_dir, c))
cust_linker_path = Path("directed_tests/c/%s.ld" % test)
# TODO (Haroon): Enable it after setting up spike simulation for directed c tests
# iss_list = iss_opts.split(",")
run_cmd("mkdir -p %s/directed_c_tests" % output_dir)
logging.info("Compiling c test : %s" % c_test)
# gcc compilation
#logging.info("Generating elf")
#TODO(Najeeb do linked elf generation)
if os.path.exists(cust_linker_path):
linker_path = cust_linker_path
cmd = ("%s -I%s -T%s -nostdlib -nostartfiles %s -march=%s -mabi=%s %s -o %s " % \
(get_env_var("RISCV_GCC", debug_cmd = debug_cmd), gcc_user_extension_path, linker_path, gcc_opts, isa, mabi, c_test, elf))
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
# Generating Binary
cmd = ("%s -O binary %s %s" % (get_env_var("RISCV_OBJCOPY", debug_cmd = debug_cmd), elf, binary))
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
# Generating .exe
if os.path.exists(cust_linker_path):
linker_path = cust_linker_path
cmd = ("%s -m elf32lriscv --discard-none -T%s -o %s %s" % (get_env_var("RISCV_LD", debug_cmd = debug_cmd), linker_path, exe, elf))
run_cmd_output(cmd.split(), debug_cmd = debug_cmd)
# Generating disassembly dump
cmd = ('%s --disassemble-all --disassemble-zeroes -M no-aliases --section=.text --section=.data %s > %s' % (get_env_var("RISCV_OBJDUMP", debug_cmd = debug_cmd), elf, dump))
os.system(cmd)
# Generating Program.hex
cmd = ('%s -O verilog %s %s' % (get_env_var("RISCV_OBJCOPY", debug_cmd = debug_cmd), exe, program_hex))
os.system(cmd)
# TODO (Haroon): Setup spike simulation for directed c tests
'''
log_list = []
# ISS simulation
for iss in iss_list:
run_cmd("mkdir -p %s/%s_sim" % (output_dir, iss))
log = ("%s/%s_sim/%s.log" % (output_dir, iss, c))
log_list.append(log)
base_cmd = parse_iss_yaml(iss, iss_yaml, isa, setting_dir, debug_cmd)
logging.info("[%0s] Running ISS simulation: %s" % (iss, elf))
cmd = get_iss_cmd(base_cmd, elf, log)
run_cmd(cmd, 10, debug_cmd = debug_cmd)
logging.info("[%0s] Running ISS simulation: %s ...done" % (iss, elf))
if len(iss_list) == 2:
compare_iss_log(iss_list, log_list, report)'''
def run_c_from_dir(c_test_dir, iss_yaml, isa, mabi, gcc_opts, iss,
output_dir, gcc_user_extension_path, linker_path, setting_dir, debug_cmd):
"""Run a directed c test from a directory with spike
Args:
c_test_dir : C test file directory
iss_yaml : ISS configuration file in YAML format
isa : ISA variant passed to the ISS
mabi : MABI variant passed to GCC
gcc_opts : User-defined options for GCC compilation
iss : Instruction set simulators
output_dir : Output directory of compiled test files
gcc_user_extension_path : Path to user extension directory containing defines files for tests etc
linker_path : Path to linker file to be used in compilation
setting_dir : Generator setting directory
debug_cmd : Produce the debug cmd log without running
"""
result = run_cmd("find %s -name \"*.c\"" % c_test_dir)
if result:
c_list = result.splitlines()
logging.info("Found %0d c tests under %s" %
(len(c_list), c_test_dir))
for c_file in c_list:
      run_c(c_file, iss_yaml, isa, mabi, gcc_opts, iss, output_dir,
            gcc_user_extension_path, linker_path, setting_dir, debug_cmd)
    # TODO (Haroon): Enable it after setting up spike simulation for directed c tests
if "," in iss:
report = ("%s/iss_regr.log" % output_dir).rstrip()
save_regr_report(report)
else:
logging.error("No c test(*.c) found under %s" % c_test_dir)
def iss_sim(test_list, output_dir, iss_list, iss_yaml, iss_opts,
isa, setting_dir, timeout_s, debug_cmd):
"""Run ISS simulation with the generated test program
Args:
test_list : List of assembly programs to be compiled
output_dir : Output directory of the ELF files
iss_list : List of instruction set simulators
iss_yaml : ISS configuration file in YAML format
iss_opts : ISS command line options
isa : ISA variant passed to the ISS
setting_dir : Generator setting directory
timeout_s : Timeout limit in seconds
debug_cmd : Produce the debug cmd log without running
"""
for iss in iss_list.split(","):
log_dir = ("%s/%s_sim" % (output_dir, iss))
base_cmd = parse_iss_yaml(iss, iss_yaml, isa, setting_dir, debug_cmd)
logging.info("%s sim log dir: %s" % (iss, log_dir))
run_cmd_output(["mkdir", "-p", log_dir])
for test in test_list:
if 'no_iss' in test and test['no_iss'] == 1:
continue
else:
for i in range(0, test['iterations']):
if "asm_tests" in test.keys():
prefix = ("%s/directed_asm_tests/%s" % (output_dir, test['test']))
elif "c_tests" in test.keys():
prefix = ("%s/directed_c_tests/%s" % (output_dir, test['test']))
else:
prefix = ("%s/asm_tests/%s_%d" % (output_dir, test['test'], i) )
# prefix = ("%s/asm_tests/%s_%d" % (output_dir, test['test'], i))
elf = prefix + ".o"
log = ("%s/%s.%d.log" % (log_dir, test['test'], i))
cmd = get_iss_cmd(base_cmd, elf, log)
if 'iss_opts' in test:
cmd += ' '
cmd += test['iss_opts']
logging.info("Running %s sim: %s" % (iss, elf))
if iss == "ovpsim":
run_cmd(cmd, timeout_s, debug_cmd = debug_cmd)
else:
run_cmd(cmd, timeout_s, debug_cmd = debug_cmd)
logging.debug(cmd)
def iss_cmp(test_list, iss, output_dir, stop_on_first_error, exp, debug_cmd):
"""Compare ISS simulation reult
Args:
test_list : List of assembly programs to be compiled
iss : List of instruction set simulators
output_dir : Output directory of the ELF files
stop_on_first_error : will end run on first error detected
exp : Use experimental version
debug_cmd : Produce the debug cmd log without running
"""
if debug_cmd:
return
iss_list = iss.split(",")
if len(iss_list) != 2:
return
report = ("%s/iss_regr.log" % output_dir).rstrip()
run_cmd("rm -rf %s" % report)
for test in test_list:
for i in range(0, test['iterations']):
elf = ("%s/asm_tests/%s_%d.o" % (output_dir, test['test'], i))
logging.info("Comparing ISS sim result %s/%s : %s" %
(iss_list[0], iss_list[1], elf))
log_list = []
run_cmd(("echo 'Test binary: %s' >> %s" % (elf, report)))
for iss in iss_list:
log_list.append("%s/%s_sim/%s.%d.log" % (output_dir, iss, test['test'], i))
compare_iss_log(iss_list, log_list, report, stop_on_first_error, exp)
save_regr_report(report)
def compare_iss_log(iss_list, log_list, report, stop_on_first_error=0, exp=False):
if (len(iss_list) != 2 or len(log_list) != 2) :
logging.error("Only support comparing two ISS logs")
else:
csv_list = []
for i in range(2):
log = log_list[i]
csv = log.replace(".log", ".csv");
iss = iss_list[i]
csv_list.append(csv)
if iss == "spike":
process_spike_sim_log(log, csv)
elif iss == "ovpsim":
process_ovpsim_sim_log(log, csv, stop_on_first_error)
elif iss == "sail":
process_sail_sim_log(log, csv)
elif iss == "whisper":
process_whisper_sim_log(log, csv)
else:
logging.error("Unsupported ISS" % iss)
sys.exit(RET_FAIL)
result = compare_trace_csv(csv_list[0], csv_list[1], iss_list[0], iss_list[1], report)
logging.info(result)
def save_regr_report(report):
passed_cnt = run_cmd("grep PASSED %s | wc -l" % report).strip()
failed_cnt = run_cmd("grep FAILED %s | wc -l" % report).strip()
summary = ("%s PASSED, %s FAILED" % (passed_cnt, failed_cnt))
logging.info(summary)
run_cmd(("echo %s >> %s" % (summary, report)))
logging.info("ISS regression report is saved to %s" % report)
def read_seed(arg):
'''Read --seed or --seed_start'''
try:
seed = int(arg)
if seed < 0:
raise ValueError('bad seed')
return seed
except ValueError:
raise argparse.ArgumentTypeError('Bad seed ({}): '
'must be a non-negative integer.'
.format(arg))
def parse_args(cwd):
"""Create a command line parser.
Returns: The created parser.
"""
# Parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument("--target", type=str, default="rv32imc",
help="Run the generator with pre-defined targets: \
rv32imc, rv32i, rv64imc, rv64gc")
parser.add_argument("-o", "--output", type=str,
help="Output directory name", dest="o")
parser.add_argument("-tl", "--testlist", type=str, default="",
help="Regression testlist", dest="testlist")
parser.add_argument("-tn", "--test", type=str, default="all",
help="Test name, 'all' means all tests in the list", dest="test")
parser.add_argument("-i", "--iterations", type=int, default=0,
help="Override the iteration count in the test list", dest="iterations")
parser.add_argument("-si", "--simulator", type=str, default="vcs",
help="Simulator used to run the generator, default VCS", dest="simulator")
parser.add_argument("--iss", type=str, default="spike",
help="RISC-V instruction set simulator: spike,ovpsim,sail")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", default=False,
help="Verbose logging")
parser.add_argument("--co", dest="co", action="store_true", default=False,
help="Compile the generator only")
parser.add_argument("--cov", dest="cov", action="store_true", default=False,
help="Enable functional coverage")
parser.add_argument("--so", dest="so", action="store_true", default=False,
help="Simulate the generator only")
parser.add_argument("--cmp_opts", type=str, default="",
help="Compile options for the generator")
parser.add_argument("--sim_opts", type=str, default="",
help="Simulation options for the generator")
parser.add_argument("--gcc_opts", type=str, default="",
help="GCC compile options")
parser.add_argument("-s", "--steps", type=str, default="all",
help="Run steps: gen,gcc_compile,iss_sim,iss_cmp", dest="steps")
parser.add_argument("--lsf_cmd", type=str, default="",
help="LSF command. Run in local sequentially if lsf \
command is not specified")
parser.add_argument("--isa", type=str, default="",
help="RISC-V ISA subset")
parser.add_argument("-m", "--mabi", type=str, default="",
help="mabi used for compilation", dest="mabi")
parser.add_argument("--gen_timeout", type=int, default=360,
help="Generator timeout limit in seconds")
parser.add_argument("--end_signature_addr", type=str, default="0",
help="Address that privileged CSR test writes to at EOT")
parser.add_argument("--iss_opts", type=str, default="",
help="Any ISS command line arguments")
parser.add_argument("--iss_timeout", type=int, default=10,
help="ISS sim timeout limit in seconds")
parser.add_argument("--iss_yaml", type=str, default="",
help="ISS setting YAML")
parser.add_argument("--simulator_yaml", type=str, default="",
help="RTL simulator setting YAML")
parser.add_argument("--csr_yaml", type=str, default="",
help="CSR description file")
parser.add_argument("-ct", "--custom_target", type=str, default="",
help="Directory name of the custom target")
parser.add_argument("-cs", "--core_setting_dir", type=str, default="",
help="Path for the riscv_core_setting.sv")
parser.add_argument("-ext", "--user_extension_dir", type=str, default="",
help="Path for the user extension directory")
parser.add_argument("-gcc_ext", "--gcc_user_extension_path", type=str, default="",
help="Path for the custom user extension directory")
parser.add_argument("-lp", "--linker_path", type=str, default="",
help="Linker file Path")
parser.add_argument("--asm_tests", type=str, default="",
help="Directed assembly tests")
parser.add_argument("--c_tests", type=str, default="",
help="Directed c tests")
parser.add_argument("--log_suffix", type=str, default="",
help="Simulation log name suffix")
parser.add_argument("--exp", action="store_true", default=False,
help="Run generator with experimental features")
parser.add_argument("-bz", "--batch_size", type=int, default=0,
help="Number of tests to generate per run. You can split a big"
" job into small batches with this option")
parser.add_argument("--stop_on_first_error", dest="stop_on_first_error",
action="store_true", default=False,
help="Stop on detecting first error")
parser.add_argument("--noclean", action="store_true", default=True,
help="Do not clean the output of the previous runs")
parser.add_argument("--verilog_style_check", action="store_true", default=False,
help="Run verilog style check")
parser.add_argument("-d", "--debug", type=str, default="",
help="Generate debug command log file")
rsg = parser.add_argument_group('Random seeds',
'To control random seeds, use at most one '
'of the --start_seed, --seed or --seed_yaml '
'arguments. Since the latter two only give '
'a single seed for each test, they imply '
'--iterations=1.')
rsg.add_argument("--start_seed", type=read_seed,
help=("Randomization seed to use for first iteration of "
"each test. Subsequent iterations use seeds "
"counting up from there. Cannot be used with "
"--seed or --seed_yaml."))
rsg.add_argument("--seed", type=read_seed,
help=("Randomization seed to use for each test. "
"Implies --iterations=1. Cannot be used with "
"--start_seed or --seed_yaml."))
rsg.add_argument("--seed_yaml", type=str,
help=("Rerun the generator with the seed specification "
"from a prior regression. Implies --iterations=1. "
"Cannot be used with --start_seed or --seed."))
args = parser.parse_args()
if args.seed is not None and args.start_seed is not None:
logging.error('--start_seed and --seed are mutually exclusive.')
sys.exit(RET_FAIL)
if args.seed is not None:
if args.iterations == 0:
args.iterations = 1
elif args.iterations > 1:
logging.error('--seed is incompatible with setting --iterations '
'greater than 1.')
sys.exit(RET_FAIL)
# All command-line arguments have been parsed; remaining defaults (target-derived
# ISA/ABI, YAML paths, test list, etc.) are resolved in load_config() below.
load_config(args, cwd)
return args
def load_config(args, cwd):
"""
Resolve remaining default settings based on the parsed command-line arguments.
Args:
args: Parsed command-line arguments; updated in place.
cwd: Current working directory, used to locate bundled files.
"""
if not args.linker_path:
args.linker_path = cwd + '/scripts/link.ld'
if not args.gcc_user_extension_path:
args.gcc_user_extension_path = cwd + '/user_extension/'
if args.debug:
args.debug = open(args.debug, "w")
if not args.csr_yaml:
args.csr_yaml = cwd + "/yaml/csr_template.yaml"
if not args.iss_yaml:
args.iss_yaml = cwd + "/yaml/iss.yaml"
if not args.simulator_yaml:
args.simulator_yaml = cwd + "/yaml/simulator.yaml"
# Keep the core_setting_dir option for backward compatibility;
# --custom_target is the preferred way to specify it.
if args.core_setting_dir:
if not args.custom_target:
args.custom_target = args.core_setting_dir
else:
args.core_setting_dir = args.custom_target
if not args.custom_target:
if not args.testlist:
args.testlist = cwd + "/target/" + args.target + "/testlist.yaml"
args.core_setting_dir = cwd + "/target/"+ args.target
if args.target == "rv32imc":
args.mabi = "ilp32"
args.isa = "rv32imc"
elif args.target == "multi_harts":
args.mabi = "ilp32"
args.isa = "rv32gc"
elif args.target == "rv32imcb":
args.mabi = "ilp32"
args.isa = "rv32imcb"
elif args.target == "rv32i":
args.mabi = "ilp32"
args.isa = "rv32i"
elif args.target == "rv64imc":
args.mabi = "lp64"
args.isa = "rv64imc"
elif args.target == "rv64imcb":
args.mabi = "lp64"
args.isa = "rv64imcb"
elif args.target == "rv64gc":
args.mabi = "lp64"
args.isa = "rv64gc"
elif args.target == "rv64gcv":
args.mabi = "lp64"
args.isa = "rv64gcv"
elif args.target == "ml":
args.mabi = "lp64"
args.isa = "rv64imc"
else:
sys.exit("Unsupported pre-defined target: %0s" % args.target)
else:
if re.match(".*gcc_compile.*", args.steps) or re.match(".*iss_sim.*", args.steps):
if (not args.mabi) or (not args.isa):
sys.exit("mabi and isa must be specified for custom target %0s" % args.custom_target)
if not args.testlist:
args.testlist = args.custom_target + "/testlist.yaml"
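# Illustrative example (derived from the defaults above, not part of the original
# script): for the pre-defined target "rv32imc" with no explicit overrides,
# load_config() leaves args with roughly:
#   args.isa              = "rv32imc"
#   args.mabi             = "ilp32"
#   args.testlist         = <cwd>/target/rv32imc/testlist.yaml
#   args.core_setting_dir = <cwd>/target/rv32imc
#   args.iss_yaml         = <cwd>/yaml/iss.yaml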
def main():
"""This is the main entry point."""
try:
cwd = os.path.dirname(os.path.realpath(__file__))
os.environ["RISCV_DV_ROOT"] = cwd
args = parse_args(cwd)
setup_logging(args.verbose)
# Create output directory
output_dir = create_output(args.o, args.noclean)
if args.verilog_style_check:
logging.debug("Run style check")
style_err = run_cmd("verilog_style/run.sh")
if style_err: logging.info("Found style error: \nERROR: " + style_err)
# Run any handcoded/directed assembly tests specified by args.asm_tests
if args.asm_tests != "":
asm_test = args.asm_tests.split(',')
for path_asm_test in asm_test:
full_path = os.path.expanduser(path_asm_test)
# path_asm_test is a directory
if os.path.isdir(full_path):
run_assembly_from_dir(full_path, args.iss_yaml, args.isa, args.mabi,
args.gcc_opts, args.iss, output_dir, args.gcc_user_extension_path, args.linker_path,
args.core_setting_dir, args.debug)
# path_asm_test is an assembly file
elif os.path.isfile(full_path) or args.debug:
run_assembly(full_path, args.iss_yaml, args.isa, args.mabi, args.gcc_opts,
args.iss, output_dir, args.gcc_user_extension_path, args.linker_path, args.core_setting_dir, args.debug)
else:
logging.error('%s does not exist' % full_path)
sys.exit(RET_FAIL)
return
# Run any handcoded/directed c tests specified by args.c_tests
if args.c_tests != "":
c_test = args.c_tests.split(',')
for path_c_test in c_test:
full_path = os.path.expanduser(path_c_test)
# path_c_test is a directory
if os.path.isdir(full_path):
run_c_from_dir(full_path, args.iss_yaml, args.isa, args.mabi,
args.gcc_opts, args.iss, output_dir, args.gcc_user_extension_path, args.linker_path,
args.core_setting_dir, args.debug)
# path_c_test is a c file
elif os.path.isfile(full_path) or args.debug:
run_c(full_path, args.iss_yaml, args.isa, args.mabi, args.gcc_opts,
args.iss, output_dir, args.gcc_user_extension_path, args.linker_path, args.core_setting_dir, args.debug)
else:
logging.error('%s does not exist' % full_path)
sys.exit(RET_FAIL)
return
run_cmd_output(["mkdir", "-p", ("%s/asm_tests" % output_dir)])
# Process regression test list
matched_list = []
all_matched_list = []
# Any tests in the YAML test list that specify a directed assembly test
asm_directed_list = []
# Any tests in the YAML test list that specify a directed c test
c_directed_list = []
if not args.co:
process_regression_list(args.testlist, args.test, args.iterations, matched_list, cwd)
all_matched_list = matched_list.copy()
for t in list(matched_list):
# Check mutual exclusive between gen_test, asm_tests, and c_tests
if 'asm_tests' in t:
if 'gen_test' in t or 'c_tests' in t:
logging.error('asm_tests must not be defined in the testlist '
'together with the gen_test or c_tests field')
sys.exit(RET_FATAL)
asm_directed_list.append(t)
matched_list.remove(t)
if 'c_tests' in t:
if 'gen_test' in t or 'asm_tests' in t:
logging.error('c_tests must not be defined in the testlist '
'together with the gen_test or asm_tests field')
sys.exit(RET_FATAL)
c_directed_list.append(t)
matched_list.remove(t)
if len(matched_list) == 0 and len(asm_directed_list) == 0 and len(c_directed_list) == 0:
sys.exit("Cannot find %s in %s" % (args.test, args.testlist))
# Run instruction generator
if args.steps == "all" or re.match(".*gen.*", args.steps):
# Run any handcoded/directed assembly tests specified in YAML format
if len(asm_directed_list) != 0:
for test_entry in asm_directed_list:
gcc_opts = args.gcc_opts
gcc_opts += test_entry.get('gcc_opts', '')
path_asm_test = os.path.expanduser(test_entry.get('asm_tests'))
if path_asm_test:
# path_asm_test is a directory
if os.path.isdir(path_asm_test):
run_assembly_from_dir(path_asm_test, args.iss_yaml, args.isa, args.mabi,
gcc_opts, args.iss, output_dir, args.gcc_user_extension_path, args.linker_path,
args.core_setting_dir, args.debug)
# path_asm_test is an assembly file
elif os.path.isfile(path_asm_test):
run_assembly(path_asm_test, args.iss_yaml, args.isa, args.mabi, gcc_opts,
args.iss, output_dir, args.gcc_user_extension_path, args.linker_path, args.core_setting_dir, args.debug)
else:
if not args.debug:
logging.error('%s does not exist' % path_asm_test)
sys.exit(RET_FAIL)
# Run any handcoded/directed C tests specified in YAML format
if len(c_directed_list) != 0:
for test_entry in c_directed_list:
gcc_opts = args.gcc_opts
gcc_opts += test_entry.get('gcc_opts', '')
path_c_test = os.path.expanduser(test_entry.get('c_tests'))
if path_c_test:
# path_c_test is a directory
if os.path.isdir(path_c_test):
run_c_from_dir(path_c_test, args.iss_yaml, args.isa, args.mabi,
gcc_opts, args.iss, output_dir, args.gcc_user_extension_path, args.linker_path,
args.core_setting_dir, args.debug)
# path_c_test is a C file
elif os.path.isfile(path_c_test):
run_c(path_c_test, args.iss_yaml, args.isa, args.mabi, gcc_opts,
args.iss, output_dir, args.gcc_user_extension_path, args.linker_path, args.core_setting_dir, args.debug)
else:
if not args.debug:
logging.error('%s does not exist' % path_c_test)
sys.exit(RET_FAIL)
# Run remaining tests using the instruction generator
gen(matched_list, args, output_dir, cwd)
if not args.co:
# Compile the assembly program to ELF, convert to plain binary
if args.steps == "all" or re.match(".*gcc_compile.*", args.steps):
gcc_compile(matched_list, output_dir, args.isa, args.mabi,
args.gcc_opts, args.gcc_user_extension_path, args.linker_path, args.debug)
# Run ISS simulation
if args.steps == "all" or re.match(".*iss_sim.*", args.steps):
iss_sim(all_matched_list, output_dir, args.iss, args.iss_yaml, args.iss_opts,
args.isa, args.core_setting_dir, args.iss_timeout, args.debug)
# Compare ISS simulation result
if args.steps == "all" or re.match(".*iss_cmp.*", args.steps):
iss_cmp(matched_list, args.iss, output_dir, args.stop_on_first_error,
args.exp, args.debug)
sys.exit(RET_SUCCESS)
except KeyboardInterrupt:
logging.info("\nExited on Ctrl-C (user interrupt).")
sys.exit(130)
if __name__ == "__main__":
main()
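# Illustrative invocation (hypothetical test name, not part of the original script):
#   python3 <this_script>.py --target rv32imc --test riscv_arithmetic_basic_test \
#       --iss spike --simulator vcs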
|
[] |
[] |
[
"RISCV_DV_ROOT"
] |
[]
|
["RISCV_DV_ROOT"]
|
python
| 1 | 0 | |
internal/compute/output_command_test.go
|
package compute
import (
"context"
"os"
"regexp"
"testing"
"github.com/hexops/autogold"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/comby"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/search/result"
"github.com/sourcegraph/sourcegraph/internal/vcs/git"
)
func Test_output(t *testing.T) {
test := func(input string, cmd *Output) string {
result, err := output(context.Background(), input, cmd.MatchPattern, cmd.OutputPattern, cmd.Separator)
if err != nil {
return err.Error()
}
return result.Value
}
autogold.Want(
"regexp search outputs only digits",
"(1)~(2)~(3)~").
Equal(t, test("a 1 b 2 c 3", &Output{
MatchPattern: &Regexp{Value: regexp.MustCompile(`(\d)`)},
OutputPattern: "($1)",
Separator: "~",
}))
// If we are not on CI, skip the test if comby is not installed.
if os.Getenv("CI") == "" && !comby.Exists() {
t.Skip("comby is not installed on the PATH. Try running 'bash <(curl -sL get.comby.dev)'.")
}
autogold.Want(
"structural search output",
`train(regional, intercity)
train(commuter, lightrail)`).
Equal(t, test("Im a train. train(intercity, regional). choo choo. train(lightrail, commuter)", &Output{
MatchPattern: &Comby{Value: `train(:[x], :[y])`},
OutputPattern: "train(:[y], :[x])",
}))
}
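// Note (illustrative, not part of the original file): the test helper above returns
// either the computed value or the error string, so each case can be expressed as a
// single autogold.Want(name, want).Equal(t, test(...)) assertion without separate
// error-handling branches.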
func fileMatch(content string) result.Match {
git.Mocks.ReadFile = func(_ api.CommitID, _ string) ([]byte, error) {
return []byte(content), nil
}
return &result.FileMatch{
File: result.File{Path: "my/awesome/path"},
}
}
func commitMatch(content string) result.Match {
return &result.CommitMatch{
Commit: gitdomain.Commit{
Author: gitdomain.Signature{Name: "bob"},
Committer: &gitdomain.Signature{},
Message: gitdomain.Message(content),
},
}
}
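// Note (illustrative, not part of the original file): fileMatch installs a
// git.Mocks.ReadFile stub so Run() can resolve file content without a real repository,
// while commitMatch builds an in-memory CommitMatch whose author ("bob") and message
// feed the template variables exercised in TestRun below.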
func TestRun(t *testing.T) {
test := func(q string, m result.Match) string {
defer git.ResetMocks()
computeQuery, _ := Parse(q)
res, err := computeQuery.Command.Run(context.Background(), m)
if err != nil {
return err.Error()
}
return res.(*Text).Value
}
autogold.Want(
"template substitution regexp",
"(1)\n(2)\n(3)\n").
Equal(t, test(`content:output((\d) -> ($1))`, fileMatch("a 1 b 2 c 3")))
autogold.Want(
"template substitution regexp with commit author",
"bob: (1)\nbob: (2)\nbob: (3)\n").
Equal(t, test(`content:output((\d) -> $author: ($1))`, commitMatch("a 1 b 2 c 3")))
// If we are not on CI, skip the test if comby is not installed.
if os.Getenv("CI") == "" && !comby.Exists() {
t.Skip("comby is not installed on the PATH. Try running 'bash <(curl -sL get.comby.dev)'.")
}
autogold.Want(
"template substitution structural",
">bar<").
Equal(t, test(`content:output.structural(foo(:[arg]) -> >:[arg]<)`, fileMatch("foo(bar)")))
}
|
[
"\"CI\"",
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
Inspur/benchmarks/rnnt/implementations/pytorch/bind_launch_nf5688m6.py
|
import sys
import subprocess
import os
import socket
from argparse import ArgumentParser, REMAINDER
import torch
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch distributed training launch "
"helper utility that will spawn "
"multiple distributed processes")
# Optional arguments for the launch helper
parser.add_argument("--nnodes", type=int, default=1,
help="The number of nodes to use for distributed "
"training")
parser.add_argument("--node_rank", type=int, default=0,
help="The rank of the node for multi-node distributed "
"training")
parser.add_argument("--nproc_per_node", type=int, default=1,
help="The number of processes to launch on each node, "
"for GPU training, this is recommended to be set "
"to the number of GPUs in your system so that "
"each process can be bound to a single GPU.")
parser.add_argument("--master_addr", default="127.0.0.1", type=str,
help="Master node (rank 0)'s address, should be either "
"the IP address or the hostname of node 0, for "
"single node multi-proc training, the "
"--master_addr can simply be 127.0.0.1")
parser.add_argument("--master_port", default=29500, type=int,
help="Master node (rank 0)'s free port that needs to "
"be used for communication during distributed "
"training")
parser.add_argument('--no_hyperthreads', action='store_true',
help='Flag to disable binding to hyperthreads')
parser.add_argument('--no_membind', action='store_true',
help='Flag to disable memory binding')
# non-optional arguments for binding
parser.add_argument("--nsockets_per_node", type=int, required=True,
help="Number of CPU sockets on a node")
parser.add_argument("--ncores_per_socket", type=int, required=True,
help="Number of CPU cores per socket")
# positional
parser.add_argument("training_script", type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script")
# rest from the training program
parser.add_argument('training_script_args', nargs=REMAINDER)
return parser.parse_args()
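# Illustrative invocation (hypothetical training script and flags, not part of the
# original launcher): spawning 8 bound processes on a single NF5688M6 node:
#   python bind_launch_nf5688m6.py --nnodes 1 --node_rank 0 --nproc_per_node 8 \
#       --nsockets_per_node 2 --ncores_per_socket 64 train.py --epochs 1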
def main():
args = parse_args()
# variables for numactl binding
#NSOCKETS = args.nsockets_per_node
#NGPUS_PER_SOCKET = args.nproc_per_node // args.nsockets_per_node
#NCORES_PER_GPU = args.ncores_per_socket // NGPUS_PER_SOCKET
# world size in terms of number of processes
dist_world_size = args.nproc_per_node * args.nnodes
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["MASTER_ADDR"] = args.master_addr
current_env["MASTER_PORT"] = str(args.master_port)
current_env["WORLD_SIZE"] = str(dist_world_size)
processes = []
#custom binding for A100 GPUs on AMD ROME 2 socket systems
cpu_ranges = []
cpu_ranges.append([0,7,64,71]) #local_rank=0
cpu_ranges.append([8,15,72,79]) #local_rank=1
cpu_ranges.append([16,23,80,87]) #local_rank=2
cpu_ranges.append([24,31,88,95]) #local_rank=3
cpu_ranges.append([32,39,96,103]) #local_rank=4
cpu_ranges.append([40,47,104,111]) #local_rank=5
cpu_ranges.append([48,55,112,119]) #local_rank=6
cpu_ranges.append([56,63,120,127]) #local_rank=7
memnode = []
memnode.append(0) #local_rank=0
memnode.append(0) #local_rank=1
memnode.append(0) #local_rank=2
memnode.append(0) #local_rank=3
memnode.append(1) #local_rank=4
memnode.append(1) #local_rank=5
memnode.append(1) #local_rank=6
memnode.append(1) #local_rank=7
for local_rank in range(0, args.nproc_per_node):
# each process's rank
dist_rank = args.nproc_per_node * args.node_rank + local_rank
current_env["RANK"] = str(dist_rank)
# form numactl binding command
#cpu_ranges = [local_rank * NCORES_PER_GPU,
# (local_rank + 1) * NCORES_PER_GPU - 1,
# local_rank * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS),
# (local_rank + 1) * NCORES_PER_GPU + (NCORES_PER_GPU * NGPUS_PER_SOCKET * NSOCKETS) - 1]
numactlargs = []
if args.no_hyperthreads:
numactlargs += [ "--physcpubind={}-{}".format(*cpu_ranges[local_rank][0:2]) ]
else:
numactlargs += [ "--physcpubind={}-{},{}-{}".format(*cpu_ranges[local_rank]) ]
if not args.no_membind:
#memnode = local_rank // NGPUS_PER_SOCKET
numactlargs += [ "--membind={}".format(memnode[local_rank]) ]
# spawn the processes
cmd = [ "/usr/bin/numactl" ] \
+ numactlargs \
+ [ sys.executable,
"-u",
args.training_script,
"--local_rank={}".format(local_rank)
] \
+ args.training_script_args
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
#print(local_rank,cmd)
for process in processes:
process.wait()
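# Illustrative only (derived from the mapping above, not part of the original
# launcher): for local_rank 0 the spawned command is roughly
#   /usr/bin/numactl --physcpubind=0-7,64-71 --membind=0 \
#       python -u <training_script> --local_rank=0 <training_script_args>
# With --no_hyperthreads the CPU binding would instead be --physcpubind=0-7.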
if __name__ == "__main__":
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/unit/gapic/aiplatform_v1/test_endpoint_service.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import future
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.aiplatform_v1.services.endpoint_service import (
EndpointServiceAsyncClient,
)
from google.cloud.aiplatform_v1.services.endpoint_service import EndpointServiceClient
from google.cloud.aiplatform_v1.services.endpoint_service import pagers
from google.cloud.aiplatform_v1.services.endpoint_service import transports
from google.cloud.aiplatform_v1.services.endpoint_service.transports.base import (
_GOOGLE_AUTH_VERSION,
)
from google.cloud.aiplatform_v1.types import accelerator_type
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import endpoint
from google.cloud.aiplatform_v1.types import endpoint as gca_endpoint
from google.cloud.aiplatform_v1.types import endpoint_service
from google.cloud.aiplatform_v1.types import machine_resources
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.longrunning import operations_pb2
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
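# Illustrative usage (hypothetical test name, not part of the generated file): the
# markers above are applied as decorators on version-specific tests, e.g.
#   @requires_google_auth_gte_1_25_0
#   def test_credentials_behavior_new_auth(): ...
# so that only the cases matching the installed google-auth version are run.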
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
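# Illustrative note (derived from the helper above): EndpointServiceClient.DEFAULT_ENDPOINT
# is expected to be "aiplatform.googleapis.com", which does not contain "localhost", so
# modify_default_endpoint() returns it unchanged; only a localhost default would be
# rewritten to "foo.googleapis.com" for the mTLS-endpoint tests below.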
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert EndpointServiceClient._get_default_mtls_endpoint(None) is None
assert (
EndpointServiceClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
EndpointServiceClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
EndpointServiceClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
EndpointServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
EndpointServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class", [EndpointServiceClient, EndpointServiceAsyncClient,]
)
def test_endpoint_service_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[
(transports.EndpointServiceGrpcTransport, "grpc"),
(transports.EndpointServiceGrpcAsyncIOTransport, "grpc_asyncio"),
],
)
def test_endpoint_service_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class", [EndpointServiceClient, EndpointServiceAsyncClient,]
)
def test_endpoint_service_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_endpoint_service_client_get_transport_class():
transport = EndpointServiceClient.get_transport_class()
available_transports = [
transports.EndpointServiceGrpcTransport,
]
assert transport in available_transports
transport = EndpointServiceClient.get_transport_class("grpc")
assert transport == transports.EndpointServiceGrpcTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
@mock.patch.object(
EndpointServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EndpointServiceClient),
)
@mock.patch.object(
EndpointServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EndpointServiceAsyncClient),
)
def test_endpoint_service_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(EndpointServiceClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(EndpointServiceClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
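# Illustrative summary (not part of the generated tests): the cases above exercise
# GOOGLE_API_USE_MTLS_ENDPOINT = "never" (plain endpoint), "always" (mTLS endpoint),
# and an unsupported value (raises MutualTLSChannelError); an unsupported
# GOOGLE_API_USE_CLIENT_CERTIFICATE value raises ValueError, and the "auto" behavior
# is covered by test_endpoint_service_client_mtls_env_auto below.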
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
EndpointServiceClient,
transports.EndpointServiceGrpcTransport,
"grpc",
"true",
),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(
EndpointServiceClient,
transports.EndpointServiceGrpcTransport,
"grpc",
"false",
),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
EndpointServiceClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EndpointServiceClient),
)
@mock.patch.object(
EndpointServiceAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(EndpointServiceAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_endpoint_service_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_endpoint_service_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(EndpointServiceClient, transports.EndpointServiceGrpcTransport, "grpc"),
(
EndpointServiceAsyncClient,
transports.EndpointServiceGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_endpoint_service_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_endpoint_service_client_client_options_from_dict():
with mock.patch(
"google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = EndpointServiceClient(
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_create_endpoint(
transport: str = "grpc", request_type=endpoint_service.CreateEndpointRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.create_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.CreateEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_create_endpoint_from_dict():
test_create_endpoint(request_type=dict)
def test_create_endpoint_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
client.create_endpoint()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.CreateEndpointRequest()
@pytest.mark.asyncio
async def test_create_endpoint_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.CreateEndpointRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.create_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.CreateEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_create_endpoint_async_from_dict():
await test_create_endpoint_async(request_type=dict)
def test_create_endpoint_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.CreateEndpointRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_endpoint_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.CreateEndpointRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.create_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_endpoint_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_endpoint(
parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value")
def test_create_endpoint_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_endpoint(
endpoint_service.CreateEndpointRequest(),
parent="parent_value",
endpoint=gca_endpoint.Endpoint(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_endpoint_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.create_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_endpoint(
parent="parent_value", endpoint=gca_endpoint.Endpoint(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value")
@pytest.mark.asyncio
async def test_create_endpoint_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_endpoint(
endpoint_service.CreateEndpointRequest(),
parent="parent_value",
endpoint=gca_endpoint.Endpoint(name="name_value"),
)
def test_get_endpoint(
transport: str = "grpc", request_type=endpoint_service.GetEndpointRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint.Endpoint(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
response = client.get_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.GetEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, endpoint.Endpoint)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
def test_get_endpoint_from_dict():
test_get_endpoint(request_type=dict)
def test_get_endpoint_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
client.get_endpoint()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.GetEndpointRequest()
@pytest.mark.asyncio
async def test_get_endpoint_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.GetEndpointRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
endpoint.Endpoint(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
)
response = await client.get_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.GetEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, endpoint.Endpoint)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_get_endpoint_async_from_dict():
await test_get_endpoint_async(request_type=dict)
def test_get_endpoint_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.GetEndpointRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
call.return_value = endpoint.Endpoint()
client.get_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_endpoint_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.GetEndpointRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
await client.get_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_endpoint_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint.Endpoint()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_endpoint(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_get_endpoint_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_endpoint(
endpoint_service.GetEndpointRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_endpoint_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint.Endpoint()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(endpoint.Endpoint())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.get_endpoint(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_endpoint_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_endpoint(
endpoint_service.GetEndpointRequest(), name="name_value",
)
def test_list_endpoints(
transport: str = "grpc", request_type=endpoint_service.ListEndpointsRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint_service.ListEndpointsResponse(
next_page_token="next_page_token_value",
)
response = client.list_endpoints(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.ListEndpointsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEndpointsPager)
assert response.next_page_token == "next_page_token_value"
def test_list_endpoints_from_dict():
test_list_endpoints(request_type=dict)
def test_list_endpoints_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
client.list_endpoints()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.ListEndpointsRequest()
@pytest.mark.asyncio
async def test_list_endpoints_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.ListEndpointsRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
endpoint_service.ListEndpointsResponse(
next_page_token="next_page_token_value",
)
)
response = await client.list_endpoints(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.ListEndpointsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListEndpointsAsyncPager)
assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_endpoints_async_from_dict():
await test_list_endpoints_async(request_type=dict)
def test_list_endpoints_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.ListEndpointsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
call.return_value = endpoint_service.ListEndpointsResponse()
client.list_endpoints(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_endpoints_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.ListEndpointsRequest()
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
endpoint_service.ListEndpointsResponse()
)
await client.list_endpoints(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_endpoints_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint_service.ListEndpointsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.list_endpoints(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
def test_list_endpoints_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_endpoints(
endpoint_service.ListEndpointsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_endpoints_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = endpoint_service.ListEndpointsResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
endpoint_service.ListEndpointsResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.list_endpoints(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_endpoints_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_endpoints(
endpoint_service.ListEndpointsRequest(), parent="parent_value",
)
def test_list_endpoints_pager():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
endpoint_service.ListEndpointsResponse(
endpoints=[
endpoint.Endpoint(),
endpoint.Endpoint(),
endpoint.Endpoint(),
],
next_page_token="abc",
),
endpoint_service.ListEndpointsResponse(
endpoints=[], next_page_token="def",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(),], next_page_token="ghi",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
)
pager = client.list_endpoints(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, endpoint.Endpoint) for i in results)
def test_list_endpoints_pages():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
endpoint_service.ListEndpointsResponse(
endpoints=[
endpoint.Endpoint(),
endpoint.Endpoint(),
endpoint.Endpoint(),
],
next_page_token="abc",
),
endpoint_service.ListEndpointsResponse(
endpoints=[], next_page_token="def",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(),], next_page_token="ghi",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),],
),
RuntimeError,
)
pages = list(client.list_endpoints(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_endpoints_async_pager():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_endpoints), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
endpoint_service.ListEndpointsResponse(
endpoints=[
endpoint.Endpoint(),
endpoint.Endpoint(),
endpoint.Endpoint(),
],
next_page_token="abc",
),
endpoint_service.ListEndpointsResponse(
endpoints=[], next_page_token="def",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(),], next_page_token="ghi",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),],
),
RuntimeError,
)
async_pager = await client.list_endpoints(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, endpoint.Endpoint) for i in responses)
@pytest.mark.asyncio
async def test_list_endpoints_async_pages():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_endpoints), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
endpoint_service.ListEndpointsResponse(
endpoints=[
endpoint.Endpoint(),
endpoint.Endpoint(),
endpoint.Endpoint(),
],
next_page_token="abc",
),
endpoint_service.ListEndpointsResponse(
endpoints=[], next_page_token="def",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(),], next_page_token="ghi",
),
endpoint_service.ListEndpointsResponse(
endpoints=[endpoint.Endpoint(), endpoint.Endpoint(),],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_endpoints(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_update_endpoint(
transport: str = "grpc", request_type=endpoint_service.UpdateEndpointRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_endpoint.Endpoint(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
response = client.update_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UpdateEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_endpoint.Endpoint)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
def test_update_endpoint_from_dict():
test_update_endpoint(request_type=dict)
def test_update_endpoint_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
client.update_endpoint()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UpdateEndpointRequest()
@pytest.mark.asyncio
async def test_update_endpoint_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.UpdateEndpointRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_endpoint.Endpoint(
name="name_value",
display_name="display_name_value",
description="description_value",
etag="etag_value",
)
)
response = await client.update_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UpdateEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_endpoint.Endpoint)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
assert response.etag == "etag_value"
@pytest.mark.asyncio
async def test_update_endpoint_async_from_dict():
await test_update_endpoint_async(request_type=dict)
def test_update_endpoint_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.UpdateEndpointRequest()
request.endpoint.name = "endpoint.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
call.return_value = gca_endpoint.Endpoint()
client.update_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint.name=endpoint.name/value",) in kw[
"metadata"
]
@pytest.mark.asyncio
async def test_update_endpoint_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.UpdateEndpointRequest()
request.endpoint.name = "endpoint.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_endpoint.Endpoint()
)
await client.update_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint.name=endpoint.name/value",) in kw[
"metadata"
]
def test_update_endpoint_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_endpoint.Endpoint()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.update_endpoint(
endpoint=gca_endpoint.Endpoint(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_endpoint_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.update_endpoint(
endpoint_service.UpdateEndpointRequest(),
endpoint=gca_endpoint.Endpoint(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
@pytest.mark.asyncio
async def test_update_endpoint_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.update_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_endpoint.Endpoint()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.update_endpoint(
endpoint=gca_endpoint.Endpoint(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == gca_endpoint.Endpoint(name="name_value")
assert args[0].update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
@pytest.mark.asyncio
async def test_update_endpoint_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.update_endpoint(
endpoint_service.UpdateEndpointRequest(),
endpoint=gca_endpoint.Endpoint(name="name_value"),
update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
)
def test_delete_endpoint(
transport: str = "grpc", request_type=endpoint_service.DeleteEndpointRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.delete_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeleteEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_delete_endpoint_from_dict():
test_delete_endpoint(request_type=dict)
def test_delete_endpoint_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
client.delete_endpoint()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeleteEndpointRequest()
@pytest.mark.asyncio
async def test_delete_endpoint_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.DeleteEndpointRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.delete_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeleteEndpointRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_delete_endpoint_async_from_dict():
await test_delete_endpoint_async(request_type=dict)
def test_delete_endpoint_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.DeleteEndpointRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_endpoint_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.DeleteEndpointRequest()
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.delete_endpoint(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_endpoint_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.delete_endpoint(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
def test_delete_endpoint_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_endpoint(
endpoint_service.DeleteEndpointRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_endpoint_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.delete_endpoint), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.delete_endpoint(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_endpoint_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_endpoint(
endpoint_service.DeleteEndpointRequest(), name="name_value",
)
def test_deploy_model(
transport: str = "grpc", request_type=endpoint_service.DeployModelRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_deploy_model_from_dict():
test_deploy_model(request_type=dict)
def test_deploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
client.deploy_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeployModelRequest()
@pytest.mark.asyncio
async def test_deploy_model_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.DeployModelRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.DeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_deploy_model_async_from_dict():
await test_deploy_model_async(request_type=dict)
def test_deploy_model_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.DeployModelRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_deploy_model_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.DeployModelRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.deploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
def test_deploy_model_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.deploy_model(
endpoint="endpoint_value",
deployed_model=gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
),
traffic_split={"key_value": 541},
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == "endpoint_value"
assert args[0].deployed_model == gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
)
assert args[0].traffic_split == {"key_value": 541}
def test_deploy_model_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.deploy_model(
endpoint_service.DeployModelRequest(),
endpoint="endpoint_value",
deployed_model=gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
),
traffic_split={"key_value": 541},
)
@pytest.mark.asyncio
async def test_deploy_model_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.deploy_model(
endpoint="endpoint_value",
deployed_model=gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
),
traffic_split={"key_value": 541},
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == "endpoint_value"
assert args[0].deployed_model == gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
)
assert args[0].traffic_split == {"key_value": 541}
@pytest.mark.asyncio
async def test_deploy_model_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.deploy_model(
endpoint_service.DeployModelRequest(),
endpoint="endpoint_value",
deployed_model=gca_endpoint.DeployedModel(
dedicated_resources=machine_resources.DedicatedResources(
machine_spec=machine_resources.MachineSpec(
machine_type="machine_type_value"
)
)
),
traffic_split={"key_value": 541},
)
def test_undeploy_model(
transport: str = "grpc", request_type=endpoint_service.UndeployModelRequest
):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
response = client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UndeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
def test_undeploy_model_from_dict():
test_undeploy_model(request_type=dict)
def test_undeploy_model_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
client.undeploy_model()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UndeployModelRequest()
@pytest.mark.asyncio
async def test_undeploy_model_async(
transport: str = "grpc_asyncio", request_type=endpoint_service.UndeployModelRequest
):
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
response = await client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == endpoint_service.UndeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
@pytest.mark.asyncio
async def test_undeploy_model_async_from_dict():
await test_undeploy_model_async(request_type=dict)
def test_undeploy_model_field_headers():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.UndeployModelRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_undeploy_model_field_headers_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = endpoint_service.UndeployModelRequest()
request.endpoint = "endpoint/value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
await client.undeploy_model(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert ("x-goog-request-params", "endpoint=endpoint/value",) in kw["metadata"]
def test_undeploy_model_flattened():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.undeploy_model(
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
traffic_split={"key_value": 541},
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == "endpoint_value"
assert args[0].deployed_model_id == "deployed_model_id_value"
assert args[0].traffic_split == {"key_value": 541}
def test_undeploy_model_flattened_error():
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.undeploy_model(
endpoint_service.UndeployModelRequest(),
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
traffic_split={"key_value": 541},
)
@pytest.mark.asyncio
async def test_undeploy_model_flattened_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.undeploy_model(
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
traffic_split={"key_value": 541},
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].endpoint == "endpoint_value"
assert args[0].deployed_model_id == "deployed_model_id_value"
assert args[0].traffic_split == {"key_value": 541}
@pytest.mark.asyncio
async def test_undeploy_model_flattened_error_async():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.undeploy_model(
endpoint_service.UndeployModelRequest(),
endpoint="endpoint_value",
deployed_model_id="deployed_model_id_value",
traffic_split={"key_value": 541},
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EndpointServiceClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = EndpointServiceClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = EndpointServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.EndpointServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.EndpointServiceGrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials(),)
assert isinstance(client.transport, transports.EndpointServiceGrpcTransport,)
def test_endpoint_service_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.EndpointServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_endpoint_service_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.EndpointServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"create_endpoint",
"get_endpoint",
"list_endpoints",
"update_endpoint",
"delete_endpoint",
"deploy_model",
"undeploy_model",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
transport.operations_client
@requires_google_auth_gte_1_25_0
def test_endpoint_service_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EndpointServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_endpoint_service_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EndpointServiceTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
def test_endpoint_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.aiplatform_v1.services.endpoint_service.transports.EndpointServiceTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.EndpointServiceTransport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_endpoint_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EndpointServiceClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_endpoint_service_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
EndpointServiceClient()
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_endpoint_service_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_endpoint_service_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(
scopes=("https://www.googleapis.com/auth/cloud-platform",),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.EndpointServiceGrpcTransport, grpc_helpers),
(transports.EndpointServiceGrpcAsyncIOTransport, grpc_helpers_async),
],
)
def test_endpoint_service_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
create_channel.assert_called_with(
"aiplatform.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=["1", "2"],
default_host="aiplatform.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
def test_endpoint_service_grpc_transport_client_cert_source_for_mtls(transport_class):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds,
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback,
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert, private_key=expected_key
)
def test_endpoint_service_host_no_port():
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com"
),
)
assert client.transport._host == "aiplatform.googleapis.com:443"
def test_endpoint_service_host_with_port():
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="aiplatform.googleapis.com:8000"
),
)
assert client.transport._host == "aiplatform.googleapis.com:8000"
def test_endpoint_service_grpc_transport_channel():
channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EndpointServiceGrpcTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
def test_endpoint_service_grpc_asyncio_transport_channel():
channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.EndpointServiceGrpcAsyncIOTransport(
host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
def test_endpoint_service_transport_channel_mtls_with_client_cert_source(
transport_class,
):
with mock.patch(
"grpc.ssl_channel_credentials", autospec=True
) as grpc_ssl_channel_cred:
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
"transport_class",
[
transports.EndpointServiceGrpcTransport,
transports.EndpointServiceGrpcAsyncIOTransport,
],
)
def test_endpoint_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(
transport_class, "create_channel"
) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_endpoint_service_grpc_lro_client():
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
)
transport = client.transport
# Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_endpoint_service_grpc_lro_async_client():
client = EndpointServiceAsyncClient(
credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
transport = client.transport
# Ensure that we have an api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
# Ensure that subsequent calls to the property send the exact same object.
assert transport.operations_client is transport.operations_client
def test_endpoint_path():
project = "squid"
location = "clam"
endpoint = "whelk"
expected = "projects/{project}/locations/{location}/endpoints/{endpoint}".format(
project=project, location=location, endpoint=endpoint,
)
actual = EndpointServiceClient.endpoint_path(project, location, endpoint)
assert expected == actual
def test_parse_endpoint_path():
expected = {
"project": "octopus",
"location": "oyster",
"endpoint": "nudibranch",
}
path = EndpointServiceClient.endpoint_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_endpoint_path(path)
assert expected == actual
def test_model_path():
project = "cuttlefish"
location = "mussel"
model = "winkle"
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
actual = EndpointServiceClient.model_path(project, location, model)
assert expected == actual
def test_parse_model_path():
expected = {
"project": "nautilus",
"location": "scallop",
"model": "abalone",
}
path = EndpointServiceClient.model_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_model_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = EndpointServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = EndpointServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = EndpointServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = EndpointServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = EndpointServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = EndpointServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = EndpointServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = EndpointServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = EndpointServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = EndpointServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = EndpointServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.EndpointServiceTransport, "_prep_wrapped_messages"
) as prep:
client = EndpointServiceClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.EndpointServiceTransport, "_prep_wrapped_messages"
) as prep:
transport_class = EndpointServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
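# All of the endpoint_service tests above share one arrangement: patch the
# transport method's "__call__", stub a return value (wrapped in
# FakeUnaryUnaryCall for the asyncio client), invoke the client, and assert on
# the captured request. The sketch below restates that pattern in minimal form,
# reusing this module's imports; the test name is hypothetical and not part of
# the generated suite.
def test_list_endpoints_pattern_sketch():
    client = EndpointServiceClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(type(client.transport.list_endpoints), "__call__") as call:
        # Stub the unary call and make a request purely so it can be captured.
        call.return_value = endpoint_service.ListEndpointsResponse()
        client.list_endpoints(request={})
        call.assert_called_once()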
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cogs/wow/config.py
|
import os
mongodb_atlas = {
"connection_string": os.environ.get('MONGODB_CONNECTION_STRING'),
"database_name": "info-bot",
"news_collection_name": "wow-news",
"log_collections": {"commands": "user-commands-log", "updater": "push-updates-log"}
}
article_types = {
"HOTFIXES": "hotfixes",
"LATEST": "latest"
}
article_keys = {
"TYPE": "type",
"ID": "_id",
"TITLE": "title",
"DESCRIPTION": "description",
"DATETIME": "datetime",
"URL": "url",
"IMAGE_URL": "image_url"
}
news_cog = {
"embed_color": {
"r": 252,
"g": 186,
"b": 3
}
}
updater_cog = {
"news_channel_id": 823082892367364156,
"wow_role_id": 742188088461099148,
"refresh_rate_seconds": 5,
"embed_color": {
"r": 255,
"g": 75,
"b": 35
}
}
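# A minimal usage sketch, assuming the pymongo package is installed and
# MONGODB_CONNECTION_STRING is set: a cog could resolve the news collection
# from this config as below. The helper name is hypothetical and not defined
# elsewhere in this repository.
def get_news_collection():
    # Local import so importing the config module stays dependency-free.
    from pymongo import MongoClient
    client = MongoClient(mongodb_atlas["connection_string"])
    return client[mongodb_atlas["database_name"]][mongodb_atlas["news_collection_name"]]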
|
[] |
[] |
[
"MONGODB_CONNECTION_STRING"
] |
[]
|
["MONGODB_CONNECTION_STRING"]
|
python
| 1 | 0 | |
commands/util.go
|
package commands
import (
"context"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/docker/buildx/build"
"github.com/docker/buildx/driver"
"github.com/docker/buildx/store"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/kubernetes"
ctxstore "github.com/docker/cli/cli/context/store"
dopts "github.com/docker/cli/opts"
dockerclient "github.com/docker/docker/client"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/tools/clientcmd"
)
// getStore returns the current builder instance store
func getStore(dockerCli command.Cli) (*store.Txn, func(), error) {
s, err := store.New(getConfigStorePath(dockerCli))
if err != nil {
return nil, nil, err
}
return s.Txn()
}
// getConfigStorePath will look for the correct configuration store path;
// if `$BUILDX_CONFIG` is set, use it; otherwise use the parent directory
// of the Docker config file (i.e. `${DOCKER_CONFIG}/buildx`)
func getConfigStorePath(dockerCli command.Cli) string {
if buildxConfig := os.Getenv("BUILDX_CONFIG"); buildxConfig != "" {
logrus.Debugf("using config store %q based in \"$BUILDX_CONFIG\" environment variable", buildxConfig)
return buildxConfig
}
buildxConfig := filepath.Join(filepath.Dir(dockerCli.ConfigFile().Filename), "buildx")
logrus.Debugf("using default config store %q", buildxConfig)
return buildxConfig
}
// getCurrentEndpoint returns the current default endpoint value
func getCurrentEndpoint(dockerCli command.Cli) (string, error) {
name := dockerCli.CurrentContext()
if name != "default" {
return name, nil
}
de, err := getDockerEndpoint(dockerCli, name)
if err != nil {
return "", errors.Errorf("docker endpoint for %q not found", name)
}
return de, nil
}
// getDockerEndpoint returns docker endpoint string for given context
func getDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
list, err := dockerCli.ContextStore().List()
if err != nil {
return "", err
}
for _, l := range list {
if l.Name == name {
ep, ok := l.Endpoints["docker"]
if !ok {
return "", errors.Errorf("context %q does not have a Docker endpoint", name)
}
typed, ok := ep.(docker.EndpointMeta)
if !ok {
return "", errors.Errorf("endpoint %q is not of type EndpointMeta, %T", ep, ep)
}
return typed.Host, nil
}
}
return "", nil
}
// validateEndpoint validates that endpoint is either a context or a docker host
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
de, err := getDockerEndpoint(dockerCli, ep)
if err == nil && de != "" {
if ep == "default" {
return de, nil
}
return ep, nil
}
h, err := dopts.ParseHost(true, ep)
if err != nil {
return "", errors.Wrapf(err, "failed to parse endpoint %s", ep)
}
return h, nil
}
// getCurrentInstance finds the current builder instance
func getCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
ep, err := getCurrentEndpoint(dockerCli)
if err != nil {
return nil, err
}
ng, err := txn.Current(ep)
if err != nil {
return nil, err
}
if ng == nil {
ng, _ = getNodeGroup(txn, dockerCli, dockerCli.CurrentContext())
}
return ng, nil
}
// getNodeGroup returns nodegroup based on the name
func getNodeGroup(txn *store.Txn, dockerCli command.Cli, name string) (*store.NodeGroup, error) {
ng, err := txn.NodeGroupByName(name)
if err != nil {
if !os.IsNotExist(errors.Cause(err)) {
return nil, err
}
}
if ng != nil {
return ng, nil
}
if name == "default" {
name = dockerCli.CurrentContext()
}
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == name {
return &store.NodeGroup{
Name: "default",
Nodes: []store.Node{
{
Name: "default",
Endpoint: name,
},
},
}, nil
}
}
return nil, errors.Errorf("no builder %q found", name)
}
// driversForNodeGroup returns drivers for a nodegroup instance
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
eg, _ := errgroup.WithContext(ctx)
dis := make([]build.DriverInfo, len(ng.Nodes))
var f driver.Factory
if ng.Driver != "" {
f = driver.GetFactory(ng.Driver, true)
if f == nil {
return nil, errors.Errorf("failed to find driver %q", f)
}
} else {
dockerapi, err := clientForEndpoint(dockerCli, ng.Nodes[0].Endpoint)
if err != nil {
return nil, err
}
f, err = driver.GetDefaultFactory(ctx, dockerapi, false)
if err != nil {
return nil, err
}
ng.Driver = f.Name()
}
for i, n := range ng.Nodes {
func(i int, n store.Node) {
eg.Go(func() error {
di := build.DriverInfo{
Name: n.Name,
Platform: n.Platforms,
}
defer func() {
dis[i] = di
}()
dockerapi, err := clientForEndpoint(dockerCli, n.Endpoint)
if err != nil {
di.Err = err
return nil
}
// TODO: replace the following line with dockerclient.WithAPIVersionNegotiation option in clientForEndpoint
dockerapi.NegotiateAPIVersion(ctx)
contextStore := dockerCli.ContextStore()
var kcc driver.KubeClientConfig
kcc, err = configFromContext(n.Endpoint, contextStore)
if err != nil {
// err is returned if n.Endpoint is a non-context name like "unix:///var/run/docker.sock".
// Try again with name="default".
// FIXME: n should retain the real context name.
kcc, err = configFromContext("default", contextStore)
if err != nil {
logrus.Error(err)
}
}
tryToUseKubeConfigInCluster := false
if kcc == nil {
tryToUseKubeConfigInCluster = true
} else {
if _, err := kcc.ClientConfig(); err != nil {
tryToUseKubeConfigInCluster = true
}
}
if tryToUseKubeConfigInCluster {
kccInCluster := driver.KubeClientConfigInCluster{}
if _, err := kccInCluster.ClientConfig(); err == nil {
logrus.Debug("using kube config in cluster")
kcc = kccInCluster
}
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, dockerCli.ConfigFile(), kcc, n.Flags, n.ConfigFile, n.DriverOpts, n.Platforms, contextPathHash)
if err != nil {
di.Err = err
return nil
}
di.Driver = d
return nil
})
}(i, n)
}
if err := eg.Wait(); err != nil {
return nil, err
}
return dis, nil
}
func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) {
if strings.HasPrefix(endpointName, "kubernetes://") {
u, _ := url.Parse(endpointName)
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
&clientcmd.ConfigOverrides{},
)
return clientConfig, nil
}
}
return kubernetes.ConfigFromContext(endpointName, s)
}
// clientForEndpoint returns a docker client for an endpoint
func clientForEndpoint(dockerCli command.Cli, name string) (dockerclient.APIClient, error) {
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == name {
dep, ok := l.Endpoints["docker"]
if !ok {
return nil, errors.Errorf("context %q does not have a Docker endpoint", name)
}
epm, ok := dep.(docker.EndpointMeta)
if !ok {
return nil, errors.Errorf("endpoint %q is not of type EndpointMeta, %T", dep, dep)
}
ep, err := docker.WithTLSData(dockerCli.ContextStore(), name, epm)
if err != nil {
return nil, err
}
clientOpts, err := ep.ClientOpts()
if err != nil {
return nil, err
}
return dockerclient.NewClientWithOpts(clientOpts...)
}
}
ep := docker.Endpoint{
EndpointMeta: docker.EndpointMeta{
Host: name,
},
}
clientOpts, err := ep.ClientOpts()
if err != nil {
return nil, err
}
return dockerclient.NewClientWithOpts(clientOpts...)
}
func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
var defaultOnly bool
if instance == "default" && instance != dockerCli.CurrentContext() {
return nil, errors.Errorf("use `docker --context=default buildx` to switch to default context")
}
if instance == "default" || instance == dockerCli.CurrentContext() {
instance = ""
defaultOnly = true
}
list, err := dockerCli.ContextStore().List()
if err != nil {
return nil, err
}
for _, l := range list {
if l.Name == instance {
return nil, errors.Errorf("use `docker --context=%s buildx` to switch to context %s", instance, instance)
}
}
if instance != "" {
return getInstanceByName(ctx, dockerCli, instance, contextPathHash)
}
return getDefaultDrivers(ctx, dockerCli, defaultOnly, contextPathHash)
}
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := getStore(dockerCli)
if err != nil {
return nil, err
}
defer release()
ng, err := txn.NodeGroupByName(instance)
if err != nil {
return nil, err
}
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
}
// getDefaultDrivers returns drivers based on current cli config
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
txn, release, err := getStore(dockerCli)
if err != nil {
return nil, err
}
defer release()
if !defaultOnly {
ng, err := getCurrentInstance(txn, dockerCli)
if err != nil {
return nil, err
}
if ng != nil {
return driversForNodeGroup(ctx, dockerCli, ng, contextPathHash)
}
}
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), dockerCli.ConfigFile(), nil, nil, "", nil, nil, contextPathHash)
if err != nil {
return nil, err
}
return []build.DriverInfo{
{
Name: "default",
Driver: d,
},
}, nil
}
func loadInfoData(ctx context.Context, d *dinfo) error {
if d.di.Driver == nil {
return nil
}
info, err := d.di.Driver.Info(ctx)
if err != nil {
return err
}
d.info = info
if info.Status == driver.Running {
c, err := d.di.Driver.Client(ctx)
if err != nil {
return err
}
workers, err := c.ListWorkers(ctx)
if err != nil {
return errors.Wrap(err, "listing workers")
}
for _, w := range workers {
for _, p := range w.Platforms {
d.platforms = append(d.platforms, p)
}
}
d.platforms = platformutil.Dedupe(d.platforms)
}
return nil
}
func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo) error {
eg, _ := errgroup.WithContext(ctx)
dis, err := driversForNodeGroup(ctx, dockerCli, ngi.ng, "")
if err != nil {
return err
}
ngi.drivers = make([]dinfo, len(dis))
for i, di := range dis {
d := di
ngi.drivers[i].di = &d
func(d *dinfo) {
eg.Go(func() error {
if err := loadInfoData(ctx, d); err != nil {
d.err = err
}
return nil
})
}(&ngi.drivers[i])
}
if err := eg.Wait(); err != nil {
return err
}
kubernetesDriverCount := 0
for _, di := range ngi.drivers {
if di.info != nil && len(di.info.DynamicNodes) > 0 {
kubernetesDriverCount++
}
}
isAllKubernetesDrivers := len(ngi.drivers) == kubernetesDriverCount
if isAllKubernetesDrivers {
var drivers []dinfo
var dynamicNodes []store.Node
for _, di := range ngi.drivers {
// dynamic nodes are used in Kubernetes driver.
// Kubernetes pods are dynamically mapped to BuildKit Nodes.
if di.info != nil && len(di.info.DynamicNodes) > 0 {
for i := 0; i < len(di.info.DynamicNodes); i++ {
// all []dinfo share *build.DriverInfo and *driver.Info
diClone := di
if pl := di.info.DynamicNodes[i].Platforms; len(pl) > 0 {
diClone.platforms = pl
}
drivers = append(drivers, diClone)
}
dynamicNodes = append(dynamicNodes, di.info.DynamicNodes...)
}
}
// not append (remove the static nodes in the store)
ngi.ng.Nodes = dynamicNodes
ngi.drivers = drivers
ngi.ng.Dynamic = true
}
return nil
}
func dockerAPI(dockerCli command.Cli) *api {
return &api{dockerCli: dockerCli}
}
type api struct {
dockerCli command.Cli
}
func (a *api) DockerAPI(name string) (dockerclient.APIClient, error) {
if name == "" {
name = a.dockerCli.CurrentContext()
}
return clientForEndpoint(a.dockerCli, name)
}
type dinfo struct {
di *build.DriverInfo
info *driver.Info
platforms []specs.Platform
err error
}
type nginfo struct {
ng *store.NodeGroup
drivers []dinfo
err error
}
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
toBoot := make([]int, 0, len(ngi.drivers))
for i, d := range ngi.drivers {
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
continue
}
if d.info.Status != driver.Running {
toBoot = append(toBoot, i)
}
}
if len(toBoot) == 0 {
return false, nil
}
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
for _, idx := range toBoot {
func(idx int) {
eg.Go(func() error {
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
if err != nil {
ngi.drivers[idx].err = err
}
return nil
})
}(idx)
}
err := eg.Wait()
err1 := printer.Wait()
if err == nil {
err = err1
}
return true, err
}
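// Illustrative sketch (not part of the original file): resolving a Docker API
// client for the current CLI context via the api helper above. The function
// name is hypothetical and only added here as a usage example.
func exampleDockerClientForCurrentContext(dockerCli command.Cli) (dockerclient.APIClient, error) {
// Passing an empty name makes DockerAPI fall back to dockerCli.CurrentContext().
return dockerAPI(dockerCli).DockerAPI("")
}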
|
[
"\"BUILDX_CONFIG\""
] |
[] |
[
"BUILDX_CONFIG"
] |
[]
|
["BUILDX_CONFIG"]
|
go
| 1 | 0 | |
standard-output.go
|
package logger
import (
"encoding/json"
"fmt"
"github.com/azer/is-terminal"
"os"
"strings"
"time"
)
func NewStandardOutput(file *os.File) OutputWriter {
var writer = StandardWriter{
ColorsEnabled: isterminal.IsTerminal(int(file.Fd())),
Target: file,
}
defaultOutputSettings := parseVerbosityLevel(os.Getenv("LOG_LEVEL"))
writer.Settings = parsePackageSettings(os.Getenv("LOG"), defaultOutputSettings)
return writer
}
type StandardWriter struct {
ColorsEnabled bool
Target *os.File
Settings map[string]*OutputSettings
}
func (sw StandardWriter) Init() {}
func (sw StandardWriter) Write(log *Log) {
if sw.IsEnabled(log.Package, log.Level) {
fmt.Fprintln(sw.Target, sw.Format(log))
}
}
func (sw *StandardWriter) IsEnabled(logger, level string) bool {
settings := sw.LoggerSettings(logger)
if level == "INFO" {
return settings.Info
}
if level == "ERROR" {
return settings.Error
}
if level == "TIMER" {
return settings.Timer
}
return false
}
func (sw *StandardWriter) LoggerSettings(p string) *OutputSettings {
if settings, ok := sw.Settings[p]; ok {
return settings
}
// If there is a "*" (Select all) setting, return that
if settings, ok := sw.Settings["*"]; ok {
return settings
}
return muted
}
func (sw *StandardWriter) Format(log *Log) string {
if sw.ColorsEnabled {
return sw.PrettyFormat(log)
} else {
return sw.JSONFormat(log)
}
}
func (sw *StandardWriter) JSONFormat(log *Log) string {
str, err := json.Marshal(log)
if err != nil {
return fmt.Sprintf(`{ "logger-error": "%v" }`, err)
}
return string(str)
}
func (sw *StandardWriter) PrettyFormat(log *Log) string {
return fmt.Sprintf("%s %s %s%s",
time.Now().Format("15:04:05.000"),
sw.PrettyLabel(log),
log.Message,
sw.PrettyAttrs(log.Attrs))
}
func (sw *StandardWriter) PrettyAttrs(attrs *Attrs) string {
if attrs == nil {
return ""
}
result := ""
for key, val := range *attrs {
result = fmt.Sprintf("%s %s=%v", result, key, val)
}
return result
}
func (sw *StandardWriter) PrettyLabel(log *Log) string {
return fmt.Sprintf("%s%s%s:%s",
colorFor(log.Package),
log.Package,
sw.PrettyLabelExt(log),
reset)
}
func (sw *StandardWriter) PrettyLabelExt(log *Log) string {
if log.Level == "ERROR" {
return fmt.Sprintf("(%s!%s)", red, colorFor(log.Package))
}
if log.Level == "TIMER" {
return fmt.Sprintf("(%s%s%s)", reset, fmt.Sprintf("%v", time.Duration(log.ElapsedNano)), colorFor(log.Package))
}
return ""
}
// Accepts: foo,bar,qux@timer
// *
// *@error
// *@error,database@timer
func parsePackageSettings(input string, defaultOutputSettings *OutputSettings) map[string]*OutputSettings {
all := map[string]*OutputSettings{}
items := strings.Split(input, ",")
for _, item := range items {
name, verbosity := parsePackageName(item)
if verbosity == nil {
verbosity = defaultOutputSettings
}
all[name] = verbosity
}
return all
}
// Accepts: users
// database@timer
// server@error
func parsePackageName(input string) (string, *OutputSettings) {
parsed := strings.Split(input, "@")
name := strings.TrimSpace(parsed[0])
if len(parsed) > 1 {
return name, parseVerbosityLevel(parsed[1])
}
return name, nil
}
func parseVerbosityLevel(val string) *OutputSettings {
val = strings.ToUpper(strings.TrimSpace(val))
if val == "MUTE" {
return &OutputSettings{}
}
s := &OutputSettings{
Info: true,
Timer: true,
Error: true,
}
if val == "TIMER" {
s.Info = false
}
if val == "ERROR" {
s.Info = false
s.Timer = false
}
return s
}
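// Illustrative sketch (not part of the original file): how the LOG and
// LOG_LEVEL values read in NewStandardOutput combine, following the formats
// documented above parsePackageSettings. The function name is hypothetical.
func exampleSettingsFromEnvStyleValues() map[string]*OutputSettings {
// Equivalent of LOG_LEVEL=error: only errors unless a package overrides it.
defaults := parseVerbosityLevel("error")
// Equivalent of LOG="*,database@timer": every package uses the default,
// while "database" also logs timer events.
return parsePackageSettings("*,database@timer", defaults)
}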
|
[
"\"LOG_LEVEL\"",
"\"LOG\""
] |
[] |
[
"LOG",
"LOG_LEVEL"
] |
[]
|
["LOG", "LOG_LEVEL"]
|
go
| 2 | 0 | |
pkg/handlers/troubleshoot.go
|
package handlers
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"time"
"github.com/gorilla/mux"
"github.com/pkg/errors"
downstream "github.com/replicatedhq/kots/pkg/kotsadmdownstream"
"github.com/replicatedhq/kots/pkg/kotsutil"
"github.com/replicatedhq/kots/pkg/logger"
"github.com/replicatedhq/kots/pkg/render/helper"
"github.com/replicatedhq/kots/pkg/store"
"github.com/replicatedhq/kots/pkg/supportbundle"
"github.com/replicatedhq/kots/pkg/supportbundle/types"
troubleshootanalyze "github.com/replicatedhq/troubleshoot/pkg/analyze"
troubleshootv1beta2 "github.com/replicatedhq/troubleshoot/pkg/apis/troubleshoot/v1beta2"
"github.com/replicatedhq/troubleshoot/pkg/convert"
redact2 "github.com/replicatedhq/troubleshoot/pkg/redact"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/client-go/kubernetes/scheme"
)
type GetSupportBundleResponse struct {
ID string `json:"id"`
Slug string `json:"slug"`
AppID string `json:"appId"`
Name string `json:"name"`
Size float64 `json:"size"`
Status string `json:"status"`
TreeIndex string `json:"treeIndex"`
CreatedAt time.Time `json:"createdAt"`
UploadedAt *time.Time `json:"uploadedAt"`
IsArchived bool `json:"isArchived"`
Analysis *types.SupportBundleAnalysis `json:"analysis"`
}
type GetSupportBundleFilesResponse struct {
Files map[string][]byte `json:"files"`
Success bool `json:"success"`
Error string `json:"error,omitempty"`
}
type ListSupportBundlesResponse struct {
SupportBundles []ResponseSupportBundle `json:"supportBundles"`
}
type ResponseSupportBundle struct {
ID string `json:"id"`
Slug string `json:"slug"`
AppID string `json:"appId"`
Name string `json:"name"`
Size float64 `json:"size"`
Status string `json:"status"`
CreatedAt time.Time `json:"createdAt"`
UploadedAt *time.Time `json:"uploadedAt"`
IsArchived bool `json:"isArchived"`
Analysis *types.SupportBundleAnalysis `json:"analysis"`
}
type GetSupportBundleCommandRequest struct {
Origin string `json:"origin"`
}
type GetSupportBundleCommandResponse struct {
Command []string `json:"command"`
}
type GetSupportBundleRedactionsResponse struct {
Redactions redact2.RedactionList `json:"redactions"`
Success bool `json:"success"`
Error string `json:"error,omitempty"`
}
type PutSupportBundleRedactions struct {
Redactions redact2.RedactionList `json:"redactions"`
}
func (h *Handler) GetSupportBundle(w http.ResponseWriter, r *http.Request) {
bundleSlug := mux.Vars(r)["bundleSlug"]
bundle, err := store.GetStore().GetSupportBundle(bundleSlug)
if err != nil {
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
analysis, err := store.GetStore().GetSupportBundleAnalysis(bundle.ID)
if err != nil {
logger.Error(errors.Wrapf(err, "failed to get analysis for bundle %s", bundle.Slug))
}
getSupportBundleResponse := GetSupportBundleResponse{
ID: bundle.ID,
Slug: bundle.Slug,
AppID: bundle.AppID,
Name: bundle.Name,
Size: bundle.Size,
Status: bundle.Status,
TreeIndex: bundle.TreeIndex,
CreatedAt: bundle.CreatedAt,
UploadedAt: bundle.UploadedAt,
IsArchived: bundle.IsArchived,
Analysis: analysis,
}
JSON(w, http.StatusOK, getSupportBundleResponse)
}
func (h *Handler) GetSupportBundleFiles(w http.ResponseWriter, r *http.Request) {
getSupportBundleFilesResponse := GetSupportBundleFilesResponse{
Success: false,
}
bundleID := mux.Vars(r)["bundleId"]
filenames := r.URL.Query()["filename"]
files, err := supportbundle.GetFilesContents(bundleID, filenames)
if err != nil {
logger.Error(err)
getSupportBundleFilesResponse.Error = "failed to get files"
JSON(w, 500, getSupportBundleFilesResponse)
return
}
getSupportBundleFilesResponse.Success = true
getSupportBundleFilesResponse.Files = files
JSON(w, http.StatusOK, getSupportBundleFilesResponse)
}
func (h *Handler) ListSupportBundles(w http.ResponseWriter, r *http.Request) {
appSlug := mux.Vars(r)["appSlug"]
a, err := store.GetStore().GetAppFromSlug(appSlug)
if err != nil {
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
supportBundles, err := store.GetStore().ListSupportBundles(a.ID)
if err != nil {
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
responseSupportBundles := []ResponseSupportBundle{}
for _, bundle := range supportBundles {
analysis, err := store.GetStore().GetSupportBundleAnalysis(bundle.ID)
if err != nil {
logger.Error(errors.Wrapf(err, "failed to get analysis for bundle %s", bundle.Slug))
}
responseSupportBundle := ResponseSupportBundle{
ID: bundle.ID,
Slug: bundle.Slug,
AppID: bundle.AppID,
Name: bundle.Name,
Size: bundle.Size,
Status: bundle.Status,
CreatedAt: bundle.CreatedAt,
UploadedAt: bundle.UploadedAt,
IsArchived: bundle.IsArchived,
Analysis: analysis,
}
responseSupportBundles = append(responseSupportBundles, responseSupportBundle)
}
listSupportBundlesResponse := ListSupportBundlesResponse{
SupportBundles: responseSupportBundles,
}
JSON(w, http.StatusOK, listSupportBundlesResponse)
}
func (h *Handler) GetSupportBundleCommand(w http.ResponseWriter, r *http.Request) {
appSlug := mux.Vars(r)["appSlug"]
// in case of an error, return a generic command
response := GetSupportBundleCommandResponse{
Command: []string{
"curl https://krew.sh/support-bundle | bash",
fmt.Sprintf("kubectl support-bundle secret/%s/%s", os.Getenv("POD_NAMESPACE"), supportbundle.GetSpecSecretName(appSlug)),
},
}
getSupportBundleCommandRequest := GetSupportBundleCommandRequest{}
if err := json.NewDecoder(r.Body).Decode(&getSupportBundleCommandRequest); err != nil {
logger.Error(errors.Wrap(err, "failed to decode request"))
JSON(w, http.StatusOK, response)
return
}
foundApp, err := store.GetStore().GetAppFromSlug(appSlug)
if err != nil {
logger.Error(errors.Wrap(err, "failed to get app"))
JSON(w, http.StatusOK, response)
return
}
sequence := int64(0)
downstreams, err := store.GetStore().ListDownstreamsForApp(foundApp.ID)
if err != nil {
logger.Error(errors.Wrap(err, "failed to get downstreams for app"))
JSON(w, http.StatusOK, response)
return
} else if len(downstreams) > 0 {
currentVersion, err := downstream.GetCurrentVersion(foundApp.ID, downstreams[0].ClusterID)
if err != nil {
logger.Error(errors.Wrap(err, "failed to get deployed app sequence"))
JSON(w, http.StatusOK, response)
return
}
if currentVersion != nil {
sequence = currentVersion.Sequence
}
}
if err := createSupportBundle(foundApp.ID, sequence, getSupportBundleCommandRequest.Origin, false); err != nil {
logger.Error(errors.Wrap(err, "failed to create support bundle spec"))
JSON(w, http.StatusOK, response)
return
}
response.Command = supportbundle.GetBundleCommand(foundApp.Slug)
JSON(w, http.StatusOK, response)
}
func (h *Handler) DownloadSupportBundle(w http.ResponseWriter, r *http.Request) {
bundleID := mux.Vars(r)["bundleId"]
bundle, err := store.GetStore().GetSupportBundle(bundleID)
if err != nil {
logger.Error(err)
JSON(w, http.StatusInternalServerError, nil)
return
}
bundleArchive, err := store.GetStore().GetSupportBundleArchive(bundle.ID)
if err != nil {
logger.Error(err)
JSON(w, http.StatusInternalServerError, nil)
return
}
defer os.RemoveAll(bundleArchive)
f, err := os.Open(bundleArchive)
if err != nil {
logger.Error(err)
JSON(w, http.StatusInternalServerError, nil)
return
}
defer f.Close()
filename := fmt.Sprintf("supportbundle-%s.tar.gz", bundle.CreatedAt.Format("2006-01-02T15_04_05"))
w.Header().Set("Content-Type", "application/gzip")
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s", filename))
w.WriteHeader(http.StatusOK)
io.Copy(w, f)
}
func (h *Handler) CollectSupportBundle(w http.ResponseWriter, r *http.Request) {
a, err := store.GetStore().GetApp(mux.Vars(r)["appId"])
if err != nil {
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
if err := supportbundle.Collect(a.ID, mux.Vars(r)["clusterId"]); err != nil {
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
JSON(w, http.StatusNoContent, "")
}
// UploadSupportBundle route is UNAUTHENTICATED
// This request comes from the `kubectl support-bundle` command.
func (h *Handler) UploadSupportBundle(w http.ResponseWriter, r *http.Request) {
bundleContents, err := ioutil.ReadAll(r.Body)
if err != nil {
logger.Error(errors.Wrap(err, "failed to read request body"))
w.WriteHeader(http.StatusInternalServerError)
return
}
tmpFile, err := ioutil.TempFile("", "kots")
if err != nil {
logger.Error(errors.Wrap(err, "failed to create temp file"))
w.WriteHeader(http.StatusInternalServerError)
return
}
defer os.RemoveAll(tmpFile.Name())
err = ioutil.WriteFile(tmpFile.Name(), bundleContents, 0644)
if err != nil {
logger.Error(errors.Wrap(err, "failed to save bundle to temp file"))
w.WriteHeader(http.StatusInternalServerError)
return
}
supportBundle, err := supportbundle.CreateBundle(mux.Vars(r)["bundleId"], mux.Vars(r)["appId"], tmpFile.Name())
if err != nil {
logger.Error(errors.Wrap(err, "failed to create support bundle"))
w.WriteHeader(http.StatusInternalServerError)
return
}
// we need the app archive to get the analyzers
foundApp, err := store.GetStore().GetApp(mux.Vars(r)["appId"])
if err != nil {
logger.Error(errors.Wrap(err, "failed to get app"))
w.WriteHeader(http.StatusInternalServerError)
return
}
archiveDir, err := ioutil.TempDir("", "kotsadm")
if err != nil {
logger.Error(errors.Wrap(err, "failed to create temp dir"))
w.WriteHeader(http.StatusInternalServerError)
return
}
defer os.RemoveAll(archiveDir)
err = store.GetStore().GetAppVersionArchive(foundApp.ID, foundApp.CurrentSequence, archiveDir)
if err != nil {
logger.Error(errors.Wrap(err, "failed to get app version archive"))
w.WriteHeader(http.StatusInternalServerError)
return
}
kotsKinds, err := kotsutil.LoadKotsKindsFromPath(archiveDir)
if err != nil {
logger.Error(errors.Wrap(err, "failed to load kots kinds from archive"))
w.WriteHeader(http.StatusInternalServerError)
return
}
analyzer := kotsKinds.Analyzer
// SupportBundle overwrites Analyzer if defined
if kotsKinds.SupportBundle != nil {
analyzer = kotsutil.SupportBundleToAnalyzer(kotsKinds.SupportBundle)
}
if analyzer == nil {
analyzer = &troubleshootv1beta2.Analyzer{
TypeMeta: metav1.TypeMeta{
APIVersion: "troubleshoot.sh/v1beta2",
Kind: "Analyzer",
},
ObjectMeta: metav1.ObjectMeta{
Name: "default-analyzers",
},
Spec: troubleshootv1beta2.AnalyzerSpec{
Analyzers: []*troubleshootv1beta2.Analyze{},
},
}
}
if err := supportbundle.InjectDefaultAnalyzers(analyzer); err != nil {
logger.Error(errors.Wrap(err, "failed to inject analyzers"))
w.WriteHeader(http.StatusInternalServerError)
return
}
s := k8sjson.NewYAMLSerializer(k8sjson.DefaultMetaFactory, scheme.Scheme, scheme.Scheme)
var b bytes.Buffer
if err := s.Encode(analyzer, &b); err != nil {
logger.Error(errors.Wrap(err, "failed to encode analyzers"))
w.WriteHeader(http.StatusInternalServerError)
return
}
renderedAnalyzers, err := helper.RenderAppFile(foundApp, nil, b.Bytes(), kotsKinds)
if err != nil {
logger.Error(errors.Wrap(err, "failed to render analyzers"))
w.WriteHeader(http.StatusInternalServerError)
return
}
analyzeResult, err := troubleshootanalyze.DownloadAndAnalyze(tmpFile.Name(), string(renderedAnalyzers))
if err != nil {
logger.Error(errors.Wrap(err, "failed to analyze"))
w.WriteHeader(http.StatusInternalServerError)
return
}
data := convert.FromAnalyzerResult(analyzeResult)
insights, err := json.MarshalIndent(data, "", " ")
if err != nil {
logger.Error(errors.Wrap(err, "failed to marshal result"))
w.WriteHeader(http.StatusInternalServerError)
return
}
if err := store.GetStore().SetSupportBundleAnalysis(supportBundle.ID, insights); err != nil {
logger.Error(errors.Wrap(err, "failed to save result"))
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusOK)
}
func (h *Handler) GetSupportBundleRedactions(w http.ResponseWriter, r *http.Request) {
getSupportBundleRedactionsResponse := GetSupportBundleRedactionsResponse{
Success: false,
}
bundleID := mux.Vars(r)["bundleId"]
redactions, err := store.GetStore().GetRedactions(bundleID)
if err != nil {
logger.Error(err)
getSupportBundleRedactionsResponse.Error = fmt.Sprintf("failed to find redactions for bundle %s", bundleID)
JSON(w, http.StatusBadRequest, getSupportBundleRedactionsResponse)
return
}
getSupportBundleRedactionsResponse.Success = true
getSupportBundleRedactionsResponse.Redactions = redactions
JSON(w, http.StatusOK, getSupportBundleRedactionsResponse)
}
// SetSupportBundleRedactions route is UNAUTHENTICATED
// This request comes from the `kubectl support-bundle` command.
func (h *Handler) SetSupportBundleRedactions(w http.ResponseWriter, r *http.Request) {
redactionsBody, err := ioutil.ReadAll(r.Body)
if err != nil {
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
redactions := PutSupportBundleRedactions{}
err = json.Unmarshal(redactionsBody, &redactions)
if err != nil {
logger.Error(err)
w.WriteHeader(http.StatusBadRequest)
return
}
bundleID := mux.Vars(r)["bundleId"]
err = store.GetStore().SetRedactions(bundleID, redactions.Redactions)
if err != nil {
logger.Error(err)
w.WriteHeader(http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusCreated)
return
}
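// Illustrative sketch (not part of the original file): wiring a few of these
// handlers into a gorilla/mux router. The paths and function name are
// assumptions for illustration; the real route registration lives elsewhere
// in the kots codebase.
func exampleRegisterTroubleshootRoutes(r *mux.Router, h *Handler) {
r.HandleFunc("/app/{appSlug}/supportbundles", h.ListSupportBundles).Methods("GET")
r.HandleFunc("/troubleshoot/supportbundle/{bundleSlug}", h.GetSupportBundle).Methods("GET")
r.HandleFunc("/troubleshoot/supportbundle/{bundleId}/download", h.DownloadSupportBundle).Methods("GET")
}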
|
[
"\"POD_NAMESPACE\""
] |
[] |
[
"POD_NAMESPACE"
] |
[]
|
["POD_NAMESPACE"]
|
go
| 1 | 0 | |
router/default_test.go
|
package router
import (
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/itzmanish/go-micro/v2/registry/memory"
)
func routerTestSetup() Router {
r := memory.NewRegistry()
return newRouter(Registry(r))
}
func TestRouterStartStop(t *testing.T) {
r := routerTestSetup()
if err := r.Start(); err != nil {
t.Errorf("failed to start router: %v", err)
}
_, err := r.Advertise()
if err != nil {
t.Errorf("failed to start advertising: %v", err)
}
if err := r.Stop(); err != nil {
t.Errorf("failed to stop router: %v", err)
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("TestRouterStartStop STOPPED")
}
}
func TestRouterAdvertise(t *testing.T) {
r := routerTestSetup()
// lower the advertise interval
AdvertiseEventsTick = 500 * time.Millisecond
if err := r.Start(); err != nil {
t.Errorf("failed to start router: %v", err)
}
ch, err := r.Advertise()
if err != nil {
t.Errorf("failed to start advertising: %v", err)
}
// receive announce event
ann := <-ch
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("received announce advert: %v", ann)
}
// Generate random unique routes
nrRoutes := 5
routes := make([]Route, nrRoutes)
route := Route{
Service: "dest.svc",
Address: "dest.addr",
Gateway: "dest.gw",
Network: "dest.network",
Router: "src.router",
Link: "local",
Metric: 10,
}
for i := 0; i < nrRoutes; i++ {
testRoute := route
testRoute.Service = fmt.Sprintf("%s-%d", route.Service, i)
routes[i] = testRoute
}
var advertErr error
createDone := make(chan bool)
errChan := make(chan error)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
defer close(createDone)
for _, route := range routes {
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Creating route %v", route)
}
if err := r.Table().Create(route); err != nil {
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Failed to create route: %v", err)
}
errChan <- err
return
}
}
}()
var adverts int
readDone := make(chan bool)
wg.Add(1)
go func() {
defer func() {
wg.Done()
readDone <- true
}()
for advert := range ch {
select {
case advertErr = <-errChan:
t.Errorf("failed advertising events: %v", advertErr)
default:
// do nothing for now
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Router advert received: %v", advert)
}
adverts += len(advert.Events)
}
return
}
}()
// done adding routes to routing table
<-createDone
// done reading adverts from the routing table
<-readDone
if adverts != nrRoutes {
t.Errorf("Expected %d adverts, received: %d", nrRoutes, adverts)
}
wg.Wait()
if err := r.Stop(); err != nil {
t.Errorf("failed to stop router: %v", err)
}
}
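// Illustrative sketch (not part of the original file): creating a single route
// in a router's table, mirroring what the test above does in its loop. The
// function name is hypothetical.
func exampleCreateRoute(r Router) error {
return r.Table().Create(Route{
Service: "dest.svc",
Address: "dest.addr",
Gateway: "dest.gw",
Network: "dest.network",
Router: "src.router",
Link: "local",
Metric: 10,
})
}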
|
[
"\"IN_TRAVIS_CI\"",
"\"IN_TRAVIS_CI\"",
"\"IN_TRAVIS_CI\"",
"\"IN_TRAVIS_CI\"",
"\"IN_TRAVIS_CI\""
] |
[] |
[
"IN_TRAVIS_CI"
] |
[]
|
["IN_TRAVIS_CI"]
|
go
| 1 | 0 | |
test/functional/test_framework/test_framework.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The TNGC Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
import configparser
from enum import Enum
import argparse
import logging
import os
import pdb
import random
import re
import shutil
import subprocess
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .p2p import NetworkThread
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
get_datadir_path,
initialize_datadir,
p2p_port,
wait_until_helper,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
TMPDIR_PREFIX = "tngc_func_test_"
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
class TNGCTestMetaClass(type):
"""Metaclass for TNGCTestFramework.
Ensures that any attempt to register a subclass of `TNGCTestFramework`
adheres to a standard whereby the subclass overrides `set_test_params` and
`run_test` but DOES NOT override either `__init__` or `main`. If any of
those standards are violated, a ``TypeError`` is raised."""
def __new__(cls, clsname, bases, dct):
if not clsname == 'TNGCTestFramework':
if not ('run_test' in dct and 'set_test_params' in dct):
raise TypeError("TNGCTestFramework subclasses must override "
"'run_test' and 'set_test_params'")
if '__init__' in dct or 'main' in dct:
raise TypeError("TNGCTestFramework subclasses may not override "
"'__init__' or 'main'")
return super().__new__(cls, clsname, bases, dct)
class TNGCTestFramework(metaclass=TNGCTestMetaClass):
"""Base class for a tngc test script.
Individual tngc test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
chain = None # type: str
setup_clean_chain = None # type: bool
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.chain = 'regtest'
self.setup_clean_chain = False
self.nodes = []
self.network_thread = None
self.rpc_timeout = 60 # Wait for up to 60 seconds for the RPC server to respond
self.supports_cli = True
self.bind_to_localhost_only = True
self.parse_args()
self.default_wallet_name = "default_wallet" if self.options.descriptors else ""
self.wallet_data_filename = "wallet.dat"
# Optional list of wallet names that can be set in set_test_params to
# create and import keys to. If unset, default is len(nodes) *
# [default_wallet_name]. If wallet names are None, wallet creation is
# skipped. If list is truncated, wallet creation is skipped and keys
# are not imported.
self.wallet_names = None
self.set_test_params()
assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes
if self.options.timeout_factor == 0:
self.options.timeout_factor = 99999
self.rpc_timeout = int(self.rpc_timeout * self.options.timeout_factor) # optionally, increase timeout by a factor
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
try:
self.setup()
self.run_test()
except JSONRPCException:
self.log.exception("JSONRPC error")
self.success = TestStatus.FAILED
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
self.success = TestStatus.SKIPPED
except AssertionError:
self.log.exception("Assertion failed")
self.success = TestStatus.FAILED
except KeyError:
self.log.exception("Key error")
self.success = TestStatus.FAILED
except subprocess.CalledProcessError as e:
self.log.exception("Called Process failed with '{}'".format(e.output))
self.success = TestStatus.FAILED
except Exception:
self.log.exception("Unexpected exception caught during testing")
self.success = TestStatus.FAILED
except KeyboardInterrupt:
self.log.warning("Exiting after keyboard interrupt")
self.success = TestStatus.FAILED
finally:
exit_code = self.shutdown()
sys.exit(exit_code)
def parse_args(self):
previous_releases_path = os.getenv("PREVIOUS_RELEASES_DIR") or os.getcwd() + "/releases"
parser = argparse.ArgumentParser(usage="%(prog)s [options]")
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave tngcds and test.* datadir on exit or error")
parser.add_argument("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop tngcds after the test execution")
parser.add_argument("--cachedir", dest="cachedir", default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs (default: %(default)s)")
parser.add_argument("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_argument("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_argument("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_argument("--portseed", dest="port_seed", default=os.getpid(), type=int,
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_argument("--previous-releases", dest="prev_releases", action="store_true",
default=os.path.isdir(previous_releases_path) and bool(os.listdir(previous_releases_path)),
help="Force test of previous releases (default: %(default)s)")
parser.add_argument("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_argument("--configfile", dest="configfile",
default=os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + "/../../config.ini"),
help="Location of the test framework config file (default: %(default)s)")
parser.add_argument("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_argument("--usecli", dest="usecli", default=False, action="store_true",
help="use tngc-cli instead of RPC for all commands")
parser.add_argument("--perf", dest="perf", default=False, action="store_true",
help="profile running nodes with perf for the duration of the test")
parser.add_argument("--valgrind", dest="valgrind", default=False, action="store_true",
help="run nodes under the valgrind memory error detector: expect at least a ~10x slowdown, valgrind 3.14 or later required")
parser.add_argument("--randomseed", type=int,
help="set a random seed for deterministically reproducing a previous test run")
parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts')
group = parser.add_mutually_exclusive_group()
group.add_argument("--descriptors", default=False, action="store_true",
help="Run test using a descriptor wallet", dest='descriptors')
group.add_argument("--legacy-wallet", default=False, action="store_false",
help="Run test using legacy wallets", dest='descriptors')
self.add_options(parser)
self.options = parser.parse_args()
self.options.previous_releases_path = previous_releases_path
def setup(self):
"""Call this method to start up the test framework object with options set."""
PortSeed.n = self.options.port_seed
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
self.config = config
fname_tngcd = os.path.join(
config["environment"]["BUILDDIR"],
"src",
"tngcd" + config["environment"]["EXEEXT"],
)
fname_tngccli = os.path.join(
config["environment"]["BUILDDIR"],
"src",
"tngc-cli" + config["environment"]["EXEEXT"],
)
self.options.tngcd = os.getenv("TNGCD", default=fname_tngcd)
self.options.tngccli = os.getenv("TNGCCLI", default=fname_tngccli)
os.environ['PATH'] = os.pathsep.join([
os.path.join(config['environment']['BUILDDIR'], 'src'),
os.path.join(config['environment']['BUILDDIR'], 'src', 'qt'), os.environ['PATH']
])
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix=TMPDIR_PREFIX)
self._start_logging()
# Seed the PRNG. Note that test runs are reproducible if and only if
# a single thread accesses the PRNG. For more information, see
# https://docs.python.org/3/library/random.html#notes-on-reproducibility.
# The network thread shouldn't access random. If we need to change the
# network thread to access randomness, it should instantiate its own
# random.Random object.
seed = self.options.randomseed
if seed is None:
seed = random.randrange(sys.maxsize)
else:
self.log.debug("User supplied random seed {}".format(seed))
random.seed(seed)
self.log.debug("PRNG seed is: {}".format(seed))
self.log.debug('Setting up network thread')
self.network_thread = NetworkThread()
self.network_thread.start()
if self.options.usecli:
if not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.skip_if_no_cli()
self.skip_test_if_missing_module()
self.setup_chain()
self.setup_network()
self.success = TestStatus.PASSED
def shutdown(self):
"""Call this method to shut down the test framework object."""
if self.success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
self.log.debug('Closing down network thread')
self.network_thread.close()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: tngcds were not stopped and may still be running")
should_clean_up = (
not self.options.nocleanup and
not self.options.noshutdown and
self.success != TestStatus.FAILED and
not self.options.perf
)
if should_clean_up:
self.log.info("Cleaning up {} on exit".format(self.options.tmpdir))
cleanup_tree_on_exit = True
elif self.options.perf:
self.log.warning("Not cleaning up dir {} due to perf data".format(self.options.tmpdir))
cleanup_tree_on_exit = False
else:
self.log.warning("Not cleaning up dir {}".format(self.options.tmpdir))
cleanup_tree_on_exit = False
if self.success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif self.success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("")
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
self.log.error("")
self.log.error("If this failure happened unexpectedly or intermittently, please file a bug and provide a link or upload of the combined log.")
self.log.error(self.config['environment']['PACKAGE_BUGREPORT'])
self.log.error("")
exit_code = TEST_EXIT_FAILED
# Logging.shutdown will not remove stream- and filehandlers, so we must
# do it explicitly. Handlers are removed so the next test run can apply
# different log handler settings.
# See: https://docs.python.org/3/library/logging.html#logging.shutdown
for h in list(self.log.handlers):
h.flush()
h.close()
self.log.removeHandler(h)
rpc_logger = logging.getLogger("TNGCRPC")
for h in list(rpc_logger.handlers):
h.flush()
rpc_logger.removeHandler(h)
if cleanup_tree_on_exit:
shutil.rmtree(self.options.tmpdir)
self.nodes.clear()
return exit_code
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def skip_test_if_missing_module(self):
"""Override this method to skip a test if a module is not compiled"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
#
# Topology looks like this:
# node0 <-- node1 <-- node2 <-- node3
#
# If all nodes are in IBD (clean chain from genesis), node0 is assumed to be the source of blocks (miner). To
# ensure block propagation, all nodes will establish outgoing connections toward node0.
# See fPreferredDownload in net_processing.
#
# If further outbound connections are needed, they can be added at the beginning of the test with e.g.
# self.connect_nodes(1, 2)
for i in range(self.num_nodes - 1):
self.connect_nodes(i + 1, i)
self.sync_all()
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = [[]] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
if self.is_wallet_compiled():
self.import_deterministic_coinbase_privkeys()
if not self.setup_clean_chain:
for n in self.nodes:
assert_equal(n.getblockchaininfo()["blocks"], 199)
# To ensure that all nodes are out of IBD, the most recent block
# must have a timestamp not too old (see IsInitialBlockDownload()).
self.log.debug('Generate a block with current time')
block_hash = self.nodes[0].generate(1)[0]
block = self.nodes[0].getblock(blockhash=block_hash, verbosity=0)
for n in self.nodes:
n.submitblock(block)
chain_info = n.getblockchaininfo()
assert_equal(chain_info["blocks"], 200)
assert_equal(chain_info["initialblockdownload"], False)
def import_deterministic_coinbase_privkeys(self):
for i in range(self.num_nodes):
self.init_wallet(i)
def init_wallet(self, i):
wallet_name = self.default_wallet_name if self.wallet_names is None else self.wallet_names[i] if i < len(self.wallet_names) else False
if wallet_name is not False:
n = self.nodes[i]
if wallet_name is not None:
n.createwallet(wallet_name=wallet_name, descriptors=self.options.descriptors, load_on_startup=True)
n.importprivkey(privkey=n.get_deterministic_priv_key().key, label='coinbase')
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes: int, extra_args=None, *, rpchost=None, binary=None, binary_cli=None, versions=None):
"""Instantiate TestNode objects.
Should only be called once after the nodes have been specified in
set_test_params()."""
def get_bin_from_version(version, bin_name, bin_default):
if not version:
return bin_default
return os.path.join(
self.options.previous_releases_path,
re.sub(
r'\.0$',
'', # remove trailing .0 for point releases
'v{}.{}.{}.{}'.format(
(version % 100000000) // 1000000,
(version % 1000000) // 10000,
(version % 10000) // 100,
(version % 100) // 1,
),
),
'bin',
bin_name,
)
if self.bind_to_localhost_only:
extra_confs = [["bind=127.0.0.1"]] * num_nodes
else:
extra_confs = [[]] * num_nodes
if extra_args is None:
extra_args = [[]] * num_nodes
if versions is None:
versions = [None] * num_nodes
if binary is None:
binary = [get_bin_from_version(v, 'tngcd', self.options.tngcd) for v in versions]
if binary_cli is None:
binary_cli = [get_bin_from_version(v, 'tngc-cli', self.options.tngccli) for v in versions]
assert_equal(len(extra_confs), num_nodes)
assert_equal(len(extra_args), num_nodes)
assert_equal(len(versions), num_nodes)
assert_equal(len(binary), num_nodes)
assert_equal(len(binary_cli), num_nodes)
for i in range(num_nodes):
test_node_i = TestNode(
i,
get_datadir_path(self.options.tmpdir, i),
chain=self.chain,
rpchost=rpchost,
timewait=self.rpc_timeout,
timeout_factor=self.options.timeout_factor,
tngcd=binary[i],
tngc_cli=binary_cli[i],
version=versions[i],
coverage_dir=self.options.coveragedir,
cwd=self.options.tmpdir,
extra_conf=extra_confs[i],
extra_args=extra_args[i],
use_cli=self.options.usecli,
start_perf=self.options.perf,
use_valgrind=self.options.valgrind,
descriptors=self.options.descriptors,
)
self.nodes.append(test_node_i)
if not test_node_i.version_is_at_least(170000):
# adjust conf for pre 17
conf_file = test_node_i.tngcconf
with open(conf_file, 'r', encoding='utf8') as conf:
conf_data = conf.read()
with open(conf_file, 'w', encoding='utf8') as conf:
conf.write(conf_data.replace('[regtest]', ''))
def start_node(self, i, *args, **kwargs):
"""Start a tngcd"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple tngcds"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i, expected_stderr='', wait=0):
"""Stop a tngcd test node"""
self.nodes[i].stop_node(expected_stderr, wait=wait)
self.nodes[i].wait_until_stopped()
def stop_nodes(self, wait=0):
"""Stop multiple tngcd test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node(wait=wait)
for node in self.nodes:
# Wait for nodes to stop
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def connect_nodes(self, a, b):
def connect_nodes_helper(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
# See comments in net_processing:
# * Must have a version message before anything else
# * Must have a verack message before anything else
wait_until_helper(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
wait_until_helper(lambda: all(peer['bytesrecv_per_msg'].pop('verack', 0) == 24 for peer in from_connection.getpeerinfo()))
connect_nodes_helper(self.nodes[a], b)
def disconnect_nodes(self, a, b):
def disconnect_nodes_helper(from_connection, node_num):
def get_peer_ids():
result = []
for peer in from_connection.getpeerinfo():
if "testnode{}".format(node_num) in peer['subver']:
result.append(peer['id'])
return result
peer_ids = get_peer_ids()
if not peer_ids:
self.log.warning("disconnect_nodes: {} and {} were not connected".format(
from_connection.index,
node_num,
))
return
for peer_id in peer_ids:
try:
from_connection.disconnectnode(nodeid=peer_id)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until_helper(lambda: not get_peer_ids(), timeout=5)
disconnect_nodes_helper(self.nodes[a], b)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
self.disconnect_nodes(1, 2)
self.sync_all(self.nodes[:2])
self.sync_all(self.nodes[2:])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
self.connect_nodes(1, 2)
self.sync_all()
def sync_blocks(self, nodes=None, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
rpc_connections = nodes or self.nodes
timeout = int(timeout * self.options.timeout_factor)
stop_time = time.time() + timeout
while time.time() <= stop_time:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash.count(best_hash[0]) == len(rpc_connections):
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Block sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(b) for b in best_hash),
))
def sync_mempools(self, nodes=None, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
rpc_connections = nodes or self.nodes
timeout = int(timeout * self.options.timeout_factor)
stop_time = time.time() + timeout
while time.time() <= stop_time:
pool = [set(r.getrawmempool()) for r in rpc_connections]
if pool.count(pool[0]) == len(rpc_connections):
if flush_scheduler:
for r in rpc_connections:
r.syncwithvalidationinterfacequeue()
return
# Check that each peer has at least one connection
assert (all([len(x.getpeerinfo()) for x in rpc_connections]))
time.sleep(wait)
raise AssertionError("Mempool sync timed out after {}s:{}".format(
timeout,
"".join("\n {!r}".format(m) for m in pool),
))
def sync_all(self, nodes=None):
self.sync_blocks(nodes)
self.sync_mempools(nodes)
def wait_until(self, test_function, timeout=60):
return wait_until_helper(test_function, timeout=timeout, timeout_factor=self.options.timeout_factor)
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log', encoding='utf-8')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stdout. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as tngcd's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000Z %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%dT%H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("TNGCRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 199-block-long chain
Afterward, create num_nodes copies from the cache."""
CACHE_NODE_ID = 0 # Use node 0 to create the cache for all other nodes
cache_node_dir = get_datadir_path(self.options.cachedir, CACHE_NODE_ID)
assert self.num_nodes <= MAX_NODES
if not os.path.isdir(cache_node_dir):
self.log.debug("Creating cache directory {}".format(cache_node_dir))
initialize_datadir(self.options.cachedir, CACHE_NODE_ID, self.chain)
self.nodes.append(
TestNode(
CACHE_NODE_ID,
cache_node_dir,
chain=self.chain,
extra_conf=["bind=127.0.0.1"],
extra_args=['-disablewallet'],
rpchost=None,
timewait=self.rpc_timeout,
timeout_factor=self.options.timeout_factor,
tngcd=self.options.tngcd,
tngc_cli=self.options.tngccli,
coverage_dir=None,
cwd=self.options.tmpdir,
descriptors=self.options.descriptors,
))
self.start_node(CACHE_NODE_ID)
cache_node = self.nodes[CACHE_NODE_ID]
# Wait for RPC connections to be ready
cache_node.wait_for_rpc_connection()
# Set a time in the past, so that blocks don't end up in the future
cache_node.setmocktime(cache_node.getblockheader(cache_node.getbestblockhash())['time'])
# Create a 199-block-long chain; each of the first 4 nodes
# gets 25 mature blocks and 25 immature.
# The 4th node gets only 24 immature blocks so that the very last
# block in the cache does not age too much (have an old tip age).
# This is needed so that we are out of IBD when the test starts,
# see the tip age check in IsInitialBlockDownload().
for i in range(8):
cache_node.generatetoaddress(
nblocks=25 if i != 7 else 24,
address=TestNode.PRIV_KEYS[i % 4].address,
)
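# Editorial note: seven rounds of 25 blocks plus a final round of 24 gives 7*25 + 24 = 199 blocks, matching the assertion below.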
assert_equal(cache_node.getblockchaininfo()["blocks"], 199)
# Shut it down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
def cache_path(*paths):
return os.path.join(cache_node_dir, self.chain, *paths)
os.rmdir(cache_path('wallets')) # Remove empty wallets dir
for entry in os.listdir(cache_path()):
if entry not in ['chainstate', 'blocks']: # Only keep chainstate and blocks folder
os.remove(cache_path(entry))
for i in range(self.num_nodes):
self.log.debug("Copy cache directory {} to node {}".format(cache_node_dir, i))
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(cache_node_dir, to_dir)
initialize_datadir(self.options.tmpdir, i, self.chain) # Overwrite port/rpcport in tngc.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i, self.chain)
def skip_if_no_py3_zmq(self):
"""Attempt to import the zmq package and skip the test if the import fails."""
try:
import zmq # noqa
except ImportError:
raise SkipTest("python3-zmq module not available.")
def skip_if_no_tngcd_zmq(self):
"""Skip the running test if tngcd has not been compiled with zmq support."""
if not self.is_zmq_compiled():
raise SkipTest("tngcd has not been built with zmq enabled.")
def skip_if_no_wallet(self):
"""Skip the running test if wallet has not been compiled."""
if not self.is_wallet_compiled():
raise SkipTest("wallet has not been compiled.")
if self.options.descriptors:
self.skip_if_no_sqlite()
def skip_if_no_sqlite(self):
"""Skip the running test if sqlite has not been compiled."""
if not self.is_sqlite_compiled():
raise SkipTest("sqlite has not been compiled.")
def skip_if_no_wallet_tool(self):
"""Skip the running test if tngc-wallet has not been compiled."""
if not self.is_wallet_tool_compiled():
raise SkipTest("tngc-wallet has not been compiled")
def skip_if_no_cli(self):
"""Skip the running test if tngc-cli has not been compiled."""
if not self.is_cli_compiled():
raise SkipTest("tngc-cli has not been compiled.")
def skip_if_no_previous_releases(self):
"""Skip the running test if previous releases are not available."""
if not self.has_previous_releases():
raise SkipTest("previous releases not available or disabled")
def has_previous_releases(self):
"""Checks whether previous releases are present and enabled."""
if not os.path.isdir(self.options.previous_releases_path):
if self.options.prev_releases:
raise AssertionError("Force test of previous releases but releases missing: {}".format(
self.options.previous_releases_path))
return self.options.prev_releases
def is_cli_compiled(self):
"""Checks whether tngc-cli was compiled."""
return self.config["components"].getboolean("ENABLE_CLI")
def is_wallet_compiled(self):
"""Checks whether the wallet module was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET")
def is_wallet_tool_compiled(self):
"""Checks whether tngc-wallet was compiled."""
return self.config["components"].getboolean("ENABLE_WALLET_TOOL")
def is_zmq_compiled(self):
"""Checks whether the zmq module was compiled."""
return self.config["components"].getboolean("ENABLE_ZMQ")
def is_sqlite_compiled(self):
"""Checks whether the wallet module was compiled."""
return self.config["components"].getboolean("USE_SQLITE")
|
[] |
[] |
[
"PREVIOUS_RELEASES_DIR",
"PATH",
"TNGCD",
"TNGCCLI"
] |
[]
|
["PREVIOUS_RELEASES_DIR", "PATH", "TNGCD", "TNGCCLI"]
|
python
| 4 | 0 | |
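The helpers above (the skip_if_no_* guards, sync_all/sync_blocks/sync_mempools, and the logging setup) are driven from per-test subclasses. Below is a minimal, hedged sketch of such a test; the import path, base-class name, and the set_test_params/run_test/main hooks are assumptions carried over from the upstream Bitcoin Core style framework this file is derived from, not names taken from this repository.
#!/usr/bin/env python3
# Illustrative sketch only: the module path and base-class name are assumptions.
from test_framework.test_framework import TngcTestFramework  # hypothetical import

class ExampleWalletTest(TngcTestFramework):
    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()  # guard defined in the framework above

    def run_test(self):
        self.log.info("Mining a block on node 0 and syncing both nodes")
        self.nodes[0].generate(1)  # assumes the node proxy exposes the generate RPC
        self.sync_all()            # sync_blocks + sync_mempools, as defined above

if __name__ == '__main__':
    ExampleWalletTest().main()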
registry/kubernetes/client/client.go
|
package client
import (
"crypto/tls"
"errors"
"io/ioutil"
"net/http"
"os"
"path"
log "github.com/yadisnel/go-ms/v2/logger"
"github.com/yadisnel/go-ms/v2plugins/registry/kubernetes/v2/client/api"
"github.com/yadisnel/go-ms/v2plugins/registry/kubernetes/v2/client/watch"
)
var (
serviceAccountPath = "/var/run/secrets/kubernetes.io/serviceaccount"
ErrReadNamespace = errors.New("Could not read namespace from service account secret")
)
// Client ...
type client struct {
opts *api.Options
}
// ListPods ...
func (c *client) ListPods(labels map[string]string) (*PodList, error) {
var pods PodList
err := api.NewRequest(c.opts).Get().Resource("pods").Params(&api.Params{LabelSelector: labels}).Do().Into(&pods)
return &pods, err
}
// UpdatePod ...
func (c *client) UpdatePod(name string, p *Pod) (*Pod, error) {
var pod Pod
err := api.NewRequest(c.opts).Patch().Resource("pods").Name(name).Body(p).Do().Into(&pod)
return &pod, err
}
// WatchPods ...
func (c *client) WatchPods(labels map[string]string) (watch.Watch, error) {
return api.NewRequest(c.opts).Get().Resource("pods").Params(&api.Params{LabelSelector: labels}).Watch()
}
func detectNamespace() (string, error) {
nsPath := path.Join(serviceAccountPath, "namespace")
// Make sure it's a file and we can read it
if s, e := os.Stat(nsPath); e != nil {
return "", e
} else if s.IsDir() {
return "", ErrReadNamespace
}
// Read the file, and cast to a string
if ns, e := ioutil.ReadFile(nsPath); e != nil {
return string(ns), e
} else {
return string(ns), nil
}
}
// NewClientByHost sets up a client by host
func NewClientByHost(host string) Kubernetes {
tr := &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
DisableCompression: true,
}
c := &http.Client{
Transport: tr,
}
return &client{
opts: &api.Options{
Client: c,
Host: host,
Namespace: "default",
},
}
}
// NewClientInCluster should work similarly to the official api
// NewInClient by setting up a client configuration for use within
// a k8s pod.
func NewClientInCluster() Kubernetes {
host := "https://" + os.Getenv("KUBERNETES_SERVICE_HOST") + ":" + os.Getenv("KUBERNETES_SERVICE_PORT")
s, err := os.Stat(serviceAccountPath)
if err != nil {
log.Fatal(err)
}
if s == nil || !s.IsDir() {
log.Fatal(errors.New("no k8s service account found"))
}
token, err := ioutil.ReadFile(path.Join(serviceAccountPath, "token"))
if err != nil {
log.Fatal(err)
}
t := string(token)
ns, err := detectNamespace()
if err != nil {
log.Fatal(err)
}
crt, err := CertPoolFromFile(path.Join(serviceAccountPath, "ca.crt"))
if err != nil {
log.Fatal(err)
}
c := &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
RootCAs: crt,
},
DisableCompression: true,
},
}
return &client{
opts: &api.Options{
Client: c,
Host: host,
Namespace: ns,
BearerToken: &t,
},
}
}
|
[
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] |
[] |
[
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
] |
[]
|
["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"]
|
go
| 2 | 0 | |
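The in-cluster setup above comes down to reading three artifacts that Kubernetes mounts into every pod under /var/run/secrets/kubernetes.io/serviceaccount (namespace, bearer token, CA certificate), plus the KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables. To keep new examples in this document in a single language, here is a hedged Python sketch of the same discovery steps; it only illustrates the pattern the Go client implements and is not part of the registry plugin.
import os

# Mirrors the paths and environment variables read by NewClientInCluster above.
SA_PATH = "/var/run/secrets/kubernetes.io/serviceaccount"

def in_cluster_config():
    """Collect API host, bearer token, namespace, and CA bundle path from the pod environment."""
    host = "https://{}:{}".format(
        os.environ["KUBERNETES_SERVICE_HOST"],
        os.environ["KUBERNETES_SERVICE_PORT"],
    )
    with open(os.path.join(SA_PATH, "token")) as f:
        token = f.read().strip()
    with open(os.path.join(SA_PATH, "namespace")) as f:
        namespace = f.read().strip()
    ca_cert = os.path.join(SA_PATH, "ca.crt")  # hand to your HTTP client as the CA bundle
    return {"host": host, "token": token, "namespace": namespace, "ca_cert": ca_cert}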
qa/rpc-tests/p2p-acceptblock.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Gtacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.mininode import *
from test_framework.test_framework import GtacoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that it is missing the height 2 block and send a
getdata for it.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to send
# p2p messages to a node; the messages themselves are generated in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(GtacoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("GTACOIND", "gtacoind"),
help="gtacoind binary to test")
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print("First height 2 block accepted by both nodes")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print("Unrequested more-work block accepted from non-whitelisted peer")
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print("Unrequested block too far-ahead not processed")
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print("Unrequested block far ahead of tip accepted from whitelisted peer")
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print("Successfully reorged to longer chain from non-whitelisted peer")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
[] |
[] |
[
"GTACOIND"
] |
[]
|
["GTACOIND"]
|
python
| 1 | 0 | |
app/config/settings.py
|
"""
Django settings for config project.
Generated by 'django-admin startproject' using Django 4.0.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-j)hs#zuh0(-09=oek_vzk)-3jjgfp#d9ks6b2x+0ssj%!i-nrl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
DEFAULT_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles'
]
CUSTOM_APPS = [
"core"
]
THIRD_PARTY_APPS = []
INSTALLED_APPS = [*DEFAULT_APPS, *CUSTOM_APPS, *THIRD_PARTY_APPS]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
AUTH_USER_MODEL = 'core.Account' # Implementing Custom User Model
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST':os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD':os.environ.get('DB_PASS')
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
|
[] |
[] |
[
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"]
|
python
| 4 | 0 | |
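The DATABASES block above pulls its connection parameters from the DB_HOST, DB_NAME, DB_USER, and DB_PASS environment variables with no defaults, so a missing variable only surfaces when Django first touches the database. A minimal, hedged sketch of a fail-fast helper; the helper name is illustrative and not part of this project.
import os

def required_env(name):
    """Return an environment variable's value or fail loudly at settings import time."""
    try:
        return os.environ[name]
    except KeyError:
        raise RuntimeError("missing required environment variable: {}".format(name))

# Example usage inside settings.py:
#   'HOST': required_env('DB_HOST'),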
pitchdeck/wsgi.py
|
"""
WSGI config for pitchdeck project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pitchdeck.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Dev")
from configurations.wsgi import get_wsgi_application # noqa
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
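This WSGI module defaults DJANGO_CONFIGURATION to "Dev" and loads the application through django-configurations rather than plain Django, so pitchdeck/settings.py is expected to define class-based settings. A minimal sketch of what such a class could look like with django-configurations; the attribute values are placeholders, not taken from this project.
# pitchdeck/settings.py (illustrative sketch)
from configurations import Configuration

class Dev(Configuration):
    DEBUG = True
    ALLOWED_HOSTS = ["localhost", "127.0.0.1"]
    SECRET_KEY = "dev-only-placeholder"  # never use a hard-coded key in production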
nexmo/__init__.py
|
from platform import python_version
import hashlib
import hmac
import jwt
import os
import requests
import sys
import time
from uuid import uuid4
import warnings
if sys.version_info[0] == 3:
string_types = (str, bytes)
else:
string_types = (unicode, str)
__version__ = '2.0.0'
class Error(Exception):
pass
class ClientError(Error):
pass
class ServerError(Error):
pass
class AuthenticationError(ClientError):
pass
class Client():
def __init__(self, **kwargs):
self.api_key = kwargs.get('key', None) or os.environ.get('NEXMO_API_KEY', None)
self.api_secret = kwargs.get('secret', None) or os.environ.get('NEXMO_API_SECRET', None)
self.signature_secret = kwargs.get('signature_secret', None) or os.environ.get('NEXMO_SIGNATURE_SECRET', None)
self.signature_method = kwargs.get('signature_method', None) or os.environ.get('NEXMO_SIGNATURE_METHOD', None)
if self.signature_method == 'md5':
self.signature_method = hashlib.md5
elif self.signature_method == 'sha1':
self.signature_method = hashlib.sha1
elif self.signature_method == 'sha256':
self.signature_method = hashlib.sha256
elif self.signature_method == 'sha512':
self.signature_method = hashlib.sha512
self.application_id = kwargs.get('application_id', None)
self.private_key = kwargs.get('private_key', None)
if isinstance(self.private_key, string_types) and '\n' not in self.private_key:
with open(self.private_key, 'rb') as key_file:
self.private_key = key_file.read()
self.host = 'rest.nexmo.com'
self.api_host = 'api.nexmo.com'
user_agent = 'nexmo-python/{0}/{1}'.format(__version__, python_version())
if 'app_name' in kwargs and 'app_version' in kwargs:
user_agent += '/{0}/{1}'.format(kwargs['app_name'], kwargs['app_version'])
self.headers = {'User-Agent': user_agent}
self.auth_params = {}
def auth(self, params=None, **kwargs):
self.auth_params = params or kwargs
def send_message(self, params):
return self.post(self.host, '/sms/json', params)
def get_balance(self):
return self.get(self.host, '/account/get-balance')
def get_country_pricing(self, country_code):
return self.get(self.host, '/account/get-pricing/outbound', {'country': country_code})
def get_prefix_pricing(self, prefix):
return self.get(self.host, '/account/get-prefix-pricing/outbound', {'prefix': prefix})
def get_sms_pricing(self, number):
return self.get(self.host, '/account/get-phone-pricing/outbound/sms', {'phone': number})
def get_voice_pricing(self, number):
return self.get(self.host, '/account/get-phone-pricing/outbound/voice', {'phone': number})
def update_settings(self, params=None, **kwargs):
return self.post(self.host, '/account/settings', params or kwargs)
def topup(self, params=None, **kwargs):
return self.post(self.host, '/account/top-up', params or kwargs)
def get_account_numbers(self, params=None, **kwargs):
return self.get(self.host, '/account/numbers', params or kwargs)
def get_available_numbers(self, country_code, params=None, **kwargs):
return self.get(self.host, '/number/search', dict(params or kwargs, country=country_code))
def buy_number(self, params=None, **kwargs):
return self.post(self.host, '/number/buy', params or kwargs)
def cancel_number(self, params=None, **kwargs):
return self.post(self.host, '/number/cancel', params or kwargs)
def update_number(self, params=None, **kwargs):
return self.post(self.host, '/number/update', params or kwargs)
def get_message(self, message_id):
return self.get(self.host, '/search/message', {'id': message_id})
def get_message_rejections(self, params=None, **kwargs):
return self.get(self.host, '/search/rejections', params or kwargs)
def search_messages(self, params=None, **kwargs):
return self.get(self.host, '/search/messages', params or kwargs)
def send_ussd_push_message(self, params=None, **kwargs):
return self.post(self.host, '/ussd/json', params or kwargs)
def send_ussd_prompt_message(self, params=None, **kwargs):
return self.post(self.host, '/ussd-prompt/json', params or kwargs)
def send_2fa_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/2fa/json', params or kwargs)
def send_event_alert_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/alert/json', params or kwargs)
def send_marketing_message(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/marketing/json', params or kwargs)
def get_event_alert_numbers(self):
return self.get(self.host, '/sc/us/alert/opt-in/query/json')
def resubscribe_event_alert_number(self, params=None, **kwargs):
return self.post(self.host, '/sc/us/alert/opt-in/manage/json', params or kwargs)
def initiate_call(self, params=None, **kwargs):
return self.post(self.host, '/call/json', params or kwargs)
def initiate_tts_call(self, params=None, **kwargs):
return self.post(self.api_host, '/tts/json', params or kwargs)
def initiate_tts_prompt_call(self, params=None, **kwargs):
return self.post(self.api_host, '/tts-prompt/json', params or kwargs)
def start_verification(self, params=None, **kwargs):
return self.post(self.api_host, '/verify/json', params or kwargs)
def send_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#send_verification_request is deprecated (use #start_verification instead)',
DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/json', params or kwargs)
def check_verification(self, request_id, params=None, **kwargs):
return self.post(self.api_host, '/verify/check/json', dict(params or kwargs, request_id=request_id))
def check_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#check_verification_request is deprecated (use #check_verification instead)',
DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/check/json', params or kwargs)
def get_verification(self, request_id):
return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})
def get_verification_request(self, request_id):
warnings.warn('nexmo.Client#get_verification_request is deprecated (use #get_verification instead)',
DeprecationWarning, stacklevel=2)
return self.get(self.api_host, '/verify/search/json', {'request_id': request_id})
def cancel_verification(self, request_id):
return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'cancel'})
def trigger_next_verification_event(self, request_id):
return self.post(self.api_host, '/verify/control/json', {'request_id': request_id, 'cmd': 'trigger_next_event'})
def control_verification_request(self, params=None, **kwargs):
warnings.warn('nexmo.Client#control_verification_request is deprecated', DeprecationWarning, stacklevel=2)
return self.post(self.api_host, '/verify/control/json', params or kwargs)
def get_basic_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/basic/json', params or kwargs)
def get_standard_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/standard/json', params or kwargs)
def get_number_insight(self, params=None, **kwargs):
warnings.warn('nexmo.Client#get_number_insight is deprecated (use #get_standard_number_insight instead)',
DeprecationWarning, stacklevel=2)
return self.get(self.api_host, '/number/lookup/json', params or kwargs)
def get_advanced_number_insight(self, params=None, **kwargs):
return self.get(self.api_host, '/ni/advanced/json', params or kwargs)
def request_number_insight(self, params=None, **kwargs):
return self.post(self.host, '/ni/json', params or kwargs)
def get_applications(self, params=None, **kwargs):
return self.get(self.api_host, '/v1/applications', params or kwargs)
def get_application(self, application_id):
return self.get(self.api_host, '/v1/applications/' + application_id)
def create_application(self, params=None, **kwargs):
return self.post(self.api_host, '/v1/applications', params or kwargs)
def update_application(self, application_id, params=None, **kwargs):
return self.put(self.api_host, '/v1/applications/' + application_id, params or kwargs)
def delete_application(self, application_id):
return self.delete(self.api_host, '/v1/applications/' + application_id)
def create_call(self, params=None, **kwargs):
return self.__post('/v1/calls', params or kwargs)
def get_calls(self, params=None, **kwargs):
return self.__get('/v1/calls', params or kwargs)
def get_call(self, uuid):
return self.__get('/v1/calls/' + uuid)
def update_call(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid, params or kwargs)
def send_audio(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/stream', params or kwargs)
def stop_audio(self, uuid):
return self.__delete('/v1/calls/' + uuid + '/stream')
def send_speech(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/talk', params or kwargs)
def stop_speech(self, uuid):
return self.__delete('/v1/calls/' + uuid + '/talk')
def send_dtmf(self, uuid, params=None, **kwargs):
return self.__put('/v1/calls/' + uuid + '/dtmf', params or kwargs)
def check_signature(self, params):
params = dict(params)
signature = params.pop('sig', '').lower()
return hmac.compare_digest(signature, self.signature(params))
def signature(self, params):
if self.signature_method:
hasher = hmac.new(self.signature_secret.encode(), digestmod=self.signature_method)
else:
hasher = hashlib.md5()
# Add timestamp if not already present
if not params.get("timestamp"):
params["timestamp"] = int(time.time())
for key in sorted(params):
value = params[key]
if isinstance(value, str):
value = value.replace('&', '_').replace('=', '_')
hasher.update('&{0}={1}'.format(key, value).encode('utf-8'))
if self.signature_method is None:
hasher.update(self.signature_secret.encode())
return hasher.hexdigest()
def get(self, host, request_uri, params=None):
uri = 'https://' + host + request_uri
params = dict(params or {}, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.get(uri, params=params, headers=self.headers))
def post(self, host, request_uri, params):
uri = 'https://' + host + request_uri
params = dict(params, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.post(uri, data=params, headers=self.headers))
def put(self, host, request_uri, params):
uri = 'https://' + host + request_uri
params = dict(params, api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.put(uri, json=params, headers=self.headers))
def delete(self, host, request_uri):
uri = 'https://' + host + request_uri
params = dict(api_key=self.api_key, api_secret=self.api_secret)
return self.parse(host, requests.delete(uri, params=params, headers=self.headers))
def parse(self, host, response):
if response.status_code == 401:
raise AuthenticationError
elif response.status_code == 204:
return None
elif 200 <= response.status_code < 300:
return response.json()
elif 400 <= response.status_code < 500:
message = "{code} response from {host}".format(code=response.status_code, host=host)
raise ClientError(message)
elif 500 <= response.status_code < 600:
message = "{code} response from {host}".format(code=response.status_code, host=host)
raise ServerError(message)
def __get(self, request_uri, params=None):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.get(uri, params=params or {}, headers=self.__headers()))
def __post(self, request_uri, params):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.post(uri, json=params, headers=self.__headers()))
def __put(self, request_uri, params):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.put(uri, json=params, headers=self.__headers()))
def __delete(self, request_uri):
uri = 'https://' + self.api_host + request_uri
return self.parse(self.api_host, requests.delete(uri, headers=self.__headers()))
def __headers(self):
iat = int(time.time())
payload = dict(self.auth_params)
payload.setdefault('application_id', self.application_id)
payload.setdefault('iat', iat)
payload.setdefault('exp', iat + 60)
payload.setdefault('jti', str(uuid4()))
token = jwt.encode(payload, self.private_key, algorithm='RS256')
return dict(self.headers, Authorization=b'Bearer ' + token)
|
[] |
[] |
[
"NEXMO_SIGNATURE_METHOD",
"NEXMO_SIGNATURE_SECRET",
"NEXMO_API_KEY",
"NEXMO_API_SECRET"
] |
[]
|
["NEXMO_SIGNATURE_METHOD", "NEXMO_SIGNATURE_SECRET", "NEXMO_API_KEY", "NEXMO_API_SECRET"]
|
python
| 4 | 0 | |
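Client.signature above hashes the sorted request parameters (with & and = stripped from values), either appending the signature secret for plain MD5 or keying an HMAC with the configured digest, and check_signature compares the result against the request's sig field. A short usage sketch for an incoming webhook; the credentials shown are placeholders.
import nexmo

client = nexmo.Client(
    key="placeholder-key",
    secret="placeholder-secret",
    signature_secret="placeholder-signature-secret",
    signature_method="sha256",
)

# params as delivered to the webhook endpoint, including the 'sig' field
incoming = {"msisdn": "447700900000", "text": "hello", "timestamp": "1461605396", "sig": "..."}
if client.check_signature(incoming):
    print("signature ok")
else:
    print("signature mismatch; reject the callback")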
hw5/meta/train_policy.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
Adapted for use in CS294-112 Fall 2018 HW5 by Kate Rakelly and Michael Chang
"""
import numpy as np
import pdb
import random
import pickle
import tensorflow as tf
import tensorflow_probability as tfp
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
from replay_buffer import ReplayBuffer, PPOReplayBuffer
from point_mass import PointEnv
from point_mass_observed import ObservedPointEnv
tf.logging.set_verbosity(tf.logging.ERROR)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#============================================================================================#
# Utilities
#============================================================================================#
def minimize_and_clip(optimizer, objective, var_list, clip_val=10):
"""
minimize `objective` using `optimizer` w.r.t. variables in
`var_list`, while ensuring the norm of the gradient for each
variable is clipped to `clip_val`
"""
gradients = optimizer.compute_gradients(objective, var_list=var_list)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, clip_val), var)
return optimizer.apply_gradients(gradients)
def build_mlp(x, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):
"""
builds a feedforward neural network
arguments:
x: placeholder variable for the state (batch_size, input_size)
regularizer: regularization for weights
(see `build_policy()` for rest)
returns:
output placeholder of the network (the result of a forward pass)
"""
i = 0
for i in range(n_layers):
x = tf.layers.dense(inputs=x,units=size, activation=activation, name='fc{}'.format(i), kernel_regularizer=regularizer, bias_regularizer=regularizer)
x = tf.layers.dense(inputs=x, units=output_size, activation=output_activation, name='fc{}'.format(i + 1), kernel_regularizer=regularizer, bias_regularizer=regularizer)
return x
def build_rnn(x, h, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):
"""
builds a gated recurrent neural network
inputs are first embedded by an MLP then passed to a GRU cell
make MLP layers with `size` number of units
make the GRU with `output_size` number of units
use `activation` as the activation function for both MLP and GRU
arguments:
(see `build_policy()`)
hint: use `build_mlp()`
"""
#====================================================================================#
# ----------PROBLEM 2----------
#====================================================================================#
# YOUR CODE HERE
x = build_mlp(x, output_size, scope, n_layers, size, activation, activation, regularizer)
gru = tf.keras.layers.GRU(output_size, activation=activation, return_sequences=False, return_state=True)
x, h = gru(x, h)
return x, h
def build_policy(x, h, output_size, scope, n_layers, size, gru_size, recurrent=True, activation=tf.tanh, output_activation=None):
"""
build recurrent policy
arguments:
x: placeholder variable for the input, which has dimension (batch_size, history, input_size)
h: placeholder variable for the hidden state, which has dimension (batch_size, gru_size)
output_size: size of the output layer, same as action dimension
scope: variable scope of the network
n_layers: number of hidden layers (not counting recurrent units)
size: dimension of the hidden layer in the encoder
gru_size: dimension of the recurrent hidden state if there is one
recurrent: if the network should be recurrent or feedforward
activation: activation of the hidden layers
output_activation: activation of the output layers
returns:
output placeholder of the network (the result of a forward pass)
n.b. we predict both the mean and std of the Gaussian policy, and we don't want the std to start off too large;
initialize the last layer of the policy with a Gaussian init of mean 0 and std 0.01
"""
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if recurrent:
x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=activation)
else:
x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))
x = build_mlp(x, gru_size, scope, n_layers + 1, size, activation=activation, output_activation=activation)
x = tf.layers.dense(x, output_size, activation=output_activation, kernel_initializer=tf.initializers.truncated_normal(mean=0.0, stddev=0.01), bias_initializer=tf.zeros_initializer(), name='decoder')
return x, h
def build_critic(x, h, output_size, scope, n_layers, size, gru_size, recurrent=True, activation=tf.tanh, output_activation=None, regularizer=None):
"""
build recurrent critic
arguments:
regularizer: regularization for weights
(see `build_policy()` for rest)
n.b. the policy and critic should not share weights
"""
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if recurrent:
x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=output_activation, regularizer=regularizer)
else:
x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))
x = build_mlp(x, gru_size, scope, n_layers + 1, size, activation=activation, output_activation=activation, regularizer=regularizer)
x = tf.layers.dense(x, output_size, activation=output_activation, name='decoder', kernel_regularizer=regularizer, bias_regularizer=regularizer)
return x
def pathlength(path):
return len(path["reward"])
def discounted_return(reward, gamma):
discounts = gamma**np.arange(len(reward))
return sum(discounts * reward)
def discount_cumsum(x, discount):
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.task_dim = computation_graph_args['task_dim']
self.reward_dim = 1
self.terminal_dim = 1
self.meta_ob_dim = self.ob_dim + self.ac_dim + self.reward_dim + self.terminal_dim
self.scope = 'continuous_logits'
self.size = computation_graph_args['size']
self.gru_size = computation_graph_args['gru_size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.history = computation_graph_args['history']
self.num_value_iters = computation_graph_args['num_value_iters']
self.l2reg = computation_graph_args['l2reg']
self.recurrent = computation_graph_args['recurrent']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.grain_size = sample_trajectory_args['grain_size']
self.gamma = estimate_return_args['gamma']
self.nn_critic = estimate_return_args['nn_critic']
self.normalize_advantages = estimate_return_args['normalize_advantages']
self.replay_buffer = ReplayBuffer(100000, [self.history, self.meta_ob_dim], [self.ac_dim], self.gru_size, self.task_dim)
self.val_replay_buffer = ReplayBuffer(100000, [self.history, self.meta_ob_dim], [self.ac_dim], self.gru_size, self.task_dim)
def init_tf_sess(self):
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
tf_config.gpu_options.allow_growth = True # may need if using GPU
self.sess = tf.Session(config=tf_config)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
def define_placeholders(self):
"""
placeholders for batch batch observations / actions / advantages in policy gradient
loss function.
see Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for meta-observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
sy_hidden: placeholder for RNN hidden state
(PPO stuff)
sy_lp_n: placeholder for pre-computed log-probs
sy_fixed_lp_n: placeholder for pre-computed old log-probs
"""
sy_ob_no = tf.placeholder(shape=[None, self.history, self.meta_ob_dim], name="ob", dtype=tf.float32)
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
sy_hidden = tf.placeholder(shape=[None, self.gru_size], name="hidden", dtype=tf.float32)
sy_lp_n = tf.placeholder(shape=[None], name="logprob", dtype=tf.float32)
sy_fixed_lp_n = tf.placeholder(shape=[None], name="fixed_logprob", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n, sy_hidden, sy_lp_n, sy_fixed_lp_n
def policy_forward_pass(self, sy_ob_no, sy_hidden):
"""
constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.history, self.meta_ob_dim)
sy_hidden: (batch_size, self.gru_size)
returns:
the parameters of the policy.
the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
"""
# ac_dim * 2 because we predict both mean and std
sy_policy_params, sy_hidden = build_policy(sy_ob_no, sy_hidden, self.ac_dim*2, self.scope, n_layers=self.n_layers, size=self.size, gru_size=self.gru_size, recurrent=self.recurrent)
return (sy_policy_params, sy_hidden)
def sample_action(self, policy_parameters):
"""
constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
(mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
returns:
sy_sampled_ac:
(batch_size, self.ac_dim)
"""
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean), 0, 1)
return sy_sampled_ac
def get_log_prob(self, policy_parameters, sy_ac_na):
"""
constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
(mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_lp_n: (batch_size)
"""
sy_mean, sy_logstd = policy_parameters
sy_lp_n = tfp.distributions.MultivariateNormalDiag(
loc=sy_mean, scale_diag=tf.exp(sy_logstd)).log_prob(sy_ac_na)
return sy_lp_n
def build_computation_graph(self):
"""
notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_lp_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n, self.sy_hidden, self.sy_lp_n, self.sy_fixed_lp_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
policy_outputs = self.policy_forward_pass(self.sy_ob_no, self.sy_hidden)
self.policy_parameters = policy_outputs[:-1]
# unpack mean and variance
self.policy_parameters = tf.split(self.policy_parameters[0], 2, axis=1)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_lp_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
# PPO critic update
critic_regularizer = tf.contrib.layers.l2_regularizer(1e-3) if self.l2reg else None
self.critic_prediction = tf.squeeze(build_critic(self.sy_ob_no, self.sy_hidden, 1, 'critic_network', n_layers=self.n_layers, size=self.size, gru_size=self.gru_size, recurrent=self.recurrent, regularizer=critic_regularizer))
self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
self.critic_weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic_network')
self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)
# PPO actor update
self.sy_fixed_log_prob_n = tf.placeholder(shape=[None], name="fixed_log_prob", dtype=tf.float32)
self.policy_surr_loss = self.ppo_loss(self.sy_lp_n, self.sy_fixed_lp_n, self.sy_adv_n)
self.policy_weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.policy_update_op = minimize_and_clip(optimizer, self.policy_surr_loss, var_list=self.policy_weights, clip_val=40)
def sample_trajectories(self, itr, env, min_timesteps, is_evaluation=False):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
stats = []
while True:
animate_this_episode=(len(stats)==0 and (itr % 10 == 0) and self.animate)
steps, s = self.sample_trajectory(env, animate_this_episode, is_evaluation=is_evaluation)
stats += s
timesteps_this_batch += steps
if timesteps_this_batch > min_timesteps:
break
return stats, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode, is_evaluation):
"""
sample a task, then sample trajectories from that task until either
max(self.history, self.max_path_length) timesteps have been sampled
construct meta-observations by concatenating (s, a, r, d) into one vector
inputs to the policy should have the shape (batch_size, self.history, self.meta_ob_dim)
zero pad the input to maintain a consistent input shape
add the entire input as observation to the replay buffer, along with a, r, d
samples will be drawn from the replay buffer to update the policy
arguments:
env: the env to sample trajectories from
animate_this_episode: if True then render
is_evaluation: whether this is evaluation (True) or training (False)
"""
env.reset_task(is_evaluation=is_evaluation, grain_size=self.grain_size)
stats = []
#====================================================================================#
# ----------PROBLEM 1----------
#====================================================================================#
ep_steps = 0
steps = 0
num_samples = max(self.history, self.max_path_length + 1)
meta_obs = np.zeros((num_samples + self.history + 1, self.meta_ob_dim))
rewards = []
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
if ep_steps == 0:
ob = env.reset()
# first meta ob has only the observation
# set a, r, d to zero, construct first meta observation in meta_obs
# YOUR CODE HERE
meta_obs[self.history + ep_steps - 1, :self.ob_dim] = ob
steps += 1
# index into the meta_obs array to get the window that ends with the current timestep
# please name the windowed observation `in_` for compatibility with the code that adds to the replay buffer (lines 418, 420)
# YOUR CODE HERE
in_ = meta_obs[ep_steps:self.history + ep_steps]
hidden = np.zeros((1, self.gru_size), dtype=np.float32)
# get action from the policy
# YOUR CODE HERE
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no:[in_], self.sy_hidden:hidden})[0]
# step the environment
# YOUR CODE HERE
next_ob, rew, done, _ = env.step(ac)
ep_steps += 1
done = bool(done) or ep_steps == self.max_path_length
# construct the meta-observation and add it to meta_obs
# YOUR CODE HERE
meta_obs[self.history + ep_steps - 1] = np.concatenate((next_ob, ac, [rew], [done]))
rewards.append(rew)
steps += 1
# add sample to replay buffer
if is_evaluation:
self.val_replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)
else:
self.replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)
# start new episode
if done:
# compute stats over trajectory
s = dict()
s['rewards']= rewards[-ep_steps:]
s['ep_len'] = ep_steps
stats.append(s)
ep_steps = 0
if steps >= num_samples:
break
return steps, stats
def compute_advantage(self, ob_no, re_n, hidden, masks, tau=0.95):
"""
computes generalized advantage estimation (GAE).
arguments:
ob_no: (bsize, history, ob_dim)
rewards: (bsize,)
masks: (bsize,)
values: (bsize,)
gamma: scalar
tau: scalar
output:
advantages: (bsize,)
returns: (bsize,)
requires:
self.gamma
"""
bsize = len(re_n)
rewards = np.squeeze(re_n)
masks = np.squeeze(masks)
values = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no, self.sy_hidden: hidden})[:,None]
gamma = self.gamma
assert rewards.shape == masks.shape == (bsize,)
assert values.shape == (bsize, 1)
bsize = len(rewards)
returns = np.empty((bsize,))
deltas = np.empty((bsize,))
advantages = np.empty((bsize,))
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(bsize)):
returns[i] = rewards[i] + gamma * prev_return * masks[i]
deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]
prev_return = returns[i]
prev_value = values[i]
prev_advantage = advantages[i]
advantages = (advantages - np.mean(advantages, axis=0)) / np.std(advantages, axis=0)
return advantages, returns
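# Editorial note on the recursion above: with value estimates V, the loop computes
#   delta_t = r_t + gamma * V_{t+1} * mask_t - V_t
#   A_t     = delta_t + gamma * tau * A_{t+1} * mask_t
#   R_t     = r_t + gamma * R_{t+1} * mask_t
# i.e. standard GAE(gamma, tau) advantages plus discounted returns, with masks zeroing the
# bootstrap across episode boundaries; advantages are then standardized before being returned.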
def estimate_return(self, ob_no, re_n, hidden, masks):
"""
estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, history, meta_obs_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
hidden: hidden state of recurrent policy
masks: terminals masks
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
adv_n, q_n = self.compute_advantage(ob_no, re_n, hidden, masks)
return q_n, adv_n
def update_parameters(self, ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n):
"""
update the parameters of the policy and the critic,
with PPO update
arguments:
ob_no: (minibsize, history, meta_obs_dim)
hidden: shape: (minibsize, self.gru_size)
ac_na: (minibsize)
fixed_log_probs: (minibsize)
adv_n: shape: (minibsize)
q_n: shape: (sum_of_path_lengths)
returns:
nothing
"""
self.update_critic(ob_no, hidden, q_n)
self.update_policy(ob_no, hidden, ac_na, fixed_log_probs, adv_n)
def update_critic(self, ob_no, hidden, q_n):
"""
given:
self.num_value_iters
self.l2_reg
arguments:
ob_no: (minibsize, history, meta_obs_dim)
hidden: (minibsize, self.gru_size)
q_n: (minibsize)
requires:
self.num_value_iters
"""
target_n = (q_n - np.mean(q_n))/(np.std(q_n)+1e-8)
for k in range(self.num_value_iters):
critic_loss, _ = self.sess.run(
[self.critic_loss, self.critic_update_op],
feed_dict={self.sy_target_n: target_n, self.sy_ob_no: ob_no, self.sy_hidden: hidden})
return critic_loss
def update_policy(self, ob_no, hidden, ac_na, fixed_log_probs, advantages):
'''
arguments:
fixed_log_probs: (minibsize)
advantages: (minibsize)
hidden: (minibsize, self.gru_size)
'''
policy_surr_loss, _ = self.sess.run(
[self.policy_surr_loss, self.policy_update_op],
feed_dict={self.sy_ob_no: ob_no, self.sy_hidden: hidden, self.sy_ac_na: ac_na, self.sy_fixed_lp_n: fixed_log_probs, self.sy_adv_n: advantages})
return policy_surr_loss
def ppo_loss(self, log_probs, fixed_log_probs, advantages, clip_epsilon=0.1, entropy_coeff=1e-4):
"""
given:
clip_epsilon
arguments:
advantages (mini_bsize,)
states (mini_bsize,)
actions (mini_bsize,)
fixed_log_probs (mini_bsize,)
intermediate results:
states, actions --> log_probs
log_probs, fixed_log_probs --> ratio
advantages, ratio --> surr1
ratio, clip_epsilon, advantages --> surr2
surr1, surr2 --> policy_surr_loss
"""
ratio = tf.exp(log_probs - fixed_log_probs)
surr1 = ratio * advantages
surr2 = tf.clip_by_value(ratio, clip_value_min=1.0-clip_epsilon, clip_value_max=1.0+clip_epsilon) * advantages
policy_surr_loss = -tf.reduce_mean(tf.minimum(surr1, surr2))
probs = tf.exp(log_probs)
entropy = tf.reduce_sum(-(log_probs * probs))
policy_surr_loss -= entropy_coeff * entropy
return policy_surr_loss
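# Editorial note: the clipping above bounds the incentive to move far from the old policy.
# With clip_epsilon=0.1 and a positive advantage A, a ratio of 1.3 contributes
# min(1.3*A, 1.1*A) = 1.1*A, so the gradient stops growing once the ratio leaves [0.9, 1.1].
# The small entropy bonus (entropy_coeff) is subtracted from the loss to discourage premature collapse.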
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
mini_batch_size,
max_path_length,
learning_rate,
num_ppo_updates,
num_value_iters,
animate,
logdir,
normalize_advantages,
nn_critic,
seed,
n_layers,
size,
gru_size,
history,
num_tasks,
l2reg,
recurrent,
grain_size
):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
envs = {'pm': PointEnv,
'pm-obs': ObservedPointEnv,
}
env = envs[env_name](num_tasks, grain_size=grain_size)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.shape[0]
task_dim = len(env._goal) # rude, sorry
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'task_dim': task_dim,
'size': size,
'gru_size': gru_size,
'learning_rate': learning_rate,
'history': history,
'num_value_iters': num_value_iters,
'l2reg': l2reg,
'recurrent': recurrent,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
'grain_size': grain_size
}
estimate_return_args = {
'gamma': gamma,
'nn_critic': nn_critic,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args)
# build computation graph
agent.build_computation_graph()
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
def unpack_sample(data):
'''
unpack a sample from the replay buffer
'''
ob = data["observations"]
ac = data["actions"]
re = data["rewards"]
hi = data["hiddens"]
ma = 1 - data["terminals"]
return ob, ac, re, hi, ma
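# Note (illustrative): masks are 1 - terminals, e.g. terminals [0, 0, 1] -> masks [1, 1, 0],
# so downstream return/advantage estimation can stop bootstrapping at episode boundaries.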
# construct PPO replay buffer, perhaps rude to do outside the agent
ppo_buffer = PPOReplayBuffer(agent.replay_buffer)
total_timesteps = 0
for itr in range(n_iter):
# for PPO: flush the replay buffer!
ppo_buffer.flush()
# sample trajectories to fill agent's replay buffer
print("********** Iteration %i ************"%itr)
stats = []
for _ in range(num_tasks):
s, timesteps_this_batch = agent.sample_trajectories(itr, env, min_timesteps_per_batch)
total_timesteps += timesteps_this_batch
stats += s
# compute the log probs, advantages, and returns for all data in agent's buffer
# store in ppo buffer for use in multiple ppo updates
# TODO: should move inside the agent probably
data = agent.replay_buffer.all_batch()
ob_no, ac_na, re_n, hidden, masks = unpack_sample(data)
fixed_log_probs = agent.sess.run(agent.sy_lp_n,
feed_dict={agent.sy_ob_no: ob_no, agent.sy_hidden: hidden, agent.sy_ac_na: ac_na})
q_n, adv_n = agent.estimate_return(ob_no, re_n, hidden, masks)
ppo_buffer.add_samples(fixed_log_probs, adv_n, q_n)
# update with mini-batches sampled from ppo buffer
for _ in range(num_ppo_updates):
data = ppo_buffer.random_batch(mini_batch_size)
ob_no, ac_na, re_n, hidden, masks = unpack_sample(data)
fixed_log_probs = data["log_probs"]
adv_n = data["advantages"]
q_n = data["returns"]
log_probs = agent.sess.run(agent.sy_lp_n,
feed_dict={agent.sy_ob_no: ob_no, agent.sy_hidden: hidden, agent.sy_ac_na: ac_na})
agent.update_parameters(ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n)
# compute validation statistics
print('Validating...')
val_stats = []
for _ in range(num_tasks):
vs, timesteps_this_batch = agent.sample_trajectories(itr, env, min_timesteps_per_batch // 10, is_evaluation=True)
val_stats += vs
# save trajectories for viz
#with open("output/{}-epoch{}.pkl".format(exp_name, itr), 'wb') as f:
#pickle.dump(agent.val_replay_buffer.all_batch(), f, pickle.HIGHEST_PROTOCOL)
#agent.val_replay_buffer.flush()
# Log TRAIN diagnostics
returns = [sum(s["rewards"]) for s in stats]
final_rewards = [s["rewards"][-1] for s in stats]
ep_lengths = [s['ep_len'] for s in stats]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("FinalReward", np.mean(final_rewards))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
# Log VAL diagnostics
val_returns = [sum(s["rewards"]) for s in val_stats]
val_final_rewards = [s["rewards"][-1] for s in val_stats]
logz.log_tabular("ValAverageReturn", np.mean(val_returns))
logz.log_tabular("ValFinalReward", np.mean(val_final_rewards))
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='exp')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=0.99)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-pb', type=int, default=10000)
parser.add_argument('--mini_batch_size', '-mpb', type=int, default=64)
parser.add_argument('--num_tasks', '-nt', type=int, default=1)
parser.add_argument('--ep_len', '-ep', type=int, default=20)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-4)
parser.add_argument('--num_value_iters', '-nvu', type=int, default=1)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_critic', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=64)
parser.add_argument('--gru_size', '-rs', type=int, default=32)
parser.add_argument('--history', '-ho', type=int, default=1)
parser.add_argument('--l2reg', '-reg', action='store_true')
parser.add_argument('--recurrent', '-rec', action='store_true')
parser.add_argument('--grain_size', '-gs', type=int, default=1)
parser.add_argument('--gpu', type=int, default=0)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.env_name + '_' + args.exp_name# + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size // args.num_tasks,
mini_batch_size=args.mini_batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_ppo_updates=(args.batch_size // args.mini_batch_size) * 5,
num_value_iters=args.num_value_iters,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_critic=args.nn_critic,
seed=seed,
n_layers=args.n_layers,
size=args.size,
gru_size=args.gru_size,
history=args.history,
num_tasks=args.num_tasks,
l2reg=args.l2reg,
recurrent=args.recurrent,
grain_size=args.grain_size
)
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
# if you comment in the line below, then the loop will block
# until this process finishes
# p.join()
for p in processes:
p.join()
if __name__ == "__main__":
main()
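# Illustrative invocation (script name hypothetical; flags defined in main() above):
#   python train_policy.py pm --exp_name test -n 100 --num_tasks 4 --recurrent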
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"]
|
python
| 2 | 0 | |
cli/cmd/send_event_approvalFinished.go
|
package cmd
import (
"bufio"
"encoding/json"
"errors"
"fmt"
keptnv2 "github.com/keptn/go-utils/pkg/lib/v0_2_0"
"github.com/keptn/keptn/cli/pkg/common"
"net/url"
"os"
"strconv"
"strings"
"text/tabwriter"
cloudevents "github.com/cloudevents/sdk-go/v2"
"github.com/google/uuid"
apimodels "github.com/keptn/go-utils/pkg/api/models"
apiutils "github.com/keptn/go-utils/pkg/api/utils"
"github.com/keptn/keptn/cli/pkg/credentialmanager"
"github.com/keptn/keptn/cli/pkg/logging"
"github.com/mitchellh/mapstructure"
"github.com/spf13/cobra"
)
type sendApprovalFinishedStruct struct {
Project *string `json:"project"`
Stage *string `json:"stage"`
Service *string `json:"service"`
ID *string `json:"id"`
Labels *map[string]string `json:"labels"`
}
var sendApprovalFinishedOptions sendApprovalFinishedStruct
var approvalFinishedCmd = &cobra.Command{
Use: "approval.finished",
Short: "Sends an approval.finished event to Keptn in order to confirm an open approval " +
"with the specified ID in the provided project and stage",
Long: `Sends an approval.finished event to Keptn in order to confirm an open approval with the specified ID in the provided project and stage.
* This command takes the project (*--project*) and stage (*--stage*).
* It is optional to specify the ID (*--id*) of the corresponding approval.triggered event. If the ID is not provided, the command asks the user which open approval should be accepted or declined.
* The open approval.triggered events and their ID can be retrieved using the "keptn get event approval.triggered --project=<project> --stage=<stage>" command.
`,
Example: `keptn send event approval.finished --project=sockshop --stage=hardening --id=1234-5678-9123`,
RunE: func(cmd *cobra.Command, args []string) error {
if err := deSendEventApprovalFinishedPreRunCheck(); err != nil {
return err
}
return sendApprovalFinishedEvent(sendApprovalFinishedOptions)
},
SilenceUsage: true,
}
func deSendEventApprovalFinishedPreRunCheck() error {
if *sendApprovalFinishedOptions.ID == "" && *sendApprovalFinishedOptions.Service == "" {
logging.PrintLog("Either ID or service must be provided", logging.InfoLevel)
return errors.New("either ID or service must be provided")
} else if *sendApprovalFinishedOptions.ID != "" && *sendApprovalFinishedOptions.Service != "" {
logging.PrintLog("Either ID or service must be provided", logging.InfoLevel)
return errors.New("either ID or service must be provided")
}
return nil
}
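// Illustrative flag combinations for the pre-run check above (service name hypothetical):
//   keptn send event approval.finished --project=sockshop --stage=hardening --id=1234-5678-9123   // valid: ID only
//   keptn send event approval.finished --project=sockshop --stage=hardening --service=carts       // valid: service only
// Passing both --id and --service, or neither, is rejected by deSendEventApprovalFinishedPreRunCheck.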
func sendApprovalFinishedEvent(sendApprovalFinishedOptions sendApprovalFinishedStruct) error {
var endPoint url.URL
var apiToken string
var err error
if !mocking {
endPoint, apiToken, err = credentialmanager.NewCredentialManager(false).GetCreds(namespace)
} else {
endPointPtr, _ := url.Parse(os.Getenv("MOCK_SERVER"))
endPoint = *endPointPtr
apiToken = ""
}
if err != nil {
return errors.New(authErrorMsg)
}
logging.PrintLog("Starting to send approval.finished event", logging.InfoLevel)
if endPointErr := checkEndPointStatus(endPoint.String()); endPointErr != nil {
return fmt.Errorf("Error connecting to server: %s"+endPointErrorReasons,
endPointErr)
}
apiHandler := apiutils.NewAuthenticatedAPIHandler(endPoint.String(), apiToken, "x-token", nil, endPoint.Scheme)
eventHandler := apiutils.NewAuthenticatedEventHandler(endPoint.String(), apiToken, "x-token", nil, endPoint.Scheme)
logging.PrintLog(fmt.Sprintf("Connecting to server %s", endPoint.String()), logging.VerboseLevel)
var keptnContext string
var triggeredID string
var approvalFinishedEvent *keptnv2.ApprovalFinishedEventData
if *sendApprovalFinishedOptions.ID != "" {
keptnContext, triggeredID, approvalFinishedEvent, err = getApprovalFinishedForID(eventHandler, sendApprovalFinishedOptions)
} else if *sendApprovalFinishedOptions.Service != "" {
scHandler := apiutils.NewAuthenticatedShipyardControllerHandler(endPoint.String(), apiToken, "x-token", nil, endPoint.Scheme)
keptnContext, triggeredID, approvalFinishedEvent, err = getApprovalFinishedForService(eventHandler,
scHandler, sendApprovalFinishedOptions)
}
if err != nil {
return err
}
if approvalFinishedEvent == nil {
return nil
}
startedEvent := getApprovalStartedEvent(approvalFinishedEvent.EventData)
if _, err := sendEvent(keptnContext, triggeredID, keptnv2.GetStartedEventType(keptnv2.ApprovalTaskName), startedEvent, apiHandler); err != nil {
return err
}
responseEvent, err := sendEvent(keptnContext, triggeredID, keptnv2.GetFinishedEventType(keptnv2.ApprovalTaskName), approvalFinishedEvent, apiHandler)
if err != nil {
return err
}
if responseEvent == nil {
logging.PrintLog("No event returned", logging.QuietLevel)
return nil
}
return nil
}
func sendEvent(keptnContext, triggeredID, eventType string, approvalFinishedEvent interface{}, apiHandler *apiutils.APIHandler) (*apimodels.EventContext, error) {
ID := uuid.New().String()
source, _ := url.Parse("https://github.com/keptn/keptn/cli#" + eventType)
sdkEvent := cloudevents.NewEvent()
sdkEvent.SetID(ID)
sdkEvent.SetType(eventType)
sdkEvent.SetSource(source.String())
sdkEvent.SetDataContentType(cloudevents.ApplicationJSON)
sdkEvent.SetExtension("shkeptncontext", keptnContext)
sdkEvent.SetExtension("triggeredid", triggeredID)
sdkEvent.SetData(cloudevents.ApplicationJSON, approvalFinishedEvent)
eventByte, err := json.Marshal(sdkEvent)
if err != nil {
return nil, fmt.Errorf("Failed to marshal cloud event. %s", err.Error())
}
apiEvent := apimodels.KeptnContextExtendedCE{}
err = json.Unmarshal(eventByte, &apiEvent)
if err != nil {
return nil, fmt.Errorf("Failed to map cloud event to API event model. %s", err.Error())
}
responseEvent, errorObj := apiHandler.SendEvent(apiEvent)
if errorObj != nil {
logging.PrintLog("Send "+eventType+" was unsuccessful", logging.QuietLevel)
return nil, fmt.Errorf("Send %s was unsuccessful. %s", eventType, *errorObj.Message)
}
return responseEvent, nil
}
func getApprovalStartedEvent(inputEvent keptnv2.EventData) *keptnv2.ApprovalStartedEventData {
startedEvent := &keptnv2.ApprovalStartedEventData{
EventData: keptnv2.EventData{
Project: inputEvent.Project,
Stage: inputEvent.Stage,
Service: inputEvent.Service,
Labels: inputEvent.Labels,
Status: inputEvent.Status,
},
}
return startedEvent
}
func getApprovalFinishedForService(eventHandler *apiutils.EventHandler, scHandler *apiutils.ShipyardControllerHandler,
approvalFinishedOptions sendApprovalFinishedStruct) (string, string, *keptnv2.ApprovalFinishedEventData, error) {
allEvents, err := scHandler.GetOpenTriggeredEvents(apiutils.EventFilter{
Project: *approvalFinishedOptions.Project,
Stage: *approvalFinishedOptions.Stage,
Service: *approvalFinishedOptions.Service,
EventType: keptnv2.GetTriggeredEventType(keptnv2.ApprovalTaskName),
})
if err != nil {
logging.PrintLog("Open approval.triggered event for service "+*approvalFinishedOptions.Service+" could not be retrieved: "+err.Error(), logging.InfoLevel)
return "", "", nil, err
}
if len(allEvents) == 0 {
logging.PrintLog("No open approval.triggered event for service "+*approvalFinishedOptions.Service+" has been found", logging.InfoLevel)
return "", "", nil, nil
}
// print all available options
printApprovalOptions(allEvents, eventHandler, approvalFinishedOptions)
// select option
nrOfOptions := len(allEvents)
selectedOption, err := selectApprovalOption(nrOfOptions)
if err != nil {
return "", "", nil, err
}
index := selectedOption - 1
eventToBeApproved := allEvents[index]
// approve or decline?
approve := approveOrDecline()
approvalTriggeredEvent := &keptnv2.ApprovalTriggeredEventData{}
err = mapstructure.Decode(eventToBeApproved.Data, approvalTriggeredEvent)
if err != nil {
logging.PrintLog("Cannot decode approval.triggered event: "+err.Error(), logging.InfoLevel)
return "", "", nil, err
}
var approvalResult keptnv2.ResultType
if approve {
approvalResult = keptnv2.ResultPass
} else {
approvalResult = keptnv2.ResultFailed
}
approvalFinishedEvent := &keptnv2.ApprovalFinishedEventData{
EventData: keptnv2.EventData{
Project: approvalTriggeredEvent.Project,
Stage: approvalTriggeredEvent.Stage,
Service: approvalTriggeredEvent.Service,
Labels: approvalTriggeredEvent.Labels,
Status: keptnv2.StatusSucceeded,
Result: approvalResult,
Message: "",
},
}
return eventToBeApproved.Shkeptncontext, eventToBeApproved.ID, approvalFinishedEvent, nil
}
func approveOrDecline() bool {
var approve bool
keepAsking := true
for keepAsking {
logging.PrintLog("Do you want to (a)pprove or (d)ecline: ", logging.InfoLevel)
reader := bufio.NewReader(os.Stdin)
in, err := reader.ReadString('\n')
if err != nil {
logging.PrintLog("Invalid option. Please enter either 'a' to approve, or 'd' to decline", logging.InfoLevel)
}
in = strings.TrimSpace(in)
if in != "a" && in != "d" {
logging.PrintLog("Invalid option. Please enter either 'a' to approve, or 'd' to decline", logging.InfoLevel)
} else {
keepAsking = false
}
if in == "a" {
approve = true
} else if in == "d" {
approve = false
}
}
return approve
}
func selectApprovalOption(nrOfOptions int) (int, error) {
var selectedOption int
keepAsking := true
for keepAsking {
logging.PrintLog("Select the option to approve or decline: ", logging.InfoLevel)
reader := bufio.NewReader(os.Stdin)
in, err := reader.ReadString('\n')
if err != nil {
logging.PrintLog(fmt.Sprintf("Invalid option. Please enter a value between 1 and %d", nrOfOptions), logging.InfoLevel)
}
in = strings.TrimSpace(in)
selectedOption, err = strconv.Atoi(in)
if err != nil || selectedOption < 1 || selectedOption > nrOfOptions {
logging.PrintLog(fmt.Sprintf("Invalid option. Please enter a value between 1 and %d", nrOfOptions), logging.InfoLevel)
} else {
keepAsking = false
}
}
return selectedOption, nil
}
func printApprovalOptions(approvals []*apimodels.KeptnContextExtendedCE, eventHandler *apiutils.EventHandler, approvalFinishedOptions sendApprovalFinishedStruct) {
// initialize tabwriter
w := new(tabwriter.Writer)
// minwidth, tabwidth, padding, padchar, flags
w.Init(os.Stdout, 8, 8, 2, '\t', 0)
defer w.Flush()
fmt.Fprintf(w, "\n %s\t%s\t%s\t", "OPTION", "IMAGE", "EVALUATION")
for index, approval := range approvals {
score := getScoreForApprovalTriggeredEvent(eventHandler, approvalFinishedOptions, approval)
commitID := getApprovalImageEvent(approval)
appendOptionToWriter(w, index, commitID, score)
}
fmt.Fprintf(w, "\n")
}
func appendOptionToWriter(w *tabwriter.Writer, index int, commitID, score string) {
fmt.Fprintf(w, "\n (%d)\t%s\t%s\t", index+1, commitID, score)
}
func getScoreForApprovalTriggeredEvent(eventHandler *apiutils.EventHandler, approvalFinishedOptions sendApprovalFinishedStruct, approval *apimodels.KeptnContextExtendedCE) string {
score := "n/a"
evaluationDoneEvents, errorObj := eventHandler.GetEvents(&apiutils.EventFilter{
Project: *approvalFinishedOptions.Project,
Stage: *approvalFinishedOptions.Stage,
Service: *approvalFinishedOptions.Service,
EventType: keptnv2.GetFinishedEventType(keptnv2.EvaluationTaskName),
KeptnContext: approval.Shkeptncontext,
})
if errorObj != nil {
return score
}
if len(evaluationDoneEvents) == 0 {
return score
}
evaluationDoneData := &keptnv2.EvaluationFinishedEventData{}
err := mapstructure.Decode(evaluationDoneEvents[0].Data, evaluationDoneData)
if err != nil {
return score
}
score = fmt.Sprintf("%f", evaluationDoneData.Evaluation.Score)
return score
}
func getApprovalImageEvent(approval *apimodels.KeptnContextExtendedCE) string {
unknownImage := "n/a"
// the approval.triggered event should also include the configurationChange property (see https://github.com/keptn/keptn/issues/3199)
// therefore, we can cast its data property to a DeploymentTriggeredEventData struct and use the property from this struct
deploymentTriggeredData := &keptnv2.DeploymentTriggeredEventData{}
err := common.DecodeKeptnEventData(deploymentTriggeredData, approval.Data)
if err != nil {
return unknownImage
}
if deploymentTriggeredData.ConfigurationChange.Values != nil {
if image, ok := deploymentTriggeredData.ConfigurationChange.Values["image"].(string); ok {
return image
}
}
return unknownImage
}
func getApprovalFinishedForID(eventHandler *apiutils.EventHandler, sendApprovalFinishedOptions sendApprovalFinishedStruct) (string,
string, *keptnv2.ApprovalFinishedEventData, error) {
events, errorObj := eventHandler.GetEvents(&apiutils.EventFilter{
Project: *sendApprovalFinishedOptions.Project,
Stage: *sendApprovalFinishedOptions.Stage,
EventType: keptnv2.GetTriggeredEventType(keptnv2.ApprovalTaskName),
EventID: *sendApprovalFinishedOptions.ID,
})
if errorObj != nil {
logging.PrintLog("Cannot retrieve approval.triggered event with ID "+*sendApprovalFinishedOptions.ID+": "+*errorObj.Message, logging.InfoLevel)
return "", "", nil, errors.New(*errorObj.Message)
}
if len(events) == 0 {
logging.PrintLog("No open approval.triggered event with the ID "+*sendApprovalFinishedOptions.ID+" has been found", logging.InfoLevel)
return "", "", nil, nil
}
approvalTriggeredEvent := &keptnv2.ApprovalTriggeredEventData{}
if err := common.DecodeKeptnEventData(events[0].Data, approvalTriggeredEvent); err != nil {
logging.PrintLog("Cannot decode approval.triggered event: "+err.Error(), logging.InfoLevel)
return "", "", nil, err
}
approvalFinishedEvent := &keptnv2.ApprovalFinishedEventData{
EventData: keptnv2.EventData{
Project: approvalTriggeredEvent.Project,
Stage: approvalTriggeredEvent.Stage,
Service: approvalTriggeredEvent.Service,
Labels: approvalTriggeredEvent.Labels,
Status: keptnv2.StatusSucceeded,
Result: keptnv2.ResultPass,
Message: "",
},
}
return events[0].Shkeptncontext, events[0].ID, approvalFinishedEvent, nil
}
func init() {
sendEventCmd.AddCommand(approvalFinishedCmd)
sendApprovalFinishedOptions.Project = approvalFinishedCmd.Flags().StringP("project", "", "",
"The project containing the service to be approved")
approvalFinishedCmd.MarkFlagRequired("project")
sendApprovalFinishedOptions.Stage = approvalFinishedCmd.Flags().StringP("stage", "", "",
"The stage containing the service to be approved")
approvalFinishedCmd.MarkFlagRequired("stage")
sendApprovalFinishedOptions.Service = approvalFinishedCmd.Flags().StringP("service", "", "",
"The service to be approved")
sendApprovalFinishedOptions.ID = approvalFinishedCmd.Flags().StringP("id", "", "",
"The ID of the approval.triggered event to be approved")
// approvalFinishedCmd.MarkFlagRequired("id")
sendApprovalFinishedOptions.Labels = approvalFinishedCmd.Flags().StringToStringP("labels", "l", nil, "Additional labels to be provided for the service that is to be approved")
}
|
[
"\"MOCK_SERVER\""
] |
[] |
[
"MOCK_SERVER"
] |
[]
|
["MOCK_SERVER"]
|
go
| 1 | 0 | |
src/core/toga/__init__.py
|
import importlib
import os
import sys
from .constants import *
# Work around import loop issues (toga -> platform -> toga.interface): import
# all these things before we import the platform stuff
import toga.interface.app # NOQA
__all__ = [
'__version__',
'platform'
]
# Examples of valid version strings
# __version__ = '1.2.3.dev1' # Development release 1
# __version__ = '1.2.3a1' # Alpha Release 1
# __version__ = '1.2.3b1' # Beta Release 1
# __version__ = '1.2.3rc1' # RC Release 1
# __version__ = '1.2.3' # Final Release
# __version__ = '1.2.3.post1' # Post Release 1
__version__ = '0.2.4'
platform = None
def set_platform(module_name=None, local_vars=locals()):
"Configures toga to use the specfied platform module"
# Note - locals is deliberately passed in as an argument; because it is
# a dictionary, this results in the module level locals dictionary being
# bound as a local variable in this method -- and a persistent one,
# because it was evaluated and bound at time of method import.
# First check for an environment variable setting the platform
if module_name is None:
platform_name = os.environ.get('TOGA_PLATFORM')
# If we don't have a manually defined platform, attempt to
# autodetect and set the platform
if platform_name is None:
if sys.platform == 'ios':
platform_name = 'iOS'
elif sys.platform == 'tvos':
platform_name = 'tvOS'
elif sys.platform == 'watchos':
platform_name = 'watchOS'
elif sys.platform == 'android':
platform_name = 'android'
elif sys.platform == 'darwin':
platform_name = 'cocoa'
elif sys.platform in ('linux', 'linux2'):
platform_name = 'gtk'
elif sys.platform == 'win32':
platform_name = 'win32'
else:
raise RuntimeError("Couldn't identify a supported host platform.")
module_name = 'toga_' + platform_name
# # Purge any existing platform symbols in the toga module
# if local_vars['platform']:
# for symbol in local_vars['platform'].__all__:
# # Exclude __version__ from the list of symbols that is
# # ported, because toga itself has a __version__ identifier.
# if symbol != '__version__':
# local_vars.pop(symbol)
# Import the new platform module
try:
# Set the new platform module into the module namespace
local_vars['platform'] = importlib.import_module(module_name)
# Export all the symbols *except* for __version__ from the platform module
# The platform has its own version identifier.
for symbol in local_vars['platform'].__all__:
if symbol != '__version__':
local_vars[symbol] = getattr(platform, symbol)
except ImportError as e:
if e.name == module_name:
local_vars['platform'] = None
print("Couldn't import %s platform module; try running 'pip install %s'." % (module_name[5:], module_name))
sys.exit(-1)
else:
raise
# On first import, do an autodetection of platform.
set_platform()
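# Illustrative usage (backend names as constructed above, 'toga_' + platform name):
#   toga.set_platform('toga_cocoa')
# or export TOGA_PLATFORM (e.g. TOGA_PLATFORM=gtk) before toga is first imported.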
|
[] |
[] |
[
"TOGA_PLATFORM"
] |
[]
|
["TOGA_PLATFORM"]
|
python
| 1 | 0 | |
cachet_url_monitor/configuration.py
|
#!/usr/bin/env python
import abc
import copy
import logging
import os
import re
import time
import requests
from yaml import dump
from yaml import load
import latency_unit
import status as st
# These are the mandatory fields that must be in the configuration file, in this
# exact structure.
configuration_mandatory_fields = {
'endpoint': ['url', 'method', 'timeout', 'expectation'],
'cachet': ['api_url', 'token', 'component_id'],
'frequency': []}
class ConfigurationValidationError(Exception):
"""Exception raised when there's a validation error."""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class ComponentNonexistentError(Exception):
"""Exception raised when the component does not exist."""
def __init__(self, component_id):
self.component_id = component_id
def __str__(self):
return repr('Component with id [%d] does not exist.' % (self.component_id,))
class MetricNonexistentError(Exception):
"""Exception raised when the component does not exist."""
def __init__(self, metric_id):
self.metric_id = metric_id
def __str__(self):
return repr('Metric with id [%d] does not exist.' % (self.metric_id,))
def get_current_status(endpoint_url, component_id, headers):
"""Retrieves the current status of the component that is being monitored. It will fail if the component does
not exist or doesn't respond with the expected data.
:return component status.
"""
get_status_request = requests.get('%s/components/%s' % (endpoint_url, component_id), headers=headers)
if get_status_request.ok:
# The component exists.
return get_status_request.json()['data']['status']
else:
raise ComponentNonexistentError(component_id)
def normalize_url(url):
"""If passed url doesn't include schema return it with default one - http."""
if not url.lower().startswith('http'):
return 'http://%s' % url
return url
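# Illustrative: normalize_url('example.com') -> 'http://example.com';
# values already starting with http:// or https:// are returned unchanged.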
class Configuration(object):
"""Represents a configuration file, but it also includes the functionality
of assessing the API and pushing the results to cachet.
"""
def __init__(self, config_file):
self.logger = logging.getLogger('cachet_url_monitor.configuration.Configuration')
self.config_file = config_file
self.data = load(file(self.config_file, 'r'))
# Exposing the configuration to confirm it's parsed as expected.
self.print_out()
# We need to validate the configuration is correct and then validate the component actually exists.
self.validate()
# We store the main information from the configuration file, so we don't keep reading from the data dictionary.
self.headers = {'X-Cachet-Token': os.environ.get('CACHET_TOKEN') or self.data['cachet']['token']}
self.endpoint_method = os.environ.get('ENDPOINT_METHOD') or self.data['endpoint']['method']
self.endpoint_url = os.environ.get('ENDPOINT_URL') or self.data['endpoint']['url']
self.endpoint_url = normalize_url(self.endpoint_url)
self.endpoint_timeout = os.environ.get('ENDPOINT_TIMEOUT') or self.data['endpoint'].get('timeout') or 1
self.api_url = os.environ.get('CACHET_API_URL') or self.data['cachet']['api_url']
self.component_id = os.environ.get('CACHET_COMPONENT_ID') or self.data['cachet']['component_id']
self.metric_id = os.environ.get('CACHET_METRIC_ID') or self.data['cachet'].get('metric_id')
if self.metric_id is not None:
self.default_metric_value = self.get_default_metric_value(self.metric_id)
# The latency_unit configuration is not mandatory; we fall back to seconds by default.
self.latency_unit = os.environ.get('LATENCY_UNIT') or self.data['cachet'].get('latency_unit') or 's'
# We need the current status so we monitor the status changes. This is necessary for creating incidents.
self.status = get_current_status(self.api_url, self.component_id, self.headers)
# Get remaining settings
self.public_incidents = int(
os.environ.get('CACHET_PUBLIC_INCIDENTS') or self.data['cachet']['public_incidents'])
self.logger.info('Monitoring URL: %s %s' % (self.endpoint_method, self.endpoint_url))
self.expectations = [Expectaction.create(expectation) for expectation in self.data['endpoint']['expectation']]
for expectation in self.expectations:
self.logger.info('Registered expectation: %s' % (expectation,))
def get_default_metric_value(self, metric_id):
"""Returns default value for configured metric."""
get_metric_request = requests.get('%s/metrics/%s' % (self.api_url, metric_id), headers=self.headers)
if get_metric_request.ok:
return get_metric_request.json()['data']['default_value']
else:
raise MetricNonexistentError(metric_id)
def get_action(self):
"""Retrieves the action list from the configuration. If it's empty, returns an empty list.
:return: The list of actions, which can be an empty list.
"""
if self.data['cachet'].get('action') is None:
return []
else:
return self.data['cachet']['action']
def validate(self):
"""Validates the configuration by verifying the mandatory fields are
present and in the correct format. If the validation fails, a
ConfigurationValidationError is raised. Otherwise nothing will happen.
"""
configuration_errors = []
for key, sub_entries in configuration_mandatory_fields.iteritems():
if key not in self.data:
configuration_errors.append(key)
for sub_key in sub_entries:
if sub_key not in self.data[key]:
configuration_errors.append('%s.%s' % (key, sub_key))
if ('endpoint' in self.data and 'expectation' in
self.data['endpoint']):
if (not isinstance(self.data['endpoint']['expectation'], list) or
(isinstance(self.data['endpoint']['expectation'], list) and
len(self.data['endpoint']['expectation']) == 0)):
configuration_errors.append('endpoint.expectation')
if len(configuration_errors) > 0:
raise ConfigurationValidationError(
'Config file [%s] failed validation. Missing keys: %s' % (self.config_file,
', '.join(configuration_errors)))
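# Minimal configuration sketch (illustrative values; keys follow configuration_mandatory_fields
# plus the cachet.public_incidents field read in __init__):
#   endpoint:
#     url: http://www.example.com
#     method: GET
#     timeout: 1
#     expectation:
#       - type: HTTP_STATUS
#         status_range: 200-300
#   cachet:
#     api_url: https://demo.cachethq.io/api/v1
#     token: my_token
#     component_id: 1
#     public_incidents: 1
#   frequency: 30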
def evaluate(self):
"""Sends the request to the URL set in the configuration and executes
each one of the expectations, one by one. The status will be updated
according to the expectation results.
"""
try:
self.request = requests.request(self.endpoint_method, self.endpoint_url, timeout=self.endpoint_timeout)
self.current_timestamp = int(time.time())
except requests.ConnectionError:
self.message = 'The URL is unreachable: %s %s' % (self.endpoint_method, self.endpoint_url)
self.logger.warning(self.message)
self.status = st.COMPONENT_STATUS_PARTIAL_OUTAGE
return
except requests.HTTPError:
self.message = 'Unexpected HTTP response'
self.logger.exception(self.message)
self.status = st.COMPONENT_STATUS_PARTIAL_OUTAGE
return
except requests.Timeout:
self.message = 'Request timed out'
self.logger.warning(self.message)
self.status = st.COMPONENT_STATUS_PERFORMANCE_ISSUES
return
# We initially assume the API is healthy.
self.status = st.COMPONENT_STATUS_OPERATIONAL
self.message = ''
for expectation in self.expectations:
status = expectation.get_status(self.request)
# The greater the status is, the worse the state of the API is.
if status > self.status:
self.status = status
self.message = expectation.get_message(self.request)
self.logger.info(self.message)
def print_out(self):
self.logger.info('Current configuration:\n%s' % (self.__repr__()))
def __repr__(self):
temporary_data = copy.deepcopy(self.data)
# Removing the token so we don't leak it in the logs.
del temporary_data['cachet']['token']
return dump(temporary_data, default_flow_style=False)
def push_status(self):
"""Pushes the status of the component to the cachet server. It will update the component
status based on the previous call to evaluate().
"""
params = {'id': self.component_id, 'status': self.status}
component_request = requests.put('%s/components/%d' % (self.api_url, self.component_id), params=params,
headers=self.headers)
if component_request.ok:
# Successful update
self.logger.info('Component update: status [%d]' % (self.status,))
else:
# Failed to update the API status
self.logger.warning('Component update failed with status [%d]: API'
' status: [%d]' % (component_request.status_code, self.status))
def push_metrics(self):
"""Pushes the total amount of seconds the request took to get a response from the URL.
It only will send a request if the metric id was set in the configuration.
In case of failed connection trial pushes the default metric value.
"""
if 'metric_id' in self.data['cachet'] and hasattr(self, 'request'):
# We convert the elapsed time from the request, in seconds, to the configured unit.
value = self.default_metric_value if self.status != 1 else latency_unit.convert_to_unit(self.latency_unit,
self.request.elapsed.total_seconds())
params = {'id': self.metric_id, 'value': value,
'timestamp': self.current_timestamp}
metrics_request = requests.post('%s/metrics/%d/points' % (self.api_url, self.metric_id), params=params,
headers=self.headers)
if metrics_request.ok:
# Successful metrics upload
self.logger.info('Metric uploaded: %.6f seconds' % (value,))
else:
self.logger.warning('Metric upload failed with status [%d]' %
(metrics_request.status_code,))
def push_incident(self):
"""If the component status has changed, we create a new incident (if this is the first time it becomes unstable)
or update the existing incident once it becomes healthy again.
"""
if hasattr(self, 'incident_id') and self.status == st.COMPONENT_STATUS_OPERATIONAL:
# If the incident already exists, it means it was unhealthy but now it's healthy again.
params = {'status': 4, 'visible': self.public_incidents, 'component_id': self.component_id,
'component_status': self.status,
'notify': True}
incident_request = requests.put('%s/incidents/%d' % (self.api_url, self.incident_id), params=params,
headers=self.headers)
if incident_request.ok:
# Successful metrics upload
self.logger.info(
'Incident updated, API healthy again: component status [%d], message: "%s"' % (
self.status, self.message))
del self.incident_id
else:
self.logger.warning('Incident update failed with status [%d], message: "%s"' % (
incident_request.status_code, self.message))
elif not hasattr(self, 'incident_id') and self.status != st.COMPONENT_STATUS_OPERATIONAL:
# This is the first time the incident is being created.
params = {'name': 'URL unavailable', 'message': self.message, 'status': 1, 'visible': self.public_incidents,
'component_id': self.component_id, 'component_status': self.status, 'notify': True}
incident_request = requests.post('%s/incidents' % (self.api_url,), params=params, headers=self.headers)
if incident_request.ok:
# Successful incident upload.
self.incident_id = incident_request.json()['data']['id']
self.logger.info(
'Incident uploaded, API unhealthy: component status [%d], message: "%s"' % (
self.status, self.message))
else:
self.logger.warning(
'Incident upload failed with status [%d], message: "%s"' % (
incident_request.status_code, self.message))
class Expectaction(object):
"""Base class for URL result expectations. Any new excpectation should extend
this class and the name added to create() method.
"""
@staticmethod
def create(configuration):
"""Creates a list of expectations based on the configuration types
list.
"""
expectations = {
'HTTP_STATUS': HttpStatus,
'LATENCY': Latency,
'REGEX': Regex
}
return expectations.get(configuration['type'])(configuration)
@abc.abstractmethod
def get_status(self, response):
"""Returns the status of the API, following cachet's component status
documentation: https://docs.cachethq.io/docs/component-statuses
"""
@abc.abstractmethod
def get_message(self, response):
"""Gets the error message."""
class HttpStatus(Expectaction):
def __init__(self, configuration):
self.status_range = HttpStatus.parse_range(configuration['status_range'])
@staticmethod
def parse_range(range_string):
statuses = range_string.split("-")
if len(statuses) == 1:
# When there was no range given, we should treat the first number as a single status check.
return (int(statuses[0]), int(statuses[0]) + 1)
else:
# We shouldn't look into more than one value, as this is a range value.
return (int(statuses[0]), int(statuses[1]))
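# Illustrative: parse_range('200-300') -> (200, 300); parse_range('200') -> (200, 201).
# The upper bound is exclusive, as used by get_status below.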
def get_status(self, response):
if response.status_code >= self.status_range[0] and response.status_code < self.status_range[1]:
return st.COMPONENT_STATUS_OPERATIONAL
else:
return st.COMPONENT_STATUS_PARTIAL_OUTAGE
def get_message(self, response):
return 'Unexpected HTTP status (%s)' % (response.status_code,)
def __str__(self):
return repr('HTTP status range: %s' % (self.status_range,))
class Latency(Expectaction):
def __init__(self, configuration):
self.threshold = configuration['threshold']
def get_status(self, response):
if response.elapsed.total_seconds() <= self.threshold:
return st.COMPONENT_STATUS_OPERATIONAL
else:
return st.COMPONENT_STATUS_PERFORMANCE_ISSUES
def get_message(self, response):
return 'Latency above threshold: %.4f seconds' % (response.elapsed.total_seconds(),)
def __str__(self):
return repr('Latency threshold: %.4f seconds' % (self.threshold,))
class Regex(Expectaction):
def __init__(self, configuration):
self.regex_string = configuration['regex']
self.regex = re.compile(configuration['regex'], re.UNICODE + re.DOTALL)
def get_status(self, response):
if self.regex.match(response.text):
return st.COMPONENT_STATUS_OPERATIONAL
else:
return st.COMPONENT_STATUS_PARTIAL_OUTAGE
def get_message(self, response):
return 'Regex did not match anything in the body'
def __str__(self):
return repr('Regex: %s' % (self.regex_string,))
|
[] |
[] |
[
"ENDPOINT_URL",
"CACHET_PUBLIC_INCIDENTS",
"ENDPOINT_TIMEOUT",
"CACHET_API_URL",
"ENDPOINT_METHOD",
"LATENCY_UNIT",
"CACHET_COMPONENT_ID",
"CACHET_METRIC_ID",
"CACHET_TOKEN"
] |
[]
|
["ENDPOINT_URL", "CACHET_PUBLIC_INCIDENTS", "ENDPOINT_TIMEOUT", "CACHET_API_URL", "ENDPOINT_METHOD", "LATENCY_UNIT", "CACHET_COMPONENT_ID", "CACHET_METRIC_ID", "CACHET_TOKEN"]
|
python
| 9 | 0 | |
yac8e.py
|
"""
Copyright (C) 2012-2019 Craig Thomas
This project uses an MIT style license - see LICENSE for details.
A simple Chip 8 emulator - see the README file for more information.
"""
# I M P O R T S ###############################################################
import argparse
import os
# G L O B A L S ###############################################################
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
# F U N C T I O N S ##########################################################
def parse_arguments():
"""
Parses the command-line arguments passed to the emulator.
:return: the parsed command-line arguments
"""
parser = argparse.ArgumentParser(
description="Starts a simple Chip 8 "
"emulator. See README.md for more information, and LICENSE for "
"terms of use.")
parser.add_argument(
"rom", help="the ROM file to load on startup")
parser.add_argument(
"--scale", help="the scale factor to apply to the display "
"(default is 5)", type=int, default=5, dest="scale")
parser.add_argument(
"--delay", help="sets the CPU operation to take at least "
"the specified number of milliseconds to execute (default is 1)",
type=int, default=1, dest="op_delay")
return parser.parse_args()
# M A I N #####################################################################
if __name__ == "__main__":
from chip8.emulator import main_loop
main_loop(parse_arguments())
# E N D O F F I L E #######################################################
|
[] |
[] |
[
"PYGAME_HIDE_SUPPORT_PROMPT"
] |
[]
|
["PYGAME_HIDE_SUPPORT_PROMPT"]
|
python
| 1 | 0 | |
StrictSubSet.py
|
A = set(map(int, input().split()))
sets = list()
for _ in range(int(input())):
cmd = set(map(int, input().split()))
sets.append(cmd)
for s in sets:
# A is a strict superset only if it contains every element of s and has strictly more elements
if not (len(A) > len(s) and A.issuperset(s)):
print('False')
exit()
print('True')
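# Illustrative I/O: with A = {1 2 3 4 5 6} and the two sets {1 2 3} and {2 4}, both are
# strictly contained in A, so the script prints True; otherwise it prints False and exits.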
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "APMS.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
config/scanner.go
|
package config
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
multierror "github.com/hashicorp/go-multierror"
log "github.com/sirupsen/logrus"
)
type scanner struct {
config *Config
templater *renderer
path string
}
func newConfigScanner(directory string, config *Config, templater *renderer) *scanner {
return &scanner{
config: config,
templater: templater,
path: directory,
}
}
func (s *scanner) scan() error {
p, err := os.Open(s.path)
if err != nil {
return err
}
i, err := p.Stat()
if err != nil {
return err
}
if i.IsDir() {
return s.scanDirectory(s.path)
}
return s.readAndProcess(s.path)
}
// scanDirectory recursively walks the given directory and processes every eligible file.
func (s *scanner) scanDirectory(directory string) error {
log.Debugf("Scanning directory %s", directory)
d, err := os.Open(directory)
if err != nil {
return err
}
d.Close()
fi, err := ioutil.ReadDir(directory)
if err != nil {
return err
}
var result error
for _, fi := range fi {
pathName := path.Clean(directory + "/" + fi.Name())
// regular file
if fi.Mode().IsRegular() {
if !s.shouldProcess(pathName) {
continue
}
if err := s.readAndProcess(pathName); err != nil {
result = multierror.Append(result, fmt.Errorf("[%s] %s", strings.TrimPrefix(directory, pathName), err))
}
continue
}
// directory
if fi.IsDir() {
if err := s.scanDirectory(pathName); err != nil {
result = multierror.Append(result, err)
}
continue
}
// something else, ignore it
log.Debugf("Ignoring path %s/%s", directory, fi.Name())
}
return result
}
func (s *scanner) shouldProcess(file string) bool {
ext := path.Ext(file)
// we only allow HCL and CTMPL files to be processed
if ext != ".hcl" && ext != ".ctmpl" {
log.Debugf("Ignoring file %s (only .hcl or .ctmpl is acceptable file extensions)", file)
return false
}
// files with the .var.hcl suffix are considered variable files
// and should not be processed any further
if strings.HasSuffix(file, ".var.hcl") {
log.Debugf("Skipping file %s, is a configuration file", file)
return false
}
// don't process files that were provided as variable files
// since their syntax is different
absPath, _ := filepath.Abs(file)
for _, configFile := range s.templater.readConfigFiles {
if configFile == absPath {
log.Debugf("Skipping file %s, is a configuration file", file)
return false
}
}
return true
}
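// Illustrative decisions made by shouldProcess (file names hypothetical):
//   service.hcl      -> processed
//   vars.ctmpl       -> processed
//   defaults.var.hcl -> skipped (variable file)
//   README.md        -> skipped (unsupported extension)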
func (s *scanner) readAndProcess(file string) error {
content, err := s.readFile(file)
if err != nil {
return err
}
content, err = s.templater.renderContent(content, file, 0)
if err != nil {
return err
}
relativeFile := strings.TrimPrefix(strings.TrimPrefix(file, s.path), "/")
if os.Getenv("PRINT_CONTENT") == "1" {
log.WithField("file", relativeFile).Debug(content)
}
list, err := s.config.parseContent(content, relativeFile)
if err != nil {
return err
}
return s.config.processContent(list, relativeFile)
}
// Read File Content
func (s *scanner) readFile(file string) (string, error) {
log.Debugf("Reading file %s", file)
// read file from disk
content, err := ioutil.ReadFile(file)
if err != nil {
return "", err
}
return string(content), nil
}
|
[
"\"PRINT_CONTENT\""
] |
[] |
[
"PRINT_CONTENT"
] |
[]
|
["PRINT_CONTENT"]
|
go
| 1 | 0 | |
gpt_neo_xl_deepspeed.py
|
import os
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '9994'
os.environ['RANK'] = "0"
os.environ['LOCAL_RANK'] = "0"
os.environ['WORLD_SIZE'] = "1" # parallalism
import pandas as pd
import torch
from torch.utils.data import Dataset, random_split
from transformers import GPT2Tokenizer, TrainingArguments, Trainer, GPTNeoForCausalLM
torch.manual_seed(42)
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B", bos_token='<|endoftext|>',
eos_token='<|endoftext|>', pad_token='<|pad|>')
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B").cuda()
model.resize_token_embeddings(len(tokenizer))
descriptions = pd.read_csv('netflix_titles.csv')['description']
max_length = max([len(tokenizer.encode(description)) for description in descriptions])
print("Max length: {}".format(max_length))
class NetflixDataset(Dataset):
def __init__(self, txt_list, tokenizer, max_length):
self.input_ids = []
self.attn_masks = []
self.labels = []
for txt in txt_list:
encodings_dict = tokenizer('<|endoftext|>' + txt + '<|endoftext|>', truncation=True,
max_length=max_length, padding="max_length")
self.input_ids.append(torch.tensor(encodings_dict['input_ids']))
self.attn_masks.append(torch.tensor(encodings_dict['attention_mask']))
def __len__(self):
return len(self.input_ids)
def __getitem__(self, idx):
return self.input_ids[idx], self.attn_masks[idx]
dataset = NetflixDataset(descriptions, tokenizer, max_length=max_length)
train_size = int(0.9 * len(dataset))
train_dataset, val_dataset = random_split(dataset, [train_size, len(dataset) - train_size])
# batch sizes were 15
training_args = TrainingArguments(output_dir='./results', num_train_epochs=5, logging_steps=300, save_steps=300,
per_device_train_batch_size=2, per_device_eval_batch_size=2,warmup_steps=100,
weight_decay=0.01, logging_dir='./logs', deepspeed='./ds_config.json')
Trainer(model=model, args=training_args, train_dataset=train_dataset,
eval_dataset=val_dataset, data_collator=lambda data: {'input_ids': torch.stack([f[0] for f in data]),
'attention_mask': torch.stack([f[1] for f in data]),
'labels': torch.stack([f[0] for f in data])}).train()
generated = tokenizer("<|endoftext|> ", return_tensors="pt").input_ids.cuda()
sample_outputs = model.generate(generated, do_sample=True, top_k=50,
max_length=300, top_p=0.95, temperature=1.9, num_return_sequences=20)
for i, sample_output in enumerate(sample_outputs):
print("{}: {}".format(i, tokenizer.decode(sample_output, skip_special_tokens=True)))
|
[] |
[] |
[
"MASTER_ADDR",
"MASTER_PORT",
"LOCAL_RANK",
"RANK",
"WORLD_SIZE"
] |
[]
|
["MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK", "RANK", "WORLD_SIZE"]
|
python
| 5 | 0 | |
kpi/kpi/wsgi.py
|
"""
WSGI config for kpi project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'kpi.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cli/config/configStruct.go
|
package config
import (
"fmt"
"github.com/op/go-logging"
"github.com/up9inc/mizu/cli/config/configStructs"
"github.com/up9inc/mizu/cli/mizu"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/homedir"
"os"
"path"
"path/filepath"
)
const (
MizuResourcesNamespaceConfigName = "mizu-resources-namespace"
ConfigFilePathCommandName = "config-path"
KubeConfigPathConfigName = "kube-config-path"
)
type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap"`
Version configStructs.VersionConfig `yaml:"version"`
View configStructs.ViewConfig `yaml:"view"`
Logs configStructs.LogsConfig `yaml:"logs"`
Auth configStructs.AuthConfig `yaml:"auth"`
Config configStructs.ConfigConfig `yaml:"config,omitempty"`
AgentImage string `yaml:"agent-image,omitempty" readonly:""`
ImagePullPolicyStr string `yaml:"image-pull-policy" default:"Always"`
MizuResourcesNamespace string `yaml:"mizu-resources-namespace" default:"mizu"`
Telemetry bool `yaml:"telemetry" default:"true"`
DumpLogs bool `yaml:"dump-logs" default:"false"`
KubeConfigPathStr string `yaml:"kube-config-path"`
ConfigFilePath string `yaml:"config-path,omitempty" readonly:""`
HeadlessMode bool `yaml:"headless" default:"false"`
LogLevelStr string `yaml:"log-level,omitempty" default:"INFO" readonly:""`
ServiceMap bool `yaml:"service-map,omitempty" default:"false" readonly:""`
OAS bool `yaml:"oas,omitempty" default:"false" readonly:""`
}
func (config *ConfigStruct) validate() error {
if _, err := logging.LogLevel(config.LogLevelStr); err != nil {
return fmt.Errorf("%s is not a valid log level, err: %v", config.LogLevelStr, err)
}
return nil
}
func (config *ConfigStruct) SetDefaults() {
config.AgentImage = fmt.Sprintf("3b295/mizu-%s:%s", mizu.Branch, mizu.SemVer)
config.ConfigFilePath = path.Join(mizu.GetMizuFolderPath(), "config.yaml")
}
func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
return v1.PullPolicy(config.ImagePullPolicyStr)
}
func (config *ConfigStruct) IsNsRestrictedMode() bool {
return config.MizuResourcesNamespace != "mizu" // Notice "mizu" string must match the default MizuResourcesNamespace
}
func (config *ConfigStruct) KubeConfigPath() string {
if config.KubeConfigPathStr != "" {
return config.KubeConfigPathStr
}
envKubeConfigPath := os.Getenv("KUBECONFIG")
if envKubeConfigPath != "" {
return envKubeConfigPath
}
home := homedir.HomeDir()
return filepath.Join(home, ".kube", "config")
}
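// Resolution order illustrated by KubeConfigPath above: the explicit kube-config-path
// setting wins, then the KUBECONFIG environment variable, then ~/.kube/config.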
func (config *ConfigStruct) LogLevel() logging.Level {
logLevel, _ := logging.LogLevel(config.LogLevelStr)
return logLevel
}
|
[
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
lstm.py
|
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
import resnet
import myResnet
import myInceptionNet
torch.cuda.empty_cache()
"""Load datasets"""
NUMBER = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
ALPHABET = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u',
'v', 'w', 'x', 'y', 'z']
# NONE = ['NONE'] # label for empty space
NONE = ['-'] # label for empty space
ALL_CHAR_SET = NUMBER + ALPHABET + NONE
ALL_CHAR_SET_LEN = len(ALL_CHAR_SET)
MAX_CAPTCHA = 7
# print(ALL_CHAR_SET.index('NONE'))
def encode(a):
onehot = [0] * ALL_CHAR_SET_LEN
idx = ALL_CHAR_SET.index(a)
onehot[idx] += 1
return onehot
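# Illustrative: encode('0') -> a length-37 one-hot with a 1 at index 0;
# encode('-') marks the padding slot used for captchas shorter than MAX_CAPTCHA.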
# modified dataset class
class Mydataset(Dataset):
def __init__(self, img_path, label_path, is_train=True, transform=None):
self.path = img_path
self.label_path = label_path
if is_train:
self.img = os.listdir(self.path)[:10000]
self.labels = open(self.label_path, 'r').read().split('\n')[:-1][:10000]
else:
self.img = os.listdir(self.path)[:1000]
self.labels = open(self.label_path, 'r').read().split('\n')[:-1][:1000]
self.transform = transform
self.max_length = MAX_CAPTCHA
def __getitem__(self, idx):
img_path = self.img[idx]
img = Image.open(f'{self.path}/{self.img[idx]}')
img = img.convert('L')
label = self.labels[idx]
label_oh = []
# one-hot for each character
for i in range(self.max_length):
if i < len(label):
label_oh += encode(label[i])
else:
# label_oh += [0]*ALL_CHAR_SET_LEN
# label_oh += encode('NONE')
label_oh += encode('-')
if self.transform is not None:
img = self.transform(img)
return img, np.array(label_oh), label
def __len__(self):
return len(self.img)
transform = transforms.Compose([
transforms.Resize([160, 60]),
transforms.ToTensor(),
##############################################################################
transforms.Normalize((0.8958,), (0.1360,)),
##############################################################################
])
"""Loading DATA"""
# Change to your own data foler path!
gPath = ''
train_ds = Mydataset(gPath + 'Data/train/', gPath + 'Data/train.txt', transform=transform)
test_ds = Mydataset(gPath + 'Data/test/', gPath + 'Data/test.txt', False, transform)
# train_dl = DataLoader(train_ds, batch_size=256, num_workers=4)
# test_dl = DataLoader(test_ds, batch_size=1, num_workers=4)
train_dl = DataLoader(train_ds, batch_size=128)
test_dl = DataLoader(test_ds, batch_size=1)
# mean = 0.
# std = 0.
# for step, i in enumerate(train_dl):
# img, label_oh, label = i
# img = Variable(img).cuda()
# mean += img.mean()
# std += img.std()
# print(step)
# mean /= step
# std /= step
# print("mean=", mean, " std=", std)
"""To CUDA for local run"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
GPUID = '1' # define GPUID
os.environ["CUDA_VISIBLE_DEVICES"] = str(GPUID)
"""Problem 1: Design LSTM model for catcha image recognition. (10 points)"""
class LSTM(nn.Module):
def __init__(self, cnn_dim, hidden_size, vocab_size, num_layers=1):
super(LSTM, self).__init__()
# define the properties
self.cnn_dim = cnn_dim
self.hidden_size = hidden_size
self.vocab_size = vocab_size
# lstm cell
self.lstm_cell = nn.LSTMCell(input_size=self.vocab_size, hidden_size=hidden_size)
# self.rnn = nn.RNN(input_size=self.vocab_size, hidden_size=hidden_size, batch_first=True)
# output fully connected layer
self.fc_in = nn.Linear(in_features=self.cnn_dim, out_features=self.vocab_size)
self.fc_out = nn.Linear(in_features=self.hidden_size, out_features=self.vocab_size)
# embedding layer
self.embed = nn.Embedding(num_embeddings=self.vocab_size, embedding_dim=self.vocab_size)
# activations
self.softmax = nn.Softmax(dim=1)
def forward(self, features, captions, isTraining):
batch_size = features.size(0)
cnn_dim = features.size(1)
hidden_state = torch.zeros((batch_size, self.hidden_size)).cuda()
cell_state = torch.zeros((batch_size, self.hidden_size)).cuda()
# define the output tensor placeholder
outputs = torch.zeros((batch_size, captions.size(1), self.vocab_size)).cuda() # 128 * 7 * 37
# embed the captions
# captions_embed = self.embed(captions) # when training, i will use label one-hot vector
##############################################################################
avgpool = nn.AdaptiveAvgPool2d(1) # 128 x 512 x 5 x 2
features = avgpool(features) # 128 x 512 x 1 x 1
features = torch.flatten(features, 1) # 128 x 512
features = self.fc_in(features) # 512 -> 37: project pooled CNN features to vocab size
# features = self.softmax(features)
# for each word: 128(batch) * 7 (chars) * 37 (prob)
for t in range(captions.size(1)): # 0 ~ 6
if t == 0:
# t=0, input = features
hidden_state, cell_state = self.lstm_cell(features, (hidden_state, cell_state))
else:
# t > 1, input = embedded label one hot(t-1) or output(t-1)
# by using Embed lookup table, the same characters are updated simultaneously
# teacher forcing
if isTraining:
hidden_state, cell_state = self.lstm_cell(captions[:, t - 1, :], (hidden_state, cell_state))
else:
hidden_state, cell_state = self.lstm_cell(outputs[:, t - 1, :], (hidden_state, cell_state))
out = self.fc_out(hidden_state) # 8 -> 37 predict next state
# out = self.softmax(out) # 0 ~ 1 activate
outputs[:, t, :] = out
outputs = self.softmax(outputs)
##############################################################################
return outputs
"""Problem 2:
* 1. Connect the CNN model to the designed LSTM model.
* 2. Replace ResNet with your own CNN model from Assignment 3.
"""
##############################################################################
"""ResNet"""
# CNN
# cnn_model_path = './trained_cnn_model_threshold_myresnet_testset.pth'
betternet = myResnet.betternet()
betternet.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
betternet.fc = nn.Linear(in_features=512, out_features=ALL_CHAR_SET_LEN * MAX_CAPTCHA, bias=True)
# betternet.load_state_dict(torch.load(cnn_model_path))
betternet.to(device)
# betternet.eval()
##############################################################################
# LSTM
cnn_dim = 512 # resnet18-512
hidden_size = 8 # 8->37
vocab_size = 37 # ALL_CHAR_SET_LEN
lstm = LSTM(cnn_dim=cnn_dim, hidden_size=hidden_size, vocab_size=vocab_size)
lstm = lstm.to(device)
# loss, optimizer
##############################################################################
# optimizer = torch.optim.SGD(params, lr=0.01, momentum=0.9, weight_decay=0.9)
params = list(betternet.parameters()) + list(lstm.parameters())
loss_func = nn.MultiLabelSoftMarginLoss()
nn.utils.clip_grad_norm_(params, 5)
optimizer = torch.optim.Adam(lstm.parameters(), lr=0.001)
##############################################################################
def get_char_count(arg1):
c0 = ALL_CHAR_SET[np.argmax(arg1.cpu().tolist()[0:ALL_CHAR_SET_LEN])]
c1 = ALL_CHAR_SET[np.argmax(arg1.cpu().tolist()[ALL_CHAR_SET_LEN:ALL_CHAR_SET_LEN * 2])]
c2 = ALL_CHAR_SET[np.argmax(arg1.cpu().tolist()[ALL_CHAR_SET_LEN * 2:ALL_CHAR_SET_LEN * 3])]
c3 = ALL_CHAR_SET[np.argmax(arg1.cpu().tolist()[ALL_CHAR_SET_LEN * 3:ALL_CHAR_SET_LEN * 4])]
c4 = ALL_CHAR_SET[np.argmax(arg1.cpu().tolist()[ALL_CHAR_SET_LEN * 4:ALL_CHAR_SET_LEN * 5])]
c5 = ALL_CHAR_SET[np.argmax(arg1.cpu().tolist()[ALL_CHAR_SET_LEN * 5:ALL_CHAR_SET_LEN * 6])]
c6 = ALL_CHAR_SET[np.argmax(arg1.cpu().tolist()[ALL_CHAR_SET_LEN * 6:ALL_CHAR_SET_LEN * 7])]
return c0, c1, c2, c3, c4, c5, c6
"""step3: Find hyper-parameters."""
"""TRAINING"""
print_interval = 30
max_epoch = 1000
# max_epoch = 1
threshold = 1e-4
isConverged = False
for epoch in range(max_epoch):
if isConverged:
break
for step, i in enumerate(train_dl):
img, label_oh, label = i
img = Variable(img).cuda()
label_oh = Variable(label_oh.float()).cuda()
batch_size, _ = label_oh.shape
pred_cnn, feature = betternet(img)
##############################################################################
optimizer.zero_grad()
label_oh_reshape = label_oh.reshape(batch_size, MAX_CAPTCHA, ALL_CHAR_SET_LEN) # (128, 259) -> (128, 7, 37)
# caption_oh = torch.argmax(label_oh_reshape, 2) # one-hot word index
# caption_target = caption_oh[:, 1:].to(device) # 1~6
# caption_target = label_oh[:, vocab_size:].to(device) # (128, 259) -> (128, 222)
caption_train = label_oh_reshape.to(device)
# caption_train = label_oh_reshape[:, :label_oh_reshape.shape[1]-1, :].to(device) # (128, 6, 37) ... 0~5
pred_lstm = lstm(feature, caption_train, True) # (128, 512, 5, 2), (128, 7, 37), float
# outputs = lstm(feature, caption_train, True) # (128, 512, 5, 2), (128, 7), float
outputs = torch.flatten(pred_lstm, 1) # 128, 222
# for end-to-end learning
loss = loss_func(outputs, label_oh) # softmax(128, 259) vs. one-hot(128, 259)
if loss < threshold:
isConverged = True
loss.backward()
optimizer.step()
##############################################################################
if (step + 1) % print_interval == 0:
print('epoch:', epoch + 1, 'step:', step + 1, 'loss:', loss.item())
if (epoch + 1) % 100 == 0:
print("predict:", outputs[0, :])
print("target:", label_oh[0, :])
cnn_model_path = './trained_cnn_model_end-to-end.pth'
lstm_model_path = './trained_lstm_model.pth'
print('Finished Training')
torch.save(betternet.state_dict(), cnn_model_path)
torch.save(lstm.state_dict(), lstm_model_path)
print('Saved Trained Model')
"""TEST"""
char_correct = 0
word_correct = 0
total = 0
betternet.eval()
lstm.eval()
with torch.no_grad():
for step, (img, label_oh, label) in enumerate(test_dl):
char_count = 0
img = Variable(img).cuda()
label_oh = Variable(label_oh.float()).cuda()
pred_cnn, feature = betternet(img)
batch_size, _ = label_oh.shape
caption = pred_cnn.reshape(batch_size, MAX_CAPTCHA, ALL_CHAR_SET_LEN) # 1, 7, 37 reshape
# caption = torch.argmax(caption, 2)
# use predicted words by CNN
pred_lstm = lstm(feature, caption, False) # (1, 7, 37) #but captions will not be used
pred_lstm = torch.flatten(pred_lstm, 1) # (1, 259)
label_len = label[0]
outputs = pred_lstm.squeeze(0)
label_oh = label_oh.squeeze(0)
c0, c1, c2, c3, c4, c5, c6 = get_char_count(outputs)
d0, d1, d2, d3, d4, d5, d6 = get_char_count(label_oh)
c = '%s%s%s%s%s%s%s' % (c0, c1, c2, c3, c4, c5, c6)
d = '%s%s%s%s%s%s%s' % (d0, d1, d2, d3, d4, d5, d6)
char_count += (c0 == d0) + (c1 == d1) + (c2 == d2) + (c3 == d3) + (c4 == d4) + (c5 == d5) + (c6 == d6)
char_correct += char_count
print("predict:", c)
print("label:", d)
if str(label[0]) in str(c):
word_correct += 1
total += 1
print('correct characters:', char_correct)
print('correct words:', word_correct)
print('word accuracy:', (word_correct / total) * 100.0, '%')
"""END TEST"""
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
executioncontext/azure.go
|
package executioncontext
import (
"os"
"github.com/cjlapao/common-go/helper"
)
// AzureClientContext entity
type AzureClientContext struct {
TenantID string `json:"tenantId"`
SubscriptionID string `json:"subscriptionId"`
ClientID string `json:"clientId"`
ClientSecret string `json:"clientSecret"`
ResourceGroup string `json:"resourceGroup"`
Storage *AzureStorageContext `json:"storage"`
}
// AzureStorageContext entity
type AzureStorageContext struct {
PrimaryAccountKey string `json:"primaryAccountKey"`
SecondaryAccountKey string `json:"secondaryAccountKey"`
AccountName string `json:"storageAccount"`
ContainerName string `json:"storageContainer"`
FileName string `json:"fileName"`
FromPath string `json:"fromPath"`
ToFileName string `json:"toFileName"`
ToPath string `json:"toPath"`
}
func (c *AzureClientContext) IsValid() bool {
return c.ClientID != "" && c.ClientSecret != "" && c.TenantID != "" && c.SubscriptionID != ""
}
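// GetAzureContext builds the Azure client context from command-line flags, then applies the
// environment variable overrides and initialises the storage context.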
func (e *Context) GetAzureContext() {
azureContext := AzureClientContext{
TenantID: helper.GetFlagValue("tenantId", ""),
SubscriptionID: helper.GetFlagValue("subscriptionId", ""),
ClientID: helper.GetFlagValue("clientId", ""),
ClientSecret: helper.GetFlagValue("clientSecret", ""),
ResourceGroup: helper.GetFlagValue("resourceGroup", ""),
}
e.AzureClient = azureContext
e.SetAzureEnvironmentKeys()
e.SetAzureStorageContext(nil)
}
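// SetAzureEnvironmentKeys overrides the Azure client context with the DT_AZURE_* environment
// variables whenever they are set.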
func (e *Context) SetAzureEnvironmentKeys() {
subscriptionID := os.Getenv("DT_AZURE_SUBSCRIPTION_ID")
tenantID := os.Getenv("DT_AZURE_TENANT_ID")
clientID := os.Getenv("DT_AZURE_CLIENT_ID")
clientSecret := os.Getenv("DT_AZURE_CLIENT_SECRET")
resourceGroup := os.Getenv("DT_AZURE_RESOURCE_GROUP")
if len(subscriptionID) > 0 {
e.AzureClient.SubscriptionID = subscriptionID
}
if len(tenantID) > 0 {
e.AzureClient.TenantID = tenantID
}
if len(clientID) > 0 {
e.AzureClient.ClientID = clientID
}
if len(clientSecret) > 0 {
e.AzureClient.ClientSecret = clientSecret
}
if len(resourceGroup) > 0 {
e.AzureClient.ResourceGroup = resourceGroup
}
}
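// SetAzureStorageContext uses the supplied storage context if one is given, otherwise it builds
// one from command-line flags.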
func (e *Context) SetAzureStorageContext(context *AzureStorageContext) {
if context != nil {
e.AzureClient.Storage = context
} else {
storageCtx := AzureStorageContext{
AccountName: helper.GetFlagValue("storageAccount", ""),
ContainerName: helper.GetFlagValue("storageContainer", ""),
FileName: helper.GetFlagValue("blobName", ""),
ToFileName: helper.GetFlagValue("downloadBlobHas", ""),
FromPath: helper.GetFlagValue("uploadFrom", ""),
ToPath: helper.GetFlagValue("downloadBlobTo", ""),
}
e.AzureClient.Storage = &storageCtx
}
}
|
[
"\"DT_AZURE_SUBSCRIPTION_ID\"",
"\"DT_AZURE_TENANT_ID\"",
"\"DT_AZURE_CLIENT_ID\"",
"\"DT_AZURE_CLIENT_SECRET\"",
"\"DT_AZURE_RESOURCE_GROUP\""
] |
[] |
[
"DT_AZURE_CLIENT_SECRET",
"DT_AZURE_TENANT_ID",
"DT_AZURE_SUBSCRIPTION_ID",
"DT_AZURE_CLIENT_ID",
"DT_AZURE_RESOURCE_GROUP"
] |
[]
|
["DT_AZURE_CLIENT_SECRET", "DT_AZURE_TENANT_ID", "DT_AZURE_SUBSCRIPTION_ID", "DT_AZURE_CLIENT_ID", "DT_AZURE_RESOURCE_GROUP"]
|
go
| 5 | 0 | |
infrastructure-provisioning/src/general/scripts/aws/dataengine_configure.py
|
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import datalab.actions_lib
import datalab.fab
import datalab.meta_lib
import json
import logging
import multiprocessing
import os
import sys
import traceback
from fabric import *
import subprocess
def configure_slave(slave_number, data_engine):
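# Provision one slave node: create the DataLab SSH user, clean the instance, configure the HTTP
# proxy, install prerequisites, configure Spark in slave mode, and install the user's key.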
slave_name = data_engine['slave_node_name'] + '{}'.format(slave_number + 1)
slave_hostname = datalab.meta_lib.get_instance_private_ip_address(data_engine['tag_name'], slave_name)
try:
logging.info('[CREATING DATALAB SSH USER ON SLAVE NODE]')
print('[CREATING DATALAB SSH USER ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
master_node_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], data_engine['key_name']),
data_engine['initial_user'], data_engine['datalab_ssh_user'], data_engine['sudo_group'])
try:
subprocess.run("~/scripts/{}.py {}".format('create_ssh_user', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to create ssh user on slave.", str(err))
sys.exit(1)
try:
logging.info('[CLEANING INSTANCE FOR SLAVE NODE]')
print('[CLEANING INSTANCE FOR SLAVE NODE]')
params = '--hostname {} --keyfile {} --os_user {} --application {}' \
.format(slave_hostname, keyfile_name, data_engine['datalab_ssh_user'], os.environ['application'])
try:
subprocess.run("~/scripts/{}.py {}".format('common_clean_instance', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to clean slave instance.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON SLAVE NODE]')
print('[CONFIGURE PROXY ON SLAVE NODE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(slave_hostname, slave_name, keyfile_name, json.dumps(additional_config),
data_engine['datalab_ssh_user'])
try:
subprocess.run("~/scripts/{}.py {}".format('common_configure_proxy', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to configure proxy on slave.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON SLAVE NODE]')
print('[INSTALLING PREREQUISITES ON SLAVE NODE]')
params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}". \
format(slave_hostname, keyfile_name, data_engine['datalab_ssh_user'], data_engine['region'],
edge_instance_private_ip)
try:
subprocess.run("~/scripts/{}.py {}".format('install_prerequisites', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to install prerequisites on slave.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
print('[CONFIGURE SLAVE NODE {}]'.format(slave_number + 1))
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
"--scala_version {} --r_mirror {} --master_ip {} --node_type {}". \
format(slave_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['datalab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'slave')
try:
subprocess.run("~/scripts/{}.py {}".format('configure_dataengine', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to configure slave node.", str(err))
sys.exit(1)
try:
print('[INSTALLING USER KEY ON SLAVE NODE]')
logging.info('[INSTALLING USER KEY ON SLAVE NODE]')
additional_config = {"user_keyname": data_engine['user_keyname'], "user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
slave_hostname, keyfile_name, json.dumps(additional_config), data_engine['datalab_ssh_user'])
try:
subprocess.run("~/scripts/{}.py {}".format('install_user_key', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed install users key on slave node.", str(err))
sys.exit(1)
def clear_resources():
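# Roll back by terminating the master and every slave EC2 instance of this cluster.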
datalab.actions_lib.remove_ec2(data_engine['tag_name'], data_engine['master_node_name'])
for i in range(data_engine['instance_count'] - 1):
slave_name = data_engine['slave_node_name'] + '{}'.format(i + 1)
datalab.actions_lib.remove_ec2(data_engine['tag_name'], slave_name)
if __name__ == "__main__":
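# Main flow: build names and configuration from the environment, configure the master node,
# configure the slaves in parallel, set up the edge reverse proxy, and write a summary to
# /root/result.json.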
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['project_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
print('Generating infrastructure names and tags')
data_engine = dict()
if 'exploratory_name' in os.environ:
data_engine['exploratory_name'] = os.environ['exploratory_name']
else:
data_engine['exploratory_name'] = ''
if 'computational_name' in os.environ:
data_engine['computational_name'] = os.environ['computational_name']
else:
data_engine['computational_name'] = ''
data_engine['service_base_name'] = (os.environ['conf_service_base_name'])
data_engine['project_name'] = os.environ['project_name']
data_engine['endpoint_name'] = os.environ['endpoint_name']
data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['key_name'] = os.environ['conf_key_name']
data_engine['region'] = os.environ['aws_region']
data_engine['network_type'] = os.environ['conf_network_type']
data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'],
data_engine['computational_name'])
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
data_engine['master_size'] = os.environ['aws_dataengine_master_shape']
data_engine['slave_size'] = os.environ['aws_dataengine_slave_shape']
data_engine['dataengine_master_security_group_name'] = '{}-{}-{}-de-master-sg' \
.format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
data_engine['dataengine_slave_security_group_name'] = '{}-{}-{}-de-slave-sg' \
.format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
tag = {"Key": data_engine['tag_name'],
"Value": "{}-{}-{}-subnet".format(data_engine['service_base_name'], data_engine['project_name'],
data_engine['endpoint_name'])}
data_engine['subnet_cidr'] = datalab.meta_lib.get_subnet_by_tag(tag)
data_engine['notebook_dataengine_role_profile_name'] = '{}-{}-{}-nb-de-profile' \
.format(data_engine['service_base_name'], data_engine['project_name'], data_engine['endpoint_name'])
data_engine['instance_count'] = int(os.environ['dataengine_instance_count'])
master_node_hostname = datalab.meta_lib.get_instance_hostname(data_engine['tag_name'],
data_engine['master_node_name'])
data_engine['datalab_ssh_user'] = os.environ['conf_os_user']
data_engine['user_keyname'] = data_engine['project_name']
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
edge_instance_name = '{0}-{1}-{2}-edge'.format(data_engine['service_base_name'],
data_engine['project_name'], data_engine['endpoint_name'])
edge_instance_hostname = datalab.meta_lib.get_instance_hostname(data_engine['tag_name'], edge_instance_name)
edge_instance_private_ip = datalab.meta_lib.get_instance_ip_address(data_engine['tag_name'],
edge_instance_name).get('Private')
data_engine['edge_instance_hostname'] = datalab.meta_lib.get_instance_hostname(data_engine['tag_name'],
edge_instance_name)
if os.environ['conf_os_family'] == 'debian':
data_engine['initial_user'] = 'ubuntu'
data_engine['sudo_group'] = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
data_engine['initial_user'] = 'ec2-user'
data_engine['sudo_group'] = 'wheel'
except Exception as err:
data_engine['tag_name'] = data_engine['service_base_name'] + '-tag'
data_engine['cluster_name'] = "{}-{}-{}-de-{}".format(data_engine['service_base_name'],
data_engine['project_name'],
data_engine['endpoint_name'],
data_engine['computational_name'])
data_engine['master_node_name'] = data_engine['cluster_name'] + '-m'
data_engine['slave_node_name'] = data_engine['cluster_name'] + '-s'
clear_resources()
datalab.fab.append_result("Failed to generate variables dictionary.", str(err))
sys.exit(1)
try:
logging.info('[CREATING DATALAB SSH USER ON MASTER NODE]')
print('[CREATING DATALAB SSH USER ON MASTER NODE]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format(
master_node_hostname, "{}{}.pem".format(os.environ['conf_key_dir'], data_engine['key_name']),
data_engine['initial_user'], data_engine['datalab_ssh_user'], data_engine['sudo_group'])
try:
subprocess.run("~/scripts/{}.py {}".format('create_ssh_user', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to create ssh user on master.", str(err))
sys.exit(1)
try:
logging.info('[CLEANING INSTANCE FOR MASTER NODE]')
print('[CLEANING INSTANCE FOR MASTER NODE]')
params = '--hostname {} --keyfile {} --os_user {} --application {}' \
.format(master_node_hostname, keyfile_name, data_engine['datalab_ssh_user'], os.environ['application'])
try:
subprocess.run("~/scripts/{}.py {}".format('common_clean_instance', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to clean master instance.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE PROXY ON MASTER NODE]')
print('[CONFIGURE PROXY ON MASTER NODE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(master_node_hostname, data_engine['master_node_name'], keyfile_name, json.dumps(additional_config),
data_engine['datalab_ssh_user'])
try:
subprocess.run("~/scripts/{}.py {}".format('common_configure_proxy', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to configure proxy on master.", str(err))
sys.exit(1)
try:
logging.info('[INSTALLING PREREQUISITES ON MASTER NODE]')
print('[INSTALLING PREREQUISITES ON MASTER NODE]')
params = "--hostname {} --keyfile {} --user {} --region {} --edge_private_ip {}". \
format(master_node_hostname, keyfile_name, data_engine['datalab_ssh_user'], data_engine['region'],
edge_instance_private_ip)
try:
subprocess.run("~/scripts/{}.py {}".format('install_prerequisites', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed to install prerequisites on master.", str(err))
sys.exit(1)
try:
print('[INSTALLING USER KEY ON MASTER NODE]')
logging.info('[INSTALLING USER KEY ON MASTER NODE]')
additional_config = {"user_keyname": data_engine['user_keyname'], "user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
master_node_hostname, keyfile_name, json.dumps(additional_config), data_engine['datalab_ssh_user'])
try:
subprocess.run("~/scripts/{}.py {}".format('install_user_key', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
clear_resources()
datalab.fab.append_result("Failed install users key on master node.", str(err))
sys.exit(1)
try:
logging.info('[CONFIGURE MASTER NODE]')
print('[CONFIGURE MASTER NODE]')
params = "--hostname {} --keyfile {} --region {} --spark_version {} --hadoop_version {} --os_user {} " \
"--scala_version {} --r_mirror {} --master_ip {} --node_type {}".\
format(master_node_hostname, keyfile_name, data_engine['region'], os.environ['notebook_spark_version'],
os.environ['notebook_hadoop_version'], data_engine['datalab_ssh_user'],
os.environ['notebook_scala_version'], os.environ['notebook_r_mirror'], master_node_hostname,
'master')
try:
subprocess.run("~/scripts/{}.py {}".format('configure_dataengine', params), shell=True, check=True)
except:
traceback.print_exc()
raise Exception
except Exception as err:
datalab.fab.append_result("Failed to configure master node", str(err))
clear_resources()
sys.exit(1)
try:
jobs = []
for slave in range(data_engine['instance_count'] - 1):
p = multiprocessing.Process(target=configure_slave, args=(slave, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except Exception as err:
datalab.fab.append_result("Failed to configure slave nodes.", str(err))
clear_resources()
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
notebook_instance_ip = datalab.meta_lib.get_instance_private_ip_address('Name',
os.environ['notebook_instance_name'])
additional_info = {
"computational_name": data_engine['computational_name'],
"master_node_hostname": master_node_hostname,
"notebook_instance_ip": notebook_instance_ip,
"instance_count": data_engine['instance_count'],
"master_node_name": data_engine['master_node_name'],
"slave_node_name": data_engine['slave_node_name'],
"tensor": False
}
params = "--edge_hostname {} " \
"--keyfile {} " \
"--os_user {} " \
"--type {} " \
"--exploratory_name {} " \
"--additional_info '{}'"\
.format(edge_instance_hostname,
keyfile_name,
data_engine['datalab_ssh_user'],
'spark',
data_engine['exploratory_name'],
json.dumps(additional_info))
try:
subprocess.run("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params), shell=True, check=True)
except:
datalab.fab.append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
datalab.fab.append_result("Failed to configure reverse proxy.", str(err))
clear_resources()
sys.exit(1)
try:
ip_address = datalab.meta_lib.get_instance_ip_address(data_engine['tag_name'],
data_engine['master_node_name']).get('Private')
spark_master_url = "http://" + ip_address + ":8080"
spark_master_access_url = "https://{}/{}_{}/".format(data_engine['edge_instance_hostname'],
data_engine['exploratory_name'],
data_engine['computational_name'])
logging.info('[SUMMARY]')
print('[SUMMARY]')
print("Service base name: {}".format(data_engine['service_base_name']))
print("Region: {}".format(data_engine['region']))
print("Cluster name: {}".format(data_engine['cluster_name']))
print("Master node shape: {}".format(data_engine['master_size']))
print("Slave node shape: {}".format(data_engine['slave_size']))
print("Instance count: {}".format(str(data_engine['instance_count'])))
with open("/root/result.json", 'w') as result:
res = {"hostname": data_engine['cluster_name'],
"instance_id": datalab.meta_lib.get_instance_by_name(data_engine['tag_name'],
data_engine['master_node_name']),
"key_name": data_engine['key_name'],
"Action": "Create new Data Engine",
"computational_url": [
{"description": "Apache Spark Master",
"url": spark_master_access_url},
#{"description": "Apache Spark Master (via tunnel)",
#"url": spark_master_url}
]}
print(json.dumps(res))
result.write(json.dumps(res))
except Exception as err:
datalab.fab.append_result("Error with writing results", str(err))
clear_resources()
sys.exit(1)
|
[] |
[] |
[
"aws_dataengine_master_shape",
"application",
"conf_os_family",
"notebook_instance_name",
"conf_key_dir",
"notebook_scala_version",
"conf_service_base_name",
"notebook_spark_version",
"computational_name",
"exploratory_name",
"request_id",
"notebook_hadoop_version",
"endpoint_name",
"conf_os_user",
"conf_network_type",
"conf_resource",
"notebook_r_mirror",
"project_name",
"aws_dataengine_slave_shape",
"dataengine_instance_count",
"conf_key_name",
"aws_region"
] |
[]
|
["aws_dataengine_master_shape", "application", "conf_os_family", "notebook_instance_name", "conf_key_dir", "notebook_scala_version", "conf_service_base_name", "notebook_spark_version", "computational_name", "exploratory_name", "request_id", "notebook_hadoop_version", "endpoint_name", "conf_os_user", "conf_network_type", "conf_resource", "notebook_r_mirror", "project_name", "aws_dataengine_slave_shape", "dataengine_instance_count", "conf_key_name", "aws_region"]
|
python
| 22 | 0 | |
vendor/github.com/google/go-containerregistry/cmd/ko/test/main.go
|
// Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"io/ioutil"
"log"
"os"
"path/filepath"
)
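// main reads the "kenobi" file from the directory named by the KO_DATA_PATH environment
// variable and logs its contents.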
func main() {
dp := os.Getenv("KO_DATA_PATH")
file := filepath.Join(dp, "kenobi")
bytes, err := ioutil.ReadFile(file)
if err != nil {
log.Fatalf("Error reading %q: %v", file, err)
}
log.Print(string(bytes)) // Print, not Printf: the file contents are data, not a format string
}
|
[
"\"KO_DATA_PATH\""
] |
[] |
[
"KO_DATA_PATH"
] |
[]
|
["KO_DATA_PATH"]
|
go
| 1 | 0 | |
daemon/daemon.go
|
// Copyright (c) 2020-2021 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daemon
import (
"context"
"errors"
"fmt"
"math/rand"
"os"
"os/exec"
"os/signal"
"runtime"
"runtime/debug"
"sync"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
apiv3 "github.com/projectcalico/api/pkg/apis/projectcalico/v3"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
libapiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/libcalico-go/lib/backend"
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/k8s"
"github.com/projectcalico/libcalico-go/lib/backend/model"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/felixsyncer"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/updateprocessors"
"github.com/projectcalico/libcalico-go/lib/backend/watchersyncer"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/health"
lclogutils "github.com/projectcalico/libcalico-go/lib/logutils"
"github.com/projectcalico/libcalico-go/lib/options"
"github.com/projectcalico/libcalico-go/lib/set"
"github.com/projectcalico/pod2daemon/binder"
"github.com/projectcalico/typha/pkg/discovery"
"github.com/projectcalico/typha/pkg/syncclient"
"github.com/projectcalico/felix/buildinfo"
"github.com/projectcalico/felix/calc"
"github.com/projectcalico/felix/config"
_ "github.com/projectcalico/felix/config"
dp "github.com/projectcalico/felix/dataplane"
"github.com/projectcalico/felix/jitter"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/policysync"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/statusrep"
"github.com/projectcalico/felix/usagerep"
)
const (
// Our default value for GOGC if it is not set. This is the percentage that heap usage must
// grow by to trigger a garbage collection. Go's default is 100, meaning that 50% of the
// heap can be lost to garbage. We reduce it to this value to trade increased CPU usage for
// lower occupancy.
defaultGCPercent = 20
// String sent on the failure report channel to indicate we're shutting down for config
// change.
reasonConfigChanged = "config changed"
reasonFatalError = "fatal error"
// Process return code used to report a config change. This is the same as the code used
// by SIGHUP, which means that the wrapper script also restarts Felix on a SIGHUP.
configChangedRC = 129
// Grace period we allow for graceful shutdown before panicking.
gracefulShutdownTimeout = 30 * time.Second
)
// Run is the entry point to run a Felix instance.
//
// Its main role is to sequence Felix's startup by:
//
// Initialising early logging config (log format and early debug settings).
//
// Parsing command line parameters.
//
// Loading datastore configuration from the environment or config file.
//
// Loading more configuration from the datastore (this is retried until success).
//
// Starting the configured internal (golang) or external dataplane driver.
//
// Starting the background processing goroutines, which load and keep in sync with the
// state from the datastore, the "calculation graph".
//
// Starting the usage reporting and prometheus metrics endpoint threads (if configured).
//
// Then, it defers to monitorAndManageShutdown(), which blocks until one of the components
// fails, then attempts a graceful shutdown. At that point, all the processing is in
// background goroutines.
//
// To avoid having to maintain rarely-used code paths, Felix handles updates to its
// main config parameters by exiting and allowing itself to be restarted by the init
// daemon.
func Run(configFile string, gitVersion string, buildDate string, gitRevision string) {
// Go's RNG is not seeded by default. Do that now.
rand.Seed(time.Now().UTC().UnixNano())
// Special-case handling for environment variable-configured logging:
// Initialise early so we can trace out config parsing.
logutils.ConfigureEarlyLogging()
ctx := context.Background()
if os.Getenv("GOGC") == "" {
// Tune the GC to trade off a little extra CPU usage for significantly lower
// occupancy at high scale. This is worthwhile because Felix runs per-host so
// any occupancy improvement is multiplied by the number of hosts.
log.Debugf("No GOGC value set, defaulting to %d%%.", defaultGCPercent)
debug.SetGCPercent(defaultGCPercent)
}
if len(buildinfo.GitVersion) == 0 && len(gitVersion) != 0 {
buildinfo.GitVersion = gitVersion
buildinfo.BuildDate = buildDate
buildinfo.GitRevision = gitRevision
}
buildInfoLogCxt := log.WithFields(log.Fields{
"version": buildinfo.GitVersion,
"builddate": buildinfo.BuildDate,
"gitcommit": buildinfo.GitRevision,
"GOMAXPROCS": runtime.GOMAXPROCS(0),
})
buildInfoLogCxt.Info("Felix starting up")
// Health monitoring, for liveness and readiness endpoints. The following loop can take a
// while before the datastore reports itself as ready - for example when there is data that
// needs to be migrated from a previous version - and we still want Felix to report
// itself as live (but not ready) while we are waiting for that. So we create the
// aggregator upfront and will start serving health status over HTTP as soon as we see _any_
// config that indicates that.
healthAggregator := health.NewHealthAggregator()
const healthName = "felix-startup"
// Register this function as a reporter of liveness and readiness, with no timeout.
healthAggregator.RegisterReporter(healthName, &health.HealthReport{Live: true, Ready: true}, 0)
// Log out the kubernetes server details that we use in BPF mode.
log.WithFields(log.Fields{
"KUBERNETES_SERVICE_HOST": os.Getenv("KUBERNETES_SERVICE_HOST"),
"KUBERNETES_SERVICE_PORT": os.Getenv("KUBERNETES_SERVICE_PORT"),
}).Info("Kubernetes server override env vars.")
// Load the configuration from all the different sources including the
// datastore and merge. Keep retrying on failure. We'll sit in this
// loop until the datastore is ready.
log.Info("Loading configuration...")
var backendClient bapi.Client
var v3Client client.Interface
var datastoreConfig apiconfig.CalicoAPIConfig
var configParams *config.Config
var typhaAddr string
var numClientsCreated int
var k8sClientSet *kubernetes.Clientset
var kubernetesVersion string
configRetry:
for {
if numClientsCreated > 60 {
// If we're in a restart loop, periodically exit (so we can be restarted) since
// - it may solve the problem if there's something wrong with our process
// - it prevents us from leaking connections to the datastore.
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections")
}
// Make an initial report that says we're live but not yet ready.
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false})
// Load locally-defined config, including the datastore connection
// parameters. First the environment variables.
configParams = config.New()
envConfig := config.LoadConfigFromEnvironment(os.Environ())
// Then, the config file.
log.Infof("Loading config file: %v", configFile)
fileConfig, err := config.LoadConfigFile(configFile)
if err != nil {
log.WithError(err).WithField("configFile", configFile).Error(
"Failed to load configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Parse and merge the local config.
_, err = configParams.UpdateFrom(envConfig, config.EnvironmentVariable)
if err != nil {
log.WithError(err).WithField("configFile", configFile).Error(
"Failed to parse configuration environment variable")
time.Sleep(1 * time.Second)
continue configRetry
}
_, err = configParams.UpdateFrom(fileConfig, config.ConfigFile)
if err != nil {
log.WithError(err).WithField("configFile", configFile).Error(
"Failed to parse configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Each time round this loop, check that we're serving health reports if we should
// be, or cancel any existing server if we should not be serving any more.
healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort)
// We should now have enough config to connect to the datastore
// so we can load the remainder of the config.
datastoreConfig = configParams.DatastoreConfig()
// Can't dump the whole config because it may have sensitive information...
log.WithField("datastore", datastoreConfig.Spec.DatastoreType).Info("Connecting to datastore")
v3Client, err = client.New(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to create datastore client")
time.Sleep(1 * time.Second)
continue configRetry
}
log.Info("Created datastore client")
numClientsCreated++
backendClient = v3Client.(interface{ Backend() bapi.Client }).Backend()
for {
globalConfig, hostConfig, err := loadConfigFromDatastore(
ctx, backendClient, datastoreConfig, configParams.FelixHostname)
if err == ErrNotReady {
log.Warn("Waiting for datastore to be initialized (or migrated)")
time.Sleep(1 * time.Second)
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true})
continue
} else if err != nil {
log.WithError(err).Error("Failed to get config from datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
_, err = configParams.UpdateFrom(globalConfig, config.DatastoreGlobal)
if err != nil {
log.WithError(err).Error("Failed update global config from datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
_, err = configParams.UpdateFrom(hostConfig, config.DatastorePerHost)
if err != nil {
log.WithError(err).Error("Failed update host config from datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
break
}
err = configParams.Validate()
if err != nil {
log.WithError(err).Error("Failed to parse/validate configuration from datastore.")
time.Sleep(1 * time.Second)
continue configRetry
}
// We now have some config flags that affect how we configure the syncer.
// After loading the config from the datastore, reconnect, possibly with new
// config. We don't need to re-load the configuration _again_ because the
// calculation graph will spot if the config has changed since we were initialised.
datastoreConfig = configParams.DatastoreConfig()
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to (re)connect to datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
numClientsCreated++
// Try to get a Kubernetes client. This is needed for discovering Typha and for the BPF mode of the dataplane.
k8sClientSet = nil
if kc, ok := backendClient.(*k8s.KubeClient); ok {
// Opportunistically share the k8s client with the datastore driver. This is the best option since
// it reduces the number of connections and it lets us piggy-back on the datastore driver's config.
log.Info("Using Kubernetes datastore driver, sharing Kubernetes client with datastore driver.")
k8sClientSet = kc.ClientSet
} else {
// Not using KDD, fall back on trying to get a Kubernetes client from the environment.
log.Info("Not using Kubernetes datastore driver, trying to get a Kubernetes client...")
k8sconf, err := rest.InClusterConfig()
if err != nil {
log.WithError(err).Info("Kubernetes in-cluster config not available. " +
"Assuming we're not in a Kubernetes deployment.")
} else {
k8sClientSet, err = kubernetes.NewForConfig(k8sconf)
if err != nil {
log.WithError(err).Error("Got in-cluster config but failed to create Kubernetes client.")
time.Sleep(1 * time.Second)
continue configRetry
}
}
}
if k8sClientSet != nil {
serverVersion, err := k8sClientSet.Discovery().ServerVersion()
if err != nil {
log.WithError(err).Error("Couldn't read server version from server")
}
log.Infof("Server Version: %#v\n", *serverVersion)
kubernetesVersion = serverVersion.GitVersion
} else {
log.Info("no Kubernetes client available")
}
// If we're configured to discover Typha, do that now so we can retry if we fail.
typhaAddr, err = discoverTyphaAddr(configParams, k8sClientSet)
if err != nil {
log.WithError(err).Error("Typha discovery enabled but discovery failed.")
time.Sleep(1 * time.Second)
continue configRetry
}
break configRetry
}
if numClientsCreated > 2 {
// We don't have a way to close datastore connection so, if we reconnected after
// a failure to load config, restart felix to avoid leaking connections.
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections")
}
if configParams.BPFEnabled {
// Check for BPF dataplane support before we do anything that relies on the flag being set one way or another.
if err := dp.SupportsBPF(); err != nil {
log.Error("BPF dataplane mode enabled but not supported by the kernel. Disabling BPF mode.")
_, err := configParams.OverrideParam("BPFEnabled", "false")
if err != nil {
log.WithError(err).Panic("Bug: failed to override config parameter")
}
}
}
// We're now both live and ready.
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true})
// Enable or disable the health HTTP server according to coalesced config.
healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort)
// If we get here, we've loaded the configuration successfully.
// Update log levels before we do anything else.
logutils.ConfigureLogging(configParams)
// Since we may have enabled more logging, log with the build context
// again.
buildInfoLogCxt.WithField("config", configParams).Info(
"Successfully loaded configuration.")
if configParams.DebugPanicAfter > 0 {
log.WithField("delay", configParams.DebugPanicAfter).Warn("DebugPanicAfter is set, will panic after delay!")
go panicAfter(configParams.DebugPanicAfter)
}
if configParams.DebugSimulateDataRace {
log.Warn("DebugSimulateDataRace is set, will start some racing goroutines!")
simulateDataRace()
}
// Start up the dataplane driver. This may be the internal go-based driver or an external
// one.
var dpDriver dp.DataplaneDriver
var dpDriverCmd *exec.Cmd
failureReportChan := make(chan string)
configChangedRestartCallback := func() {
failureReportChan <- reasonConfigChanged
time.Sleep(gracefulShutdownTimeout)
log.Panic("Graceful shutdown took too long")
}
fatalErrorCallback := func(err error) {
log.WithError(err).Error("Shutting down due to fatal error")
failureReportChan <- reasonFatalError
time.Sleep(gracefulShutdownTimeout)
log.Panic("Graceful shutdown took too long")
}
dpDriver, dpDriverCmd = dp.StartDataplaneDriver(
configParams.Copy(), // Copy to avoid concurrent access.
healthAggregator,
configChangedRestartCallback,
fatalErrorCallback,
k8sClientSet)
// Initialise the glue logic that connects the calculation graph to/from the dataplane driver.
log.Info("Connect to the dataplane driver.")
var connToUsageRepUpdChan chan map[string]string
if configParams.UsageReportingEnabled {
// Make a channel for the connector to use to send updates to the usage reporter.
// (Otherwise, we pass in a nil channel, which disables such updates.)
connToUsageRepUpdChan = make(chan map[string]string, 1)
}
dpConnector := newConnector(
configParams.Copy(), // Copy to avoid concurrent access.
connToUsageRepUpdChan,
backendClient,
v3Client,
dpDriver,
failureReportChan)
// If enabled, create a server for the policy sync API. This allows clients to connect to
// Felix over a socket and receive policy updates.
var policySyncServer *policysync.Server
var policySyncProcessor *policysync.Processor
var policySyncAPIBinder binder.Binder
calcGraphClientChannels := []chan<- interface{}{dpConnector.ToDataplane}
if configParams.IsLeader() && configParams.PolicySyncPathPrefix != "" {
log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info(
"Policy sync API enabled. Creating the policy sync server.")
toPolicySync := make(chan interface{})
policySyncUIDAllocator := policysync.NewUIDAllocator()
policySyncProcessor = policysync.NewProcessor(toPolicySync)
policySyncServer = policysync.NewServer(
policySyncProcessor.JoinUpdates,
policySyncUIDAllocator.NextUID,
)
policySyncAPIBinder = binder.NewBinder(configParams.PolicySyncPathPrefix)
policySyncServer.RegisterGrpc(policySyncAPIBinder.Server())
calcGraphClientChannels = append(calcGraphClientChannels, toPolicySync)
}
// Now create the calculation graph, which receives updates from the
// datastore and outputs dataplane updates for the dataplane driver.
//
// The Syncer has its own thread and we use an extra thread for the
// Validator, just to pipeline that part of the calculation then the
// main calculation graph runs in a single thread for simplicity.
// The output of the calculation graph arrives at the dataplane
// connection via channel.
//
// Syncer -chan-> Validator -chan-> Calc graph -chan-> dataplane
// KVPair KVPair protobufs
// Get a Syncer from the datastore, or a connection to our remote sync daemon, Typha,
// which will feed the calculation graph with updates, bringing Felix into sync.
var syncer Startable
var typhaConnection *syncclient.SyncerClient
syncerToValidator := calc.NewSyncerCallbacksDecoupler()
if typhaAddr != "" {
// Use a remote Syncer, via the Typha server.
log.WithField("addr", typhaAddr).Info("Connecting to Typha.")
typhaConnection = syncclient.New(
typhaAddr,
buildinfo.GitVersion,
configParams.FelixHostname,
fmt.Sprintf("Revision: %s; Build date: %s",
buildinfo.GitRevision, buildinfo.BuildDate),
syncerToValidator,
&syncclient.Options{
ReadTimeout: configParams.TyphaReadTimeout,
WriteTimeout: configParams.TyphaWriteTimeout,
KeyFile: configParams.TyphaKeyFile,
CertFile: configParams.TyphaCertFile,
CAFile: configParams.TyphaCAFile,
ServerCN: configParams.TyphaCN,
ServerURISAN: configParams.TyphaURISAN,
},
)
} else {
// Use the syncer locally.
syncer = felixsyncer.New(backendClient, datastoreConfig.Spec, syncerToValidator, configParams.IsLeader())
log.Info("using resource updates where applicable")
configParams.SetUseNodeResourceUpdates(true)
}
log.WithField("syncer", syncer).Info("Created Syncer")
// Start the background processing threads.
if syncer != nil {
log.Infof("Starting the datastore Syncer")
syncer.Start()
} else {
log.Infof("Starting the Typha connection")
err := typhaConnection.Start(context.Background())
if err != nil {
log.WithError(err).Error("Failed to connect to Typha. Retrying...")
startTime := time.Now()
for err != nil && time.Since(startTime) < 30*time.Second {
// Set Ready to false and Live to true when unable to connect to typha
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false})
err = typhaConnection.Start(context.Background())
if err == nil {
break
}
log.WithError(err).Debug("Retrying Typha connection")
time.Sleep(1 * time.Second)
}
if err != nil {
log.WithError(err).Fatal("Failed to connect to Typha")
} else {
log.Info("Connected to Typha after retries.")
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true})
}
}
supportsNodeResourceUpdates, err := typhaConnection.SupportsNodeResourceUpdates(10 * time.Second)
if err != nil {
log.WithError(err).Error("Did not get hello message from Typha in time, assuming it does not support node resource updates")
return
}
log.Debugf("Typha supports node resource updates: %v", supportsNodeResourceUpdates)
configParams.SetUseNodeResourceUpdates(supportsNodeResourceUpdates)
go func() {
typhaConnection.Finished.Wait()
failureReportChan <- "Connection to Typha failed"
}()
}
// Create the ipsets/active policy calculation graph, which will
// do the dynamic calculation of ipset memberships and active policies
// etc.
asyncCalcGraph := calc.NewAsyncCalcGraph(
configParams.Copy(), // Copy to avoid concurrent access.
calcGraphClientChannels,
healthAggregator,
)
if configParams.UsageReportingEnabled {
// Usage reporting enabled, add stats collector to graph. When it detects an update
// to the stats, it makes a callback, which we use to send an update on a channel.
// We use a buffered channel here to avoid blocking the calculation graph.
statsChanIn := make(chan calc.StatsUpdate, 1)
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
statsChanIn <- stats
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.CalcGraph)
// Rather than sending the updates directly to the usage reporting thread, we
// decouple with an extra goroutine. This prevents blocking the calculation graph
// goroutine if the usage reporting goroutine is blocked on IO, for example.
// Using a buffered channel wouldn't work here because the usage reporting
// goroutine can block for a long time on IO so we could build up a long queue.
statsChanOut := make(chan calc.StatsUpdate)
go func() {
var statsChanOutOrNil chan calc.StatsUpdate
var stats calc.StatsUpdate
for {
select {
case stats = <-statsChanIn:
// Got a stats update, activate the output channel.
log.WithField("stats", stats).Debug("Buffer: stats update received")
statsChanOutOrNil = statsChanOut
case statsChanOutOrNil <- stats:
// Passed on the update, deactivate the output channel until
// the next update.
log.WithField("stats", stats).Debug("Buffer: stats update sent")
statsChanOutOrNil = nil
}
}
}()
usageRep := usagerep.New(
usagerep.StaticItems{KubernetesVersion: kubernetesVersion},
configParams.UsageReportingInitialDelaySecs,
configParams.UsageReportingIntervalSecs,
statsChanOut,
connToUsageRepUpdChan,
)
go usageRep.PeriodicallyReportUsage(context.Background())
} else {
// Usage reporting disabled, but we still want a stats collector for the
// felix_cluster_* metrics. Register a no-op function as the callback.
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.CalcGraph)
}
// Create the validator, which sits between the syncer and the
// calculation graph.
validator := calc.NewValidationFilter(asyncCalcGraph)
go syncerToValidator.SendTo(validator)
asyncCalcGraph.Start()
log.Infof("Started the processing graph")
var stopSignalChans []chan<- *sync.WaitGroup
if configParams.EndpointReportingEnabled {
delay := configParams.EndpointReportingDelaySecs
log.WithField("delay", delay).Info(
"Endpoint status reporting enabled, starting status reporter")
dpConnector.statusReporter = statusrep.NewEndpointStatusReporter(
configParams.FelixHostname,
configParams.OpenstackRegion,
dpConnector.StatusUpdatesFromDataplane,
dpConnector.InSync,
dpConnector.datastore,
delay,
delay*180,
)
dpConnector.statusReporter.Start()
}
// Start communicating with the dataplane driver.
dpConnector.Start()
if policySyncProcessor != nil {
log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info(
"Policy sync API enabled. Starting the policy sync server.")
policySyncProcessor.Start()
sc := make(chan *sync.WaitGroup)
stopSignalChans = append(stopSignalChans, sc)
go policySyncAPIBinder.SearchAndBind(sc)
}
// Send the opening message to the dataplane driver, giving it its
// config.
dpConnector.ToDataplane <- &proto.ConfigUpdate{
Config: configParams.RawValues(),
}
if configParams.PrometheusMetricsEnabled {
log.Info("Prometheus metrics enabled. Starting server.")
gaugeHost := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "felix_host",
Help: "Configured Felix hostname (as a label), typically used in grouping/aggregating stats; the label defaults to the hostname of the host but can be overridden by configuration. The value of the gauge is always set to 1.",
ConstLabels: prometheus.Labels{"host": configParams.FelixHostname},
})
gaugeHost.Set(1)
prometheus.MustRegister(gaugeHost)
go dp.ServePrometheusMetrics(configParams)
}
// Register signal handlers to dump memory/CPU profiles.
logutils.RegisterProfilingSignalHandlers(configParams)
// Now monitor the worker process and our worker threads and shut
// down the process gracefully if they fail.
monitorAndManageShutdown(failureReportChan, dpDriverCmd, stopSignalChans)
}
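// monitorAndManageShutdown waits for the dataplane driver to stop, an OS signal, or a failure
// report, then coordinates a graceful shutdown: it signals registered components to stop,
// terminates the driver if needed, and exits with a return code that reflects the reason.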
func monitorAndManageShutdown(failureReportChan <-chan string, driverCmd *exec.Cmd, stopSignalChans []chan<- *sync.WaitGroup) {
// Ask the runtime to tell us if we get a term/int signal.
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
signal.Notify(signalChan, syscall.SIGINT)
signal.Notify(signalChan, syscall.SIGHUP)
// Start a background thread to tell us when the dataplane driver stops.
// If the driver stops unexpectedly, we'll terminate this process.
// If this process needs to stop, we'll kill the driver and then wait
// for the message from the background thread.
driverStoppedC := make(chan bool)
go func() {
if driverCmd == nil {
log.Info("No driver process to monitor")
return
}
err := driverCmd.Wait()
log.WithError(err).Warn("Driver process stopped")
driverStoppedC <- true
}()
// Wait for one of the channels to give us a reason to shut down.
driverAlreadyStopped := driverCmd == nil
receivedFatalSignal := false
var reason string
select {
case <-driverStoppedC:
reason = "Driver stopped"
driverAlreadyStopped = true
case sig := <-signalChan:
if sig == syscall.SIGHUP {
log.Warning("Received a SIGHUP, treating as a request to reload config")
reason = reasonConfigChanged
} else {
reason = fmt.Sprintf("Received OS signal %v", sig)
receivedFatalSignal = true
}
case reason = <-failureReportChan:
}
logCxt := log.WithField("reason", reason)
logCxt.Warn("Felix is shutting down")
// Notify other components to stop. Each notified component must call Done() on the wait
// group when it has completed its shutdown.
var stopWG sync.WaitGroup
for _, c := range stopSignalChans {
stopWG.Add(1)
select {
case c <- &stopWG:
default:
stopWG.Done()
}
}
stopWG.Wait()
if !driverAlreadyStopped {
// Driver may still be running, just in case the driver is
// unresponsive, start a thread to kill this process if we
// don't manage to kill the driver.
logCxt.Info("Driver still running, trying to shut it down...")
giveUpOnSigTerm := make(chan bool)
go func() {
time.Sleep(4 * time.Second)
giveUpOnSigTerm <- true
time.Sleep(1 * time.Second)
log.Fatal("Failed to wait for driver to exit, giving up.")
}()
// Signal to the driver to exit.
err := driverCmd.Process.Signal(syscall.SIGTERM)
if err != nil {
logCxt.Error("failed to signal driver to exit")
}
select {
case <-driverStoppedC:
logCxt.Info("Driver shut down after SIGTERM")
case <-giveUpOnSigTerm:
logCxt.Error("Driver did not respond to SIGTERM, sending SIGKILL")
_ = driverCmd.Process.Kill()
<-driverStoppedC
logCxt.Info("Driver shut down after SIGKILL")
}
}
if !receivedFatalSignal {
// We're exiting due to a failure or a config change, wait
// a couple of seconds to ensure that we don't go into a tight
// restart loop (which would make the init daemon in calico/node give
// up trying to restart us).
logCxt.Info("Sleeping to avoid tight restart loop.")
go func() {
time.Sleep(2 * time.Second)
if reason == reasonConfigChanged {
exitWithCustomRC(configChangedRC, "Exiting for config change")
return
}
logCxt.Fatal("Exiting.")
}()
for {
sig := <-signalChan
if sig == syscall.SIGHUP {
logCxt.Warning("Ignoring SIGHUP because we're already shutting down")
continue
}
logCxt.WithField("signal", sig).Fatal(
"Signal received while shutting down, exiting immediately")
}
}
logCxt.Fatal("Exiting immediately")
}
func exitWithCustomRC(rc int, message string) {
// Since log writing is done on a background thread, we set the force-flush flag on this log to ensure that
// all the in-flight logs get written before we exit.
log.WithFields(log.Fields{
"rc": rc,
lclogutils.FieldForceFlush: true,
}).Info(message)
os.Exit(rc)
}
var (
ErrNotReady = errors.New("datastore is not ready or has not been initialised")
)
func loadConfigFromDatastore(
ctx context.Context, client bapi.Client, cfg apiconfig.CalicoAPIConfig, hostname string,
) (globalConfig, hostConfig map[string]string, err error) {
// The configuration is split over 3 different resource types and 4 different resource
// instances in the v3 data model:
// - ClusterInformation (global): name "default"
// - FelixConfiguration (global): name "default"
// - FelixConfiguration (per-host): name "node.<hostname>"
// - Node (per-host): name: <hostname>
// Get the global values and host specific values separately. We re-use the updateprocessor
// logic to convert the single v3 resource to a set of v1 key/values.
hostConfig = make(map[string]string)
globalConfig = make(map[string]string)
var ready bool
err = getAndMergeConfig(
ctx, client, globalConfig,
apiv3.KindClusterInformation, "default",
updateprocessors.NewClusterInfoUpdateProcessor(),
&ready,
)
if err != nil {
return
}
if !ready {
// The ClusterInformation struct should contain the ready flag, if it is not set, abort.
err = ErrNotReady
return
}
err = getAndMergeConfig(
ctx, client, globalConfig,
apiv3.KindFelixConfiguration, "default",
updateprocessors.NewFelixConfigUpdateProcessor(),
&ready,
)
if err != nil {
return
}
err = getAndMergeConfig(
ctx, client, hostConfig,
apiv3.KindFelixConfiguration, "node."+hostname,
updateprocessors.NewFelixConfigUpdateProcessor(),
&ready,
)
if err != nil {
return
}
err = getAndMergeConfig(
ctx, client, hostConfig,
libapiv3.KindNode, hostname,
updateprocessors.NewFelixNodeUpdateProcessor(cfg.Spec.K8sUsePodCIDR),
&ready,
)
if err != nil {
return
}
return
}
// getAndMergeConfig gets the v3 resource configuration, extracts the separate config values
// (where each configuration value is stored in a field of the v3 resource Spec) and merges into
// the supplied map, as required by our v1-style configuration loader.
func getAndMergeConfig(
ctx context.Context, client bapi.Client, config map[string]string,
kind string, name string,
configConverter watchersyncer.SyncerUpdateProcessor,
ready *bool,
) error {
logCxt := log.WithFields(log.Fields{"kind": kind, "name": name})
cfg, err := client.Get(ctx, model.ResourceKey{
Kind: kind,
Name: name,
Namespace: "",
}, "")
if err != nil {
switch err.(type) {
case cerrors.ErrorResourceDoesNotExist:
logCxt.Info("No config of this type")
return nil
default:
logCxt.WithError(err).Info("Failed to load config from datastore")
return err
}
}
// Re-use the update processor logic implemented for the Syncer. We give it a v3 config
// object in a KVPair and it uses the annotations defined on it to split it into v1-style
// KV pairs. Log any errors - but don't fail completely to avoid cyclic restarts.
v1kvs, err := configConverter.Process(cfg)
if err != nil {
logCxt.WithError(err).Error("Failed to convert configuration")
}
// Loop through the converted values and update our config map with values from either the
// Global or Host configs.
for _, v1KV := range v1kvs {
if _, ok := v1KV.Key.(model.ReadyFlagKey); ok {
logCxt.WithField("ready", v1KV.Value).Info("Loaded ready flag")
if v1KV.Value == true {
*ready = true
}
} else if v1KV.Value != nil {
switch k := v1KV.Key.(type) {
case model.GlobalConfigKey:
config[k.Name] = v1KV.Value.(string)
case model.HostConfigKey:
config[k.Name] = v1KV.Value.(string)
default:
logCxt.WithField("KV", v1KV).Debug("Skipping config - not required for initial loading")
}
}
}
return nil
}
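// DataplaneConnector glues the calculation graph to the dataplane driver: it sends config and
// dataplane updates to the driver, relays status reports from the driver back to the datastore,
// and raises problems on the failure report channel.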
type DataplaneConnector struct {
config *config.Config
configUpdChan chan<- map[string]string
ToDataplane chan interface{}
StatusUpdatesFromDataplane chan interface{}
InSync chan bool
failureReportChan chan<- string
dataplane dp.DataplaneDriver
datastore bapi.Client
datastorev3 client.Interface
statusReporter *statusrep.EndpointStatusReporter
datastoreInSync bool
firstStatusReportSent bool
wireguardStatUpdateFromDataplane chan *proto.WireguardStatusUpdate
}
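// Startable is implemented by components, such as the syncer, that are kicked off with a single
// Start() call.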
type Startable interface {
Start()
}
func newConnector(configParams *config.Config,
configUpdChan chan<- map[string]string,
datastore bapi.Client,
datastorev3 client.Interface,
dataplane dp.DataplaneDriver,
failureReportChan chan<- string,
) *DataplaneConnector {
felixConn := &DataplaneConnector{
config: configParams,
configUpdChan: configUpdChan,
datastore: datastore,
datastorev3: datastorev3,
ToDataplane: make(chan interface{}),
StatusUpdatesFromDataplane: make(chan interface{}),
InSync: make(chan bool, 1),
failureReportChan: failureReportChan,
dataplane: dataplane,
wireguardStatUpdateFromDataplane: make(chan *proto.WireguardStatusUpdate, 1),
}
return felixConn
}
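// readMessagesFromDataplane pumps messages from the dataplane driver: process status updates are
// written to the datastore, endpoint status updates are forwarded to the status reporter (if
// enabled), and Wireguard status updates go to the reconciliation channel. A read failure shuts
// the process down.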
func (fc *DataplaneConnector) readMessagesFromDataplane() {
defer func() {
fc.shutDownProcess("Failed to read messages from dataplane")
}()
log.Info("Reading from dataplane driver pipe...")
ctx := context.Background()
for {
payload, err := fc.dataplane.RecvMessage()
if err != nil {
log.WithError(err).Error("Failed to read from front-end socket")
fc.shutDownProcess("Failed to read from front-end socket")
}
log.WithField("payload", payload).Debug("New message from dataplane")
switch msg := payload.(type) {
case *proto.ProcessStatusUpdate:
fc.handleProcessStatusUpdate(ctx, msg)
case *proto.WorkloadEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.WorkloadEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.WireguardStatusUpdate:
fc.wireguardStatUpdateFromDataplane <- msg
default:
log.WithField("msg", msg).Warning("Unknown message from dataplane")
}
log.Debug("Finished handling message from front-end")
}
}
func (fc *DataplaneConnector) handleProcessStatusUpdate(ctx context.Context, msg *proto.ProcessStatusUpdate) {
log.Debugf("Status update from dataplane driver: %v", *msg)
statusReport := model.StatusReport{
Timestamp: msg.IsoTimestamp,
UptimeSeconds: msg.Uptime,
FirstUpdate: !fc.firstStatusReportSent,
}
kv := model.KVPair{
Key: model.ActiveStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)},
Value: &statusReport,
TTL: fc.config.ReportingTTLSecs,
}
applyCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
_, err := fc.datastore.Apply(applyCtx, &kv)
cancel()
if err != nil {
if _, ok := err.(cerrors.ErrorOperationNotSupported); ok {
log.Debug("Datastore doesn't support status reports.")
return // and it won't support the last status key either.
} else {
log.Warningf("Failed to write status to datastore: %v", err)
}
} else {
fc.firstStatusReportSent = true
}
kv = model.KVPair{
Key: model.LastStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)},
Value: &statusReport,
}
applyCtx, cancel = context.WithTimeout(ctx, 2*time.Second)
_, err = fc.datastore.Apply(applyCtx, &kv)
cancel()
if err != nil {
log.Warningf("Failed to write status to datastore: %v", err)
}
}
func (fc *DataplaneConnector) reconcileWireguardStatUpdate(dpPubKey string) error {
// In case of a recoverable failure (ErrorResourceUpdateConflict), retry update 3 times.
for iter := 0; iter < 3; iter++ {
// Read node resource from datastore and compare it with the publicKey from dataplane.
getCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
node, err := fc.datastorev3.Nodes().Get(getCtx, fc.config.FelixHostname, options.GetOptions{})
cancel()
if err != nil {
switch err.(type) {
case cerrors.ErrorResourceDoesNotExist:
if dpPubKey != "" {
// The node doesn't exist but a non-empty public key needs to be set.
log.Panic("v3 node resource must exist for Wireguard.")
} else {
// No node and an empty public key from the dataplane mean the node
// resource doesn't need to be processed further.
log.Debug("v3 node resource doesn't need any update")
return nil
}
}
// Return the error here so we can retry shortly.
log.WithError(err).Info("Failed to read node resource")
return err
}
// Check if the public-key needs to be updated.
storedPublicKey := node.Status.WireguardPublicKey
if storedPublicKey != dpPubKey {
updateCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
node.Status.WireguardPublicKey = dpPubKey
_, err := fc.datastorev3.Nodes().Update(updateCtx, node, options.SetOptions{})
cancel()
if err != nil {
// check if failure is recoverable
switch err.(type) {
case cerrors.ErrorResourceUpdateConflict:
log.Debug("Update conflict, retrying update")
continue
}
// Non-recoverable here; return the error so the update is retried later.
log.WithError(err).Info("Failed updating node resource")
return err
}
log.Debugf("Updated Wireguard public-key from %s to %s", storedPublicKey, dpPubKey)
}
break
}
return nil
}
func (fc *DataplaneConnector) handleWireguardStatUpdateFromDataplane() {
var current *proto.WireguardStatusUpdate
var ticker *jitter.Ticker
var retryC <-chan time.Time
for {
// Block until we either get an update or it's time to retry a failed update.
select {
case current = <-fc.wireguardStatUpdateFromDataplane:
log.Debugf("Wireguard status update from dataplane driver: %s", current.PublicKey)
case <-retryC:
log.Debug("retrying failed Wireguard status update")
}
if ticker != nil {
ticker.Stop()
}
// Try and reconcile the current wireguard status data.
err := fc.reconcileWireguardStatUpdate(current.PublicKey)
if err == nil {
current = nil
retryC = nil
ticker = nil
} else {
// Retry reconciling after 2-4 seconds (2s base plus up to 2s of jitter).
ticker = jitter.NewTicker(2*time.Second, 2*time.Second)
retryC = ticker.C
}
}
}
var handledConfigChanges = set.From("CalicoVersion", "ClusterGUID", "ClusterType")
func (fc *DataplaneConnector) sendMessagesToDataplaneDriver() {
defer func() {
fc.shutDownProcess("Failed to send messages to dataplane")
}()
var config map[string]string
for {
msg := <-fc.ToDataplane
switch msg := msg.(type) {
case *proto.InSync:
log.Info("Datastore now in sync.")
if !fc.datastoreInSync {
log.Info("Datastore in sync for first time, sending message to status reporter.")
fc.datastoreInSync = true
fc.InSync <- true
}
case *proto.ConfigUpdate:
if config != nil {
log.WithFields(log.Fields{
"old": config,
"new": msg.Config,
}).Info("Config updated, checking whether we need to restart")
restartNeeded := false
for kNew, vNew := range msg.Config {
logCxt := log.WithFields(log.Fields{"key": kNew, "new": vNew})
if vOld, prs := config[kNew]; !prs {
logCxt = logCxt.WithField("updateType", "add")
} else if vNew != vOld {
logCxt = logCxt.WithFields(log.Fields{"old": vOld, "updateType": "update"})
} else {
continue
}
if handledConfigChanges.Contains(kNew) {
logCxt.Info("Config change can be handled without restart")
continue
}
logCxt.Warning("Config change requires restart")
restartNeeded = true
}
for kOld, vOld := range config {
logCxt := log.WithFields(log.Fields{"key": kOld, "old": vOld, "updateType": "delete"})
if _, prs := msg.Config[kOld]; prs {
// Key was present in the message so we've handled it above.
continue
}
if handledConfigChanges.Contains(kOld) {
logCxt.Info("Config change can be handled without restart")
continue
}
logCxt.Warning("Config change requires restart")
restartNeeded = true
}
if restartNeeded {
fc.shutDownProcess("config changed")
}
}
// Take a copy of the config to compare against next time.
config = make(map[string]string)
for k, v := range msg.Config {
config[k] = v
}
if fc.configUpdChan != nil {
// Send the config over to the usage reporter.
fc.configUpdChan <- config
}
case *calc.DatastoreNotReady:
log.Warn("Datastore became unready, need to restart.")
fc.shutDownProcess("datastore became unready")
}
if err := fc.dataplane.SendMessage(msg); err != nil {
fc.shutDownProcess("Failed to write to dataplane driver")
}
}
}
func (fc *DataplaneConnector) shutDownProcess(reason string) {
// Send a failure report to the managed shutdown thread then give it
// a few seconds to do the shutdown.
fc.failureReportChan <- reason
time.Sleep(5 * time.Second)
// The graceful shutdown failed, terminate the process.
log.Panic("Managed shutdown failed. Panicking.")
}
func (fc *DataplaneConnector) Start() {
// Start a background thread to write to the dataplane driver.
go fc.sendMessagesToDataplaneDriver()
// Start background thread to read messages from dataplane driver.
go fc.readMessagesFromDataplane()
// Start a background thread to handle Wireguard status updates to the Node resource.
go fc.handleWireguardStatUpdateFromDataplane()
}
func discoverTyphaAddr(configParams *config.Config, k8sClientSet kubernetes.Interface) (string, error) {
typhaDiscoveryOpts := configParams.TyphaDiscoveryOpts()
typhaDiscoveryOpts = append(typhaDiscoveryOpts, discovery.WithKubeClient(k8sClientSet))
return discovery.DiscoverTyphaAddr(typhaDiscoveryOpts...)
}
|
[
"\"GOGC\"",
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] |
[] |
[
"GOGC",
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
] |
[]
|
["GOGC", "KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"]
|
go
| 3 | 0 | |
vendor/github.com/joaosoft/manager/utils.go
|
package manager
import (
"bufio"
"encoding/json"
"io/ioutil"
"os"
)
func GetEnv() string {
env := os.Getenv("env")
if env == "" {
env = "local"
}
return env
}
func Exists(file string) bool {
if _, err := os.Stat(file); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
func ReadFile(fileName string, obj interface{}) ([]byte, error) {
var err error
if !Exists(fileName) {
fileName = global[path_key].(string) + fileName
}
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
data, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
if obj != nil {
if err := json.Unmarshal(data, obj); err != nil {
return nil, err
}
}
return data, nil
}
func ReadFileLines(fileName string) ([]string, error) {
lines := make([]string, 0)
if !Exists(fileName) {
fileName = global[path_key].(string) + fileName
}
file, err := os.Open(fileName)
if err != nil {
return nil, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
if err := scanner.Err(); err != nil {
return nil, err
}
return lines, nil
}
func WriteFile(fileName string, obj interface{}) error {
if !Exists(fileName) {
fileName = global[path_key].(string) + fileName
}
jsonBytes, err := json.MarshalIndent(obj, "", " ")
if err != nil {
return err
}
if err := ioutil.WriteFile(fileName, jsonBytes, 0644); err != nil {
return err
}
return nil
}
|
[
"\"env\""
] |
[] |
[
"env"
] |
[]
|
["env"]
|
go
| 1 | 0 | |
pkg/subctl/cmd/cloud/rhos/rhos.go
|
/*
SPDX-License-Identifier: Apache-2.0
Copyright Contributors to the Submariner project.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package rhos provides common functionality to run cloud prepare/cleanup on RHOS Clusters.
package rhos
import (
"encoding/json"
"os"
"path/filepath"
"github.com/gophercloud/utils/openstack/clientconfig"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/submariner-io/admiral/pkg/reporter"
"github.com/submariner-io/admiral/pkg/util"
"github.com/submariner-io/cloud-prepare/pkg/api"
"github.com/submariner-io/cloud-prepare/pkg/k8s"
"github.com/submariner-io/cloud-prepare/pkg/ocp"
"github.com/submariner-io/cloud-prepare/pkg/rhos"
"github.com/submariner-io/submariner-operator/internal/restconfig"
"github.com/submariner-io/submariner-operator/pkg/subctl/cmd/utils"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
)
const (
infraIDFlag = "infra-id"
regionFlag = "region"
projectIDFlag = "project-id"
cloudEntryFlag = "cloud-entry"
)
var (
infraID string
region string
projectID string
ocpMetadataFile string
cloudEntry string
)
// AddRHOSFlags adds basic flags needed by RHOS.
func AddRHOSFlags(command *cobra.Command) {
command.Flags().StringVar(&infraID, infraIDFlag, "", "RHOS infra ID")
command.Flags().StringVar(®ion, regionFlag, "", "RHOS region")
command.Flags().StringVar(&projectID, projectIDFlag, "", "RHOS project ID")
command.Flags().StringVar(&ocpMetadataFile, "ocp-metadata", "",
"OCP metadata.json file (or the directory containing it) from which to read the RHOS infra ID "+
"and region from (takes precedence over the specific flags)")
command.Flags().StringVar(&cloudEntry, cloudEntryFlag, "", "the cloud entry to use")
}
// RunOnRHOS runs the given function on RHOS, supplying it with a cloud instance connected to RHOS and a reporter that writes to CLI.
// The function makes sure that infraID and region are specified, and retrieves the credentials from the local RHOS/OpenStack client configuration in order to connect to RHOS.
func RunOnRHOS(restConfigProducer restconfig.Producer, gwInstanceType string, dedicatedGWNodes bool, status reporter.Interface,
function func(api.Cloud, api.GatewayDeployer, reporter.Interface) error,
) error {
if ocpMetadataFile != "" {
err := initializeFlagsFromOCPMetadata(ocpMetadataFile)
region = os.Getenv("OS_REGION_NAME")
utils.ExitOnError("Failed to read RHOS Cluster information from OCP metadata file", err)
} else {
utils.ExpectFlag(infraIDFlag, infraID)
utils.ExpectFlag(regionFlag, region)
utils.ExpectFlag(projectIDFlag, projectID)
}
status.Start("Retrieving RHOS credentials from your RHOS configuration")
// Using RHOS default "openstack", if not specified
if cloudEntry == "" {
cloudEntry = "openstack"
}
opts := &clientconfig.ClientOpts{
Cloud: cloudEntry,
}
providerClient, err := clientconfig.AuthenticatedClient(opts)
if err != nil {
return status.Error(err, "error initializing RHOS Client")
}
status.End()
k8sConfig, err := restConfigProducer.ForCluster()
if err != nil {
return status.Error(err, "error initializing Kubernetes config")
}
clientSet, err := kubernetes.NewForConfig(k8sConfig.Config)
if err != nil {
return status.Error(err, "error creating Kubernetes client")
}
k8sClientSet := k8s.NewInterface(clientSet)
restMapper, err := util.BuildRestMapper(k8sConfig.Config)
if err != nil {
return status.Error(err, "error creating REST mapper")
}
dynamicClient, err := dynamic.NewForConfig(k8sConfig.Config)
if err != nil {
return status.Error(err, "error creating dynamic client")
}
cloudInfo := rhos.CloudInfo{
Client: providerClient,
InfraID: infraID,
Region: region,
K8sClient: k8sClientSet,
}
rhosCloud := rhos.NewCloud(cloudInfo)
msDeployer := ocp.NewK8sMachinesetDeployer(restMapper, dynamicClient)
gwDeployer := rhos.NewOcpGatewayDeployer(cloudInfo, msDeployer, projectID, gwInstanceType,
"", cloudEntry, dedicatedGWNodes)
return function(rhosCloud, gwDeployer, status)
}
func initializeFlagsFromOCPMetadata(metadataFile string) error {
fileInfo, err := os.Stat(metadataFile)
if err != nil {
return errors.Wrapf(err, "failed to stat file %q", metadataFile)
}
if fileInfo.IsDir() {
metadataFile = filepath.Join(metadataFile, "metadata.json")
}
data, err := os.ReadFile(metadataFile)
if err != nil {
return errors.Wrapf(err, "error reading file %q", metadataFile)
}
var metadata struct {
InfraID string `json:"infraID"`
RHOS struct {
ProjectID string `json:"projectID"`
} `json:"rhos"`
}
err = json.Unmarshal(data, &metadata)
if err != nil {
return errors.Wrap(err, "error unmarshalling data")
}
infraID = metadata.InfraID
projectID = metadata.RHOS.ProjectID
return nil
}
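// Illustrative shape of a compatible metadata.json (values are placeholders):
//
//	{"infraID": "mycluster-abc12", "rhos": {"projectID": "0123456789abcdef"}}
//
// When --ocp-metadata points at the file (or its directory), infraID and projectID are
// taken from it, while the region falls back to the OS_REGION_NAME environment variable,
// as seen in RunOnRHOS above.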
|
[
"\"OS_REGION_NAME\""
] |
[] |
[
"OS_REGION_NAME"
] |
[]
|
["OS_REGION_NAME"]
|
go
| 1 | 0 | |
src/finitestate/common/aws/athena.py
|
import concurrent.futures
import datetime
import logging
import os
import random
import re
import string
import time
from codecs import getreader
from contextlib import closing
from functools import lru_cache
from typing import Any, Dict, List, Generator, Optional, Union
from finitestate.common.aws.s3 import get_bucket_and_key_from_uri
from finitestate.common.retry_utils import retry
logger = logging.getLogger(__name__)
class BucketingConfig(object):
def __init__(self, columns: Union[str, List[str]], count: int):
if isinstance(columns, str):
self.columns = [columns]
else:
self.columns = columns
self.count = count
def quote_strings(x):
if isinstance(x, str):
return "'{x}'".format(x=x)
return x
def build_ctas(target_database: str,
target_table: str,
target_path: str,
format: str = 'parquet',
compression: str = 'SNAPPY',
bucketing_config: BucketingConfig = None,
source_database: str = None,
source_table: str = None,
columns: Union[str, List[str]] = None,
query: str = None):
storage_options = {
'format': quote_strings(format),
'external_location': quote_strings(target_path)
}
if (format and (format.lower() == 'parquet' or format.lower() == 'orc')) and compression is not None:
storage_options['{format}_compression'.format(format=format)] = quote_strings(compression)
if bucketing_config:
storage_options['bucketed_by'] = 'ARRAY[{}]'.format(','.join([quote_strings(c) for c in bucketing_config.columns]))
storage_options['bucket_count'] = bucketing_config.count
storage_stanza = "WITH ({})".format(', '.join(['{k}={v}'.format(k=k, v=v) for k, v in storage_options.items()]))
if not query:
if not columns:
columns = ['*']
if isinstance(columns, str):
columns = [columns]
query = 'SELECT {columns} FROM {source_database}.{source_table}'.format(
source_database=source_database,
source_table=source_table,
columns=', '.join(columns)
)
template = 'CREATE TABLE {target_database}.{target_table} {storage_stanza} ' \
'AS {query}'
return template.format(target_database=target_database,
target_table=target_table,
storage_stanza=storage_stanza,
query=query)
__athena_client = None
__glue_client = None
__s3_resource = None
def get_athena_client():
global __athena_client
if not __athena_client:
import boto3
__athena_client = boto3.client('athena', endpoint_url=os.environ.get('ATHENA_ENDPOINT_URL'))
return __athena_client
def get_glue_client():
global __glue_client
if not __glue_client:
import boto3
__glue_client = boto3.client('glue', endpoint_url=os.environ.get('GLUE_ENDPOINT_URL'))
return __glue_client
def get_s3_resource():
global __s3_resource
if not __s3_resource:
import boto3
__s3_resource = boto3.resource('s3', endpoint_url=os.environ.get('S3_ENDPOINT_URL'))
return __s3_resource
def get_s3_client():
return get_s3_resource().meta.client
def submit_query(database_name: str, query_string: str, output_location: str) -> str:
"""
Asynchronously submits a SQL statement to Athena.
:param database_name: The Glue database name
:param query_string: The SQL statement to execute
:param output_location: The S3 location where Athena should store its results
:return: The Athena query execution ID
"""
query = get_athena_client().start_query_execution(
QueryString=query_string,
QueryExecutionContext={"Database": database_name},
ResultConfiguration={"OutputLocation": output_location},
)
query_execution_id = query["QueryExecutionId"]
logger.info(f"Submitted Athena query {query_execution_id} : {query_string}")
return query_execution_id
def wait_for_query(query_execution_id: str, sleep_sec: int = None):
"""
Waits (blocks) for an asynchronous Athena query to complete.
:param query_execution_id: The Athena query ID to wait for
:param sleep_sec: The number of seconds to sleep between calls to the Athena API to check on the query
:raises Exception: on query entering FAILED or CANCELLED status
"""
while True:
check_response = get_athena_client().get_query_execution(
QueryExecutionId=query_execution_id
)
query_state = check_response["QueryExecution"]["Status"]["State"]
if query_state == "QUEUED" or query_state == "RUNNING":
time.sleep(sleep_sec or 3)
elif query_state == "SUCCEEDED":
break
elif query_state == "FAILED" or query_state == "CANCELLED":
raise Exception("Athena query failed: {}".format(query_execution_id))
@lru_cache(maxsize=1000)
def __parser_for_athena_type(athena_type) -> type:
if athena_type in ['char', 'varchar', 'string']:
return str
if athena_type in ['tinyint', 'smallint', 'int', 'integer', 'bigint']:
return int
if athena_type == 'boolean':
return bool
if athena_type in ['double', 'float', 'real']:
return float
raise ValueError(f'Unsupported Athena type: {athena_type}')
def __as_dict(row_data, columns):
def as_python_type(cell, column) -> Any:
value = cell.get('VarCharValue')
if value is not None:
return __parser_for_athena_type(column['Type'].lower())(value)
return None
return {column['Name']: as_python_type(cell, column) for cell, column in zip(row_data, columns)}
def __is_header(row_data, columns):
return all([cell.get('VarCharValue') == column['Name'] for cell, column in zip(row_data, columns)])
END_OF_VALUE = re.compile(r'"(,|$)')
def split_csv_line(line: str) -> List[Optional[str]]:
"""
No, the author of this code was not unaware of csv.Reader. Unfortunately, csv.Reader turns None into ''
and it may be important for clients of this code to differentiate between missing (None) and blank ('') values
in the result set being parsed, so we do it ourselves.
"""
if not line:
return [None]
original = line
line = line.rstrip()
output = []
tail = []
while line and line[-1] == ',':
line = line[:-1]
tail.append(None)
while line:
ch = line[0]
if ch == ',':
output.append(None)
line = line[1:]
elif ch == '"':
end = END_OF_VALUE.search(line, pos=1)
if not end:
raise ValueError(f'Failed to find the end of quoted field while splitting {original}')
output.append(line[1:end.start()])
line = line[end.end():]
else:
raise ValueError(f'Unexpected character {ch} encountered while splitting {original}')
return output + tail
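# Illustrative behaviour, derived from the parsing logic above:
#   split_csv_line('"a",,""')  -> ['a', None, '']   # missing (None) vs blank ('') are preserved
#   split_csv_line('"a",')     -> ['a', None]
#   split_csv_line('')         -> [None]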
def stream_results(query_execution_id: str):
"""
Reads an Athena result set by directly accessing the CSV file on S3.
"""
def get_columns(query_execution_id: str):
return get_athena_client().get_query_results(QueryExecutionId=query_execution_id, MaxResults=1)['ResultSet']['ResultSetMetadata']['ColumnInfo']
def get_location(query_execution_id: str):
response = get_athena_client().get_query_execution(QueryExecutionId=query_execution_id)
return response['QueryExecution']['ResultConfiguration']['OutputLocation']
with concurrent.futures.ThreadPoolExecutor() as pool:
get_columns_future = pool.submit(get_columns, query_execution_id)
get_location_future = pool.submit(get_location, query_execution_id)
bucket, key = get_bucket_and_key_from_uri(get_location_future.result())
python_type_mapper = {
column['Name']: __parser_for_athena_type(column['Type'].lower()) for column in get_columns_future.result()
}
logger.debug(f'Reading Athena query results from s3://{bucket}/{key}')
with closing(getreader('utf-8')(get_s3_client().get_object(Bucket=bucket, Key=key)['Body'])) as lines:
column_names = split_csv_line(next(lines))
for line in lines:
yield {
k: python_type_mapper[k](v) if v is not None else None for k, v in zip(column_names, split_csv_line(line))
}
def stream_results_from_api(query_execution_id: str, page_size: int = None) -> Generator[Dict[str, Any], None, None]:
"""
Reads an Athena result set by paging through it with calls to get_query_results. The performance of this
approach is notably slower than direct S3 access for large result sets, because AWS allows a maximum page size
of 1,000 rows.
"""
paginator = get_athena_client().get_paginator('get_query_results')
pages = paginator.paginate(QueryExecutionId=query_execution_id, PaginationConfig={'PageSize': page_size or 1000})
for page in pages:
columns = page['ResultSet']['ResultSetMetadata']['ColumnInfo']
rows = page['ResultSet']['Rows']
start = 1 if __is_header(rows[0]['Data'], columns) else 0
for row in rows[start:]:
yield __as_dict(row['Data'], columns)
def get_table_base_path(database_name: str, table_name: str):
response = get_glue_client().get_table(
DatabaseName=database_name,
Name=table_name
)
return response["Table"]["Parameters"]["fs_base_path"]
def ctas_rebuild_table(database_name: str, table_name: str, query: str, bucketing_config: BucketingConfig = None, format: str = 'parquet'):
# Use a CTAS to create a temporary table with the latest content for the real table
temp_target_table = "temp_{table}_ctas_{guid}".format(
table=table_name,
guid=''.join(random.sample(string.ascii_lowercase, 6))
)
target_path = os.path.join(
get_table_base_path(database_name, table_name),
"date={date}".format(date=datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S'))
)
target_bucket, _ = get_bucket_and_key_from_uri(target_path)
athena_results = f"s3://{target_bucket}/_athena_results"
# Create the temp table
ctas = build_ctas(
target_database=database_name,
target_table=temp_target_table,
target_path=target_path,
bucketing_config=bucketing_config,
query=query,
format=format,
)
# Drop the temp table (leaving the data behind)
drop = "DROP TABLE IF EXISTS {database}.{table}".format(
database=database_name,
table=temp_target_table
)
# Update the storage location of the main table to use the newly generated data
update = "ALTER TABLE {database}.{table} SET LOCATION '{location}'".format(
database=database_name,
table=table_name,
location=target_path
)
# If we retry after a transient Athena failure, we may need to purge existing partial output.
def s3_rmr(target_path: str):
bucket, key = get_bucket_and_key_from_uri(target_path)
if not bucket or not key:
raise ValueError("Unsupported target_path value: {}".format(target_path))
for o in get_s3_resource().Bucket(bucket).objects.filter(Prefix=key):
o.delete()
# Athena Queries will sometimes fail with "Query exhausted resources at this scale factor" errors,
# and the recommendation is to retry with back-off.
@retry(max_retries=5)
def process(statements):
# In the case of a retry, we may need to purge existing partial output.
s3_rmr(target_path)
for statement in statements:
wait_for_query(submit_query(database_name=database_name, query_string=statement, output_location=athena_results))
process([ctas, drop, update])
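# Illustrative call (database, table and query are placeholders):
#   ctas_rebuild_table(
#       database_name='analytics',
#       table_name='firmware_summary',
#       query='SELECT sha256, count(*) AS n FROM analytics.files GROUP BY sha256',
#       bucketing_config=BucketingConfig('sha256', 16),
#   )
# This writes the result under a fresh date=... prefix below the table's fs_base_path and
# then repoints the table's location at it, as described by the comments above.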
|
[] |
[] |
[
"S3_ENDPOINT_URL",
"ATHENA_ENDPOINT_URL",
"GLUE_ENDPOINT_URL"
] |
[]
|
["S3_ENDPOINT_URL", "ATHENA_ENDPOINT_URL", "GLUE_ENDPOINT_URL"]
|
python
| 3 | 0 | |
exps/stage3_root2/config.py
|
# encoding: utf-8
import os, getpass
import os.path as osp
import argparse
from easydict import EasyDict as edict
from dataset.data_settings import load_dataset
from cvpack.utils.pyt_utils import ensure_dir
class Config:
# -------- Directory Config -------- #
ROOT_DIR = '/home/panzhiyu/project/3d_pose/SMAP' #os.environ['PROJECT_HOME']
OUTPUT_DIR = osp.join(ROOT_DIR, 'model_logs_1020_singletestv2', osp.split(osp.split(osp.realpath(__file__))[0])[1])
TEST_DIR = osp.join(OUTPUT_DIR, 'log_dir')
TENSORBOARD_DIR = osp.join(OUTPUT_DIR, 'tb_dir')
# -------- Data Config -------- #
DATALOADER = edict()
DATALOADER.NUM_WORKERS = 8
DATALOADER.ASPECT_RATIO_GROUPING = False
DATALOADER.SIZE_DIVISIBILITY = 0
DATASET = edict()
DATASET.NAME = 'MIX'
dataset = load_dataset(DATASET.NAME)
DATASET.KEYPOINT = dataset.KEYPOINT
DATASET.PAF = dataset.PAF
DATASET.ROOT_IDX = dataset.ROOT_IDX # pelvis or neck
DATASET.MAX_PEOPLE = 10
DATASET.CAM = [(0,3),(0,6),(0,12),(0,13),(0,23)] # [(0,1),(0,5),(0,7),(0,15),(0,20)]
# DATASET.CAM = [0,1,2,3,4]
INPUT = edict()
INPUT.NORMALIZE = True
INPUT.MEANS = [0.406, 0.456, 0.485] # bgr
INPUT.STDS = [0.225, 0.224, 0.229]
INPUT_SHAPE = dataset.INPUT_SHAPE
OUTPUT_SHAPE = dataset.OUTPUT_SHAPE
# -------- Model Config -------- #
MODEL = edict()
MODEL.STAGE_NUM = 3
MODEL.UPSAMPLE_CHANNEL_NUM = 256
MODEL.DEVICE = 'cuda'
MODEL.WEIGHT = '/home/panzhiyu/project/3d_pose/SMAP/SMAP_model.pth' #None # osp.join(ROOT_DIR, 'lib/models/resnet-50_rename.pth')
# -------- Training Config -------- #
SOLVER = edict()
SOLVER.IMG_PER_GPU = 64 # 32 for gnn
SOLVER.BASE_LR = 1e-3 #2e-4
SOLVER.CHECKPOINT_PERIOD = 4800
SOLVER.MAX_ITER = 96000 # max iteration num
SOLVER.WEIGHT_DECAY = 8e-6
SOLVER.WARMUP_FACTOR = 0.1
SOLVER.WARMUP_ITERS = 2400
LOSS = edict()
LOSS.OHKM = True
LOSS.TOPK = 8
LOSS.COARSE_TO_FINE = True
WITH_MDS = True
RUN_EFFICIENT = False
Pretrained = False
# -------- Test Config -------- #
TEST = edict()
TEST.IMG_PER_GPU = 3
TEST.ROOT_PATH = '/Extra/panzhiyu/CMU_data' #'/Extra/panzhiyu/Shelf' #'/Extra/panzhiyu/Shelf' # '/Extra/panzhiyu/CMU_data'## '/Extra/panzhiyu/CMU_data'# '/Extra/panzhiyu/CampusSeq1'
TEST.JSON_PATH = osp.join(TEST.ROOT_PATH, 'cmu_data_train_multi.pkl') # campus_meta_multi.pkl '' cmu_data_train_new5_multi.pkl cmu_data_test_multi.pkl cmu_data_gnn_new5temp1_multi.pkl
# cmu_data_gnn_final_multi.pkl cmu_data_test_multi.pkl cmu_data_train_multi.pkl shelf_meta_multi.pkl cmu_data_test_multi.pkl
config = Config()
cfg = config
def link_log_dir():
if not osp.exists('./log'):
ensure_dir(config.OUTPUT_DIR)
cmd = 'ln -s ' + config.OUTPUT_DIR + ' log'
os.system(cmd)
def make_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'-log', '--linklog', default=False, action='store_true')
return parser
if __name__ == '__main__':
parser = make_parser()
args = parser.parse_args()
if args.linklog:
link_log_dir()
|
[] |
[] |
[
"PROJECT_HOME"
] |
[]
|
["PROJECT_HOME"]
|
python
| 1 | 0 | |
handler.py
|
import boto3
from botocore.exceptions import ClientError
import json
import os
import time
import uuid
import decimal
import requests
client = boto3.client('ses')
sender = os.environ['SENDER_EMAIL']
subject = os.environ['EMAIL_SUBJECT']
configset = os.environ['CONFIG_SET']
convertkit_key = os.environ['CONVERTKIT_KEY']
convertkit_form = os.environ['CONVERTKIT_FORM']
charset = 'UTF-8'
dynamodb = boto3.resource('dynamodb')
def sendMail(event, context):
print(event)
try:
data = event['body']
content = 'From: ' + data['firstname'] + ' ' + data['lastname'] + \
'<br/>Email: ' + data['email'] + '<br/>Message: ' + data['message']
saveToDynamoDB(data)
response = sendMailToUser(data, content)
convertkit = addSubscriberPerson(data)
except ClientError as e:
print(e.response['Error']['Message'])
else:
print("Email sent! Message Id:"),
print(response['MessageId'])
return "Email sent!"
def list(event, context):
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
# fetch all records from database
result = table.scan()
#return response
return {
"statusCode": 200,
"body": result['Items']
}
def saveToDynamoDB(data):
timestamp = int(time.time() * 1000)
# Insert details into DynamoDB Table
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
item = {
'id': str(uuid.uuid1()),
'firstname': data['firstname'],
'lastname': data['lastname'],
'email': data['email'],
'message': data['message'],
'createdAt': timestamp,
'updatedAt': timestamp
}
table.put_item(Item=item)
return
def addSubscriberPerson(data):
body = {
"first_name": data['firstname'],
"email": data['email'],
"api_key": convertkit_key
}
return requests.post('https://api.convertkit.com/v3/forms/' + str(convertkit_form) + '/subscribe', json=body)
def sendMailToUser(data, content):
# Send Email using SES
return client.send_email(
Source=sender,
Destination={
'ToAddresses': [sender],
},
Message={
'Subject': {
'Charset': charset,
'Data': subject
},
'Body': {
'Html': {
'Charset': charset,
'Data': content
},
'Text': {
'Charset': charset,
'Data': content
}
}
}
)
|
[] |
[] |
[
"SENDER_EMAIL",
"CONVERTKIT_FORM",
"EMAIL_SUBJECT",
"DYNAMODB_TABLE",
"CONVERTKIT_KEY",
"CONFIG_SET"
] |
[]
|
["SENDER_EMAIL", "CONVERTKIT_FORM", "EMAIL_SUBJECT", "DYNAMODB_TABLE", "CONVERTKIT_KEY", "CONFIG_SET"]
|
python
| 6 | 0 | |
grpclog/loggerv2.go
|
/*
*
* Copyright 2017 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpclog
import (
"io"
"io/ioutil"
"log"
"os"
"strconv"
"google.golang.org/grpc/v2/internal/grpclog"
)
// LoggerV2 does underlying logging work for grpclog.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...interface{})
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...interface{})
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...interface{})
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...interface{})
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...interface{})
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...interface{})
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...interface{})
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...interface{})
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...interface{})
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...interface{})
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...interface{})
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...interface{})
// V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool
}
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
grpclog.Logger = l
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
}
const (
// infoLog indicates Info severity.
infoLog int = iota
// warningLog indicates Warning severity.
warningLog
// errorLog indicates Error severity.
errorLog
// fatalLog indicates Fatal severity.
fatalLog
)
// severityName contains the string representation of each severity.
var severityName = []string{
infoLog: "INFO",
warningLog: "WARNING",
errorLog: "ERROR",
fatalLog: "FATAL",
}
// loggerT is the default logger used by grpclog.
type loggerT struct {
m []*log.Logger
v int
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
// Error logs will be written to errorW, warningW and infoW.
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
var m []*log.Logger
m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
return &loggerT{m: m, v: v}
}
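// Illustrative usage (a minimal sketch; the writer choices are arbitrary):
//
//	l := NewLoggerV2WithVerbosity(os.Stdout, os.Stdout, os.Stderr, 2)
//	SetLoggerV2(l)
//
// With v=2, l.V(2) reports true and l.V(3) reports false; Error output fans out to the
// info, warning and error writers as described above.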
// newLoggerV2 creates a loggerV2 to be used as the default logger.
// By default only ERROR logs are written to stderr; GRPC_GO_LOG_SEVERITY_LEVEL
// widens this to WARNING or INFO, and everything below the chosen severity is discarded.
func newLoggerV2() LoggerV2 {
errorW := ioutil.Discard
warningW := ioutil.Discard
infoW := ioutil.Discard
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
switch logLevel {
case "", "ERROR", "error": // If env is unset, set level to ERROR.
errorW = os.Stderr
case "WARNING", "warning":
warningW = os.Stderr
case "INFO", "info":
infoW = os.Stderr
}
var v int
vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
if vl, err := strconv.Atoi(vLevel); err == nil {
v = vl
}
return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
}
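// For example, running a binary with
//
//	GRPC_GO_LOG_SEVERITY_LEVEL=info GRPC_GO_LOG_VERBOSITY_LEVEL=2
//
// sends INFO (and, via the multi-writers, WARNING and ERROR) to stderr with verbosity 2,
// per the switch above.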
func (g *loggerT) Info(args ...interface{}) {
g.m[infoLog].Print(args...)
}
func (g *loggerT) Infoln(args ...interface{}) {
g.m[infoLog].Println(args...)
}
func (g *loggerT) Infof(format string, args ...interface{}) {
g.m[infoLog].Printf(format, args...)
}
func (g *loggerT) Warning(args ...interface{}) {
g.m[warningLog].Print(args...)
}
func (g *loggerT) Warningln(args ...interface{}) {
g.m[warningLog].Println(args...)
}
func (g *loggerT) Warningf(format string, args ...interface{}) {
g.m[warningLog].Printf(format, args...)
}
func (g *loggerT) Error(args ...interface{}) {
g.m[errorLog].Print(args...)
}
func (g *loggerT) Errorln(args ...interface{}) {
g.m[errorLog].Println(args...)
}
func (g *loggerT) Errorf(format string, args ...interface{}) {
g.m[errorLog].Printf(format, args...)
}
func (g *loggerT) Fatal(args ...interface{}) {
g.m[fatalLog].Fatal(args...)
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
}
func (g *loggerT) Fatalln(args ...interface{}) {
g.m[fatalLog].Fatalln(args...)
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
}
func (g *loggerT) Fatalf(format string, args ...interface{}) {
g.m[fatalLog].Fatalf(format, args...)
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
}
func (g *loggerT) V(l int) bool {
return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
//
// Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
InfoDepth(depth int, args ...interface{})
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
WarningDepth(depth int, args ...interface{})
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
ErrorDepth(depth int, args ...interface{})
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
FatalDepth(depth int, args ...interface{})
}
|
[
"\"GRPC_GO_LOG_SEVERITY_LEVEL\"",
"\"GRPC_GO_LOG_VERBOSITY_LEVEL\""
] |
[] |
[
"GRPC_GO_LOG_SEVERITY_LEVEL",
"GRPC_GO_LOG_VERBOSITY_LEVEL"
] |
[]
|
["GRPC_GO_LOG_SEVERITY_LEVEL", "GRPC_GO_LOG_VERBOSITY_LEVEL"]
|
go
| 2 | 0 | |
client/apps/get_cluster_apps_v5_responses.go
|
// Code generated by go-swagger; DO NOT EDIT.
package apps
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
models "github.com/giantswarm/gsclientgen/v2/models"
)
// GetClusterAppsV5Reader is a Reader for the GetClusterAppsV5 structure.
type GetClusterAppsV5Reader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetClusterAppsV5Reader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetClusterAppsV5OK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 401:
result := NewGetClusterAppsV5Unauthorized()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
result := NewGetClusterAppsV5Default(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewGetClusterAppsV5OK creates a GetClusterAppsV5OK with default headers values
func NewGetClusterAppsV5OK() *GetClusterAppsV5OK {
return &GetClusterAppsV5OK{}
}
/*GetClusterAppsV5OK handles this case with default header values.
Cluster apps
*/
type GetClusterAppsV5OK struct {
Payload models.V4GetClusterAppsResponse
}
func (o *GetClusterAppsV5OK) Error() string {
return fmt.Sprintf("[GET /v5/clusters/{cluster_id}/apps/][%d] getClusterAppsV5OK %+v", 200, o.Payload)
}
func (o *GetClusterAppsV5OK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response payload
if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetClusterAppsV5Unauthorized creates a GetClusterAppsV5Unauthorized with default headers values
func NewGetClusterAppsV5Unauthorized() *GetClusterAppsV5Unauthorized {
return &GetClusterAppsV5Unauthorized{}
}
/*GetClusterAppsV5Unauthorized handles this case with default header values.
Permission denied
*/
type GetClusterAppsV5Unauthorized struct {
Payload *models.V4GenericResponse
}
func (o *GetClusterAppsV5Unauthorized) Error() string {
return fmt.Sprintf("[GET /v5/clusters/{cluster_id}/apps/][%d] getClusterAppsV5Unauthorized %+v", 401, o.Payload)
}
func (o *GetClusterAppsV5Unauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.V4GenericResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetClusterAppsV5Default creates a GetClusterAppsV5Default with default headers values
func NewGetClusterAppsV5Default(code int) *GetClusterAppsV5Default {
return &GetClusterAppsV5Default{
_statusCode: code,
}
}
/*GetClusterAppsV5Default handles this case with default header values.
error
*/
type GetClusterAppsV5Default struct {
_statusCode int
Payload *models.V4GenericResponse
}
// Code gets the status code for the get cluster apps v5 default response
func (o *GetClusterAppsV5Default) Code() int {
return o._statusCode
}
func (o *GetClusterAppsV5Default) Error() string {
return fmt.Sprintf("[GET /v5/clusters/{cluster_id}/apps/][%d] getClusterAppsV5 default %+v", o._statusCode, o.Payload)
}
func (o *GetClusterAppsV5Default) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.V4GenericResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
insator-service-broker.py
|
# Importing some necessary libraries
import os # to obtain environment info
from flask import Flask,jsonify,request,abort,make_response
from flask_basicauth import BasicAuth
import json
import insator_plans
import service
import requests
# from cloudant import Cloudant
#############################################################
# Database Setup : cloudant nosql db
#############################################################
# db_name = 'insatordb'
# client = None
# db = None
# if 'VCAP_SERVICES' in os.environ:
# vcap = json.loads(os.getenv('VCAP_SERVICES'))
# print('Found VCAP_SERVICES')
# if 'cloudantNoSQLDB' in vcap:
# creds = vcap['cloudantNoSQLDB'][0]['credentials']
# user = creds['username']
# password = creds['password']
# url = 'https://' + creds['host']
# client = Cloudant(user, password, url=url, connect=True)
# db = client.create_database(db_name, throw_on_exists=False)
# elif os.path.isfile('vcap-local.json'):
# with open('vcap-local.json') as f:
# vcap = json.load(f)
# print('Found local VCAP_SERVICES')
# creds = vcap['services']['cloudantNoSQLDB'][0]['credentials']
# user = creds['username']
# password = creds['password']
# url = 'https://' + creds['host']
# client = Cloudant(user, password, url=url, connect=True)
# db = client.create_database(db_name, throw_on_exists=False)
#############################################################
# Global Variables
#############################################################
# Start Flask
app = Flask(__name__)
# Which CF Service Broker API version do we support?
X_BROKER_API_VERSION = 2.11
X_BROKER_API_VERSION_NAME = 'X-Broker-Api-Version'
# Configure our test username
app.config['BASIC_AUTH_USERNAME'] = 'insator-user'
app.config['BASIC_AUTH_PASSWORD'] = 'insator-pwd'
# Switch off pretty printing of JSON data
app.config['JSONIFY_PRETTYPRINT_REGULAR']=False
basic_auth = BasicAuth(app)
# Some constants we are going to use, save some typing
jsonheaders = {'Content-Type': 'application/json'}
empty_result={}
# Get service information if on Bluemix
if 'VCAP_APPLICATION' in os.environ:
# get app URL
service_base=json.loads(os.environ['VCAP_APPLICATION'])['application_uris'][0]
else:
# we are local, so set service base
service_base = "localhost:5000"
# Available Locales
AVAILABLE_LOCALES = ['kr', 'en']
#############################################################
# Global Variables : Insator Specific
#############################################################
service_dashboard = 'http://www.samsung.com/global/support/globalcontact.html'
# service_dashboard = 'http://fido-ui-service.mybluemix.net'
# service_dashboard = "http://"+service_base+"/my-service//dashboard/"
########################################################
# Implement Cloud Foundry Broker API
# In thise file:
# * catalog - return service information including related service plans
# * provision - create the service (add it to the Cloud Foundry / Bluemix catalog)
# * deprovision - delete the service (remove it from the catalog)
# * bind - bind/link a service to an app
# * unbind - remove the linkage to an app
########################################################
#
# Catalog
#
@app.route('/v2/catalog', methods=['GET'])
@basic_auth.required
def catalog():
# Return the catalog of services handled by this broker
#
# GET /v2/catalog:
#
# HEADER:
# X-Broker-Api-Version: <version>
#
# return:
# JSON document with details about the
# services offered through this broker
api_version = request.headers.get('X-Broker-Api-Version')
locale = get_locale()
# Check broker API version
if not api_version or float(api_version) < X_BROKER_API_VERSION:
abort(412, "Precondition failed. Missing or incompatible %s. Expecting version %0.1f or later" % (X_BROKER_API_VERSION_NAME, X_BROKER_API_VERSION))
services={"services": [service.insatorsvc(locale)]}
return jsonify(services)
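# Illustrative request against a local run (credentials and header come from the
# configuration above):
#   curl -u insator-user:insator-pwd \
#        -H 'X-Broker-Api-Version: 2.11' \
#        http://localhost:5000/v2/catalog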
#
# Provision
#
@app.route('/v2/service_instances/<instance_id>', methods=['PUT'])
@basic_auth.required
def provision(instance_id):
# Provision an instance of this service for the org/space
# as provided in the JSON data
#
# PUT /v2/service_instances/<instance_id>:
# <instance_id> provided by Bluemix Cloud Controller,
# used for future requests like bind, unbind and deprovision
#
# BODY:
# {
# "service_id": "<service-guid>",
# "plan_id": "<plan-guid>",
# "organization_guid": "<org-guid>",
# "space_guid": "<space-guid>"
# }
#
# return:
# JSON document with service details
if request.headers['Content-Type'] != 'application/json':
abort(415, 'Unsupported Content-Type: expecting application/json')
# provision the service by calling out to the service itself
# not done here to keep the code simple for the tutorial
# get the JSON document in the BODY
provision_details = request.get_json(force=True)
print("Provision details : ", provision_details)
print("In provision instance_id : ", instance_id)
# if client:
# apikey_data = {'API_Key':'1234567890'}
# rp_id = {'rp_id':'0987654321'}
# db.create_document(apikey_data)
# db.create_document(rp_id)
# else:
# print('No database')
# return basic service information
new_service = { "dashboard_url": service_dashboard }
return jsonify(new_service)
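# Illustrative request (GUIDs are placeholders; the body mirrors the comment above):
#   curl -u insator-user:insator-pwd -X PUT \
#        -H 'Content-Type: application/json' \
#        -d '{"service_id": "svc-guid", "plan_id": "plan-guid", "organization_guid": "org-guid", "space_guid": "space-guid"}' \
#        http://localhost:5000/v2/service_instances/instance-1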
#
# Deprovision
#
@app.route('/v2/service_instances/<instance_id>', methods=['DELETE'])
@basic_auth.required
def deprovision(instance_id):
# Deprovision an existing instance of this service
#
# DELETE /v2/service_instances/<instance_id>:
# <instance_id> is the Cloud Controller provided
# value used to provision the instance
#
# return:
# An empty JSON document is expected
# deprovision would call the service here
# not done to keep our code simple for the tutorial
return jsonify(empty_result)
#
# Bind
#
@app.route('/v2/service_instances/<instance_id>/service_bindings/<binding_id>', methods=['PUT'])
@basic_auth.required
def bind(instance_id, binding_id):
# Bind an existing instance with the given org and space
#
# PUT /v2/service_instances/<instance_id>/service_bindings/<binding_id>:
# <instance_id> is the Cloud Controller provided
# value used to provision the instance
# <binding_id> is provided by the Cloud Controller
# and will be used for future unbind requests
#
# BODY:
# {
# "plan_id": "<plan-guid>",
# "service_id": "<service-guid>",
# "app_guid": "<app-guid>"
# }
#
# return:
# JSON document with credentails and access details
# for the service based on this binding
# http://docs.cloudfoundry.org/services/binding-credentials.html
if request.headers['Content-Type'] != 'application/json':
abort(415, 'Unsupported Content-Type: expecting application/json')
# get the JSON document in the BODY
binding_details = request.get_json()
print("Binding details: " , binding_details)
# Sample credential
result={"credentials":
{
"statusCode": "1200",
"id": '4fc8a-b5b1-25fab8b410ee',
"status": "ENABLED",
"name": "myFakeAppService",
"apiKey": '5cf4-4f1f-4b0f-911f-e1d0f1',
"statusMessage": "success",
"createUserId": "admin"
}
}
return make_response(jsonify(result),201)
#
# Unbind
#
@app.route('/v2/service_instances/<instance_id>/service_bindings/<binding_id>', methods=['DELETE'])
@basic_auth.required
def unbind(instance_id, binding_id):
# Unbind an existing instance associated with an app
#
# DELETE /v2/service_instances/<instance_id>/service_bindings/<binding_id>:
# <instance_id> and <binding_id> are provided by the Cloud Controller
#
# return:
# An empty JSON document is expected
return jsonify(empty_result)
########################################################
# Service-related functions for some additional testing
#
#
########################################################
@app.route('/my-service/dashboard/<instance_id>', methods=['GET'])
def dashboard(instance_id):
# hardcoded HTML, but could be a rendered template, too
# Consider offering customized page for different instances
dashboard_page = "<img src='http://contents.dt.co.kr/images/201510/2015102802101860727001[2].jpg' />"
dashboard_page += "<h3>Welcome!!</h3> You discovered the dashboard for instance : " + instance_id
dashboard_page += "<img src='http://news.samsungsds.com/wp-content/uploads/2016/10/19-2.jpg' />"
return dashboard_page
########################################################
# Check a locale
#
#
########################################################
def get_locale():
return request.accept_languages.best_match(AVAILABLE_LOCALES)
########################################################
# Catch-all section - return HTML page for testing
#
#
########################################################
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def catch_all(path):
page = '<title>insator Service Broker</title>'
page += '<h2>This is a sample service broker for Samsung SDS : insator Solution</h2>'
page += '<p>See for details.</p>'
page += '<p>You requested path: /%s </p>' % path
page += '<p> Browser Language : %s </p>' % request.accept_languages
page += '<p> Browser Language best_match : %s </p>' % request.accept_languages.best_match(['de', 'fr', 'en', 'ko'])
return page
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port), threaded=True)
# app.run(host='0.0.0.0', port=int(port), debug=True, threaded=True)  # debug variant; unreachable as a second call
# app.run(debug=True, threaded=True)
|
[] |
[] |
[
"PORT",
"VCAP_APPLICATION",
"VCAP_SERVICES"
] |
[]
|
["PORT", "VCAP_APPLICATION", "VCAP_SERVICES"]
|
python
| 3 | 0 | |
cmd/hdfs/df.go
|
package main
import (
"fmt"
"os"
"text/tabwriter"
"github.com/stubey/hdfs/v2"
)
func df(humanReadable bool) {
client, err := getClient("")
if err != nil {
fatal(err)
}
var fs hdfs.FsInfo
fs, err = client.StatFs()
if err != nil {
fatal(err)
}
tw := tabwriter.NewWriter(os.Stdout, 3, 8, 0, ' ', tabwriter.AlignRight)
fmt.Fprintf(tw, "Filesystem \tSize \tUsed \tAvailable \t Use%%\n")
if humanReadable {
fmt.Fprintf(tw, "%v \t%v \t%v \t%v \t%d%%\n",
os.Getenv("HADOOP_NAMENODE"),
formatBytes(fs.Capacity),
formatBytes(fs.Used),
formatBytes(fs.Remaining),
100 * fs.Used / fs.Capacity)
} else {
fmt.Fprintf(tw, "%v \t%v \t %v \t %v \t%d%%\n",
os.Getenv("HADOOP_NAMENODE"),
fs.Capacity,
fs.Used,
fs.Remaining,
100 * fs.Used / fs.Capacity)
}
tw.Flush()
}
|
[
"\"HADOOP_NAMENODE\"",
"\"HADOOP_NAMENODE\""
] |
[] |
[
"HADOOP_NAMENODE"
] |
[]
|
["HADOOP_NAMENODE"]
|
go
| 1 | 0 | |
tools/protodoc/protodoc.py
|
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST.
# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto
# for the underlying protos mentioned in this file. See
# https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax.
from collections import defaultdict
import json
import functools
import os
import pathlib
import sys
from google.protobuf import json_format
from bazel_tools.tools.python.runfiles import runfiles
import yaml
from jinja2 import Template
# We have to do some evil things to sys.path due to the way that Python module
# resolution works; we have both tools/ trees in bazel_tools and envoy. By
# default, Bazel leaves us with a sys.path in which the @bazel_tools repository
# takes precedence. Now that we're done with importing runfiles above, we can
# just remove it from the sys.path.
sys.path = [p for p in sys.path if not p.endswith('bazel_tools')]
from tools.api_proto_plugin import annotations
from tools.api_proto_plugin import plugin
from tools.api_proto_plugin import visitor
from tools.config_validation import validate_fragment
from tools.protodoc import manifest_pb2
from udpa.annotations import security_pb2
from udpa.annotations import status_pb2
from validate import validate_pb2
# Namespace prefix for Envoy core APIs.
ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.'
# Last documented v2 api version
ENVOY_LAST_V2_VERSION = "1.17.2"
# Namespace prefix for Envoy top-level APIs.
ENVOY_PREFIX = '.envoy.'
# Namespace prefix for WKTs.
WKT_NAMESPACE_PREFIX = '.google.protobuf.'
# Namespace prefix for RPCs.
RPC_NAMESPACE_PREFIX = '.google.rpc.'
# http://www.fileformat.info/info/unicode/char/2063/index.htm
UNICODE_INVISIBLE_SEPARATOR = u'\u2063'
# Template for formatting extension descriptions.
EXTENSION_TEMPLATE = Template(
"""
.. _extension_{{extension}}:
This extension may be referenced by the qualified name ``{{extension}}``
.. note::
{{status}}
{{security_posture}}
.. tip::
This extension extends and can be used with the following extension {% if categories|length > 1 %}categories{% else %}category{% endif %}:
{% for cat in categories %}
- :ref:`{{cat}} <extension_category_{{cat}}>`
{% endfor %}
""")
# Template for formatting an extension category.
EXTENSION_CATEGORY_TEMPLATE = Template(
"""
.. _extension_category_{{category}}:
.. tip::
This extension category has the following known extensions:
{% for ext in extensions %}
- :ref:`{{ext}} <extension_{{ext}}>`
{% endfor %}
""")
# A map from the extension security postures (as defined in the
# envoy_cc_extension build macro) to human readable text for extension docs.
EXTENSION_SECURITY_POSTURES = {
'robust_to_untrusted_downstream':
'This extension is intended to be robust against untrusted downstream traffic. It '
'assumes that the upstream is trusted.',
'robust_to_untrusted_downstream_and_upstream':
'This extension is intended to be robust against both untrusted downstream and '
'upstream traffic.',
'requires_trusted_downstream_and_upstream':
'This extension is not hardened and should only be used in deployments'
' where both the downstream and upstream are trusted.',
'unknown':
'This extension has an unknown security posture and should only be '
'used in deployments where both the downstream and upstream are '
'trusted.',
'data_plane_agnostic':
'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.',
}
# A map from the extension status value to a human readable text for extension
# docs.
EXTENSION_STATUS_VALUES = {
'alpha':
'This extension is functional but has not had substantial production burn time, use only with this caveat.',
'wip':
'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.',
}
EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text())
# create an index of extension categories from extension db
EXTENSION_CATEGORIES = {}
for _k, _v in EXTENSION_DB.items():
for _cat in _v['categories']:
EXTENSION_CATEGORIES.setdefault(_cat, []).append(_k)
V2_LINK_TEMPLATE = Template(
"""
This documentation is for the Envoy v3 API.
As of Envoy v1.18 the v2 API has been removed and is no longer supported.
If you are upgrading from v2 API config you may wish to view the v2 API documentation:
:ref:`{{v2_text}} <{{v2_url}}>`
""")
class ProtodocError(Exception):
"""Base error class for the protodoc module."""
def hide_not_implemented(comment):
"""Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?"""
return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment.annotations
def github_url(text, type_context):
"""Obtain data plane API Github URL by path from a TypeContext.
Args:
text: link text for the RST reference.
type_context: type_context.TypeContext for node.
Returns:
A string with a corresponding data plane API GitHub Url.
"""
return f":repo:`{text} <api/{type_context.source_code_info.name}#L{type_context.location.span[0]}>`"
def format_comment_with_annotations(comment, type_name=''):
"""Format a comment string with additional RST for annotations.
Args:
comment: comment string.
type_name: optional, 'message' or 'enum' may be specified for additional
message/enum specific annotations.
Returns:
A string with additional RST from annotations.
"""
alpha_warning = ''
if annotations.ALPHA_ANNOTATION in comment.annotations:
alpha_warning = (
'.. warning::\n This API is alpha and is not covered by the :ref:`threat model <arch_overview_threat_model>`.\n\n'
)
formatted_extension = ''
if annotations.EXTENSION_ANNOTATION in comment.annotations:
extension = comment.annotations[annotations.EXTENSION_ANNOTATION]
formatted_extension = format_extension(extension)
formatted_extension_category = ''
if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations:
for category in comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION].split(","):
formatted_extension_category += format_extension_category(category)
comment = annotations.without_annotations(strip_leading_space(comment.raw) + '\n')
return alpha_warning + comment + formatted_extension + formatted_extension_category
def map_lines(f, s):
"""Apply a function across each line in a flat string.
Args:
f: A string transform function for a line.
s: A string consisting of potentially multiple lines.
Returns:
A flat string with f applied to each line.
"""
return '\n'.join(f(line) for line in s.split('\n'))
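# Illustrative sketch (added for clarity, not part of the original tool): map_lines
# applies a per-line transform and rejoins the result with newlines, e.g.
assert map_lines(lambda line: '> ' + line, 'a\nb') == '> a\n> b'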
def indent(spaces, line):
"""Indent a string."""
return ' ' * spaces + line
def indent_lines(spaces, lines):
"""Indent a list of strings."""
return map(functools.partial(indent, spaces), lines)
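# Illustrative sketch (added for clarity): indent pads a single line, while
# indent_lines lazily pads each line of a list, e.g.
assert indent(2, 'x') == '  x'
assert list(indent_lines(2, ['a', 'b'])) == ['  a', '  b']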
def format_internal_link(text, ref):
return ':ref:`%s <%s>`' % (text, ref)
def format_external_link(text, ref):
return '`%s <%s>`_' % (text, ref)
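# Illustrative sketch (added for clarity): the two link helpers emit a Sphinx
# cross-reference and a plain external link respectively, e.g.
assert format_internal_link('Address', 'envoy_v3_api_msg_Address') == ':ref:`Address <envoy_v3_api_msg_Address>`'
assert format_external_link('Envoy', 'https://www.envoyproxy.io') == '`Envoy <https://www.envoyproxy.io>`_'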
def format_header(style, text):
"""Format RST header.
Args:
style: underline style, e.g. '=', '-'.
text: header text
Returns:
RST formatted header.
"""
return '%s\n%s\n\n' % (text, style * len(text))
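# Illustrative sketch (added for clarity): the underline character is repeated to
# the length of the header text, e.g.
assert format_header('=', 'Foo') == 'Foo\n===\n\n'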
def format_extension(extension):
"""Format extension metadata as RST.
Args:
extension: the name of the extension, e.g. com.acme.foo.
Returns:
RST formatted extension description.
"""
try:
extension_metadata = EXTENSION_DB[extension]
status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '')
security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']]
categories = extension_metadata["categories"]
except KeyError as e:
sys.stderr.write(
f"\n\nDid you forget to add '{extension}' to source/extensions/extensions_build_config.bzl?\n\n"
)
exit(1) # Raising the error buries the above message in tracebacks.
return EXTENSION_TEMPLATE.render(
extension=extension,
status=status,
security_posture=security_posture,
categories=categories)
def format_extension_category(extension_category):
"""Format extension metadata as RST.
Args:
extension_category: the name of the extension_category, e.g. com.acme.
Returns:
RST formatted extension category description.
"""
try:
extensions = EXTENSION_CATEGORIES[extension_category]
except KeyError as e:
raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n")
return EXTENSION_CATEGORY_TEMPLATE.render(
category=extension_category, extensions=sorted(extensions))
def format_header_from_file(style, source_code_info, proto_name, v2_link):
"""Format RST header based on special file level title
Args:
style: underline style, e.g. '=', '-'.
source_code_info: SourceCodeInfo object.
proto_name: If the file_level_comment does not contain a user specified
title, use this as page title.
v2_link: rendered v2 documentation link text (from V2_LINK_TEMPLATE), or an empty string.
Returns:
RST formatted header, and file level comment without page title strings.
"""
anchor = format_anchor(file_cross_ref_label(proto_name))
stripped_comment = annotations.without_annotations(
strip_leading_space('\n'.join(c + '\n' for c in source_code_info.file_level_comments)))
formatted_extension = ''
if annotations.EXTENSION_ANNOTATION in source_code_info.file_level_annotations:
extension = source_code_info.file_level_annotations[annotations.EXTENSION_ANNOTATION]
formatted_extension = format_extension(extension)
if annotations.DOC_TITLE_ANNOTATION in source_code_info.file_level_annotations:
return anchor + format_header(
style, source_code_info.file_level_annotations[annotations.DOC_TITLE_ANNOTATION]
) + v2_link + "\n\n" + formatted_extension, stripped_comment
return anchor + format_header(
style, proto_name) + v2_link + "\n\n" + formatted_extension, stripped_comment
def format_field_type_as_json(type_context, field):
"""Format FieldDescriptorProto.Type as a pseudo-JSON string.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted pseudo-JSON string representation of field type.
"""
if type_name_from_fqn(field.type_name) in type_context.map_typenames:
return '"{...}"'
if field.label == field.LABEL_REPEATED:
return '[]'
if field.type == field.TYPE_MESSAGE:
return '"{...}"'
return '"..."'
def format_message_as_json(type_context, msg):
"""Format a message definition DescriptorProto as a pseudo-JSON block.
Args:
type_context: contextual information for message/enum/field.
msg: message definition DescriptorProto.
Return: RST formatted pseudo-JSON string representation of message definition.
"""
lines = []
for index, field in enumerate(msg.field):
field_type_context = type_context.extend_field(index, field.name)
leading_comment = field_type_context.leading_comment
if hide_not_implemented(leading_comment):
continue
lines.append('"%s": %s' % (field.name, format_field_type_as_json(type_context, field)))
if lines:
return '.. code-block:: json\n\n {\n' + ',\n'.join(indent_lines(4, lines)) + '\n }\n\n'
return ""
def normalize_field_type_name(field_fqn):
"""Normalize a fully qualified field type name, e.g.
.envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.
Return: Normalized type name.
"""
if field_fqn.startswith(ENVOY_API_NAMESPACE_PREFIX):
return field_fqn[len(ENVOY_API_NAMESPACE_PREFIX):]
if field_fqn.startswith(ENVOY_PREFIX):
return field_fqn[len(ENVOY_PREFIX):]
return field_fqn
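# Illustrative sketch (added for clarity): both the legacy core API namespace and
# the general Envoy namespace prefixes are stripped, e.g.
assert normalize_field_type_name('.envoy.api.v2.core.Address') == 'core.Address'
assert normalize_field_type_name('.envoy.config.route.v3.Route') == 'config.route.v3.Route'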
def normalize_type_context_name(type_name):
"""Normalize a type name, e.g.
envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
type_name: a name from a TypeContext.
Return: Normalized type name.
"""
return normalize_field_type_name(qualify_type_name(type_name))
def qualify_type_name(type_name):
return '.' + type_name
def type_name_from_fqn(fqn):
return fqn[1:]
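# Illustrative sketch (added for clarity): qualify_type_name and type_name_from_fqn
# are inverses that add/strip the leading dot of a fully qualified name, e.g.
assert qualify_type_name('envoy.config.route.v3.Route') == '.envoy.config.route.v3.Route'
assert type_name_from_fqn('.envoy.config.route.v3.Route') == 'envoy.config.route.v3.Route'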
def format_field_type(type_context, field):
"""Format a FieldDescriptorProto type description.
Adds cross-refs for message types.
TODO(htuch): Add cross-refs for enums as well.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted field type.
"""
if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(
ENVOY_PREFIX):
type_name = normalize_field_type_name(field.type_name)
if field.type == field.TYPE_MESSAGE:
if type_context.map_typenames and type_name_from_fqn(
field.type_name) in type_context.map_typenames:
return 'map<%s, %s>' % tuple(
map(
functools.partial(format_field_type, type_context),
type_context.map_typenames[type_name_from_fqn(field.type_name)]))
return format_internal_link(type_name, message_cross_ref_label(type_name))
if field.type == field.TYPE_ENUM:
return format_internal_link(type_name, enum_cross_ref_label(type_name))
elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
return format_external_link(
wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s'
% wkt.lower())
elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
return format_external_link(
rpc, 'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s'
% rpc.lower())
elif field.type_name:
return field.type_name
pretty_type_names = {
field.TYPE_DOUBLE: 'double',
field.TYPE_FLOAT: 'float',
field.TYPE_INT32: 'int32',
field.TYPE_SFIXED32: 'int32',
field.TYPE_SINT32: 'int32',
field.TYPE_FIXED32: 'uint32',
field.TYPE_UINT32: 'uint32',
field.TYPE_INT64: 'int64',
field.TYPE_SFIXED64: 'int64',
field.TYPE_SINT64: 'int64',
field.TYPE_FIXED64: 'uint64',
field.TYPE_UINT64: 'uint64',
field.TYPE_BOOL: 'bool',
field.TYPE_STRING: 'string',
field.TYPE_BYTES: 'bytes',
}
if field.type in pretty_type_names:
return format_external_link(
pretty_type_names[field.type],
'https://developers.google.com/protocol-buffers/docs/proto#scalar')
raise ProtodocError('Unknown field type ' + str(field.type))
def strip_leading_space(s):
"""Remove leading space in flat comment strings."""
return map_lines(lambda s: s[1:], s)
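# Illustrative sketch (added for clarity): proto comments arrive with a single
# leading space on each line, which this helper drops, e.g.
assert strip_leading_space(' foo\n bar') == 'foo\nbar'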
def file_cross_ref_label(msg_name):
"""File cross reference label."""
return 'envoy_v3_api_file_%s' % msg_name
def message_cross_ref_label(msg_name):
"""Message cross reference label."""
return 'envoy_v3_api_msg_%s' % msg_name
def enum_cross_ref_label(enum_name):
"""Enum cross reference label."""
return 'envoy_v3_api_enum_%s' % enum_name
def field_cross_ref_label(field_name):
"""Field cross reference label."""
return 'envoy_v3_api_field_%s' % field_name
def enum_value_cross_ref_label(enum_value_name):
"""Enum value cross reference label."""
return 'envoy_v3_api_enum_value_%s' % enum_value_name
def format_anchor(label):
"""Format a label as an Envoy API RST anchor."""
return '.. _%s:\n\n' % label
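# Illustrative sketch (added for clarity): the *_cross_ref_label helpers build the
# label strings that format_anchor turns into RST anchors, e.g.
assert message_cross_ref_label('config.core.v3.Address') == 'envoy_v3_api_msg_config.core.v3.Address'
assert format_anchor('envoy_v3_api_msg_config.core.v3.Address') == '.. _envoy_v3_api_msg_config.core.v3.Address:\n\n'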
def format_security_options(security_option, field, type_context, edge_config):
sections = []
if security_option.configure_for_untrusted_downstream:
sections.append(
indent(
4, 'This field should be configured in the presence of untrusted *downstreams*.'))
if security_option.configure_for_untrusted_upstream:
sections.append(
indent(4, 'This field should be configured in the presence of untrusted *upstreams*.'))
if edge_config.note:
sections.append(indent(4, edge_config.note))
example_dict = json_format.MessageToDict(edge_config.example)
validate_fragment.validate_fragment(field.type_name[1:], example_dict)
field_name = type_context.name.split('.')[-1]
example = {field_name: example_dict}
sections.append(
indent(4, 'Example configuration for untrusted environments:\n\n')
+ indent(4, '.. code-block:: yaml\n\n')
+ '\n'.join(indent_lines(6,
yaml.dump(example).split('\n'))))
return '.. attention::\n' + '\n\n'.join(sections)
def format_field_as_definition_list_item(
outer_type_context, type_context, field, protodoc_manifest):
"""Format a FieldDescriptorProto as RST definition list item.
Args:
outer_type_context: contextual information for enclosing message.
type_context: contextual information for message/enum/field.
field: FieldDescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item.
"""
field_annotations = []
anchor = format_anchor(field_cross_ref_label(normalize_type_context_name(type_context.name)))
if field.options.HasExtension(validate_pb2.rules):
rule = field.options.Extensions[validate_pb2.rules]
if ((rule.HasField('message') and rule.message.required)
or (rule.HasField('duration') and rule.duration.required)
or (rule.HasField('string') and rule.string.min_len > 0)
or (rule.HasField('string') and rule.string.min_bytes > 0)
or (rule.HasField('repeated') and rule.repeated.min_items > 0)):
field_annotations = ['*REQUIRED*']
leading_comment = type_context.leading_comment
formatted_leading_comment = format_comment_with_annotations(leading_comment)
if hide_not_implemented(leading_comment):
return ''
if field.HasField('oneof_index'):
oneof_context = outer_type_context.extend_oneof(
field.oneof_index, type_context.oneof_names[field.oneof_index])
oneof_comment = oneof_context.leading_comment
formatted_oneof_comment = format_comment_with_annotations(oneof_comment)
if hide_not_implemented(oneof_comment):
return ''
# If the oneof only has one field and marked required, mark the field as required.
if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[
field.oneof_index]:
field_annotations = ['*REQUIRED*']
if len(type_context.oneof_fields[field.oneof_index]) > 1:
# Fields in oneof shouldn't be marked as required when we have oneof comment below it.
field_annotations = []
oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[
field.oneof_index] else '\nOnly one of %s may be set.\n'
formatted_oneof_comment += oneof_template % ', '.join(
format_internal_link(
f,
field_cross_ref_label(
normalize_type_context_name(outer_type_context.extend_field(i, f).name)))
for i, f in type_context.oneof_fields[field.oneof_index])
else:
formatted_oneof_comment = ''
# If there is a udpa.annotations.security option, include it after the comment.
if field.options.HasExtension(security_pb2.security):
manifest_description = protodoc_manifest.fields.get(type_context.name)
if not manifest_description:
raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name)
formatted_security_options = format_security_options(
field.options.Extensions[security_pb2.security], field, type_context,
manifest_description.edge_config)
else:
formatted_security_options = ''
pretty_label_names = {
field.LABEL_OPTIONAL: '',
field.LABEL_REPEATED: '**repeated** ',
}
comment = '(%s) ' % ', '.join(
[pretty_label_names[field.label] + format_field_type(type_context, field)]
+ field_annotations) + formatted_leading_comment
return anchor + field.name + '\n' + map_lines(
functools.partial(indent, 2),
comment + formatted_oneof_comment) + formatted_security_options
def format_message_as_definition_list(type_context, msg, protodoc_manifest):
"""Format a DescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
msg: DescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item.
"""
type_context.oneof_fields = defaultdict(list)
type_context.oneof_required = defaultdict(bool)
type_context.oneof_names = defaultdict(list)
for index, field in enumerate(msg.field):
if field.HasField('oneof_index'):
leading_comment = type_context.extend_field(index, field.name).leading_comment
if hide_not_implemented(leading_comment):
continue
type_context.oneof_fields[field.oneof_index].append((index, field.name))
for index, oneof_decl in enumerate(msg.oneof_decl):
if oneof_decl.options.HasExtension(validate_pb2.required):
type_context.oneof_required[index] = oneof_decl.options.Extensions[
validate_pb2.required]
type_context.oneof_names[index] = oneof_decl.name
return '\n'.join(
format_field_as_definition_list_item(
type_context, type_context.extend_field(index, field.name), field, protodoc_manifest)
for index, field in enumerate(msg.field)) + '\n'
def format_enum_value_as_definition_list_item(type_context, enum_value):
"""Format a EnumValueDescriptorProto as RST definition list item.
Args:
type_context: contextual information for message/enum/field.
enum_value: EnumValueDescriptorProto.
Returns:
RST formatted definition list item.
"""
anchor = format_anchor(
enum_value_cross_ref_label(normalize_type_context_name(type_context.name)))
default_comment = '*(DEFAULT)* ' if enum_value.number == 0 else ''
leading_comment = type_context.leading_comment
formatted_leading_comment = format_comment_with_annotations(leading_comment)
if hide_not_implemented(leading_comment):
return ''
comment = default_comment + UNICODE_INVISIBLE_SEPARATOR + formatted_leading_comment
return anchor + enum_value.name + '\n' + map_lines(functools.partial(indent, 2), comment)
def format_enum_as_definition_list(type_context, enum):
"""Format a EnumDescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
enum: DescriptorProto.
Returns:
RST formatted definition list item.
"""
return '\n'.join(
format_enum_value_as_definition_list_item(
type_context.extend_enum_value(index, enum_value.name), enum_value)
for index, enum_value in enumerate(enum.value)) + '\n'
def format_proto_as_block_comment(proto):
"""Format a proto as a RST block comment.
Useful in debugging, not usually referenced.
"""
return '\n\nproto::\n\n' + map_lines(functools.partial(indent, 2), str(proto)) + '\n'
class RstFormatVisitor(visitor.Visitor):
"""Visitor to generate a RST representation from a FileDescriptor proto.
See visitor.Visitor for visitor method docs comments.
"""
def __init__(self):
r = runfiles.Create()
with open(r.Rlocation('envoy/docs/v2_mapping.json'), 'r') as f:
self.v2_mapping = json.load(f)
with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f:
# Load as YAML, emit as JSON and then parse as proto to provide type
# checking.
protodoc_manifest_untyped = yaml.safe_load(f.read())
self.protodoc_manifest = manifest_pb2.Manifest()
json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest)
def visit_enum(self, enum_proto, type_context):
normal_enum_type = normalize_type_context_name(type_context.name)
anchor = format_anchor(enum_cross_ref_label(normal_enum_type))
header = format_header('-', 'Enum %s' % normal_enum_type)
proto_link = github_url("f[{normal_enum_type} proto]", type_context) + '\n\n'
leading_comment = type_context.leading_comment
formatted_leading_comment = format_comment_with_annotations(leading_comment, 'enum')
if hide_not_implemented(leading_comment):
return ''
return anchor + header + proto_link + formatted_leading_comment + format_enum_as_definition_list(
type_context, enum_proto)
def visit_message(self, msg_proto, type_context, nested_msgs, nested_enums):
# Skip messages synthesized to represent map types.
if msg_proto.options.map_entry:
return ''
normal_msg_type = normalize_type_context_name(type_context.name)
anchor = format_anchor(message_cross_ref_label(normal_msg_type))
header = format_header('-', normal_msg_type)
proto_link = github_url(f"[{normal_msg_type} proto]", type_context) + '\n\n'
leading_comment = type_context.leading_comment
formatted_leading_comment = format_comment_with_annotations(leading_comment, 'message')
if hide_not_implemented(leading_comment):
return ''
return anchor + header + proto_link + formatted_leading_comment + format_message_as_json(
type_context, msg_proto) + format_message_as_definition_list(
type_context, msg_proto,
self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums)
def visit_file(self, file_proto, type_context, services, msgs, enums):
has_messages = True
if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums):
has_messages = False
v2_link = ""
if file_proto.name in self.v2_mapping:
v2_filepath = f"envoy_api_file_{self.v2_mapping[file_proto.name]}"
v2_text = v2_filepath.split('/', 1)[1]
v2_url = f"v{ENVOY_LAST_V2_VERSION}:{v2_filepath}"
v2_link = V2_LINK_TEMPLATE.render(v2_url=v2_url, v2_text=v2_text)
# TODO(mattklein123): The logic in both the doc and transform tool around files without messages
# is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs
# in the common case.
if (has_messages and not annotations.DOC_TITLE_ANNOTATION
in type_context.source_code_info.file_level_annotations
and file_proto.name.startswith('envoy')):
raise ProtodocError(
'Envoy API proto file missing [#protodoc-title:] annotation: {}'.format(
file_proto.name))
# Find the earliest detached comment, attribute it to file level.
# Also extract file level titles if any.
header, comment = format_header_from_file(
'=', type_context.source_code_info, file_proto.name, v2_link)
# If there are no messages, we don't include in the doc tree (no support for
# service rendering yet). We allow these files to be missing from the
# toctrees.
if not has_messages:
header = ':orphan:\n\n' + header
warnings = ''
if file_proto.options.HasExtension(status_pb2.file_status):
if file_proto.options.Extensions[status_pb2.file_status].work_in_progress:
warnings += (
'.. warning::\n This API is work-in-progress and is '
'subject to breaking changes.\n\n')
# debug_proto = format_proto_as_block_comment(file_proto)
return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums) # + debug_proto
def main():
plugin.plugin([plugin.direct_output_descriptor('.rst', RstFormatVisitor)])
if __name__ == '__main__':
main()
|
[] |
[] |
[
"EXTENSION_DB_PATH"
] |
[]
|
["EXTENSION_DB_PATH"]
|
python
| 1 | 0 | |
tests/yessssms_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for YesssSMS Module."""
#
# pylint: disable-msg=C0103
import os
import sys
from unittest import mock
import YesssSMS
from YesssSMS.CLI import CLI
from YesssSMS.CLI import run as cli_run
from YesssSMS.const import (
CONFIG_FILE_CONTENT,
CONFIG_FILE_PATHS,
PROVIDER_URLS,
TEST_FORM_TOKEN_SAMPLE,
VERSION,
_UNSUPPORTED_CHARS_STRING,
)
import pytest
import requests
import requests_mock
PROVIDER = PROVIDER_URLS["yesss"]
_LOGIN_URL = PROVIDER["LOGIN_URL"]
_LOGOUT_URL = PROVIDER["LOGOUT_URL"]
_KONTOMANAGER_URL = PROVIDER["KONTOMANAGER_URL"]
_SMS_FORM_URL = PROVIDER["WEBSMS_FORM_URL"]
_SEND_SMS_URL = PROVIDER["SEND_SMS_URL"]
# make sure env is empty
os.environ = {}
LOGIN = "06641234567"
YESSS_PASSWD = "testpasswd"
YESSS_TO = "06501234567"
@pytest.fixture
def valid_connection():
"""Decorate connection to be valid."""
sms = YesssSMS.YesssSMS("", "", provider="yesss")
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": sms._kontomanager},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._kontomanager,
status_code=200,
text="test..." + LOGIN + "</a>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._sms_form_url,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._send_sms_url,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
yield
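# Usage sketch (hypothetical test, added for clarity, not part of the suite): any
# test that names this fixture as a parameter runs against the registered mock
# URIs instead of the real Kontomanager endpoints, e.g.
#
#   def test_send_with_mocked_backend(valid_connection):
#       sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)
#       sms.send(YESSS_TO, "hello")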
@pytest.fixture
def valid_mock_connection():
"""Decorate connection to be mocked."""
# sms = YesssSMS.YesssSMS("", "", provider="yesss")
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
# pylint: disable=protected-access
"mock://kontomanager.at/index.php",
status_code=302,
# pylint: disable=protected-access
headers={"location": "mock://kontomanager.at/kundendaten.php"},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"mock://kontomanager.at/kundendaten.php",
status_code=200,
text="test..." + LOGIN + "</a>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"mock://kontomanager.at/websms.php",
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
"mock://kontomanager.at/websms_send.php",
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"mock://kontomanager.at/index.php?dologout=2",
status_code=200,
)
yield
@pytest.fixture
def valid_goood_mock_connection():
"""Decorate connection to be mocked and working."""
# sms = YesssSMS.YesssSMS("", "", provider="yesss")
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
# pylint: disable=protected-access
"https://goood.kontomanager.at/index.php",
status_code=302,
# pylint: disable=protected-access
headers={"location": "https://goood.kontomanager.at/kundendaten.php"},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"https://goood.kontomanager.at/kundendaten.php",
status_code=200,
text="test..." + LOGIN + "</a>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"https://goood.kontomanager.at/websms.php",
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
"https://goood.kontomanager.at/websms_send.php",
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"https://goood.kontomanager.at/index.php?dologout=2",
status_code=200,
)
yield
@pytest.fixture
def valid_wowww_mock_connection():
"""Decorate connection to be mocked and working."""
# sms = YesssSMS.YesssSMS("", "", provider="yesss")
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
# pylint: disable=protected-access
"https://wowww.kontomanager.at/index.php",
status_code=302,
# pylint: disable=protected-access
headers={"location": "https://wowww.kontomanager.at/kundendaten.php"},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"https://wowww.kontomanager.at/kundendaten.php",
status_code=200,
text="test..." + LOGIN + "</a>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"https://wowww.kontomanager.at/websms.php",
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
"https://wowww.kontomanager.at/websms_send.php",
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
"https://wowww.kontomanager.at/index.php?dologout=2",
status_code=200,
)
yield
@pytest.fixture
def invalid_login(valid_connection):
"""Decorate connection to be mocked and invalid."""
# sms = YesssSMS.YesssSMS("", "")
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
# pylint: disable=protected-access
_LOGIN_URL,
status_code=200,
text="Bla bla<strong>Login nicht erfolgreichBlaBla",
)
yield
@pytest.fixture(name="connection_error")
def simulate_connection_error(valid_connection):
"""Simulate a connection error with requests."""
path = "YesssSMS.YesssSMS._login"
with mock.patch(path, side_effect=requests.exceptions.ConnectionError()):
yield
@pytest.fixture(name="suspended_error")
def simulate_suspended_error(valid_connection):
"""Simulate a suspended error."""
path = "YesssSMS.YesssSMS._login"
with mock.patch(path, side_effect=YesssSMS.YesssSMS.AccountSuspendedError()):
yield
@pytest.fixture(name="sending_error")
def simulate_sending_error(valid_connection):
"""Simulate a sending error."""
path = "YesssSMS.YesssSMS.send"
with mock.patch(path, side_effect=YesssSMS.YesssSMS.SMSSendingError()):
yield
@pytest.fixture(name="unsupported_chars_error")
def simulate_unsupported_chars_error(valid_connection):
"""Simulate a sending error."""
path = "YesssSMS.YesssSMS.send"
with mock.patch(path, side_effect=YesssSMS.YesssSMS.UnsupportedCharsError()):
yield
@pytest.fixture(name="empty_message_error")
def simulate_empty_message_error(valid_connection):
"""Simulate a empty_message error."""
path = "YesssSMS.YesssSMS.send"
with mock.patch(path, side_effect=YesssSMS.YesssSMS.EmptyMessageError):
yield
@pytest.fixture
def mocked_config_file_custom_provider():
"""Mock config file with custom data."""
data = """[YESSSSMS]
LOGIN = 06501234567
PASSWD = MySecre3tPassw0rd
DEFAULT_TO = +43664123123123
# MVNO = FANTASYMOBILE
[YESSSSMS_PROVIDER_URLS]
LOGIN_URL = mock://kontomanager.at/index.php
LOGOUT_URL = mock://kontomanager.at/index.php?dologout=2
KONTOMANAGER_URL = mock://kontomanager.at/kundendaten.php
WEBSMS_FORM_URL = mock://kontomanager.at/websms.php
SEND_SMS_URL = mock://kontomanager.at/websms_send.php
"""
with mock.patch(
"configparser.open",
# "builtins.open",
mock.mock_open(read_data=data),
):
yield
@pytest.fixture
def mocked_config_file_error():
"""Mock config file with erroneous data."""
data = """
LOGIN = 06501234567
PASSWD = MySecre3tPassw0rd
"""
with mock.patch(
"configparser.open",
# "builtins.open",
mock.mock_open(read_data=data),
):
yield
@pytest.fixture
def mocked_config_file():
"""Mock config file with data."""
data = """[YESSSSMS]
LOGIN = 03211234567
PASSWD = MySecr3t
DEFAULT_TO = +43664123123123
MVNO = GOOOD
"""
with mock.patch(
"configparser.open",
# "builtins.open",
mock.mock_open(read_data=data),
):
yield
@pytest.fixture
def config():
"""Mock config file with data."""
data = """[YESSSSMS]
LOGIN = 03211234567
PASSWD = MySecr3t
DEFAULT_TO = +43664123123123
MVNO = YESSS
"""
with mock.patch(
"configparser.open",
# "builtins.open",
mock.mock_open(read_data=data),
):
yield
@pytest.fixture
def environment_vars_set():
"""Mock env vars YESSSSMS_LOGIN and YESSSSMS_PASSWD."""
os.environ["YESSSSMS_LOGIN"] = "03211234567"
os.environ["YESSSSMS_PASSWD"] = "MySecr3t"
os.environ["YESSSSMS_PROVIDER"] = "goood"
os.environ["YESSSSMS_RECIPIENT"] = "066356789789"
@pytest.fixture
def environment_vars_set_wowww():
"""Mock env vars YESSSSMS_LOGIN and YESSSSMS_PASSWD."""
os.environ["YESSSSMS_LOGIN"] = "03211234567"
os.environ["YESSSSMS_PASSWD"] = "MySecr3t"
os.environ["YESSSSMS_PROVIDER"] = "wowww"
os.environ["YESSSSMS_RECIPIENT"] = "066356789780"
@mock.patch("YesssSMS.CLI.CONFIG_FILE_PATHS", ["testconfigfile.conf"])
@pytest.fixture(name="config_file")
def mocked_read_config():
"""Mock config file read."""
# login, passwd, DEFAULT_RECIPIENT, PROVIDER, CUSTOM_PROVIDER_URLS
data = ("03141592653", "MySecr3t", None, "yesss", None)
with mock.patch("YesssSMS.CLI.CLI.read_config_files", return_value=data):
yield
def test_cli_mocked_config_file(
valid_mock_connection, mocked_config_file_custom_provider
):
"""Test CLI config file."""
if int(sys.version.split(".")[1]) < 7: # don't run test on 3.6
pytest.skip("issue with mock_open")
testargs = ["yessssms", "-m", "Bilde mir nicht ein was rechts zu wissen"]
with mock.patch.object(sys, "argv", testargs):
cli = CLI()
print(cli.config_files)
assert cli.yessssms._logindata["login_rufnummer"] == "06501234567"
assert cli.yessssms._logindata["login_passwort"] == "MySecre3tPassw0rd"
assert cli.yessssms._login_url == "mock://kontomanager.at/index.php"
assert cli.yessssms._logout_url == "mock://kontomanager.at/index.php?dologout=2"
assert cli.yessssms._kontomanager == "mock://kontomanager.at/kundendaten.php"
assert cli.yessssms._sms_form_url == "mock://kontomanager.at/websms.php"
assert cli.yessssms._send_sms_url == "mock://kontomanager.at/websms_send.php"
assert cli.recipient == "+43664123123123"
assert cli.message == "Bilde mir nicht ein was rechts zu wissen"
def test_goood_cli_mocked_config_file(valid_goood_mock_connection, mocked_config_file):
"""Test CLI config file."""
if int(sys.version.split(".")[1]) < 7: # don't run test on 3.6
pytest.skip("issue with mock_open")
testargs = ["yessssms", "-m", "Bilde mir nicht ein was rechts zu wissen"]
with mock.patch.object(sys, "argv", testargs):
cli = CLI()
print(cli.config_files)
assert cli.yessssms._logindata["login_rufnummer"] == "03211234567"
assert cli.yessssms._logindata["login_passwort"] == "MySecr3t"
assert cli.yessssms._provider == "goood"
assert cli.yessssms._login_url == "https://goood.kontomanager.at/index.php"
assert (
cli.yessssms._logout_url
== "https://goood.kontomanager.at/index.php?dologout=2"
)
assert (
cli.yessssms._kontomanager
== "https://goood.kontomanager.at/kundendaten.php"
)
assert cli.yessssms._sms_form_url == "https://goood.kontomanager.at/websms.php"
assert (
cli.yessssms._send_sms_url
== "https://goood.kontomanager.at/websms_send.php"
)
assert cli.recipient == "+43664123123123"
assert cli.message == "Bilde mir nicht ein was rechts zu wissen"
def test_cli_mocked_config_file_error(
valid_mock_connection, mocked_config_file_error, capsys
):
"""Test CLI config file error."""
if int(sys.version.split(".")[1]) < 7: # don't run test on 3.6
pytest.skip("issue with mock_open")
testargs = [
"yessssms",
"-m",
"Bilde mir nicht ein was rechts zu wissen",
"-c",
"/tmp/custom_settings.conf",
]
with mock.patch.object(sys, "argv", testargs):
with pytest.raises(SystemExit) as wrapped_e:
CLI()
assert "error: missing settings or invalid settings." in capsys.readouterr().out
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == 8
assert "/tmp/custom_settings.conf" in CONFIG_FILE_PATHS
def test_cli_suspended_error(
valid_mock_connection, mocked_config_file_custom_provider, suspended_error, capsys
):
"""Test CLI suspended error."""
if int(sys.version.split(".")[1]) < 7: # don't run test on 3.6
pytest.skip("issue with mock_open")
testargs = ["yessssms", "-m", "Bilde mir nicht ein was rechts zu wissen"]
with mock.patch.object(sys, "argv", testargs):
with pytest.raises(SystemExit) as wrapped_e:
CLI()
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == 4
assert (
"error: your account was suspended because of 3 failed login attempts."
in capsys.readouterr().out
)
def test_cli_sending_error(
valid_mock_connection, mocked_config_file_custom_provider, sending_error, capsys
):
"""Test CLI SMS sending error."""
if int(sys.version.split(".")[1]) < 7: # don't run test on 3.6
pytest.skip("issue with mock_open")
testargs = ["yessssms", "-m", "Bilde mir nicht ein was rechts zu wissen"]
with mock.patch.object(sys, "argv", testargs):
with pytest.raises(SystemExit) as wrapped_e:
CLI()
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == 5
assert "error: could not send SMS" in capsys.readouterr().out
def test_cli_unsupported_chars_error(
valid_mock_connection,
mocked_config_file_custom_provider,
unsupported_chars_error,
capsys,
):
"""Test CLI unsupported chars error."""
if int(sys.version.split(".")[1]) < 7: # don't run test on 3.6
pytest.skip("issue with mock_open")
testargs = ["yessssms", "-m", "Bilde mir nicht ein was rechts zu wissen"]
with mock.patch.object(sys, "argv", testargs):
with pytest.raises(SystemExit) as wrapped_e:
CLI()
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == 6
assert "error: message contains unsupported character(s)" in capsys.readouterr().out
def test_cli_empty_message_error(
valid_mock_connection,
mocked_config_file_custom_provider,
empty_message_error,
capsys,
):
"""Test CLI empty_message error."""
if int(sys.version.split(".")[1]) < 7: # don't run test on 3.6
pytest.skip("issue with mock_open")
testargs = ["yessssms", "-m", "Bilde mir nicht ein was rechts zu wissen"]
with mock.patch.object(sys, "argv", testargs):
with pytest.raises(SystemExit) as wrapped_e:
CLI()
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == 7
assert "error: cannot send empty message" in capsys.readouterr().out
def test_connection_error(config, connection_error):
"""Test connection error."""
sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)
with pytest.raises(YesssSMS.YesssSMS.ConnectionError):
sms.login_data_valid()
def test_cli_config_file(valid_connection, config_file):
"""Test CLI config file."""
testargs = ["yessssms", "-m", "Blablabla", "-t", "03141512345"]
with mock.patch.object(sys, "argv", testargs):
cli = CLI()
assert cli.message == "Blablabla"
assert cli.recipient == "03141512345"
assert cli.yessssms._logindata["login_rufnummer"] == "03141592653"
assert cli.yessssms._logindata["login_passwort"] == "MySecr3t"
assert cli.yessssms._provider == "yesss"
def test_cli_connection_error(config, connection_error, capsys):
"""Test connection error."""
testargs = [
"yessssms",
"--test",
"-l",
"06641234567",
"-p",
"passw0rd",
"-t",
"+43676564736",
]
with mock.patch.object(sys, "argv", testargs):
with pytest.raises(SystemExit) as wrapped_e:
CLI()
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == 3
assert "error: could not connect to provider. " in capsys.readouterr().out
def test_login_url_getter(
config,
):
"""Test login url getter."""
sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)
login_url = sms.get_login_url()
assert login_url == YesssSMS.const.PROVIDER_URLS["yesss"]["LOGIN_URL"]
def test_provider_getter(
config,
):
"""Test provider getter."""
sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD, provider="goood")
provider = sms.get_provider()
# pylint: disable=protected-access
assert provider == sms._provider
def test_credentials_work(
config,
):
"""Test for working credentials."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)
m.register_uri(
"POST",
# pylint: disable=protected-access
_LOGIN_URL,
status_code=302,
# pylint: disable=protected-access
headers={"location": sms._kontomanager},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._kontomanager,
status_code=200,
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
assert sms.version() == VERSION
print(
"user: {}, pass: {}, to: {}".format(
LOGIN, YESSS_PASSWD[0] + (len(YESSS_PASSWD) - 1) * "*", YESSS_TO
)
)
assert sms.login_data_valid() is True
# pylint: disable=protected-access
assert isinstance(sms._logindata["login_rufnummer"], str)
# pylint: disable=protected-access
assert isinstance(sms._logindata["login_passwort"], str)
# pylint: disable=protected-access
assert len(sms._logindata["login_rufnummer"]) > 10
# pylint: disable=protected-access
assert sms._logindata["login_passwort"]
def test_login(
config,
):
"""Test if login works."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": sms._kontomanager},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._kontomanager,
status_code=200,
text="test..." + LOGIN + "</a>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
# pylint: disable=protected-access
session, request = sms._login(requests.Session(), get_request=True)
# pylint: disable=protected-access
session.get(sms._logout_url)
# pylint: disable=protected-access
assert sms._logindata["login_rufnummer"][-7:] + "</a>" in request.text
# pylint: disable=protected-access
assert request.url == sms._kontomanager
def test_empty_message(config, valid_connection):
"""Test error handling for empty message."""
sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)
with pytest.raises(ValueError):
sms.send(YESSS_TO, "")
with pytest.raises(sms.EmptyMessageError):
sms.send(YESSS_TO, "")
def test_login_error(
config,
):
"""Test error handling of faulty login."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS("0000000000", "2d4faa0ea6f55813")
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._login_url,
status_code=200,
text="<strong>Login nicht erfolgreich",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
with pytest.raises(sms.LoginError):
sms.send(YESSS_TO, "test")
def test_login_empty_password_error(
config,
):
"""Test error handling of empty password."""
with pytest.raises(YesssSMS.YesssSMS.MissingLoginCredentialsError):
_ = YesssSMS.YesssSMS("0000000000", None)
def test_login_empty_login_error(invalid_login): # xxxxxx
"""Test error handling of empty login."""
sms = YesssSMS.YesssSMS("", "2d4faa0ea6f55813")
with pytest.raises(sms.LoginError):
sms.send(YESSS_TO, "test")
def test_no_recipient_error(
config,
):
"""Test error handling of no recipient."""
sms = YesssSMS.YesssSMS("0000000000", "2d4faa0ea6f55813")
with pytest.raises(sms.NoRecipientError):
sms.send("", "test")
def test_recipient_not_str_error(
config,
):
"""Test error handling of wrong recipient data type."""
sms = YesssSMS.YesssSMS("0000000000", "2d4faa0ea6f55813")
with pytest.raises(ValueError):
sms.send(176264916361239, "test")
def test_message_sending_error(
config,
):
"""Test handling of status codes other than 200 and 302."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS("0000000000", "2d4faa0ea6f55813")
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": sms._kontomanager},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._kontomanager,
status_code=200,
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._sms_form_url,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._send_sms_url,
status_code=400,
text="<h1>OOOOPS</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
with pytest.raises(sms.SMSSendingError):
sms.send(YESSS_TO, "test")
def test_unsupported_chars_error(
config,
):
"""Test error handling for unsupported chars."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS("0000000000", "2d4faa0ea6f55813")
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": sms._kontomanager},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._kontomanager,
status_code=200,
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._sms_form_url,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._send_sms_url,
status_code=200,
text=_UNSUPPORTED_CHARS_STRING,
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
with pytest.raises(sms.UnsupportedCharsError):
sms.send(YESSS_TO, "test")
def test_sms_sending_error(
config,
):
"""Test error handling for missing success string."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS("0000000000", "2d4faa0ea6f55813")
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": sms._kontomanager},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._kontomanager,
status_code=200,
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._sms_form_url,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._send_sms_url,
status_code=200,
text="some text i'm not looking for",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
with pytest.raises(sms.SMSSendingError):
sms.send(YESSS_TO, "test")
def test_login_suspended_error(
config,
):
"""Test error handling for suspended account."""
with requests_mock.Mocker() as m:
# non existing user and password
sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._login_url,
status_code=200,
text="<strong>Login nicht erfolgreich bla Wegen "
"3 ungültigen Login-Versuchen ist Ihr Account "
"für eine Stunde gesperrt.",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
assert sms.login_data_valid() is False
# LoginError
with pytest.raises(sms.AccountSuspendedError):
sms.send(YESSS_TO, "test")
assert sms.account_is_suspended() is True
def test_send_sms(
config,
):
"""Test SMS sending."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": sms._kontomanager},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._kontomanager,
status_code=200,
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._sms_form_url,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
sms._send_sms_url,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
try:
sms.send(
YESSS_TO,
"testing YesssSMS version {}, seems to work! :)".format(sms.version()),
)
except (ValueError, RuntimeError):
pytest.fail("Exception raised while sending SMS")
def test_cli_print_config_file(config, capsys):
"""Test for correct config file output."""
CLI.print_config_file()
captured = capsys.readouterr()
assert captured.out == CONFIG_FILE_CONTENT
def test_cli_version_info(config, capsys):
"""Test for correct version info print."""
CLI.version_info()
captured = capsys.readouterr()
assert captured.out == "yessssms " + VERSION + "\n"
def test_cli_boolean_args(
config,
):
"""Test parser for boolean arguments."""
args = CLI.parse_args(["--version"])
assert args.version is True
args = CLI.parse_args(["--test"])
assert args.test is True
args = CLI.parse_args(["--print-config-file"])
assert args.print_config_file is True
args = CLI.parse_args(["-T"])
assert args.check_login is True
def test_cli_argparse(
config,
):
"""Test parser for different arguments."""
args = CLI.parse_args(["-t", "0664123456"])
assert args.recipient == "0664123456"
args = CLI.parse_args(["--to", "0664123456"])
assert args.recipient == "0664123456"
args = CLI.parse_args(["-l", "0676456789123"])
assert args.login == "0676456789123"
args = CLI.parse_args(["--login", "0676456789123"])
assert args.login == "0676456789123"
args = CLI.parse_args(["-p", "s3cret..11"])
assert args.password == "s3cret..11"
args = CLI.parse_args(["--password", "s3cret..11"])
assert args.password == "s3cret..11"
args = CLI.parse_args(["-c", ".yessssms.config"])
assert args.configfile == ".yessssms.config"
args = CLI.parse_args(["--configfile", ".yessssms.config"])
assert args.configfile == ".yessssms.config"
args = CLI.parse_args(["--message", "testmessage 123 - can you see this?"])
assert args.message == "testmessage 123 - can you see this?"
args = CLI.parse_args(["-m", "testmessage 123 - can you see this?"])
assert args.message == "testmessage 123 - can you see this?"
args = CLI.parse_args(["--mvno", "YESSS"])
assert args.provider == "YESSS"
args = CLI.parse_args(["--mvno", "EDUCOM"])
assert args.provider == "EDUCOM"
args = CLI.parse_args(["--mvno", "SIMfonie"])
assert args.provider == "SIMfonie"
args = CLI.parse_args(["--mvno", "BLABLABLA"])
assert args.provider == "BLABLABLA"
def test_cli_with_test_args(
config,
):
"""Test command line arguments with --test."""
testargs = [
"yessssms",
"--test",
"-l",
"06641234567",
"-p",
"passw0rd",
"-t",
"+43676564736",
]
with mock.patch.object(sys, "argv", testargs):
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
_LOGIN_URL,
status_code=302,
# pylint: disable=protected-access
headers={"location": _KONTOMANAGER_URL},
)
m.register_uri("GET", _KONTOMANAGER_URL, status_code=200)
m.register_uri(
"GET",
# pylint: disable=protected-access
_SMS_FORM_URL,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
_SEND_SMS_URL,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri("GET", _LOGOUT_URL, status_code=200)
val = CLI().exit_status
assert val == 0
def test_cli_with_printconfigfile_arg(config, capsys):
"""Test print-config-file parameter."""
testargs = ["yessssms", "--print-config-file"]
with mock.patch.object(sys, "argv", testargs):
CLI()
captured = capsys.readouterr()
assert captured.out == CONFIG_FILE_CONTENT
def test_cli_with_version_arg(config, capsys):
"""Test version cli argument."""
testargs = ["yessssms", "--version"]
with mock.patch.object(sys, "argv", testargs):
CLI()
captured = capsys.readouterr()
assert captured.out == "yessssms " + VERSION + "\n"
def test_cli_with_no_arg(config, capsys):
"""Test handling of no arguments."""
testargs = ["yessssms"]
with mock.patch.object(sys, "argv", testargs):
CLI()
captured = capsys.readouterr()
assert "usage: yessssms " in captured.out
def test_cli_with_test_login_arg(config, capsys):
"""Test check-login argument."""
testargs = ["yessssms", "-m", "test", "-l", "06641234567", "-p", "passw0rd", "-T"]
with mock.patch.object(sys, "argv", testargs):
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
# pylint: disable=protected-access
_LOGIN_URL,
status_code=302,
# pylint: disable=protected-access
headers={"location": _KONTOMANAGER_URL},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
_KONTOMANAGER_URL,
status_code=200,
)
m.register_uri(
"GET",
# pylint: disable=protected-access
_SMS_FORM_URL,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
_SEND_SMS_URL,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
_LOGOUT_URL,
status_code=200,
)
val = CLI().exit_status
captured = capsys.readouterr()
assert val == 0
assert captured.out == "ok: login data is valid.\n"
def test_cli_with_invalid_test_login_arg(config, capsys):
"""Test check-login argument."""
testargs = ["yessssms", "-m", "test", "-l", "06641234567", "-p", "passw0rd", "-T"]
with mock.patch.object(sys, "argv", testargs):
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
# pylint: disable=protected-access
_LOGIN_URL,
status_code=200,
text="Bla bla<strong>Login nicht erfolgreichBlaBla",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
_LOGOUT_URL,
status_code=200,
)
val = CLI().exit_status
captured = capsys.readouterr()
assert val == 1
assert "error: login data is NOT valid" in captured.out
@mock.patch("YesssSMS.CLI.CONFIG_FILE_PATHS", [])
def test_cli_with_no_login_or_password(config, capsys, valid_connection):
"""Test empty login parameters."""
testargs = ["yessssms", "-m", "test"] # "-l", "\"\"", "-p", "\"\""]
# print("test:..." + str(YesssSMS.const.CONFIG_FILE_PATHS))
with (mock.patch.object(sys, "argv", testargs)):
with pytest.raises(SystemExit) as wrapped_e:
CLI()
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == 2
captured = capsys.readouterr()
assert "error: no username or password defined " in captured.out
def test_cli_with_mvno_arg_error(
config,
):
"""Test command line arguments with wrong --mvno."""
from YesssSMS.YesssSMS import YesssSMS
testargs = [
"yessssms",
"--test",
"-l",
"06641234567",
"-p",
"passw0rd",
"-t",
"+43676564736",
"--mvno",
"UNKNOWN_provider",
]
with mock.patch.object(sys, "argv", testargs):
with pytest.raises(YesssSMS.UnsupportedProviderError):
cli_run()
def test_cli_stdin(
config,
):
"""Test command line with stdin."""
from YesssSMS.YesssSMS import MAX_MESSAGE_LENGTH_STDIN
testargs = ["yessssms", "--test", "-l", "06641234567", "-p", "passw0rd", "-m", "-"]
in_message = """Da steh’ ich nun, ich armer Thor!
Und bin so klug als wie zuvor;
Heiße Magister, heiße Doctor gar,
Und ziehe schon an die zehen Jahr,
Herauf, herab und quer und krumm,
Meine Schüler an der Nase herum –
Und sehe, daß wir nichts wissen können!
Das will mir schier das Herz verbrennen.
Zwar bin ich gescheidter als alle die Laffen,
Doctoren, Magister, Schreiber und Pfaffen;
Mich plagen keine Scrupel noch Zweifel,
Fürchte mich weder vor Hölle noch Teufel –
Dafür ist mir auch alle Freud’ entrissen,
Bilde mir nicht ein was rechts zu wissen,
Bilde mir nicht ein, ich könnte was lehren,
Die Menschen zu bessern und zu bekehren.
Auch hab’ ich weder Gut noch Geld,
Noch Ehr’ und Herrlichkeit der Welt.
Es möchte kein Hund so länger leben!
Drum hab’ ich mich der Magie ergeben,
Ob mir durch Geistes Kraft und Mund
Nicht manch Geheimniß würde kund;
Daß ich nicht mehr mit sauerm Schweiß,
Zu sagen brauche, was ich nicht weiß;"""
with mock.patch.object(sys, "argv", testargs):
with mock.patch.object(sys, "stdin", in_message):
with requests_mock.Mocker() as m:
m.register_uri(
"POST",
# pylint: disable=protected-access
_LOGIN_URL,
status_code=302,
# pylint: disable=protected-access
headers={"location": _KONTOMANAGER_URL},
)
m.register_uri(
"GET",
# pylint: disable=protected-access
_KONTOMANAGER_URL,
status_code=200,
)
m.register_uri(
"GET",
# pylint: disable=protected-access
_SMS_FORM_URL,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
# pylint: disable=protected-access
_SEND_SMS_URL,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
_LOGOUT_URL,
status_code=200,
)
message = CLI().message
assert message.startswith(
"""Da steh’ ich nun, ich armer Thor!
Und bin so klug als wie zuvor;"""
)
assert message == in_message[:MAX_MESSAGE_LENGTH_STDIN]
def test_cli_with_mvno_educom_arg(
config,
):
"""Test command line arguments with --mvno."""
from YesssSMS.const import PROVIDER_URLS
provider = PROVIDER_URLS["EDUCOM".lower()]
login_url = provider["LOGIN_URL"]
logout_url = provider["LOGOUT_URL"]
kontomanager_url = provider["KONTOMANAGER_URL"]
sms_form_url = provider["WEBSMS_FORM_URL"]
send_sms_url = provider["SEND_SMS_URL"]
testargs = [
"yessssms",
"--test",
"-l",
"06641234567",
"-p",
"passw0rd",
"-t",
"+43676564736",
"--mvno",
"EDUCOM",
]
with mock.patch.object(sys, "argv", testargs):
with requests_mock.Mocker() as m:
m.register_uri(
"GET",
# pylint: disable=protected-access
sms_form_url,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": kontomanager_url},
)
m.register_uri("GET", kontomanager_url, status_code=200)
m.register_uri(
"POST",
send_sms_url,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri("GET", logout_url, status_code=200)
sms = CLI().yessssms
assert "educom" == sms._provider
assert login_url == sms._login_url
assert logout_url == sms._logout_url
assert kontomanager_url == sms._kontomanager
assert send_sms_url == sms._send_sms_url
assert sms_form_url == sms._sms_form_url
assert login_url == "https://educom.kontomanager.at/index.php"
assert logout_url == "https://educom.kontomanager.at/index.php?dologout=2"
assert kontomanager_url == "https://educom.kontomanager.at/kundendaten.php"
assert send_sms_url == "https://educom.kontomanager.at/websms_send.php"
assert sms_form_url == "https://educom.kontomanager.at/websms.php"
def test_cli_with_mvno_simfonie_arg(
config,
):
"""Test command line arguments with --mvno."""
from YesssSMS.const import PROVIDER_URLS
provider = PROVIDER_URLS["SIMfonie".lower()]
login_url = provider["LOGIN_URL"]
logout_url = provider["LOGOUT_URL"]
kontomanager_url = provider["KONTOMANAGER_URL"]
sms_form_url = provider["WEBSMS_FORM_URL"]
send_sms_url = provider["SEND_SMS_URL"]
testargs = [
"yessssms",
"--test",
"-l",
"06641234567",
"-p",
"passw0rd",
"-t",
"+43676564736",
"--mvno",
"SIMfonie",
]
with mock.patch.object(sys, "argv", testargs):
with requests_mock.Mocker() as m:
m.register_uri(
"GET",
# pylint: disable=protected-access
sms_form_url,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": kontomanager_url},
)
m.register_uri("GET", kontomanager_url, status_code=200)
m.register_uri(
"POST",
send_sms_url,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri("GET", logout_url, status_code=200)
sms = CLI().yessssms
assert "simfonie" == sms._provider
assert login_url == sms._login_url
assert logout_url == sms._logout_url
assert kontomanager_url == sms._kontomanager
assert send_sms_url == sms._send_sms_url
assert sms_form_url == sms._sms_form_url
assert login_url == "https://simfonie.kontomanager.at/index.php"
assert logout_url == "https://simfonie.kontomanager.at/index.php?dologout=2"
assert (
kontomanager_url == "https://simfonie.kontomanager.at/kundendaten.php"
)
assert sms_form_url == "https://simfonie.kontomanager.at/websms.php"
assert send_sms_url == "https://simfonie.kontomanager.at/websms_send.php"
def test_cli_with_mvno_div_arg(
config,
):
"""Test command line arguments with --mvno."""
from YesssSMS.const import PROVIDER_URLS
all_providers = PROVIDER_URLS.keys()
for provider in all_providers:
current_provider = PROVIDER_URLS[provider.lower()]
login_url = current_provider["LOGIN_URL"]
logout_url = current_provider["LOGOUT_URL"]
kontomanager_url = current_provider["KONTOMANAGER_URL"]
sms_form_url = current_provider["WEBSMS_FORM_URL"]
send_sms_url = current_provider["SEND_SMS_URL"]
testargs = [
"yessssms",
"--test",
"-l",
"06641234567",
"-p",
"passw0rd",
"-t",
"+43676564736",
"--mvno",
provider.upper(),
]
with mock.patch.object(sys, "argv", testargs):
with requests_mock.Mocker() as m:
m.register_uri(
"GET",
# pylint: disable=protected-access
sms_form_url,
status_code=200,
text=TEST_FORM_TOKEN_SAMPLE,
)
m.register_uri(
"POST",
login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": kontomanager_url},
)
m.register_uri("GET", kontomanager_url, status_code=200)
m.register_uri(
"POST",
send_sms_url,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri("GET", logout_url, status_code=200)
cli = CLI()
sms = cli.yessssms
assert provider == sms._provider
assert login_url == sms._login_url
assert logout_url == sms._logout_url
assert kontomanager_url == sms._kontomanager
assert send_sms_url == sms._send_sms_url
assert sms_form_url == sms._sms_form_url
def test_default_config_file_paths(
config,
):
"""Test default config file paths."""
assert "~/.config/yessssms.conf" in CONFIG_FILE_PATHS
assert "/etc/yessssms.conf" in CONFIG_FILE_PATHS
def test_custom_provider_setting(
config,
):
"""Test custom provider setting."""
sms = YesssSMS.YesssSMS(
LOGIN,
YESSS_PASSWD,
custom_provider={
"LOGIN_URL": "https://example.com/login",
"LOGOUT_URL": "https://example.com/logout",
"KONTOMANAGER_URL": "https://example.com/kontomanager",
"WEBSMS_FORM_URL": "https://example.com/send_websms",
"SEND_SMS_URL": "https://example.com/websms",
},
)
assert sms._login_url == "https://example.com/login"
assert sms._logout_url == "https://example.com/logout"
assert sms._kontomanager == "https://example.com/kontomanager"
assert sms._sms_form_url == "https://example.com/send_websms"
assert sms._send_sms_url == "https://example.com/websms"
def test_env_var_settings_set(config, environment_vars_set_wowww):
"""Test setting of environment variables in YesssSMS class."""
sms = YesssSMS.YesssSMS()
assert sms._logindata["login_rufnummer"] == "03211234567"
assert sms._logindata["login_passwort"] == "MySecr3t"
assert sms._provider == "wowww"
os.environ["YESSSSMS_PROVIDER"] = "goood"
sms = YesssSMS.YesssSMS("123456", "password")
assert sms._logindata["login_rufnummer"] == "03211234567"
assert sms._logindata["login_passwort"] == "MySecr3t"
assert sms._provider == "goood"
del os.environ["YESSSSMS_PROVIDER"]
sms = YesssSMS.YesssSMS("123456")
assert sms._logindata["login_rufnummer"] == "03211234567"
assert sms._logindata["login_passwort"] == "MySecr3t"
assert sms._provider == "yesss"
del os.environ["YESSSSMS_LOGIN"]
sms = YesssSMS.YesssSMS("123456", "password")
assert sms._logindata["login_rufnummer"] == "123456"
assert sms._logindata["login_passwort"] == "password"
assert sms._provider == "yesss"
def test_read_no_env_config():
"""Test setting of environment variables in CLI."""
data = ""
with mock.patch(
"configparser.open",
# "builtins.open",
mock.mock_open(read_data=data),
):
testargs = ["yessssms", "-m", "Bilde mir nicht ein was rechts zu wissen"]
with (mock.patch.object(sys, "argv", testargs)):
with pytest.raises(SystemExit) as wrapped_e:
cli = CLI()
assert cli.read_env_config() is None
assert wrapped_e.type == SystemExit
assert wrapped_e.value.code == 2
def test_read_env_config1(valid_wowww_mock_connection, environment_vars_set_wowww):
"""Test setting of environment variables in CLI."""
testargs = ["yessssms", "-m", "Bilde mir nicht ein was rechts zu wissen"]
with (mock.patch.object(sys, "argv", testargs)):
cli = CLI()
(login, passwd, rec, prov, custom_urls) = cli.read_env_config()
assert login == "03211234567"
assert passwd == "MySecr3t"
assert rec == "066356789780"
assert prov == "wowww"
assert custom_urls is None
def test_read_env_config2(config, environment_vars_set_wowww):
"""Test setting of environment variables in CLI."""
sms = YesssSMS.YesssSMS()
assert sms._provider == "wowww"
def test_read_env_config3(config, environment_vars_set):
"""Test setting of environment variables in CLI."""
os.environ["YESSSSMS_PROVIDER"] = "goood"
sms = YesssSMS.YesssSMS()
assert sms._provider == "goood"
def test_read_env_config4(config, environment_vars_set):
"""Test setting of environment variables in CLI."""
del os.environ["YESSSSMS_PROVIDER"]
sms = YesssSMS.YesssSMS()
assert sms._provider == "yesss"
def test_csrf_token_error_1(
config,
valid_connection,
):
"""Test error from csrf token handling."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS(
"0000000000",
"2d4faa0ea6f55813",
)
from YesssSMS.const import PROVIDER_URLS
provider = PROVIDER_URLS[sms.get_provider().lower()]
login_url = provider["LOGIN_URL"]
kontomanager_url = provider["KONTOMANAGER_URL"]
send_sms_url = provider["SEND_SMS_URL"]
m.register_uri(
"POST",
login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": kontomanager_url},
)
m.register_uri("GET", kontomanager_url, status_code=200)
csrf_text = (
"<form action='websms_send.php' name='sms' id='smsform_not_found'"
" method='post' onSubmit=\"return validate()\">"
'<input type="hidden" name="token_not_found" value="'
"f2ca1bb6c7e907d06dafe4687e579fc"
"e76b37e4e93b7605022da52e6ccc26fd2\"><div class='form-group'>"
" <div class='input-row'>"
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._sms_form_url,
status_code=500,
text=csrf_text,
)
m.register_uri(
"POST",
send_sms_url,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
with pytest.raises(sms.SMSSendingError) as ex:
sms.send(YESSS_TO, "test")
assert str(ex).startswith(
"<ExceptionInfo SMSSendingError('YesssSMS: could not get token (1)"
)
def test_csrf_token_error_2(
config,
valid_connection,
):
"""Test error from csrf token handling."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS(
"0000000000",
"2d4faa0ea6f55813",
)
from YesssSMS.const import PROVIDER_URLS
provider = PROVIDER_URLS[sms.get_provider().lower()]
login_url = provider["LOGIN_URL"]
kontomanager_url = provider["KONTOMANAGER_URL"]
send_sms_url = provider["SEND_SMS_URL"]
m.register_uri(
"POST",
login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": kontomanager_url},
)
m.register_uri("GET", kontomanager_url, status_code=200)
csrf_text = (
"<form action='websms_send.php' name='sms' id='smsform_not_found'"
" method='post' onSubmit=\"return validate()\">"
'<input type="hidden" name="token_not_found" value="'
"f2ca1bb6c7e907d06dafe4687e579fc"
"e76b37e4e93b7605022da52e6ccc26fd2\"><div class='form-group'>"
" <div class='input-row'>"
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._sms_form_url,
status_code=200,
text=csrf_text,
)
m.register_uri(
"POST",
send_sms_url,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
with pytest.raises(sms.SMSSendingError) as ex:
sms.send(YESSS_TO, "test")
assert str(ex).startswith(
'<ExceptionInfo SMSSendingError("YesssSMS: could not get token (2):'
)
# assert str(ex).startswith("YesssSMS: could not get token (2)")
def test_csrf_token_error_3(
config,
valid_connection,
):
"""Test error from csrf token handling."""
with requests_mock.Mocker() as m:
sms = YesssSMS.YesssSMS(
"0000000000",
"2d4faa0ea6f55813",
)
from YesssSMS.const import PROVIDER_URLS
provider = PROVIDER_URLS[sms.get_provider().lower()]
login_url = provider["LOGIN_URL"]
kontomanager_url = provider["KONTOMANAGER_URL"]
send_sms_url = provider["SEND_SMS_URL"]
m.register_uri(
"POST",
login_url,
status_code=302,
# pylint: disable=protected-access
headers={"location": kontomanager_url},
)
m.register_uri("GET", kontomanager_url, status_code=200)
csrf_text = (
"<form action='websms_send.php' name='sms' id='smsform'"
" method='post' onSubmit=\"return validate()\">"
'<input type="hidden" name="token" value="'
"\"><div class='form-group'>"
" <div class='input-row'>"
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._sms_form_url,
status_code=200,
text=csrf_text,
)
m.register_uri(
"POST",
send_sms_url,
status_code=200,
text="<h1>Ihre SMS wurde erfolgreich " + "verschickt!</h1>",
)
m.register_uri(
"GET",
# pylint: disable=protected-access
sms._logout_url,
status_code=200,
)
with pytest.raises(sms.SMSSendingError) as ex:
sms.send(YESSS_TO, "test")
assert str(ex).startswith(
"<ExceptionInfo SMSSendingError('YesssSMS: could not get token (3)'"
)
|
[] |
[] |
[
"YESSSSMS_PROVIDER",
"YESSSSMS_RECIPIENT",
"YESSSSMS_PASSWD",
"YESSSSMS_LOGIN"
] |
[]
|
["YESSSSMS_PROVIDER", "YESSSSMS_RECIPIENT", "YESSSSMS_PASSWD", "YESSSSMS_LOGIN"]
|
python
| 4 | 0 | |
appdata_test.go
|
// Copyright (c) 2013-2014 The btcsuite developers
// Copyright (c) 2018 The Flo developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package floutil_test
import (
"os"
"os/user"
"path/filepath"
"runtime"
"testing"
"unicode"
"github.com/bitspill/floutil"
)
// TestAppDataDir tests the API for AppDataDir to ensure it gives expected
// results for various operating systems.
func TestAppDataDir(t *testing.T) {
// App name plus upper and lowercase variants.
appName := "myapp"
appNameUpper := string(unicode.ToUpper(rune(appName[0]))) + appName[1:]
appNameLower := string(unicode.ToLower(rune(appName[0]))) + appName[1:]
// When we're on Windows, set the expected local and roaming directories
// per the environment vars. When we aren't on Windows, the function
// should return the current directory when forced to provide the
// Windows path since the environment variables won't exist.
winLocal := "."
winRoaming := "."
if runtime.GOOS == "windows" {
localAppData := os.Getenv("LOCALAPPDATA")
roamingAppData := os.Getenv("APPDATA")
if localAppData == "" {
localAppData = roamingAppData
}
winLocal = filepath.Join(localAppData, appNameUpper)
winRoaming = filepath.Join(roamingAppData, appNameUpper)
}
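// For example, with LOCALAPPDATA=C:\Users\u\AppData\Local (a purely
// hypothetical value) and appName "myapp", winLocal becomes
// C:\Users\u\AppData\Local\Myapp.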
// Get the home directory to use for testing expected results.
var homeDir string
usr, err := user.Current()
if err != nil {
t.Errorf("user.Current: %v", err)
return
}
homeDir = usr.HomeDir
// Mac app data directory.
macAppData := filepath.Join(homeDir, "Library", "Application Support")
tests := []struct {
goos string
appName string
roaming bool
want string
}{
// Various combinations of application name casing, leading
// period, operating system, and roaming flags.
{"windows", appNameLower, false, winLocal},
{"windows", appNameUpper, false, winLocal},
{"windows", "." + appNameLower, false, winLocal},
{"windows", "." + appNameUpper, false, winLocal},
{"windows", appNameLower, true, winRoaming},
{"windows", appNameUpper, true, winRoaming},
{"windows", "." + appNameLower, true, winRoaming},
{"windows", "." + appNameUpper, true, winRoaming},
{"linux", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"linux", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"darwin", appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameLower, false, filepath.Join(macAppData, appNameUpper)},
{"darwin", "." + appNameUpper, false, filepath.Join(macAppData, appNameUpper)},
{"openbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"openbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"freebsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"netbsd", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"plan9", appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameLower, false, filepath.Join(homeDir, appNameLower)},
{"plan9", "." + appNameUpper, false, filepath.Join(homeDir, appNameLower)},
{"unrecognized", appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameLower, false, filepath.Join(homeDir, "."+appNameLower)},
{"unrecognized", "." + appNameUpper, false, filepath.Join(homeDir, "."+appNameLower)},
// No application name provided, so expect current directory.
{"windows", "", false, "."},
{"windows", "", true, "."},
{"linux", "", false, "."},
{"darwin", "", false, "."},
{"openbsd", "", false, "."},
{"freebsd", "", false, "."},
{"netbsd", "", false, "."},
{"plan9", "", false, "."},
{"unrecognized", "", false, "."},
// Single dot provided for application name, so expect current
// directory.
{"windows", ".", false, "."},
{"windows", ".", true, "."},
{"linux", ".", false, "."},
{"darwin", ".", false, "."},
{"openbsd", ".", false, "."},
{"freebsd", ".", false, "."},
{"netbsd", ".", false, "."},
{"plan9", ".", false, "."},
{"unrecognized", ".", false, "."},
}
t.Logf("Running %d tests", len(tests))
for i, test := range tests {
ret := floutil.TstAppDataDir(test.goos, test.appName, test.roaming)
if ret != test.want {
t.Errorf("appDataDir #%d (%s) does not match - "+
"expected got %s, want %s", i, test.goos, ret,
test.want)
continue
}
}
}
|
[
"\"LOCALAPPDATA\"",
"\"APPDATA\""
] |
[] |
[
"APPDATA",
"LOCALAPPDATA"
] |
[]
|
["APPDATA", "LOCALAPPDATA"]
|
go
| 2 | 0 | |
cmd/gravity-exporter-stan/gravity-exporter-stan.go
|
package main
import (
"fmt"
"os"
"strings"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
app "github.com/BrobridgeOrg/gravity-exporter-stan/pkg/app/instance"
)
func init() {
debugLevel := log.InfoLevel
switch os.Getenv("GRAVITY_DEBUG") {
case log.TraceLevel.String():
debugLevel = log.TraceLevel
case log.DebugLevel.String():
debugLevel = log.DebugLevel
case log.ErrorLevel.String():
debugLevel = log.ErrorLevel
}
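// For example, GRAVITY_DEBUG=debug enables debug logging; an empty or
// unrecognized value keeps the default info level.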
log.SetLevel(debugLevel)
fmt.Printf("Debug level is set to \"%s\"\n", debugLevel.String())
// From the environment
viper.SetEnvPrefix("GRAVITY_EXPORTER_STAN")
viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
viper.AutomaticEnv()
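// With the prefix and key replacer configured above, a configuration key such
// as "nats.host" could be overridden through the environment variable
// GRAVITY_EXPORTER_STAN_NATS_HOST ("nats.host" is only an illustrative key,
// not necessarily one this exporter defines).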
// From config file
viper.SetConfigName("config")
viper.AddConfigPath("./")
viper.AddConfigPath("./configs")
if err := viper.ReadInConfig(); err != nil {
log.Warn("No configuration file was loaded")
}
}
func main() {
// Initializing application
a := app.NewAppInstance()
err := a.Init()
if err != nil {
log.Fatal(err)
return
}
// Starting application
err = a.Run()
if err != nil {
log.Fatal(err)
return
}
}
|
[
"\"GRAVITY_DEBUG\""
] |
[] |
[
"GRAVITY_DEBUG"
] |
[]
|
["GRAVITY_DEBUG"]
|
go
| 1 | 0 | |
python-micro-service-master/.venv/lib/site-packages/winpty/ptyprocess.py
|
# -*- coding: utf-8 -*-
# Standard library imports
import codecs
import os
import shlex
import signal
import socket
import subprocess
import threading
import time
try:
from shutil import which
except ImportError:
from backports.shutil_which import which
# Local imports
from .winpty_wrapper import PTY, PY2
class PtyProcess(object):
"""This class represents a process running in a pseudoterminal.
The main constructor is the :meth:`spawn` classmethod.
"""
def __init__(self, pty):
assert isinstance(pty, PTY)
self.pty = pty
self.pid = pty.pid
self.read_blocking = bool(os.environ.get('PYWINPTY_BLOCK', 1))
self.closed = False
self.flag_eof = False
self.decoder = codecs.getincrementaldecoder('utf-8')(errors='strict')
# Used by terminate() to give kernel time to update process status.
# Time in seconds.
self.delayafterterminate = 0.1
# Used by close() to give kernel time to update process status.
# Time in seconds.
self.delayafterclose = 0.1
# Set up our file reader sockets.
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.bind(("10.0.0.7", 0))
address = self._server.getsockname()
self._server.listen(1)
# Read from the pty in a thread.
self._thread = threading.Thread(target=_read_in_thread,
args=(address, self.pty, self.read_blocking))
self._thread.setDaemon(True)
self._thread.start()
self.fileobj, _ = self._server.accept()
self.fd = self.fileobj.fileno()
@classmethod
def spawn(cls, argv, cwd=None, env=None, dimensions=(24, 80)):
"""Start the given command in a child process in a pseudo terminal.
This does all the setting up the pty, and returns an instance of
PtyProcess.
Dimensions of the pseudoterminal used for the subprocess can be
specified as a tuple (rows, cols), or the default (24, 80) will be
used.
"""
if isinstance(argv, str):
argv = shlex.split(argv, posix=False)
if not isinstance(argv, (list, tuple)):
raise TypeError("Expected a list or tuple for argv, got %r" % argv)
# Shallow copy of argv so we can modify it
argv = argv[:]
command = argv[0]
env = env or os.environ
path = env.get('PATH', os.defpath)
command_with_path = which(command, path=path)
if command_with_path is None:
raise FileNotFoundError(
'The command was not found or was not ' +
'executable: %s.' % command
)
command = command_with_path
argv[0] = command
cmdline = ' ' + subprocess.list2cmdline(argv[1:])
cwd = cwd or os.getcwd()
proc = PTY(dimensions[1], dimensions[0])
# Create the environment string.
envStrs = []
for (key, value) in env.items():
envStrs.append('%s=%s' % (key, value))
env = '\0'.join(envStrs) + '\0'
if PY2:
command = _unicode(command)
cwd = _unicode(cwd)
cmdline = _unicode(cmdline)
env = _unicode(env)
if len(argv) == 1:
proc.spawn(command, cwd=cwd, env=env)
else:
proc.spawn(command, cwd=cwd, env=env, cmdline=cmdline)
inst = cls(proc)
inst._winsize = dimensions
# Set some informational attributes
inst.argv = argv
if env is not None:
inst.env = env
if cwd is not None:
inst.launch_dir = cwd
return inst
@property
def exitstatus(self):
"""The exit status of the process.
"""
return self.pty.exitstatus
def fileno(self):
"""This returns the file descriptor of the pty for the child.
"""
return self.fd
def close(self, force=False):
"""This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores
SIGINT)."""
if not self.closed:
self.pty.close()
self.fileobj.close()
self._server.close()
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise IOError('Could not terminate the child.')
self.fd = -1
self.closed = True
del self.pty
self.pty = None
def __del__(self):
"""This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it.
"""
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
try:
self.close()
except Exception:
pass
def flush(self):
"""This does nothing. It is here to support the interface for a
File-like object. """
pass
def isatty(self):
"""This returns True if the file descriptor is open and connected to a
tty(-like) device, else False."""
return self.isalive()
def read(self, size=1024):
"""Read and return at most ``size`` characters from the pty.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
data = self.fileobj.recv(size)
if not data:
self.flag_eof = True
raise EOFError('Pty is closed')
return self.decoder.decode(data, final=False)
def readline(self):
"""Read one line from the pseudoterminal as bytes.
Can block if there is nothing to read. Raises :exc:`EOFError` if the
terminal was closed.
"""
buf = []
while 1:
try:
ch = self.read(1)
except EOFError:
return ''.join(buf)
buf.append(ch)
if ch == '\n':
return ''.join(buf)
def write(self, s):
"""Write the string ``s`` to the pseudoterminal.
Returns the number of bytes written.
"""
if not self.isalive():
raise EOFError('Pty is closed')
if PY2:
s = _unicode(s)
success, nbytes = self.pty.write(s)
if not success:
raise IOError('Write failed')
return nbytes
def terminate(self, force=False):
"""This forces a child process to terminate."""
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
"""This waits until the child exits. This is a blocking call. This will
not read any data from the child.
"""
while self.isalive():
time.sleep(0.1)
return self.exitstatus
def isalive(self):
"""This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not.
"""
return self.pty and self.pty.isalive()
def kill(self, sig=None):
"""Kill the process with the given signal.
"""
os.kill(self.pid, sig)
def sendcontrol(self, char):
'''Helper method that wraps send() with mnemonic access for sending control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also, sendintr() and sendeof().
'''
char = char.lower()
a = ord(char)
if 97 <= a <= 122:
a = a - ord('a') + 1
byte = bytes([a])
return self.pty.write(byte.decode('utf-8')), byte
d = {'@': 0, '`': 0,
'[': 27, '{': 27,
'\\': 28, '|': 28,
']': 29, '}': 29,
'^': 30, '~': 30,
'_': 31,
'?': 127}
if char not in d:
return 0, b''
byte = bytes([d[char]])
return self.pty.write(byte.decode('utf-8')), byte
def sendeof(self):
"""This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means to work as expected a sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line."""
# Send control character 4 (Ctrl-D)
self.pty.write('\x04')
def sendintr(self):
"""This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. """
# Send control character 3 (Ctrl-C)
self.pty.write('\x03')
def eof(self):
"""This returns True if the EOF exception was ever raised.
"""
return self.flag_eof
def getwinsize(self):
"""Return the window size of the pseudoterminal as a tuple (rows, cols).
"""
return self._winsize
def setwinsize(self, rows, cols):
"""Set the terminal window size of the child tty.
"""
self._winsize = (rows, cols)
self.pty.set_size(cols, rows)
def _read_in_thread(address, pty, blocking):
"""Read data from the pty in a thread.
"""
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(address)
while 1:
data = pty.read(4096, blocking=blocking)
if not data and not pty.isalive():
while not data and not pty.iseof():
data += pty.read(4096, blocking=blocking)
if not data:
try:
client.send(b'')
except socket.error:
pass
break
try:
client.send(data)
except socket.error:
break
client.close()
def _unicode(s):
"""Ensure that a string is Unicode on Python 2.
"""
if isinstance(s, unicode): # noqa E891
return s
return s.decode('utf-8')
|
[] |
[] |
[
"PYWINPTY_BLOCK"
] |
[]
|
["PYWINPTY_BLOCK"]
|
python
| 1 | 0 | |
main.go
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"flag"
"fmt"
"os"
"runtime"
"strings"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/webhook"
otelv1alpha1 "github.com/open-telemetry/opentelemetry-operator/apis/v1alpha1"
"github.com/open-telemetry/opentelemetry-operator/controllers"
"github.com/open-telemetry/opentelemetry-operator/internal/config"
"github.com/open-telemetry/opentelemetry-operator/internal/version"
"github.com/open-telemetry/opentelemetry-operator/internal/webhookhandler"
"github.com/open-telemetry/opentelemetry-operator/pkg/autodetect"
collectorupgrade "github.com/open-telemetry/opentelemetry-operator/pkg/collector/upgrade"
"github.com/open-telemetry/opentelemetry-operator/pkg/instrumentation"
instrumentationupgrade "github.com/open-telemetry/opentelemetry-operator/pkg/instrumentation/upgrade"
"github.com/open-telemetry/opentelemetry-operator/pkg/sidecar"
// +kubebuilder:scaffold:imports
)
var (
scheme = k8sruntime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(otelv1alpha1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
}
func main() {
// registers any flags that underlying libraries might use
opts := zap.Options{}
opts.BindFlags(flag.CommandLine)
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
v := version.Get()
// add flags related to this operator
var metricsAddr string
var enableLeaderElection bool
var collectorImage string
var targetAllocatorImage string
var autoInstrumentationJava string
pflag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
pflag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
pflag.StringVar(&collectorImage, "collector-image", fmt.Sprintf("otel/opentelemetry-collector:%s", v.OpenTelemetryCollector), "The default OpenTelemetry collector image. This image is used when no image is specified in the CustomResource.")
pflag.StringVar(&targetAllocatorImage, "target-allocator-image", fmt.Sprintf("quay.io/opentelemetry/target-allocator:%s", v.TargetAllocator), "The default OpenTelemetry target allocator image. This image is used when no image is specified in the CustomResource.")
pflag.StringVar(&autoInstrumentationJava, "auto-instrumentation-java-image", fmt.Sprintf("ghcr.io/open-telemetry/opentelemetry-operator/autoinstrumentation-java:%s", v.JavaAutoInstrumentation), "The default OpenTelemetry Java instrumentation image. This image is used when no image is specified in the CustomResource.")
logger := zap.New(zap.UseFlagOptions(&opts))
ctrl.SetLogger(logger)
logger.Info("Starting the OpenTelemetry Operator",
"opentelemetry-operator", v.Operator,
"opentelemetry-collector", collectorImage,
"opentelemetry-targetallocator", targetAllocatorImage,
"auto-instrumentation-java", autoInstrumentationJava,
"build-date", v.BuildDate,
"go-version", v.Go,
"go-arch", runtime.GOARCH,
"go-os", runtime.GOOS,
)
restConfig := ctrl.GetConfigOrDie()
// builds the operator's configuration
ad, err := autodetect.New(restConfig)
if err != nil {
setupLog.Error(err, "failed to setup auto-detect routine")
os.Exit(1)
}
cfg := config.New(
config.WithLogger(ctrl.Log.WithName("config")),
config.WithVersion(v),
config.WithCollectorImage(collectorImage),
config.WithTargetAllocatorImage(targetAllocatorImage),
config.WithAutoDetect(ad),
)
pflag.CommandLine.AddFlagSet(cfg.FlagSet())
pflag.Parse()
watchNamespace, found := os.LookupEnv("WATCH_NAMESPACE")
if found {
setupLog.Info("watching namespace(s)", "namespaces", watchNamespace)
} else {
setupLog.Info("the env var WATCH_NAMESPACE isn't set, watching all namespaces")
}
mgrOptions := ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
LeaderElection: enableLeaderElection,
LeaderElectionID: "9f7554c3.opentelemetry.io",
Namespace: watchNamespace,
}
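// A comma-separated WATCH_NAMESPACE (for example "team-a,team-b") switches the
// manager below from a single-namespace cache to a multi-namespaced cache.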
if strings.Contains(watchNamespace, ",") {
mgrOptions.Namespace = ""
mgrOptions.NewCache = cache.MultiNamespacedCacheBuilder(strings.Split(watchNamespace, ","))
}
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), mgrOptions)
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
// run the auto-detect mechanism for the configuration
err = mgr.Add(manager.RunnableFunc(func(_ context.Context) error {
return cfg.StartAutoDetect()
}))
if err != nil {
setupLog.Error(err, "failed to start the auto-detect mechanism")
}
// adds the upgrade mechanism to be executed once the manager is ready
err = mgr.Add(manager.RunnableFunc(func(c context.Context) error {
return collectorupgrade.ManagedInstances(c, ctrl.Log.WithName("collector-upgrade"), v, mgr.GetClient())
}))
if err != nil {
setupLog.Error(err, "failed to upgrade managed instances")
}
// adds the upgrade mechanism to be executed once the manager is ready
err = mgr.Add(manager.RunnableFunc(func(c context.Context) error {
u := &instrumentationupgrade.InstrumentationUpgrade{
Logger: ctrl.Log.WithName("instrumentation-upgrade"),
DefaultAutoInstrJava: autoInstrumentationJava,
Client: mgr.GetClient(),
}
return u.ManagedInstances(c)
}))
if err != nil {
setupLog.Error(err, "failed to upgrade managed instances")
}
if err = controllers.NewReconciler(controllers.Params{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("OpenTelemetryCollector"),
Scheme: mgr.GetScheme(),
Config: cfg,
Recorder: mgr.GetEventRecorderFor("opentelemetry-operator"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "OpenTelemetryCollector")
os.Exit(1)
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err = (&otelv1alpha1.OpenTelemetryCollector{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "OpenTelemetryCollector")
os.Exit(1)
}
if err = (&otelv1alpha1.Instrumentation{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{otelv1alpha1.AnnotationDefaultAutoInstrumentationJava: autoInstrumentationJava},
},
}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Instrumentation")
os.Exit(1)
}
mgr.GetWebhookServer().Register("/mutate-v1-pod", &webhook.Admission{
Handler: webhookhandler.NewWebhookHandler(cfg, ctrl.Log.WithName("pod-webhook"), mgr.GetClient(),
[]webhookhandler.PodMutator{
sidecar.NewMutator(logger, cfg, mgr.GetClient()),
instrumentation.NewMutator(logger, mgr.GetClient()),
}),
})
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
|
[
"\"ENABLE_WEBHOOKS\""
] |
[] |
[
"ENABLE_WEBHOOKS"
] |
[]
|
["ENABLE_WEBHOOKS"]
|
go
| 1 | 0 | |
golang/revdial/vendor/github.com/golang/build/vcs-test/vcweb/main.go
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"crypto/tls"
"flag"
"fmt"
"html"
"log"
"net"
"net/http"
"os"
"path/filepath"
"regexp"
"sort"
"strings"
"text/tabwriter"
"time"
"github.com/coreos/go-systemd/activation"
"github.com/coreos/go-systemd/daemon"
"golang.org/x/build/autocertcache"
"golang.org/x/crypto/acme"
"golang.org/x/crypto/acme/autocert"
)
var (
dir = flag.String("d", "/tmp/vcweb", "directory holding vcweb data")
staging = flag.Bool("staging", false, "use staging letsencrypt server")
)
var buildInfo string
func usage() {
fmt.Fprintf(os.Stderr, "usage: vcsweb [-d dir] [-staging]\n")
os.Exit(2)
}
var isLoadDir = map[string]bool{
"go": true,
"git": true,
"hg": true,
"svn": true,
"fossil": true,
"bzr": true,
}
func main() {
flag.Usage = usage
flag.Parse()
if flag.NArg() != 0 {
usage()
}
if err := os.MkdirAll(*dir, 0777); err != nil {
log.Fatal(err)
}
http.Handle("/go/", http.StripPrefix("/go/", http.FileServer(http.Dir(filepath.Join(*dir, "go")))))
http.Handle("/git/", gitHandler())
http.Handle("/hg/", hgHandler())
http.Handle("/svn/", svnHandler())
http.Handle("/fossil/", fossilHandler())
http.Handle("/bzr/", bzrHandler())
handler := logger(http.HandlerFunc(loadAndHandle))
// If running under systemd, listen on 80 and 443 and serve TLS.
if listeners, _ := activation.Listeners(true); len(listeners) == 2 {
// We want listeners[0] to be port 80 and listeners[1] to be port 443.
// There's no guaranteed order of the listeners!
// Sometimes we get 80, 443; other times we get 443, 80.
names := strings.Split(os.Getenv("LISTEN_FDNAMES"), ":")
if strings.Contains(names[0], "https") {
listeners[0], listeners[1] = listeners[1], listeners[0]
}
go func() {
log.Fatal(http.Serve(listeners[0], handler))
}()
dir := acme.LetsEncryptURL
if *staging {
dir = "https://acme-staging.api.letsencrypt.org/directory"
}
m := autocert.Manager{
Client: &acme.Client{DirectoryURL: dir},
Cache: autocertcache.NewGoogleCloudStorageCache(client, "vcs-test-autocert"),
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist("vcs-test.golang.org"),
}
mRSA := autocert.Manager{
Client: &acme.Client{DirectoryURL: dir},
Cache: autocertcache.NewGoogleCloudStorageCache(client, "vcs-test-autocert-rsa"),
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist("vcs-test.golang.org"),
ForceRSA: true,
}
s := &http.Server{
Addr: ":https",
Handler: handler,
TLSConfig: &tls.Config{
MinVersion: tls.VersionSSL30,
GetCertificate: fallbackSNI(mRSA.GetCertificate, m.GetCertificate, "vcs-test.golang.org"),
},
}
dt, err := daemon.SdWatchdogEnabled(true)
if err != nil {
log.Fatal(err)
}
daemon.SdNotify(false, "READY=1")
go func() {
for range time.NewTicker(dt / 2).C {
daemon.SdNotify(false, "WATCHDOG=1")
}
}()
log.Fatal(s.ServeTLS(listeners[1], "", ""))
}
// Local development on :8088.
l, err := net.Listen("tcp", "127.0.0.1:8088")
if err != nil {
log.Fatal(err)
}
log.Fatal(http.Serve(l, handler))
}
var nameRE = regexp.MustCompile(`^[a-zA-Z0-9_\-]+$`)
func loadAndHandle(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/tls" {
handleTLS(w, r)
return
}
addTLSLog(w, r)
if r.URL.Path == "/" {
overview(w, r)
return
}
elem := strings.Split(r.URL.Path, "/")
if len(elem) >= 3 && elem[0] == "" && isLoadDir[elem[1]] && nameRE.MatchString(elem[2]) {
loadFS(elem[1], elem[2], r.URL.Query().Get("vcweb-force-reload") == "1" || r.URL.Query().Get("go-get") == "1")
}
http.DefaultServeMux.ServeHTTP(w, r)
}
func overview(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "<html>\n")
fmt.Fprintf(w, "<title>vcs-test.golang.org</title>\n<pre>\n")
fmt.Fprintf(w, "<b>vcs-test.golang.org</b>\n\n")
fmt.Fprintf(w, "This server serves various version control repos for testing the go command.\n\n")
fmt.Fprintf(w, "Date: %s\n", time.Now().Format(time.UnixDate))
fmt.Fprintf(w, "Build: %s\n\n", html.EscapeString(buildInfo))
fmt.Fprintf(w, "<b>cache</b>\n")
var all []string
cache.Lock()
for name, entry := range cache.entry {
all = append(all, fmt.Sprintf("%s\t%x\t%s\n", name, entry.md5, entry.expire.Format(time.UnixDate)))
}
cache.Unlock()
sort.Strings(all)
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
for _, line := range all {
tw.Write([]byte(line))
}
tw.Flush()
}
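// fallbackSNI wraps the autocert certificate getters: it substitutes the given
// host when a client sends no SNI, uses the non-RSA manager for clients that
// offer TLS 1.2 or newer, and otherwise falls back to the RSA-only manager.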
func fallbackSNI(getCertRSA, getCert func(*tls.ClientHelloInfo) (*tls.Certificate, error), host string) func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
return func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
saveHello(hello)
if hello.ServerName == "" {
h := *hello
hello = &h
hello.ServerName = host
}
var cert *tls.Certificate
var err error
if len(hello.SupportedVersions) > 0 && hello.SupportedVersions[0] >= tls.VersionTLS12 {
cert, err = getCert(hello)
if strings.HasSuffix(hello.ServerName, ".acme.invalid") && err != nil {
cert, err = getCertRSA(hello)
}
} else {
cert, err = getCertRSA(hello)
}
if err != nil {
fmt.Fprintf(os.Stderr, "getCert: %v\n", err)
}
return cert, err
}
}
type loggingResponseWriter struct {
code int
size int64
http.ResponseWriter
}
func (l *loggingResponseWriter) WriteHeader(code int) {
l.code = code
l.ResponseWriter.WriteHeader(code)
}
func (l *loggingResponseWriter) Write(data []byte) (int, error) {
n, err := l.ResponseWriter.Write(data)
l.size += int64(n)
return n, err
}
func dashOr(s string) string {
if s == "" {
return "-"
}
return s
}
func logger(h http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
l := &loggingResponseWriter{
code: 200,
ResponseWriter: w,
}
startTime := time.Now().Format("02/Jan/2006:15:04:05 -0700")
defer func() {
err := recover()
if err != nil {
l.code = 999
}
fmt.Fprintf(os.Stderr, "%s - - [%s] %q %03d %d %q %q %q\n",
dashOr(r.RemoteAddr),
startTime,
r.Method+" "+r.URL.String()+" "+r.Proto,
l.code,
l.size,
r.Header.Get("Referer"),
r.Header.Get("User-Agent"),
r.Host)
if err != nil {
panic(err)
}
}()
h.ServeHTTP(l, r)
}
}
|
[
"\"LISTEN_FDNAMES\""
] |
[] |
[
"LISTEN_FDNAMES"
] |
[]
|
["LISTEN_FDNAMES"]
|
go
| 1 | 0 | |
modules/swagger-generator/src/main/java/io/swagger/generator/Bootstrap.java
|
/**
* Copyright 2016 SmartBear Software
* <p>
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package io.swagger.generator;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.net.URISyntaxException;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
public class Bootstrap extends HttpServlet {
private static final long serialVersionUID = 1400930071893332856L;
@Override
public void init(ServletConfig config) throws ServletException {
DynamicSwaggerConfig bc = new DynamicSwaggerConfig();
String hostString = System.getenv("GENERATOR_HOST");
if (!StringUtils.isBlank(hostString)) {
try {
URI hostURI = new URI(hostString);
String scheme = hostURI.getScheme();
if (scheme != null) {
bc.setSchemes(new String[] { scheme });
}
String authority = hostURI.getAuthority();
if (authority != null) {
// In Swagger host refers to host _and_ port, a.k.a. the URI authority
bc.setHost(authority);
}
bc.setBasePath(hostURI.getPath() + "/api");
} catch(URISyntaxException e) {
System.out.println("Could not parse configured GENERATOR_HOST as URL: " + e.getMessage());
}
} else {
bc.setBasePath("/api");
}
bc.setTitle("Swagger Generator");
bc.setDescription("This is an online swagger codegen server. You can find out more "
+ "at https://github.com/swagger-api/swagger-codegen or on [irc.freenode.net, #swagger](http://swagger.io/irc/).");
bc.setTermsOfServiceUrl("http://swagger.io/terms/");
bc.setContact("[email protected]");
bc.setLicense("Apache 2.0");
InputStream stream = getClass().getResourceAsStream("/version.prop");
if (stream == null) {
bc.setVersion("0.0.0");
} else {
try {
bc.setVersion(IOUtils.toString(stream, "UTF-8"));
stream.close();
} catch (IOException e) {
bc.setVersion("0.0.0");
}
}
bc.setLicenseUrl("http://www.apache.org/licenses/LICENSE-2.0.html");
bc.setResourcePackage("io.swagger.generator.resource");
bc.setScan(true);
}
}
|
[
"\"GENERATOR_HOST\""
] |
[] |
[
"GENERATOR_HOST"
] |
[]
|
["GENERATOR_HOST"]
|
java
| 1 | 0 | |
pkg/scalers/azure_eventhub_scaler_test.go
|
package scalers
import (
"context"
"fmt"
"net/http"
"net/url"
"os"
"testing"
"github.com/kedacore/keda/v2/pkg/scalers/azure"
eventhub "github.com/Azure/azure-event-hubs-go/v3"
"github.com/Azure/azure-storage-blob-go/azblob"
)
const (
eventHubConsumerGroup = "testEventHubConsumerGroup"
eventHubConnectionSetting = "testEventHubConnectionSetting"
storageConnectionSetting = "testStorageConnectionSetting"
testEventHubNamespace = "kedatesteventhub"
testEventHubName = "eventhub1"
checkpointFormat = "{\"SequenceNumber\":%d,\"PartitionId\":\"%s\"}"
testContainerName = "azure-webjobs-eventhub"
)
type parseEventHubMetadataTestData struct {
metadata map[string]string
isError bool
}
type eventHubMetricIdentifier struct {
metadataTestData *parseEventHubMetadataTestData
scalerIndex int
name string
}
var sampleEventHubResolvedEnv = map[string]string{eventHubConnectionSetting: "none", storageConnectionSetting: "none"}
var parseEventHubMetadataDataset = []parseEventHubMetadataTestData{
{map[string]string{}, true},
// properly formed event hub metadata
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting, "unprocessedEventThreshold": "15"}, false},
// missing event hub connection setting
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15"}, true},
// missing storage connection setting
{map[string]string{"consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting, "unprocessedEventThreshold": "15"}, true},
// missing event hub consumer group - should replace with default
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "connectionFromEnv": eventHubConnectionSetting, "unprocessedEventThreshold": "15"}, false},
// missing unprocessed event threshold - should replace with default
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting}, false},
// added blob container details
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting, "blobContainer": testContainerName, "checkpointStrategy": "azureFunction"}, false},
}
var parseEventHubMetadataDatasetWithPodIdentity = []parseEventHubMetadataTestData{
{map[string]string{}, true},
// Even though connection string is provided, this should fail because the eventhub Namespace is not provided explicitly when using Pod Identity
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "connectionFromEnv": eventHubConnectionSetting, "unprocessedEventThreshold": "15"}, true},
// properly formed event hub metadata with Pod Identity
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName, "eventHubNamespace": testEventHubNamespace}, false},
// missing eventHubname
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubNamespace": testEventHubNamespace}, true},
// missing eventHubNamespace
{map[string]string{"storageConnectionFromEnv": storageConnectionSetting, "consumerGroup": eventHubConsumerGroup, "unprocessedEventThreshold": "15", "eventHubName": testEventHubName}, true},
}
var eventHubMetricIdentifiers = []eventHubMetricIdentifier{
{&parseEventHubMetadataDataset[1], 0, "s0-azure-eventhub-testEventHubConsumerGroup"},
{&parseEventHubMetadataDataset[1], 1, "s1-azure-eventhub-testEventHubConsumerGroup"},
}
var testEventHubScaler = azureEventHubScaler{
metadata: &eventHubMetadata{
eventHubInfo: azure.EventHubInfo{
EventHubConnection: "none",
StorageConnection: "none",
},
},
}
func TestParseEventHubMetadata(t *testing.T) {
// Test first with valid resolved environment
for _, testData := range parseEventHubMetadataDataset {
_, err := parseAzureEventHubMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: sampleEventHubResolvedEnv, AuthParams: map[string]string{}})
if err != nil && !testData.isError {
t.Errorf("Expected success but got error: %s", err)
}
if testData.isError && err == nil {
t.Error("Expected error and got success")
}
}
for _, testData := range parseEventHubMetadataDatasetWithPodIdentity {
_, err := parseAzureEventHubMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: sampleEventHubResolvedEnv, AuthParams: map[string]string{}, PodIdentity: "Azure"})
if err != nil && !testData.isError {
t.Errorf("Expected success but got error: %s", err)
}
if testData.isError && err == nil {
t.Error("Expected error and got success")
}
}
}
func TestGetUnprocessedEventCountInPartition(t *testing.T) {
ctx := context.Background()
t.Log("This test will use the environment variable EVENTHUB_CONNECTION_STRING and STORAGE_CONNECTION_STRING if it is set.")
t.Log("If set, it will connect to the storage account and event hub to determine how many messages are in the event hub.")
t.Logf("EventHub has 1 message in partition 0 and 0 messages in partition 1")
eventHubKey := os.Getenv("AZURE_EVENTHUB_KEY")
storageConnectionString := os.Getenv("TEST_STORAGE_CONNECTION_STRING")
if eventHubKey != "" && storageConnectionString != "" {
eventHubConnectionString := fmt.Sprintf("Endpoint=sb://%s.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=%s;EntityPath=%s", testEventHubNamespace, eventHubKey, testEventHubName)
storageCredentials, endpoint, err := azure.ParseAzureStorageBlobConnection(ctx, http.DefaultClient, "none", storageConnectionString, "", "")
if err != nil {
t.Error(err)
t.FailNow()
}
t.Log("Creating event hub client...")
hubOption := eventhub.HubWithPartitionedSender("0")
client, err := eventhub.NewHubFromConnectionString(eventHubConnectionString, hubOption)
if err != nil {
t.Fatalf("Expected to create event hub client but got error: %s", err)
}
if eventHubConnectionString == "" {
t.Fatal("Event hub connection string needed for test")
}
if storageConnectionString == "" {
t.Fatal("Storage connection string needed for test")
}
// Can actually test that numbers return
testEventHubScaler.metadata.eventHubInfo.EventHubConnection = eventHubConnectionString
testEventHubScaler.metadata.eventHubInfo.StorageConnection = storageConnectionString
testEventHubScaler.client = client
testEventHubScaler.metadata.eventHubInfo.EventHubConsumerGroup = "$Default"
// Send 1 message to event hub first
t.Log("Sending message to event hub")
err = SendMessageToEventHub(client)
if err != nil {
t.Error(err)
}
// Create fake checkpoint with path azure-webjobs-eventhub/<eventhub-namespace-name>.servicebus.windows.net/<eventhub-name>/$Default
t.Log("Creating container..")
ctx, err := CreateNewCheckpointInStorage(endpoint, storageCredentials, client)
if err != nil {
t.Errorf("err creating container: %s", err)
}
partitionInfo0, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
if err != nil {
t.Errorf("unable to get partitionRuntimeInfo for partition 0: %s", err)
}
partitionInfo1, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
if err != nil {
t.Errorf("unable to get partitionRuntimeInfo for partition 1: %s", err)
}
unprocessedEventCountInPartition0, _, err0 := testEventHubScaler.GetUnprocessedEventCountInPartition(ctx, partitionInfo0)
unprocessedEventCountInPartition1, _, err1 := testEventHubScaler.GetUnprocessedEventCountInPartition(ctx, partitionInfo1)
if err0 != nil {
t.Errorf("Expected success but got error: %s", err0)
}
if err1 != nil {
t.Errorf("Expected success but got error: %s", err1)
}
if unprocessedEventCountInPartition0 != 1 {
t.Errorf("Expected 1 message in partition 0, got %d", unprocessedEventCountInPartition0)
}
if unprocessedEventCountInPartition1 != 0 {
t.Errorf("Expected 0 messages in partition 1, got %d", unprocessedEventCountInPartition1)
}
// Delete container - this will also delete checkpoint
t.Log("Deleting container...")
err = DeleteContainerInStorage(ctx, endpoint, storageCredentials)
if err != nil {
t.Error(err)
}
}
}
func TestGetUnprocessedEventCountIfNoCheckpointExists(t *testing.T) {
t.Log("This test will use the environment variable EVENTHUB_CONNECTION_STRING and STORAGE_CONNECTION_STRING if it is set.")
t.Log("If set, it will connect to the storage account and event hub to determine how many messages are in the event hub.")
t.Logf("EventHub has 1 message in partition 0 and 0 messages in partition 1")
eventHubKey := os.Getenv("AZURE_EVENTHUB_KEY")
storageConnectionString := os.Getenv("TEST_STORAGE_CONNECTION_STRING")
if eventHubKey != "" && storageConnectionString != "" {
eventHubConnectionString := fmt.Sprintf("Endpoint=sb://%s.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=%s;EntityPath=%s", testEventHubNamespace, eventHubKey, testEventHubName)
t.Log("Creating event hub client...")
hubOption := eventhub.HubWithPartitionedSender("0")
client, err := eventhub.NewHubFromConnectionString(eventHubConnectionString, hubOption)
if err != nil {
t.Errorf("Expected to create event hub client but got error: %s", err)
}
if eventHubConnectionString == "" {
t.Fatal("Event hub connection string needed for test")
}
if storageConnectionString == "" {
t.Fatal("Storage connection string needed for test")
}
// Can actually test that numbers return
testEventHubScaler.metadata.eventHubInfo.EventHubConnection = eventHubConnectionString
testEventHubScaler.metadata.eventHubInfo.StorageConnection = storageConnectionString
testEventHubScaler.client = client
testEventHubScaler.metadata.eventHubInfo.EventHubConsumerGroup = "$Default"
// Send 1 message to event hub first
t.Log("Sending message to event hub")
err = SendMessageToEventHub(client)
if err != nil {
t.Error(err)
}
ctx := context.Background()
partitionInfo0, err := testEventHubScaler.client.GetPartitionInformation(ctx, "0")
if err != nil {
t.Errorf("unable to get partitionRuntimeInfo for partition 0: %s", err)
}
partitionInfo1, err := testEventHubScaler.client.GetPartitionInformation(ctx, "1")
if err != nil {
t.Errorf("unable to get partitionRuntimeInfo for partition 1: %s", err)
}
unprocessedEventCountInPartition0, _, err0 := testEventHubScaler.GetUnprocessedEventCountInPartition(ctx, partitionInfo0)
unprocessedEventCountInPartition1, _, err1 := testEventHubScaler.GetUnprocessedEventCountInPartition(ctx, partitionInfo1)
if err0 != nil {
t.Errorf("Expected success but got error: %s", err0)
}
if err1 != nil {
t.Errorf("Expected success but got error: %s", err1)
}
if unprocessedEventCountInPartition0 != 1 {
t.Errorf("Expected 1 message in partition 0, got %d", unprocessedEventCountInPartition0)
}
if unprocessedEventCountInPartition1 != 0 {
t.Errorf("Expected 0 messages in partition 1, got %d", unprocessedEventCountInPartition1)
}
}
}
func TestGetUnprocessedEventCountWithoutCheckpointReturning1Message(t *testing.T) {
// After the first message the lastsequencenumber init to 0
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
LastSequenceNumber: 0,
BeginningSequenceNumber: 0,
}
unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
if unprocessedEventCountInPartition0 != 1 {
t.Errorf("Expected 1 messages in partition 0, got %d", unprocessedEventCountInPartition0)
}
}
func TestGetUnprocessedEventCountWithoutCheckpointReturning0Message(t *testing.T) {
// An empty partition starts with an equal value on last-/beginning-sequencenumber other than 0
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
LastSequenceNumber: 255,
BeginningSequenceNumber: 255,
}
unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
if unprocessedEventCountInPartition0 != 0 {
t.Errorf("Expected 0 messages in partition 0, got %d", unprocessedEventCountInPartition0)
}
}
func TestGetUnprocessedEventCountWithoutCheckpointReturning2Messages(t *testing.T) {
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
LastSequenceNumber: 1,
BeginningSequenceNumber: 0,
}
unprocessedEventCountInPartition0 := GetUnprocessedEventCountWithoutCheckpoint(&partitionInfo)
if unprocessedEventCountInPartition0 != 2 {
t.Errorf("Expected 0 messages in partition 0, got %d", unprocessedEventCountInPartition0)
}
}
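// The two tests below exercise getTotalLagRelatedToPartitionAmount; the
// expected values imply that the reported lag is capped at
// partitionCount * threshold (2*10 caps 100 at 20, while 20*10 leaves 100
// unchanged).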
func TestGetATotalLagOf20For2PartitionsOn100UnprocessedEvents(t *testing.T) {
lag := getTotalLagRelatedToPartitionAmount(100, 2, 10)
if lag != 20 {
t.Errorf("Expected a lag of 20 for 2 partitions, got %d", lag)
}
}
func TestGetATotalLagOf100For20PartitionsOn100UnprocessedEvents(t *testing.T) {
lag := getTotalLagRelatedToPartitionAmount(100, 20, 10)
if lag != 100 {
t.Errorf("Expected a lag of 100 for 20 partitions, got %d", lag)
}
}
func CreateNewCheckpointInStorage(endpoint *url.URL, credential azblob.Credential, client *eventhub.Hub) (context.Context, error) {
urlPath := fmt.Sprintf("%s.servicebus.windows.net/%s/$Default/", testEventHubNamespace, testEventHubName)
// Create container
ctx := context.Background()
path, _ := url.Parse(testContainerName)
url := endpoint.ResolveReference(path)
containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
if err != nil {
return ctx, fmt.Errorf("failed to create container: %s", err)
}
// Create directory checkpoints will be in
err = os.MkdirAll(urlPath, 0777)
if err != nil {
return ctx, fmt.Errorf("Unable to create directory: %s", err)
}
defer os.RemoveAll(urlPath)
file, err := os.Create(fmt.Sprintf("%s/file", urlPath))
if err != nil {
return ctx, fmt.Errorf("Unable to create folder: %s", err)
}
defer file.Close()
blobFolderURL := containerURL.NewBlockBlobURL(urlPath)
// Upload file
_, err = azblob.UploadFileToBlockBlob(ctx, file, blobFolderURL, azblob.UploadToBlockBlobOptions{
BlockSize: 4 * 1024 * 1024,
Parallelism: 16})
if err != nil {
return ctx, fmt.Errorf("Err uploading file to blob: %s", err)
}
// Make checkpoint blob files
if err := CreatePartitionFile(ctx, urlPath, "0", containerURL, client); err != nil {
return ctx, fmt.Errorf("failed to create partitionID 0 file: %s", err)
}
if err := CreatePartitionFile(ctx, urlPath, "1", containerURL, client); err != nil {
return ctx, fmt.Errorf("failed to create partitionID 1 file: %s", err)
}
return ctx, nil
}
func CreatePartitionFile(ctx context.Context, urlPathToPartition string, partitionID string, containerURL azblob.ContainerURL, client *eventhub.Hub) error {
// Create folder structure
filePath := urlPathToPartition + partitionID
partitionInfo, err := client.GetPartitionInformation(ctx, partitionID)
if err != nil {
return fmt.Errorf("unable to get partition info: %s", err)
}
f, err := os.Create(partitionID)
if err != nil {
return fmt.Errorf("unable to create file: %s", err)
}
if partitionID == "0" {
_, err = f.WriteString(fmt.Sprintf(checkpointFormat, partitionInfo.LastSequenceNumber-1, partitionID))
if err != nil {
return fmt.Errorf("unable to write to file: %s", err)
}
} else {
_, err = f.WriteString(fmt.Sprintf(checkpointFormat, partitionInfo.LastSequenceNumber, partitionID))
if err != nil {
return fmt.Errorf("unable to write to file: %s", err)
}
}
// Write checkpoints to file
file, err := os.Open(partitionID)
if err != nil {
return fmt.Errorf("Unable to create file: %s", err)
}
defer file.Close()
blobFileURL := containerURL.NewBlockBlobURL(filePath)
// Upload checkpoint file
_, err = azblob.UploadFileToBlockBlob(ctx, file, blobFileURL, azblob.UploadToBlockBlobOptions{
BlockSize: 4 * 1024 * 1024,
Parallelism: 16})
if err != nil {
return fmt.Errorf("Err uploading file to blob: %s", err)
}
return nil
}
func SendMessageToEventHub(client *eventhub.Hub) error {
ctx := context.Background()
err := client.Send(ctx, eventhub.NewEventFromString("1"))
if err != nil {
return fmt.Errorf("Error sending msg: %s", err)
}
return nil
}
func DeleteContainerInStorage(ctx context.Context, endpoint *url.URL, credential azblob.Credential) error {
path, _ := url.Parse(testContainerName)
url := endpoint.ResolveReference(path)
containerURL := azblob.NewContainerURL(*url, azblob.NewPipeline(credential, azblob.PipelineOptions{}))
_, err := containerURL.Delete(ctx, azblob.ContainerAccessConditions{
ModifiedAccessConditions: azblob.ModifiedAccessConditions{},
})
if err != nil {
return fmt.Errorf("failed to delete container in blob storage: %s", err)
}
return nil
}
func TestEventHubGetMetricSpecForScaling(t *testing.T) {
for _, testData := range eventHubMetricIdentifiers {
meta, err := parseAzureEventHubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: sampleEventHubResolvedEnv, AuthParams: map[string]string{}, ScalerIndex: testData.scalerIndex})
if err != nil {
t.Fatal("Could not parse metadata:", err)
}
mockEventHubScaler := azureEventHubScaler{
metadata: meta,
client: nil,
httpClient: http.DefaultClient,
}
metricSpec := mockEventHubScaler.GetMetricSpecForScaling(context.Background())
metricName := metricSpec[0].External.Metric.Name
if metricName != testData.name {
t.Error("Wrong External metric source name:", metricName)
}
}
}
|
[
"\"AZURE_EVENTHUB_KEY\"",
"\"TEST_STORAGE_CONNECTION_STRING\"",
"\"AZURE_EVENTHUB_KEY\"",
"\"TEST_STORAGE_CONNECTION_STRING\""
] |
[] |
[
"AZURE_EVENTHUB_KEY",
"TEST_STORAGE_CONNECTION_STRING"
] |
[]
|
["AZURE_EVENTHUB_KEY", "TEST_STORAGE_CONNECTION_STRING"]
|
go
| 2 | 0 | |
internal/daemon/internal/deps/config.go
|
package deps
import (
"os"
"github.com/gritcli/grit/config"
"github.com/gritcli/grit/driver/registry"
"github.com/gritcli/grit/driver/sourcedriver/githubsource"
"github.com/gritcli/grit/driver/vcsdriver/gitvcs"
)
func init() {
Container.Provide(func() *registry.Registry {
r := &registry.Registry{}
r.RegisterSourceDriver("github", githubsource.Registration)
r.RegisterVCSDriver("git", gitvcs.Registration)
return r
})
Container.Provide(func(r *registry.Registry) (config.Config, error) {
return config.Load(
os.Getenv("GRIT_CONFIG_DIR"),
r,
)
})
}
|
[
"\"GRIT_CONFIG_DIR\""
] |
[] |
[
"GRIT_CONFIG_DIR"
] |
[]
|
["GRIT_CONFIG_DIR"]
|
go
| 1 | 0 | |
sdks/go/sdk.go
|
// Copyright 2017 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package sdk is the Go game server sdk.
package sdk
import (
"context"
"fmt"
"io"
"os"
"time"
"github.com/pkg/errors"
"google.golang.org/grpc"
"agones.dev/agones/pkg/sdk"
)
// GameServerCallback is a function definition to be called
// when a GameServer CRD has been changed.
type GameServerCallback func(gs *sdk.GameServer)
// SDK is an instance of the Agones SDK.
type SDK struct {
client sdk.SDKClient
ctx context.Context
health sdk.SDK_HealthClient
alpha *Alpha
}
// NewSDK starts a new SDK instance, and connects to localhost
// on port "AGONES_SDK_GRPC_PORT" which by default is 9357.
// Blocks until connection and handshake are made.
// Times out after 30 seconds.
func NewSDK() (*SDK, error) {
p := os.Getenv("AGONES_SDK_GRPC_PORT")
if p == "" {
p = "9357"
}
addr := fmt.Sprintf("localhost:%s", p)
s := &SDK{
ctx: context.Background(),
}
// Block until the connection is made, timing out after 30 seconds.
ctx, cancel := context.WithTimeout(s.ctx, 30*time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx, addr, grpc.WithBlock(), grpc.WithInsecure())
if err != nil {
return s, errors.Wrapf(err, "could not connect to %s", addr)
}
s.client = sdk.NewSDKClient(conn)
s.health, err = s.client.Health(s.ctx)
s.alpha = newAlpha(conn)
return s, errors.Wrap(err, "could not set up health check")
}
// Alpha returns the Alpha SDK.
func (s *SDK) Alpha() *Alpha {
return s.alpha
}
// Ready marks the Game Server as ready to receive connections.
func (s *SDK) Ready() error {
_, err := s.client.Ready(s.ctx, &sdk.Empty{})
return errors.Wrap(err, "could not send Ready message")
}
// Allocate marks this GameServer as Allocated.
func (s *SDK) Allocate() error {
_, err := s.client.Allocate(s.ctx, &sdk.Empty{})
return errors.Wrap(err, "could not mark self as Allocated")
}
// Shutdown marks the Game Server as ready to shut down.
func (s *SDK) Shutdown() error {
_, err := s.client.Shutdown(s.ctx, &sdk.Empty{})
return errors.Wrapf(err, "could not send Shutdown message")
}
// Reserve marks the Game Server as Reserved for a given duration, at which point
// it will return the GameServer to a Ready state.
// Note that the smallest unit available in the time.Duration argument is a second.
func (s *SDK) Reserve(d time.Duration) error {
_, err := s.client.Reserve(s.ctx, &sdk.Duration{Seconds: int64(d.Seconds())})
return errors.Wrap(err, "could not send Reserve message")
}
// Health sends a ping to the sidecar health check to indicate that this Game Server is healthy.
func (s *SDK) Health() error {
return errors.Wrap(s.health.Send(&sdk.Empty{}), "could not send Health ping")
}
// SetLabel sets a metadata label on the `GameServer` with the prefix "agones.dev/sdk-".
func (s *SDK) SetLabel(key, value string) error {
kv := &sdk.KeyValue{Key: key, Value: value}
_, err := s.client.SetLabel(s.ctx, kv)
return errors.Wrap(err, "could not set label")
}
// SetAnnotation sets a metadata annotation on the `GameServer` with the prefix "agones.dev/sdk-".
func (s *SDK) SetAnnotation(key, value string) error {
kv := &sdk.KeyValue{Key: key, Value: value}
_, err := s.client.SetAnnotation(s.ctx, kv)
return errors.Wrap(err, "could not set annotation")
}
// GameServer retrieves the GameServer details.
func (s *SDK) GameServer() (*sdk.GameServer, error) {
gs, err := s.client.GetGameServer(s.ctx, &sdk.Empty{})
return gs, errors.Wrap(err, "could not retrieve gameserver")
}
// WatchGameServer asynchronously calls the given GameServerCallback with the current GameServer
// configuration when the backing GameServer configuration is updated.
// This function can be called multiple times to add more than one GameServerCallback.
func (s *SDK) WatchGameServer(f GameServerCallback) error {
stream, err := s.client.WatchGameServer(s.ctx, &sdk.Empty{})
if err != nil {
return errors.Wrap(err, "could not watch gameserver")
}
go func() {
for {
var gs *sdk.GameServer
gs, err = stream.Recv()
if err != nil {
if err == io.EOF {
_, _ = fmt.Fprintln(os.Stderr, "gameserver event stream EOF received")
return
}
_, _ = fmt.Fprintf(os.Stderr, "error watching GameServer: %s\n", err.Error())
// This is to wait for the reconnection, and not peg the CPU at 100%.
time.Sleep(time.Second)
continue
}
f(gs)
}
}()
return nil
}
|
[
"\"AGONES_SDK_GRPC_PORT\""
] |
[] |
[
"AGONES_SDK_GRPC_PORT"
] |
[]
|
["AGONES_SDK_GRPC_PORT"]
|
go
| 1 | 0 | |
train.py
|
# -*- coding: utf-8 -*-
import numpy as np
np.random.seed(111)
import argparse
import os
import json
from yolo.frontend import create_yolo, get_object_labels
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
argparser = argparse.ArgumentParser(
description='Train and validate YOLO_v2 model on any dataset')
argparser.add_argument(
'-c',
'--conf',
default="configs/from_scratch.json",
help='path to configuration file')
def setup_training(config_file):
"""make directory to save weights & its configuration """
import shutil
with open(config_file) as config_buffer:
config = json.loads(config_buffer.read())
dirname = config['train']['saved_folder']
if os.path.isdir(dirname):
print("{} is already exists. Weight file in directory will be overwritten".format(dirname))
else:
print("{} is created.".format(dirname, dirname))
os.makedirs(dirname)
print("Weight file and Config file will be saved in \"{}\"".format(dirname))
shutil.copyfile(config_file, os.path.join(dirname, "config.json"))
return config, os.path.join(dirname, "weights.h5")
if __name__ == '__main__':
args = argparser.parse_args()
config, weight_file = setup_training(args.conf)
if config['train']['is_only_detect']:
labels = ["object"]
else:
if config['model']['labels']:
labels = config['model']['labels']
else:
labels = get_object_labels(config['train']['train_annot_folder'])
print(labels)
# 1. Construct the model
yolo = create_yolo(config['model']['architecture'],
labels,
config['model']['input_size'],
config['model']['anchors'],
config['model']['coord_scale'],
config['model']['class_scale'],
config['model']['object_scale'],
config['model']['no_object_scale'])
# 2. Load the pretrained weights (if any)
yolo.load_weights(config['pretrained']['full'], by_name=True)
# 3. Run the actual training
yolo.train(config['train']['train_image_folder'],
config['train']['train_annot_folder'],
config['train']['actual_epoch'],
weight_file,
config["train"]["batch_size"],
config["train"]["jitter"],
config['train']['learning_rate'],
config['train']['train_times'],
config['train']['valid_times'],
config['train']['valid_image_folder'],
config['train']['valid_annot_folder'],
config['train']['first_trainable_layer'],
config['train']['is_only_detect'])
# loss: 2.1691, train batch jitter=False
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
jumpcloud/resource_user_test.go
|
package jumpcloud
import (
"fmt"
"os"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccUser(t *testing.T) {
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlpha)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: nil,
Steps: []resource.TestStep{
{
// This test simply applies a user with the config from testAccUser
// and checks for the correct username and email in the state
// The resource is destroyed afterwards via the framework
Config: testAccUser(rName),
Check: resource.ComposeAggregateTestCheckFunc(
resource.TestCheckResourceAttr("jumpcloud_user.test_user", "username", rName),
resource.TestCheckResourceAttr("jumpcloud_user.test_user", "email", rName+"@testorg.com"),
),
},
},
})
}
// testAccPreCheck validates the necessary test API keys exist
// in the testing environment
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("JUMPCLOUD_API_KEY"); v == "" {
t.Fatal("JUMPCLOUD_API_KEY= must be set for the acceptance tests")
}
}
func testAccUser(name string) string {
return fmt.Sprintf(`
resource "jumpcloud_user" "test_user" {
username = "%s"
email = "%[email protected]"
firstname = "Firstname"
lastname = "Lastname"
enable_mfa = true
}`, name, name,
)
}
|
[
"\"JUMPCLOUD_API_KEY\""
] |
[] |
[
"JUMPCLOUD_API_KEY"
] |
[]
|
["JUMPCLOUD_API_KEY"]
|
go
| 1 | 0 | |
cbinterface/modules/helpers.py
|
import os
import datetime
import logging
from configparser import ConfigParser
from dateutil import tz
from dateutil.zoneinfo import get_zonefile_instance
## -- Global variables -- ##
# Configuration
HOME_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CONFIG_PATH = os.path.join(HOME_DIR, 'etc', 'config.ini')
# load the default config
CONFIG = ConfigParser()
CONFIG.read(CONFIG_PATH)
# get the default configuration file paths (allows for users to easily override settings)
# and re-load the config to account for all config items
CONFIG.read(CONFIG['DEFAULT']['config_path_list'].split(','))
DEFAULT_TIMEBASE = tz.gettz('GMT')
CONFIGURED_TIMEBASE = None
try:
CONFIGURED_TIMEBASE = CONFIG['DEFAULT']['time_zone']
zonenames = list(get_zonefile_instance().zones)
if CONFIGURED_TIMEBASE not in zonenames:
logging.error("'{}' not a recognized timezone. Using default timezone.".format(CONFIGURED_TIMEBASE))
CONFIGURED_TIMEBASE = DEFAULT_TIMEBASE
else:
CONFIGURED_TIMEBASE = tz.gettz(CONFIGURED_TIMEBASE)
except Exception as e:
logging.error("Exception occured setting CONFIGURED_TIMEZONE: {}".format(e))
CONFIGURED_TIMEBASE = DEFAULT_TIMEBASE
## -- Global helper functions -- ##
def as_configured_timezone(timestamp):
"""Convert timestamp to the configured default timezone.
"""
# the timestamps from CbR are not timezone aware, but they are GMT.
_time = timestamp.replace(tzinfo=DEFAULT_TIMEBASE)
if 'CBINTERFACE_TIMEZONE' in os.environ:
env_timebase = os.environ['CBINTERFACE_TIMEZONE']
zonenames = list(get_zonefile_instance().zones)
if env_timebase not in zonenames:
logging.error("'{}' not a recognized timezone. Using default timezone.".format(env_timebase))
return _time.strftime('%Y-%m-%d %H:%M:%S.%f%z')
else:
env_timebase = tz.gettz(env_timebase)
return _time.astimezone(env_timebase).strftime('%Y-%m-%d %H:%M:%S.%f%z')
elif CONFIGURED_TIMEBASE is not DEFAULT_TIMEBASE:
return _time.astimezone(CONFIGURED_TIMEBASE).strftime('%Y-%m-%d %H:%M:%S.%f%z')
else:
return _time.strftime('%Y-%m-%d %H:%M:%S.%f%z')
## OLD Stuff, not used anymore
## -- Intel/detection helpers -- ##
def perform_splunk_search(splunk_command, debug=None):
splunkargs = shlex.split(splunk_command)
splunk_output = None
if debug:
LOGGER.debug("Searching splunk with: {}".format(splunk_command))
try:
splunk_output = subprocess.check_output(splunkargs)
return splunk_output
except subprocess.CalledProcessError as e:
# error but continue
LOGGER.error("returncode: " + str(e.returncode) + "; with output: "
+ str(e.output) + "; for cmd: " + str(e.cmd) )
return None
def parse_splunk_results(results, debug=None):
result_dict = json.loads(results.decode("utf-8"))
result_list = result_dict['result']
hits = {'feed_hits': [], 'watchlist_hits': []}
for result in result_list:
full_result = json.loads(result['_raw'])
# CB redundant logging causes duplicates on feed hits sometimes
if 'feed' in result['notification_type']:
query_string = None
feed_name = None
feed_link = None
try:
query_string = full_result['ioc_query_string']
except KeyError as err:
if debug:
LOGGER.debug("KeyError: " + str(err))
try:
feed_name = full_result['feed_name']
except KeyError as err:
if debug:
LOGGER.debug("KeyError: " + str(err))
try:
feed_link = full_result['docs'][0]['alliance_link_'+feed_name]
except KeyError as err:
if debug:
LOGGER.debug("KeyError: " + str(err))
if len(full_result['docs']) > 1:
LOGGER.warn("result document list greater than one")
if feed_name is not None and feed_link is not None:
hits['feed_hits'].append((feed_name, feed_link, query_string))
elif 'watchlist' in result['notification_type']:
# for some reason, some watchlists log both watchlist.hit.process
# and watchlist.storage.hit.process
if full_result['watchlist_name'] not in hits['watchlist_hits']:
hits['watchlist_hits'].append(full_result['watchlist_name'])
else:
LOGGER.error("Problem parsing splunk results.")
return hits
def splunk_search_worker(search_queue, result, debug=None):
while not search_queue.empty():
search_data = search_queue.get()
proc_guid = search_data[0]
search_text = search_data[1]
if debug:
LOGGER.debug("Kicking intel search for {} ...".format(proc_guid))
result[proc_guid] = perform_splunk_search(search_text, debug)
search_queue.task_done()
return True
def build_vx_queries(query_info):
# create the list of queries that we will make with the CB api
if not isinstance(query_info, ProcessList):
print("[ERROR] build_queries: input type not supported")
sys.exit(1)
queries = []
parent_name = None # keep track
for process in query_info:
pid_has_query = False
for child_proc in process.children:
queries.append("process_name:{} process_pid:{} childproc_name:{}".format(process.proc_name,
process.pid, child_proc.proc_name))
pid_has_query = True
break
if pid_has_query is False:
if parent_name: # not None
queries.append("process_name:{} process_pid:{} parent_name:{}".format(process.proc_name,
process.pid, parent_name))
else: # this query is not specific enough
queries.append("process_name:{} process_pid:{}".format(process.proc_name,
process.pid))
parent_name = process.proc_name
return queries
def get_vxstream_cb_guids(cb, vx_process_list):
ProcessQueryResults = []
for query in build_vx_queries(vx_process_list):
ProcessQueryResults = cb.select(Process).where(query).group_by('id')
for proc in ProcessQueryResults:
for vxp in vx_process_list:
if int(proc.process_pid) == int(vxp.pid):
vxp.id = proc.id
break
return
def parse_vxstream_report(cb, report_path):
json_report = None
try:
with open(report_path, 'r') as fp:
json_report = json.load(fp)
except Exception as e:
print("unable to load json from {}: {}".format(args.report_path, str(e)))
sys.exit(1)
process_list = ProcessList()
process_list_json = json_report["analysis"]["runtime"]["targets"]["target"]
if process_list_json:
if isinstance(process_list_json, dict):
process_list_json = [process_list_json]
for process in process_list_json:
command = process["name"] + " " + process["commandline"]
process_name = process["name"]
pid = process["pid"]
parent_pid = process["parentpid"]
#print("{} @ {}".format(pid, process["date"]))
new_process = ProcessWrapper(command, pid, parent_pid, process_name, None)
process_list.add_process(new_process)
# call structure() to build process relationship tree
process_list.structure() # come back to investigate why returning this output breaks script
get_vxstream_cb_guids(cb, process_list)
return process_list
def get_vxtream_cb_guids(report):
try:
cb = CbResponseAPI(profile='vxstream')
except:
LOGGER.error("Failure to get CbResponseAPI with 'vxstream' profile")
return 1
process_list = parse_vxstream_report(cb, report)
return [ p.id for p in process_list ]
|
[] |
[] |
[
"CBINTERFACE_TIMEZONE"
] |
[]
|
["CBINTERFACE_TIMEZONE"]
|
python
| 1 | 0 | |
languages/python/oso/oso/oso.py
|
"""Core oso functionality"""
__version__ = "0.20.0-beta"
import os
from typing import List, Any, Set
from polar import Polar, Variable, exceptions
from .exceptions import NotFoundError, ForbiddenError
class Oso(Polar):
"""The central object to manage application policy state, e.g.
the policy data, and verify requests.
>>> from oso import Oso
>>> Oso()
<oso.oso.Oso object at 0x...>
"""
def __init__(
self,
*,
forbidden_error=ForbiddenError,
not_found_error=NotFoundError,
read_action="read"
):
"""
Create an Oso object.
:param forbidden_error:
Optionally override the error class that is raised when an action is
unauthorized.
:param not_found_error:
Optionally override the error class that is raised by the
``authorize`` method when an action is unauthorized AND the actor
does not have permission to ``"read"`` the resource (and thus should
not know it exists).
:param read_action:
The action used by the ``authorize`` method to determine whether an
authorization failure should raise a ``NotFoundError`` or a
``ForbiddenError``.
"""
self._print_polar_log_message()
super().__init__()
self.forbidden_error = forbidden_error
self.not_found_error = not_found_error
self.read_action = read_action
def is_allowed(self, actor, action, resource) -> bool:
"""Evaluate whether ``actor`` is allowed to perform ``action`` on ``resource``.
Uses allow rules in the Polar policy to determine whether a request is
permitted. ``actor`` and ``resource`` should be classes that have been
registered with Polar using the :py:func:`register_class` function or
the ``polar_class`` decorator.
:param actor: The actor performing the request.
:param action: The action the actor is attempting to perform.
:param resource: The resource being accessed.
:return: ``True`` if the request is allowed, ``False`` otherwise.
"""
try:
next(self.query_rule("allow", actor, action, resource))
return True
except StopIteration:
return False
def get_allowed_actions(self, actor, resource, allow_wildcard=False) -> List[Any]:
"""Determine the actions ``actor`` is allowed to take on ``resource``.
Deprecated. Use ``authorized_actions`` instead.
"""
return list(self.authorized_actions(actor, resource, allow_wildcard))
def authorize(self, actor, action, resource, *, check_read=True):
"""Ensure that ``actor`` is allowed to perform ``action`` on
``resource``.
If the action is permitted with an ``allow`` rule in the policy, then
this method returns ``None``. If the action is not permitted by the
policy, this method will raise an error.
The error raised by this method depends on whether the actor can perform
the ``"read"`` action on the resource. If they cannot read the resource,
then a ``NotFoundError`` is raised. Otherwise, a ``ForbiddenError`` is
raised.
:param actor: The actor performing the request.
:param action: The action the actor is attempting to perform.
:param resource: The resource being accessed.
:param check_read: If set to ``False``, a ``ForbiddenError`` is always
thrown on authorization failures, regardless of whether the actor can
read the resource. Default is ``True``.
:type check_read: bool
"""
if self.query_rule_once("allow", actor, action, resource):
return
if check_read and (
action == self.read_action
or not self.query_rule_once("allow", actor, self.read_action, resource)
):
raise self.not_found_error()
raise self.forbidden_error()
def authorize_request(self, actor, request):
"""Ensure that ``actor`` is allowed to send ``request`` to the server.
Checks the ``allow_request`` rule of a policy.
If the request is permitted with an ``allow_request`` rule in the
policy, then this method returns ``None``. Otherwise, this method raises
a ``ForbiddenError``.
:param actor: The actor performing the request.
:param request: An object representing the request that was sent by the
actor.
"""
if not self.query_rule_once("allow_request", actor, request):
raise self.forbidden_error()
def authorized_actions(self, actor, resource, allow_wildcard=False) -> Set[Any]:
"""Determine the actions ``actor`` is allowed to take on ``resource``.
Collects all actions allowed by allow rules in the Polar policy for the
given combination of actor and resource.
Identical to ``Oso.get_allowed_actions``.
:param actor: The actor for whom to collect allowed actions
:param resource: The resource being accessed
:param allow_wildcard: Flag to determine behavior if the policy
contains an "unconstrained" action that could represent any action:
``allow(_actor, _action, _resource)``. If ``True``, the method will
return ``["*"]``, if ``False`` (the default), the method will raise
an exception.
:type allow_wildcard: bool
:return: A set containing all allowed actions.
"""
results = self.query_rule("allow", actor, Variable("action"), resource)
actions = set()
for result in results:
action = result.get("bindings").get("action")
if isinstance(action, Variable):
if not allow_wildcard:
raise exceptions.OsoError(
"""The result of authorized_actions() contained an
"unconstrained" action that could represent any
action, but allow_wildcard was set to False. To fix,
set allow_wildcard to True and compare with the "*"
string."""
)
else:
return {"*"}
actions.add(action)
return actions
def authorize_field(self, actor, action, resource, field):
"""Ensure that ``actor`` is allowed to perform ``action`` on a given
``resource``'s ``field``.
If the action is permitted by an ``allow_field`` rule in the policy,
then this method returns ``None``. If the action is not permitted by the
policy, this method will raise a ``ForbiddenError``.
:param actor: The actor performing the request.
:param action: The action the actor is attempting to perform on the
field.
:param resource: The resource being accessed.
:param field: The name of the field being accessed.
"""
if not self.query_rule_once("allow_field", actor, action, resource, field):
raise self.forbidden_error()
def authorized_fields(
self, actor, action, resource, allow_wildcard=False
) -> Set[Any]:
"""Determine the fields of ``resource`` on which ``actor`` is allowed to
perform ``action``.
Uses ``allow_field`` rules in the policy to find all allowed fields.
:param actor: The actor for whom to collect allowed fields.
:param action: The action being taken on the fields.
:param resource: The resource being accessed.
:param allow_wildcard: Flag to determine behavior if the policy \
includes a wildcard field. E.g., a rule allowing any field: \
``allow_field(_actor, _action, _resource, _field)``. If ``True``, the \
method will return ``["*"]``, if ``False``, the method will raise an \
exception.
:type allow_wildcard: bool
:return: A set containing all allowed fields.
"""
results = self.query_rule(
"allow_field", actor, action, resource, Variable("field")
)
fields = set()
for result in results:
field = result.get("bindings").get("field")
if isinstance(field, Variable):
if not allow_wildcard:
raise exceptions.OsoError(
"""The result of authorized_fields() contained an
"unconstrained" field that could represent any
field, but allow_wildcard was set to False. To fix,
set allow_wildcard to True and compare with the "*"
string."""
)
else:
return {"*"}
fields.add(field)
return fields
def _print_polar_log_message(self):
if os.environ.get("POLAR_LOG", None):
print(
"Polar tracing enabled. Get help with "
+ "traces from our engineering team: https://help.osohq.com/trace"
)
Policy = Oso
|
[] |
[] |
[
"POLAR_LOG"
] |
[]
|
["POLAR_LOG"]
|
python
| 1 | 0 | |
test/test_jit_cuda_fuser.py
|
import unittest
import os
import random
import torch
from torch.nn import functional
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR # TEST_WITH_ROCM
from torch.testing._internal.common_cuda import TEST_MULTIGPU
from torch.testing._internal.codegen.random_topo_test import runDefaultTestWithSeed
from torch.testing import FileCheck
from test_jit import JitTestCase, RUN_CUDA
from jit.test_fuser_common import TestFuserCommon # noqa: F401
import itertools
import numpy as np
import math
from typing import List
CUDA_MAJOR, CUDA_MINOR = (int(x) for x in torch.version.cuda.split('.'))
os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'
os.environ['PYTORCH_NVFUSER_DISABLE_FMA'] = '1'
os.environ['PYTORCH_NVFUSER_DISABLE_FASTMATH'] = '1'
os.environ['PYTORCH_NVFUSER_JIT_OPT_LEVEL'] = '0'
os.environ['PYTORCH_NVFUSER_DISABLE_RNG_UNROLL'] = '1'
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
FUSION_GROUP = 'prim::CudaFusionGroup'
FUSION_GUARD = 'prim::CudaFusionGuard'
def is_pre_volta():
prop = torch.cuda.get_device_properties(torch.cuda.current_device())
return prop.major < 7
class TestCudaFuser(JitTestCase):
special_values = torch.tensor(
[float("-inf"), -10, -math.pi,
-1, -0.5, 0, 1, 0.5,
math.pi, 10, float("inf"),
float("nan")], dtype=torch.float, device='cuda')
int_types = [
torch.int8,
torch.uint8,
torch.int16,
torch.int32,
torch.int64
]
support_tensor_dtypes = [
torch.int32,
torch.int64,
torch.float16,
torch.float32,
torch.float64,
torch.bool
]
def _getSubgraphInFusion(self, graph):
num_node = 0
subgraph = None
def count(block, ret):
for n in block.nodes():
if n.kind() == FUSION_GROUP:
ret[0] = ret[0] + 1
self.assertTrue(n.hasAttribute('Subgraph'))
ret[1] = n.g('Subgraph')
for block in n.blocks():
count(block, ret)
ret = [num_node, subgraph]
count(graph, ret)
self.assertEqual(ret[0], 1)
return ret[1]
def setUp(self):
super(TestCudaFuser, self).setUp()
self.old_cpu_fuse = torch._C._jit_can_fuse_on_cpu()
self.old_gpu_fuse = torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
self.old_guard = torch._C._jit_set_nvfuser_guard_mode(False)
torch._C._debug_set_autodiff_subgraph_inlining(False)
if(RUN_CUDA):
self.old_nvfuser = torch._C._jit_set_nvfuser_enabled(True)
def tearDown(self):
if(RUN_CUDA):
torch._C._jit_set_nvfuser_enabled(self.old_nvfuser)
torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuse)
torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuse)
torch._C._jit_set_nvfuser_guard_mode(self.old_guard)
torch._C._debug_set_autodiff_subgraph_inlining(True)
super(TestCudaFuser, self).tearDown()
def _run_helper(self, jit_op, op, *args):
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
torch.cuda.manual_seed_all(123)
o = op(*args)
self.assertEqual(o, jit_o)
self.assertGraphContainsExactly(jit_op.graph_for(*args), FUSION_GUARD, 1, consider_subgraphs=True)
def _run_training_helper(self, jit_op, op, grads, *args):
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
jit_o = jit_op(*args)
jit_g = jit_o.backward(grads)
torch.cuda.manual_seed_all(123)
o = op(*args)
g = o.backward(grads)
self.assertEqual(o, jit_o)
self.assertEqual(g, jit_g)
self.assertGraphContainsExactly(jit_op.graph_for(*args), FUSION_GUARD, 1, consider_subgraphs=True)
bwd_graph = list(
list(jit_op.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
self.assertGraphContainsExactly(bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_half(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, alpha: float):
o_16 = torch.add(x, y)
o_32_a = torch.add(y, z, alpha=alpha)
o_32_b = torch.add(o_16, z)
return (o_16, o_32_a, o_32_b)
t_jit = torch.jit.script(t)
alpha = 0.5
# stick to integers, this avoids the numerical difference due to our
# type promotion
x = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
y = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
z = torch.randint(0, 256, (4, 8)).to(dtype=torch.float16, device="cuda")
jit_o = t_jit(x, y, z, alpha)
jit_o = t_jit(x, y, z, alpha)
o = t(x, y, z, alpha)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, z, alpha), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_const(self):
def t(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_chunk(self):
def t(x, y, z, q):
o = x + q
x0, x1 = torch.chunk(o, 2)
o = x0 + x1
o = o + y
o = o * z
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(2, 8, dtype=torch.float, device="cuda")
z = torch.randn(2, 8, dtype=torch.float, device="cuda")
q = torch.randn(4, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z, q)
jit_o = t_jit(x, y, z, q)
o = t(x, y, z, q)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z, q), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_dtypes(self):
for op in [torch.sum, torch.mean]:
for dtype in [torch.float16, torch.float32, torch.double]:
def make_func(op):
def func(x: torch.Tensor):
o = torch.mul(x, 1.0)
o = op(o, dim=[2])
return o
return func
x = torch.randn(8, 4, 16, dtype=dtype, device="cuda")
t = make_func(op)
t_jit = torch.jit.trace(t, x)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_scalar_input(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 1, 32, dtype=torch.float, device="cuda")
y = y.expand(4, 8, 32, 32)
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_0(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_1(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(1, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_2(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 1, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(8, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_3(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(8, 17, 8, dtype=torch.float, device="cuda")
y = torch.randn(8, 17, 1, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
# test_broadcasting_partition_logic_X
# Testing partition logic that is capable to avoid creating unsupported
# broadcasting semantics in CudaFusionGroup
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_partition_logic_0(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
x = x + 12.0
o1 = x + y
o2 = x + z
o = o1 + o2
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 6, 8, dtype=torch.float32, device="cuda")
y = torch.randn(8, 6, 8, dtype=torch.float32, device="cuda")
z = torch.randn(6, 8, dtype=torch.float32, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))
self.assertGraphContainsExactly(subgraph, 'aten::add', 4, consider_subgraphs=False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_partition_logic_1(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
x = x + 12.0
o1 = x + y
o2 = x + z
o = o1 + o2
return o
t_jit = torch.jit.script(t)
x = torch.randn(8, 6, 8, dtype=torch.float32, device="cuda")
y = torch.randn(4, 8, 6, 8, dtype=torch.float32, device="cuda")
z = torch.randn(4, 1, 6, 8, dtype=torch.float32, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, z))
self.assertGraphContainsExactly(subgraph, 'aten::add', 4, consider_subgraphs=False)
@unittest.skipIf(True, "Broadcast with different output not supported yet")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_multiple_output_shape(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + 12
o1 = o + y
o2 = o + z
oo = o1.sum() + o2.sum()
return oo
t_jit = torch.jit.script(t)
x = torch.randn(32, 32, dtype=torch.float, device="cuda")
y = torch.randn(2, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
# Currently cannot fuse this
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(True, "broadcast on branches can't be resolved yet")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_broadcasting_multiple_output(self):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + 12
o1 = o + y
o2 = o + z
oo = o1.sum() + o2.sum()
return oo
t_jit = torch.jit.script(t)
x = torch.randn(32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o, jit_o)
# Currently cannot fuse this
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
def _unary_test_helper(self, operation):
def t(x: torch.Tensor, z: float):
o = x + z
o = operation(o)
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, 2.0)
jit_o = t_jit(x, 2.0)
o = t(x, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, 2.0), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_unary_ops(self):
operations = [torch.neg,
torch.abs,
torch.log,
torch.log10,
torch.log1p,
torch.log2,
torch.lgamma,
torch.exp,
torch.expm1,
torch.erf,
torch.erfc,
torch.cos,
torch.acos,
torch.cosh,
torch.sin,
torch.asin,
torch.tan,
torch.atan,
torch.sqrt,
torch.rsqrt,
torch.ceil,
torch.floor,
torch.round,
torch.trunc,
torch.frac,
torch.reciprocal,
torch.relu,
torch.sigmoid,
torch.tanh,
torch.nn.functional.silu]
for op in operations:
self._unary_test_helper(op)
def _unary_type_test_helper(self, operation, dtype, random_data=True):
shape = (4, 8, 32, 32)
# need additional def of t for boolean ops
def t(x: torch.Tensor, y: torch.Tensor):
o = x * y
o = operation(o)
return o
y = torch.tensor([1], device="cuda").to(dtype)
if random_data:
x = torch.randn(shape, dtype=torch.float32, device="cuda")
if dtype in self.int_types:
# prefer a larger variance for integer types
x *= 5
x = x.to(dtype=dtype)
else:
x = self.special_values.to(dtype=dtype)
try:
ref = t(x, y)
except Exception:
# same way as TE checker, if eager mode throws, ignore this test
return
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
if dtype in self.support_tensor_dtypes:
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
o = t(x, y)
self.assertEqual(o, jit_o, msg=f"""
failing case:
{dtype} {operation} {x}
""")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_data_compatibility(self):
dtypes = [
*self.int_types,
torch.float16,
torch.float32,
torch.float64
]
operations = [torch.neg,
torch.abs,
torch.log,
torch.log10,
torch.log1p,
torch.log2,
torch.lgamma,
torch.exp,
torch.expm1,
torch.erf,
torch.erfc,
torch.cos,
torch.acos,
torch.cosh,
torch.sin,
torch.asin,
torch.tan,
torch.atan,
torch.sqrt,
torch.rsqrt,
torch.ceil,
torch.floor,
torch.round,
torch.trunc,
torch.frac,
torch.reciprocal,
torch.relu,
torch.sigmoid,
torch.tanh,
torch.nn.functional.silu]
prev_fallback = os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK']
os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '0'
for op, dtype in itertools.product(operations, dtypes):
self._unary_type_test_helper(op, dtype, False) # test special numbers
self._unary_type_test_helper(op, dtype) # test random data
os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = prev_fallback
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_category_rule(self):
def run_tensor(x, z):
def t(x: torch.Tensor, z: torch.Tensor):
o = x + z
o = torch.abs(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, z)
jit_o = t_jit(x, z)
o = t(x, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)
def run_scalar(x, z):
def t(x: torch.Tensor, z: float):
o = x + z
o = torch.abs(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, z)
jit_o = t_jit(x, z)
o = t(x, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, z), FUSION_GUARD)
# n-dim with 0-dim (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.tensor(2.0, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with 0-dim (type-promote)
x = torch.randn(4, 8, 32, 32, device="cuda").to(dtype=torch.long)
z = torch.tensor(2.0, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with n-dim (type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.double, device="cuda")
run_tensor(x, z)
# n-dim with scalar (no type-promote)
x = torch.randn(4, 8, 32, 32, dtype=torch.float16, device="cuda")
z = torch.tensor(3., dtype=torch.double)
run_scalar(x, z)
# n-dim with scalar (type-promote)
x = torch.randn(4, 8, 32, 32, device="cuda").to(dtype=torch.long)
z = torch.tensor(3., dtype=torch.double)
run_scalar(x, z)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_unary_bitwise(self):
def bit_not(x: torch.Tensor):
return ~(x + 0)
jitted = torch.jit.script(bit_not)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(torch.long)
jit_o = bit_not(x)
jit_o = bit_not(x)
o = bit_not(x)
self.assertEqual(o, jit_o)
jitted.graph_for(x) # Shows up in second instance, not first
self.assertGraphContains(jitted.graph_for(x), FUSION_GUARD)
def bool_not(x: torch.Tensor, y: torch.Tensor):
return ~(x & y)
jitted = torch.jit.script(bool_not)
x = torch.rand(4, 8, 32, 32, dtype=torch.float, device="cuda").round().to(torch.bool)
y = torch.rand(4, 8, 32, 32, dtype=torch.float, device="cuda").round().to(torch.bool)
jit_o = bool_not(x, y)
jit_o = bool_not(x, y)
o = bool_not(x, y)
self.assertEqual(o, jit_o)
jitted.graph_for(x, y) # Shows up in second instance, not first
self.assertGraphContains(jitted.graph_for(x, y), FUSION_GUARD)
def _binary_test_helper(self, operation, dtype):
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = x + z
o = operation(o, y)
return o
x = (torch.randn(4, 32, 32, dtype=torch.float, device="cuda") * 5).to(dtype)
y = (torch.randn(4, 32, 32, dtype=torch.float, device="cuda") * 5).to(dtype)
# Avoid division by zero for integer tensors
div_like = [torch.div, torch.fmod, torch.remainder]
if operation in div_like and (dtype == torch.int32 or dtype == torch.int64):
y[y == 0] = 1
z = torch.tensor([2], device="cuda").to(dtype)
o = t(x, y, z)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_binary_ops(self):
data_types = [
torch.float32,
torch.float64,
torch.int32,
torch.int64
]
# these ops need some extra support to handle integer inputs,
# and they don't look like popular integer ops in models
# TODO: insert assertions in cpp if we decide not to fuse these on int
skip_for_integer = [
torch.atan2,
torch.fmod,
torch.pow,
torch.div
]
operations = [torch.div,
torch.mul,
torch.atan2,
torch.max,
torch.min,
torch.pow,
torch.remainder,
torch.fmod,
torch.eq,
torch.ne,
torch.ge,
torch.gt,
torch.le,
torch.lt]
for op, dtype in itertools.product(operations, data_types):
if (dtype not in self.int_types) or (op not in skip_for_integer):
self._binary_test_helper(op, dtype)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_binary_bitwise(self):
def jit_or(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) | z
def jit_xor(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) ^ z
def jit_lshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) << z
def jit_rshift(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) >> z
for jit_func in [jit_or, jit_xor, jit_lshift, jit_rshift]:
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(torch.long)
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(5).to(torch.long)
z = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda").mul(2).to(torch.long)
jitted = torch.jit.script(jit_func)
jit_o = jitted(x, y, z)
jit_o = jitted(x, y, z)
o = jit_func(x, y, z)
self.assertEqual(o, jit_o)
self.assertGraphContains(jitted.graph_for(x, y, z), FUSION_GUARD)
# We shouldn't need this redefinition of the function, but otherwise it won't recompile for a new type
def jit_or(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) | z
def jit_xor(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
return (x & y) ^ z
for jit_func in [jit_or, jit_xor]:
x = torch.rand(4, 2, dtype=torch.float, device="cuda").round().to(torch.bool)
y = torch.rand(4, 2, dtype=torch.float, device="cuda").round().to(torch.bool)
z = torch.rand(4, 2, dtype=torch.float, device="cuda").round().to(torch.bool)
jitted = torch.jit.script(jit_func)
jit_o = jitted(x, y, z)
jit_o = jitted(x, y, z)
o = jit_func(x, y, z)
self.assertEqual(o, jit_o)
self.assertGraphContains(jitted.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_type_as_op(self):
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = torch.lt(x, z)
o = o.type_as(y)
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 0.5)
jit_o = t_jit(x, y, 0.5)
o = t(x, y, 0.5)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 0.5), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
# legacy fuser does not work for rand_like, see issue #34361
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "Requires fusion optimization pass to be effective")
def test_ternary_ops(self):
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
cond = torch.randint(0, 2, (4, 8, 32, 32)).to(dtype=torch.bool, device="cuda")
def add(x: torch.Tensor, other: torch.Tensor, alpha: float):
o = torch.relu(x)
o = torch.add(o, other=other, alpha=alpha)
return o
add_jit = torch.jit.script(add)
self._run_helper(add_jit, add, x, y, 2.0)
def clamp0(x: torch.Tensor, f: float):
o = torch.rand_like(x)
o = o * torch.clamp(x, min=f)
return o
clamp0_jit = torch.jit.script(clamp0)
self._run_helper(clamp0_jit, clamp0, x, 0.5)
def clamp1(x: torch.Tensor, f: float, ff: float):
o = torch.rand_like(x)
o = o * torch.clamp(x, min=f, max=ff)
return o
clamp1_jit = torch.jit.script(clamp1)
self._run_helper(clamp1_jit, clamp1, x, -0.2, 0.7)
def threshold(x: torch.Tensor, th: float, val: float):
o = torch.rand_like(x)
o = x * torch.threshold(o, th, val)
return o
threshold_jit = torch.jit.script(threshold)
self._run_helper(threshold_jit, threshold, x, 0.2, 0.9)
def where(x: torch.Tensor, y: torch.Tensor, cond: torch.Tensor):
o = torch.rand_like(x)
o = o * torch.where(cond, x, y)
return o
where_jit = torch.jit.script(where)
self._run_helper(where_jit, where, x, y, cond)
def lerp(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.rand_like(x)
o = o * torch.lerp(x, y, z)
return o
lerp_jit = torch.jit.script(lerp)
self._run_helper(lerp_jit, lerp, x, y, z)
def lerp_scale(x: torch.Tensor, y: torch.Tensor, z: float):
o = torch.rand_like(x)
o = o * torch.lerp(x, y, z)
return o
lerp_scale_jit = torch.jit.script(lerp_scale)
self._run_helper(lerp_scale_jit, lerp_scale, x, y, 0.5)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "Requires profiling node to run cuda fuser")
def test_addcmul_ops(self):
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
z = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
def addcmul(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, value: float):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z, value=value)
return o
addcmul_jit = torch.jit.script(addcmul)
self._run_helper(addcmul_jit, addcmul, x, y, z, 2.0)
def addcmul_no_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z)
return o
addcmul_no_alpha_jit = torch.jit.script(addcmul_no_alpha)
self._run_helper(addcmul_no_alpha_jit, addcmul_no_alpha, x, y, z)
def addcmul_const_alpha(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, 0.5)
o = torch.addcmul(o, y, z, value=0.75)
return o
addcmul_const_alpha_jit = torch.jit.script(addcmul_const_alpha)
self._run_helper(addcmul_const_alpha_jit, addcmul_const_alpha, x, y, z)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dynamic_size(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_bailout_depth(20)
def t(x: torch.Tensor, y: torch.Tensor, z: float):
o = x + y
o = o + z
return o
t_jit = torch.jit.script(t)
x = torch.randn(4, 8, 32, 32, dtype=torch.float, device="cuda")
y = torch.randn(32, 32, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
subgraph = self._getSubgraphInFusion(t_jit.graph_for(x, y, 2.0))
self.assertGraphContainsExactly(subgraph, 'aten::add', 2, consider_subgraphs=False)
# this test is not ideal, as we rely on the bailout to test it and we
# don't know a way to verify the bailout graph to validate the proper
# fusion.
x = torch.randn(8, 32, 16, 8, dtype=torch.float, device="cuda")
y = torch.randn(16, 8, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
x = torch.randn(8, 17, 8, dtype=torch.float, device="cuda")
y = torch.randn(8, 17, 1, dtype=torch.float, device="cuda")
jit_o = t_jit(x, y, 2.0)
jit_o = t_jit(x, y, 2.0)
o = t(x, y, 2.0)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, 2.0), FUSION_GUARD)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_random_topo(self):
os.environ["PYTORCH_NVFUSER_DISABLE_FALLBACK"] = "1"
self.assertTrue(runDefaultTestWithSeed(28449))
def _compare(self, desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a, b, error, error)
if not close:
print(desc, close)
z = a - b
index = (np.abs(z) >= error + error * np.abs(b)).nonzero()
print("dif : ", z[index])
print("inp1 : ", a[index])
print("inp2 : ", b[index])
return close
# Permutation helper that applies a binary operation between two tensors:
# 1. applies separate permutation `perm0` & `perm1` to two inputs
# 2. reduce dimension `broadcast_axis` of operand two to size 1
# The purpose of this test is to ensure permutation works well in
# complicated cases with arbitrary stride order and broadcasting dimensions
def _permutation_helper(self, sizes, broadcast_axis, dtype, device, perm0, perm1):
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.relu(o)
return o
x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(
[perm0.index(i) for i in range(len(sizes))])
if broadcast_axis >= 0:
sizes[broadcast_axis] = 1
y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(
[perm1.index(i) for i in range(len(sizes))])
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
# End-to-end test of permutation & contiguity handling in integration.
# We test inputs with every combination of permutation order, just to
# ensure that integration is able to generate functionally correct
# kernels
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_binary_ops_permutation(self):
# note that num_dim is exclusive from len(x), so we are not reducing
# to single element (codegen limitation at this moment)
x = [7, 8, 12]
b_axes = range(-1, len(x))
for b_axis in b_axes:
for perm0 in itertools.permutations(range(len(x))):
for perm1 in itertools.permutations(range(len(x))):
x = [7, 8, 12]
self._permutation_helper(x, b_axis, torch.float32, "cuda", perm0, perm1)
def _reduction_helper(self, sizes, reduction_axis, dtype, device, perm0, perm1, keepdim=False):
class MyReduction(torch.nn.Module):
__constants__ = ['reduction_axis', 'keepdim']
def __init__(self):
super(MyReduction, self).__init__()
self.reduction_axis = reduction_axis
self.keepdim = keepdim
def forward(self, x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.sum(o, dim=self.reduction_axis, keepdim=self.keepdim)
return o
t = MyReduction()
x = torch.randn([sizes[i] for i in perm0], dtype=dtype, device=device).permute(
[perm0.index(i) for i in range(len(sizes))])
y = torch.randn([sizes[i] for i in perm1], dtype=dtype, device=device).permute(
[perm1.index(i) for i in range(len(sizes))])
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, 1e-4))
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction(self):
for x in ([7, 8, 12], [12, 8, 7, 9, 15], [128, 16, 8, 32]):
# note that num_dim is exclusive from len(x), so we are not reducing
# to single element (codegen limitation at this moment)
for num_reduce_dim in range(1, len(x)):
for axes in itertools.combinations(range(len(x)), num_reduce_dim):
for keepdim in (True, False):
perm0 = range(len(x))
perm1 = range(len(x))
self._reduction_helper(x, axes, torch.float32, "cuda", perm0, perm1, keepdim)
def _layer_norm_autodiff_helper(self, model, grad, shapes, args):
jit_model = torch.jit.script(model)
eps = np.random.random() * 1e-4
use_cudnn = bool(np.random.randint(0, 2))
# profile/optimization runs
for i in range(3):
jit_o = jit_model(shapes, *args, eps, use_cudnn)
jit_o.backward(grad)
ref_args = [t.detach().clone().requires_grad_() for t in args]
[t.grad.zero_() for t in args]
jit_o = jit_model(shapes, *args, eps, use_cudnn)
jit_o.backward(grad)
o = model(shapes, *ref_args, eps, use_cudnn)
o.backward(grad)
self.assertEqual(jit_o, o)
for arg, ref_arg in zip(args, ref_args):
self.assertEqual(arg.grad, ref_arg.grad)
# check fusion in fw & bw
g = jit_model.graph_for(shapes, *args, eps, use_cudnn)
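        # Walk the executor debug state: the loops below simply keep the last
        # visited entry, ending at the backward execution plan's graph.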
for node in g.nodes():
n = node
dbg_state = jit_model.get_debug_state()
for val in dbg_state.execution_plans.values():
v = val
state2 = v.code.grad_executor_states()
for val in state2[0].execution_plans.values():
v2 = val
FileCheck().check(FUSION_GUARD).run(g)
FileCheck().check(FUSION_GUARD).run(v2.graph)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_layer_norm_autodiff(self):
def t_wb(shapes: List[int], x, w, b, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, w, b, eps, cudnn)
o = torch.relu(o)
return o
def t_w(shapes: List[int], x, w, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, w, None, eps, cudnn)
o = torch.relu(o)
return o
def t_b(shapes: List[int], x, b, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, None, b, eps, cudnn)
o = torch.relu(o)
return o
def t(shapes: List[int], x, eps: float, cudnn: bool):
o = torch.layer_norm(x, shapes, None, None, eps, cudnn)
o = torch.relu(o)
return o
model = {3: t_wb, 2: t_w, 1: t_b, 0: t}
for w, b in itertools.product([True, False], repeat=2):
batch = [4]
shapes = [2, 3, 4]
m = model[w * 2 + b]
grad = torch.randn(batch + shapes, dtype=torch.float32, device="cuda")
args = [torch.randn(batch + shapes, dtype=torch.float32, device="cuda").requires_grad_()]
if w:
args.append(torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_())
if b:
args.append(torch.randn(shapes, dtype=torch.float32, device="cuda").requires_grad_())
self._layer_norm_autodiff_helper(m, grad, shapes, args)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_layer_norm_parser(self):
dtype = torch.float32
device = "cuda"
x = torch.randn([4, 4, 2], dtype=dtype, device=device)
w = torch.randn([4, 2], dtype=dtype, device=device)
b = torch.randn([4, 2], dtype=dtype, device=device)
def t(x: torch.Tensor, w: torch.Tensor, b: torch.Tensor):
o = torch.relu(x)
o = torch.layer_norm(o, [4, 2], w, b, 1e-5)
return o
o = t(x, w, b)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, w, b)
jit_o = t_jit(x, w, b)
o = t(x, w, b)
self.assertGraphContains(t_jit.graph_for(x, w, b), FUSION_GUARD)
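    # Helper: checks torch.native_layer_norm (output, mean, rstd) against eager
    # mode, with and without elementwise affine parameters, within `error` tolerance.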
def _native_layer_norm_helper(self, shape, norm_shape, dtype, device, error, affine=True):
class MyLayerNorm(torch.nn.Module):
__constants__ = ['norm_shape']
def __init__(self, elementwise_affine=True):
super(MyLayerNorm, self).__init__()
self.norm_shape = norm_shape
if elementwise_affine:
self.weight = torch.randn(norm_shape, dtype=dtype, device=device)
self.bias = torch.randn(norm_shape, dtype=dtype, device=device)
with torch.no_grad():
self.weight.fill_(1)
self.bias.fill_(0)
else:
self.weight = None
self.bias = None
def forward(self, x: torch.Tensor):
o = torch.relu(x)
o = torch.native_layer_norm(o, self.norm_shape, self.weight, self.bias, 1e-5)
return o
t = MyLayerNorm(affine)
x = torch.randn(shape, dtype=dtype, device=device)
t_jit = torch.jit.script(t)
jit_o, jit_mean, jit_rstd = t_jit(x)
jit_o, jit_mean, jit_rstd = t_jit(x)
o, mean, rstd = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertTrue(self._compare("comparing mean failed", mean, jit_mean, error))
self.assertTrue(self._compare("comparing rstd failed", rstd, jit_rstd, error))
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_native_layer_norm(self):
dims = 4
rnds = 3
for idx in range(rnds):
for offset in range(1, dims):
for affine in (True, False):
input_shape = [random.randint(10, 30) for idx in range(dims)]
norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]
self._native_layer_norm_helper(input_shape, norm_shape, torch.float32, "cuda", 1e-4, affine)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_native_layer_norm_half(self):
dims = 4
rnds = 3
for idx in range(rnds):
for offset in range(1, dims):
input_shape = [random.randint(10, 30) for idx in range(dims)]
norm_shape = [input_shape[idx] for idx in range(dims - offset, dims)]
self._native_layer_norm_helper(input_shape, norm_shape, torch.float16, "cuda", 5e-3)
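    # Helper: runs batch_norm or instance_norm (selected by the flag) with running
    # statistics, and verifies both the output and the updated
    # running_mean/running_var against eager mode.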
def _norm_helper(self, shape, dtype, device, error, is_batch_norm_else_instance_norm):
class MyBatchNorm(torch.nn.Module):
def __init__(self):
super(MyBatchNorm, self).__init__()
def forward(self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
o = torch.nn.functional.batch_norm(x, r_mean, r_var, training=True)
o = torch.relu(o)
return o
class MyInstanceNorm(torch.nn.Module):
def __init__(self):
super(MyInstanceNorm, self).__init__()
def forward(self, x: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
o = torch.nn.functional.instance_norm(x, r_mean, r_var, use_input_stats=True)
o = torch.relu(o)
return o
t = MyBatchNorm() if is_batch_norm_else_instance_norm else MyInstanceNorm()
x = torch.randn(shape, dtype=dtype, device=device)
running_mean = torch.zeros(shape[1], dtype=torch.float32, device=device)
running_var = torch.ones(shape[1], dtype=torch.float32, device=device)
t_jit = torch.jit.script(t)
eager_running_mean = running_mean.clone()
eager_running_var = running_var.clone()
jit_running_mean = running_mean.clone()
jit_running_var = running_var.clone()
jit_o = t_jit(x, running_mean.clone(), running_var.clone())
self.assertTrue(self._compare("prerun comparing running_mean failed", eager_running_mean, jit_running_mean, error))
self.assertTrue(self._compare("prerun comparing running_var failed", eager_running_var, jit_running_var, error))
jit_o = t_jit(x, jit_running_mean, jit_running_var)
o = t(x, eager_running_mean, eager_running_var)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertTrue(self._compare("comparing running_mean failed", eager_running_mean, jit_running_mean, error))
self.assertTrue(self._compare("comparing running_var failed", eager_running_var, jit_running_var, error))
self.assertGraphContains(t_jit.graph_for(x, running_mean, running_var), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_norm(self):
output_elements = 10000
channel_sizes = [67, 457, 1024, 4096]
with torch.backends.cudnn.flags(enabled=False):
for is_batch_norm_else_instance_norm in [False, True]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1. / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(x, torch.float32, "cuda", 1e-4, is_batch_norm_else_instance_norm)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_norm_large(self):
output_elements = 262144
channel_sizes = 67, 457, 1024
for is_batch_norm_else_instance_norm in [True, False]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1. / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(x, torch.float32, "cuda", 1e-4, is_batch_norm_else_instance_norm)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_norm_half(self):
output_elements = 10000
channel_sizes = [67, 457, 1024, 4096]
with torch.backends.cudnn.flags(enabled=False):
for is_batch_norm_else_instance_norm in [False, True]:
for dims in range(3, 6):
output_size = int(pow(output_elements, 1. / (dims - 1)))
for C in channel_sizes:
x = [output_size for idx in range(dims)]
x[1] = C
self._norm_helper(x, torch.float16, "cuda", 5e-3, is_batch_norm_else_instance_norm)
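    # Helper: fuses an add followed by softmax along `reduction_axis` and compares
    # the scripted output against eager mode within `error` tolerance.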
def _softmax_helper(self, shape, reduction_axis, dtype, device, error):
class MySoftmax(torch.nn.Module):
__constants__ = ['reduction_axis']
def __init__(self):
super(MySoftmax, self).__init__()
self.reduction_axis = reduction_axis
def forward(self, x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.nn.functional.softmax(o, dim=self.reduction_axis)
return o
t = MySoftmax()
x = torch.randn(shape, dtype=dtype, device=device)
y = torch.randn(shape, dtype=dtype, device=device)
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
# numerical issues here due to our scheduling.
# can't use `self.assertEqual(o, jit_o)`
self.assertTrue(self._compare("comparing output failed", o, jit_o, error))
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
self._softmax_helper(x, reduction_dim, torch.float32, "cuda", 1e-4)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softmax_half(self):
output_size = 10000
dims = 4
output_size = int(pow(output_size, 1. / dims))
reduction_sizes = [67, 256, 1024, 4096]
for reduction_dim in range(dims):
for reduction_size in reduction_sizes:
x = [output_size for idx in range(dims)]
x[reduction_dim] = reduction_size
self._softmax_helper(x, reduction_dim, torch.float16, "cuda", 5e-3)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_permutation(self):
x = [7, 8, 12]
        # note that the number of reduced dims is strictly less than len(x), so we
        # are not reducing to a single element (a codegen limitation at the moment)
for num_reduce_dim in range(1, len(x)):
for axes in itertools.combinations(range(len(x)), num_reduce_dim):
for perm0 in itertools.permutations(range(len(x))):
for perm1 in itertools.permutations(range(len(x))):
self._reduction_helper(x, axes, torch.float32, "cuda", perm0, perm1)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_multiple_output(self):
old_guard = torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_bailout_depth(20)
def t(x: torch.Tensor, y: torch.Tensor, scale: float, z: torch.Tensor):
o = torch.mul(x, y)
o = torch.mul(o, scale)
out1 = torch.mul(o, z)
out2 = torch.sum(out1, dim=[2])
return out1, out2
t_jit = torch.jit.script(t)
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
y = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
z = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
scale = 0.5
jit_o = t_jit(x, y, scale, z)
jit_o = t_jit(x, y, scale, z)
o = t(x, y, scale, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
x = x.to(memory_format=torch.channels_last)
y = y.to(memory_format=torch.channels_last)
z = z.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y, scale, z)
jit_o = t_jit(x, y, scale, z)
o = t(x, y, scale, z)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo, jit_oo)
self.assertGraphContains(t_jit.graph_for(x, y, scale, z), FUSION_GUARD)
torch._C._jit_set_nvfuser_guard_mode(old_guard)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_channels_last_with_broadcast(self):
        # setting this to True forces a new graph to be generated for a new
        # input with a different broadcast shape
torch._C._jit_set_nvfuser_guard_mode(True)
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.mul(x, y)
o = o + 2.0
return o
t_jit = torch.jit.script(t)
# Single Channel broadcasts
# Test 1
x = torch.randn(8, 4, 10, 16, dtype=torch.float, device="cuda")
x = x.to(memory_format=torch.channels_last)
y = torch.randn(8, 4, 10, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 3
y = torch.randn(8, 1, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
        # Test 4
y = torch.randn(1, 4, 10, 16, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
'''
Currently, the JIT doesn't have tensor merge logic to handle adding
a broadcast tensor with more than one broadcast into a non-broadcast
tensor. Therefore, either of these tests can fail depending on the
sort implementation. The second test is known to fail.
# Two Channel broadcasts
# Test 1
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
# Test 2
y = torch.randn(8, 4, 1, 1, dtype=torch.float, device="cuda")
y = y.to(memory_format=torch.channels_last).transpose(2,3)
x = x.transpose(2,3)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o.is_contiguous(memory_format=torch.channels_last),
jit_o.is_contiguous(memory_format=torch.channels_last))
self.assertEqual(o, jit_o)
'''
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_pw_single_reduction_partition(self):
sizes = [2, 2, 2]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.sum(o, dim=[0])
o = torch.add(o, z)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_permutation_preservation(self):
sizes = [2, 2, 2, 2]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device).to(memory_format=torch.channels_last)
def t(x: torch.Tensor):
o = torch.relu(x)
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
        # the output should preserve the permutation (stride order) of the input
self.assertEqual(jit_o.stride(), (1, 4, 2))
def t(x: torch.Tensor):
o = torch.relu(x)
o = torch.add(o, 1.0)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
self.assertTrue(jit_o.is_contiguous(memory_format=torch.channels_last))
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_normalization_partition(self):
sizes = [8, 8, 8]
dtype = torch.float
device = "cuda"
x = torch.randn(sizes, dtype=dtype, device=device)
y = torch.randn(sizes, dtype=dtype, device=device)
z = torch.randn(sizes, dtype=dtype, device=device)
r_m = torch.randn(8, dtype=dtype, device=device)
r_v = torch.randn(8, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, r_mean: torch.Tensor, r_var: torch.Tensor):
o = torch.add(x, y)
o = torch.nn.functional.softmax(o, dim=0)
o = torch.add(o, z)
o = torch.nn.functional.batch_norm(o, r_mean, r_var, training=True)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z, r_m, r_v)
jit_o = t_jit(x, y, z, r_m, r_v)
o = t(x, y, z, r_m, r_v)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z, r_m, r_v), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_sum_to_one(self):
dtype = torch.float
device = "cuda"
x = torch.randn([4, 5, 6], dtype=dtype, device=device)
def t(x: torch.Tensor):
o = torch.add(x, 0)
o = torch.sum(o, dim=[0, 1, 2])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_single_reduction_broadcast(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 8], dtype=dtype, device=device)
y = torch.randn([4, 8], dtype=dtype, device=device)
z = torch.randn([1, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
o = torch.add(x, y)
o = torch.add(o, z)
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, z)
jit_o = t_jit(x, y, z)
o = t(x, y, z)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, z), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_trivial_reduction(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor):
o = torch.add(x, 0)
o = torch.sum(o, dim=[0])
o = torch.sum(o, dim=[0])
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x), FUSION_GUARD)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_profiling_node(self):
dtype = torch.float
device = "cuda"
x = torch.randn(4, 8, 8, 8, dtype=dtype, device=device)
def repro(x: torch.Tensor, alpha: float):
o = torch.rand_like(x)
o = torch.add(o, alpha)
return o
repro_jit = torch.jit.script(repro)
self._run_helper(repro_jit, repro, x, 0.6)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_reduction_sizes_op(self):
dtype = torch.float
device = "cuda"
x = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)
y = torch.randn(2, 3, 4, 5, dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor):
o = x + y
o = torch.relu(o)
o = o.sum((1, 3))
return o.size()
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o = t_jit(x, y)
o = t(x, y)
self.assertEqual(o, jit_o)
# since the output value is not used at all, the fusion operator should
# have been optimized away
self.assertGraphContainsExactly(t_jit.graph_for(x, y), FUSION_GUARD, 0)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_profile_ivalue(self):
dtype = torch.float
device = "cuda"
x = torch.randn([7, 4, 7], dtype=dtype, device=device)
y = torch.randn([7, 4, 7], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, dim: List[int], keepdim: bool):
o = torch.add(x, y)
o = o.sum(dim, keepdim=keepdim)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, (0, 1), False)
jit_o = t_jit(x, y, (0, 1), False)
o = t(x, y, (0, 1), False)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, (0, 1), False), FUSION_GUARD)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_sum_to_size(self):
dtype = torch.float
device = "cuda"
x = torch.randn([2, 4, 4], dtype=dtype, device=device)
y = torch.randn([2, 4, 4], dtype=dtype, device=device)
def t(x: torch.Tensor, y: torch.Tensor, new_size: List[int]):
o = torch.add(x, y)
o = o.sum_to_size(new_size)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, (4, 1))
jit_o = t_jit(x, y, (4, 1))
o = t(x, y, (4, 1))
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertGraphContains(t_jit.graph_for(x, y, (4, 1)), FUSION_GUARD)
# update shape: old kernel should handle dynamic shape well without
# recompilation
x = torch.randn([2, 5, 8], dtype=dtype, device=device)
y = torch.randn([2, 5, 8], dtype=dtype, device=device)
# (TODO) check executed kernel, should extend autograd.profiler to fused
# kernels
jit_o = t_jit(x, y, (5, 1))
o = t(x, y, (5, 1))
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_grad_sum_to_size(self):
dtype = torch.float
device = "cuda"
x = torch.randn([2, 4, 4], dtype=dtype, device=device).requires_grad_()
y = torch.randn([4], dtype=dtype, device=device).requires_grad_()
grad = torch.randn([2, 4, 4], dtype=dtype, device=device)
ref_x = x.detach().clone().requires_grad_()
ref_y = y.detach().clone().requires_grad_()
def t(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.relu(o)
return o
# profiling runs for forward & backward
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y)
jit_o.backward(grad)
jit_o = t_jit(x, y)
jit_o.backward(grad)
x.grad = None
y.grad = None
jit_o = t_jit(x, y)
jit_o.backward(grad)
o = t(ref_x, ref_y)
o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(x.grad, ref_x.grad)
self.assertEqual(y.grad, ref_y.grad)
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GUARD).run(bwd_graph)
# update shape: old kernel should handle dynamic shape well without
# recompilation
x = torch.randn([2, 5, 8], dtype=dtype, device=device).requires_grad_()
y = torch.randn([8], dtype=dtype, device=device).requires_grad_()
ref_x = x.detach().clone().requires_grad_()
ref_y = y.detach().clone().requires_grad_()
grad = torch.randn([2, 5, 8], dtype=dtype, device=device)
jit_o = t_jit(x, y)
# (TODO) check executed kernel, should extend autograd.profiler to fused
# kernels
jit_o.backward(grad)
o = t(ref_x, ref_y)
o.backward(grad)
self.assertEqual(o.dtype, jit_o.dtype)
self.assertEqual(o, jit_o)
self.assertEqual(x.grad, ref_x.grad)
self.assertEqual(y.grad, ref_y.grad)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_add_backward_with_alpha(self):
x = torch.randn(4, 2, dtype=torch.float32, device='cuda', requires_grad=True)
y = torch.randn(4, 2, dtype=torch.float32, device='cuda', requires_grad=True)
grad = torch.randn(4, 2, dtype=torch.float32, device='cuda')
        # Test that a mul is not generated when it is not needed:
        # alpha=1.0, or alpha is not used at all
def test1(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y, alpha=1.0)
o = o + 1.0
return o
test1_jit = torch.jit.script(test1)
for i in range(3):
jit_o = test1_jit(x, y)
jit_o.backward(grad)
bwd1_graph = list(
list(test1_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check_not("aten::mul_").run(bwd1_graph)
# Alpha is set to something other than 1.0
def test2(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y, alpha=2.0)
o = o + 1.0
return o
test2_jit = torch.jit.script(test2)
for i in range(3):
jit_o = test2_jit(x, y)
jit_o.backward(grad)
bwd2_graph = list(
list(test2_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check("aten::mul_").run(bwd2_graph)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_inference_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([10, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 1.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.15, False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_train_nograd_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([10, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 1.0
return o
t_jit = torch.jit.script(t)
self._run_helper(t_jit, t, x, 0.0, True)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_train_nograd_prob_check(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 0.0
return o
t_jit = torch.jit.script(t)
for prob in [0.0, 0.15, 0.5, 0.85, 1.]:
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
self.assertTrue(jit_o.detach().isfinite().all().item())
num_elems = x.numel()
num_zeros = num_elems - jit_o.detach().count_nonzero().item()
percent_zeros = num_zeros / num_elems
self.assertTrue((percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01)))
self.assertGraphContainsExactly(t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_training_fusion(self):
dtype = torch.float
device = "cuda"
x = torch.randn([10, 4, 8], dtype=dtype, device=device, requires_grad=True)
grads = torch.randn([10, 4, 8], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o * 1.0
return o
t_jit = torch.jit.script(t)
        # The drop probability needs to be zero because eager mode and the JIT
        # draw random numbers in a different order
self._run_training_helper(t_jit, t, grads, x, 0.0, True)
def t2(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.softmax(x, dim=-1)
o = torch.nn.functional.dropout(o, p, training=train)
return o
t2_jit = torch.jit.script(t2)
        # The drop probability needs to be zero because eager mode and the JIT
        # draw random numbers in a different order
self._run_training_helper(t2_jit, t2, grads, x, 0.0, True)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_gelu(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)
grads = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=False)
        def t(x: torch.Tensor, fast: bool):
o = torch.nn.functional.gelu(x)
o = o * 1.0
return o
t_jit = torch.jit.script(t)
for approximate in [False, True]:
self._run_training_helper(t_jit, t, grads, x, approximate)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_dropout_training_prob_check(self):
dtype = torch.float
device = "cuda"
x = torch.randn([1024, 1024], dtype=dtype, device=device, requires_grad=True)
x_nograd = torch.randn([1024, 1024], dtype=dtype, device=device)
def t(x: torch.Tensor, p: float, train: bool):
o = torch.nn.functional.dropout(x, p, training=train)
o = o + 0.0
return o
t_jit = torch.jit.script(t)
for prob in [0.0, 0.15, 0.5, 0.85, 1.]:
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
torch.cuda.manual_seed_all(123)
jit_o = t_jit(x, prob, True)
self.assertTrue(jit_o.detach().isfinite().all().item())
num_elems = x.numel()
num_zeros = num_elems - jit_o.detach().count_nonzero().item()
percent_zeros = num_zeros / num_elems
self.assertTrue((percent_zeros >= (prob - 0.01)) and (percent_zeros <= (prob + 0.01)))
self.assertGraphContainsExactly(t_jit.graph_for(x, prob, True), FUSION_GUARD, 1, consider_subgraphs=True)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_linear(self):
in_feature = 2
out_feature = 8
x = torch.randn(4, in_feature, dtype=torch.float32, device='cuda')
weight = torch.randn(out_feature, in_feature, dtype=torch.float32, device='cuda')
bias = torch.randn(out_feature, dtype=torch.float32, device='cuda')
def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor):
o = torch.nn.functional.linear(x, weight, bias)
o = torch.relu(o)
return o
# bias set to true.
t_jit = torch.jit.script(t)
jit_o = t_jit(x, weight, bias)
jit_o = t_jit(x, weight, bias)
o = t(x, weight, bias)
self.assertEqual(o, jit_o)
        # the pointwise ops following the linear are expected to form exactly
        # one fusion group
self.assertGraphContainsExactly(t_jit.graph_for(x, weight, bias), FUSION_GUARD, 1)
@unittest.skipIf(True, "Requires further investigation")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_backward_type(self):
# not super useful to check gradient of integer/bool, so skipping here
type_pairs = [
(torch.float, torch.half),
(torch.double, torch.half),
(torch.float, torch.double),
]
for x_type, y_type in type_pairs:
x = torch.randn(4, 2, dtype=x_type, device='cuda', requires_grad=True)
y = torch.randn(4, 2, dtype=y_type, device='cuda', requires_grad=True)
grad = torch.randn(4, 2, dtype=torch.float, device='cuda')
def test1(x: torch.Tensor, y: torch.Tensor):
o = torch.add(x, y)
o = torch.add(o, y)
o = torch.add(o, y)
o = torch.add(o, y)
o = o + 1.0
return o
test1_jit = torch.jit.script(test1)
for i in range(3):
jit_o = test1_jit(x, y)
jit_o.backward(grad)
bwd_graph = list(
list(test1_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_autocast_1(self):
def t(x: torch.Tensor, y: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch.matmul(o, y)
return o
x = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=True)
y = torch.randn(4, 4, dtype=torch.float, device='cuda', requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast():
jit_o = t_jit(x, y)
if i == 2 :
fwd_graph = t_jit.graph_for(x, y)
jit_o.backward(grad)
self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
with torch.cuda.amp.autocast():
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.half)
self.assertEqual(x.grad.dtype, x.dtype)
self.assertEqual(y.grad.dtype, y.dtype)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_autocast_2(self):
def t(x: torch.Tensor):
o = x * 2.0
o = torch.softmax(o, dim=-1)
o = o * 3.0
o = torch.softmax(o, dim=-1)
o = o * 4.0
return o
x = torch.randn(8, 4, dtype=torch.half, device='cuda', requires_grad=True)
grad = torch.randn(8, 4, dtype=torch.float, device='cuda', requires_grad=False)
t_jit = torch.jit.script(t)
for i in range(3):
with torch.cuda.amp.autocast() :
jit_o = t_jit(x)
if i == 2 :
fwd_graph = t_jit.graph_for(x)
jit_o.backward(grad)
self.assertGraphContainsExactly(fwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
with torch.cuda.amp.autocast():
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[
0].code.grad_executor_states()[0].execution_plans.values()
)[0].graph
FileCheck().check(FUSION_GROUP).run(bwd_graph)
self.assertEqual(jit_o.dtype, torch.float)
self.assertEqual(x.grad.dtype, x.dtype)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_to_dtype_fp32_to_fp16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.half)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.float, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.half)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_to_dtype_fp16_to_fp32(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.float)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.half, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.float)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_to_dtype_fp16_to_fp16(self):
def t(x: torch.Tensor):
o = x * 2.0
o = o.to(dtype=torch.half)
o = o * 3.0
return o
x = torch.randn(8, 4, dtype=torch.half, device='cuda')
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
self.assertEqual(jit_o.dtype, torch.half)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
    @unittest.skipIf(not TEST_MULTIGPU, "requires multiple CUDA devices")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_multiple_device_pw(self):
def t(x):
o = x + 1.0
o = torch.relu(o)
return o
x = torch.randn(2, dtype=torch.float32, device="cuda")
t_jit = torch.jit.script(t)
for i in range(3):
jit_o = t_jit(x)
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
torch.cuda.device(1)
x = x.to("cuda:1")
jit_o = t_jit(x)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_graph_for_with_missing_optimized_engine(self):
x = torch.randn(8, 4, 2, dtype=torch.float, device="cuda").requires_grad_()
def t(x: torch.Tensor, flag: bool):
x = x + 1.0
x = torch.relu(x)
if flag:
o = x + 1.0
o = torch.relu(o)
else:
o = x + 2.0
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, False)
jit_o = t_jit(x, False)
jit_o = t_jit(x, True)
o = t(x, True)
self.assertEqual(o, jit_o)
        # the executed branch should contain exactly one fusion group
        # (counting subgraphs of the conditional)
self.assertGraphContainsExactly(t_jit.graph_for(x, True), FUSION_GUARD, 1, True)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_branches(self):
in_feature = 2
out_feature = 4
x = torch.randn(4, in_feature, dtype=torch.float32, device='cuda')
weight = torch.randn(out_feature, in_feature, dtype=torch.float32, device='cuda')
bias = torch.randn(out_feature, dtype=torch.float32, device='cuda')
def t(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor, flag: bool):
if flag:
o = torch.nn.functional.linear(x, weight, bias)
o = o + 1.0
o = torch.relu(o)
else:
o = x.sum()
o = o + 2.0
o = torch.relu(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x, weight, bias, True)
jit_o = t_jit(x, weight, bias, True)
o = t(x, weight, bias, True)
self.assertEqual(o, jit_o)
        # the executed (flag=True) branch should be fused into exactly one
        # fusion group
self.assertGraphContainsExactly(t_jit.graph_for(x, weight, bias, True), FUSION_GUARD, 1)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_scalar_tensor(self):
x = torch.empty([], device="cuda", dtype=torch.float32)
def t(x: torch.Tensor):
o = x + 1.0
o = torch.nn.functional.relu(o)
return o
t_jit = torch.jit.script(t)
jit_o = t_jit(x)
jit_o = t_jit(x)
o = t(x)
self.assertEqual(o, jit_o)
        # a 0-dim (scalar) tensor input should still produce exactly one fusion
        # group
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1)
@unittest.skipIf(os.environ.get('PYTORCH_NO_CUDA_MEMORY_CACHING') is not None,
"skipping graph_rng when caching allocator is disabled")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(CUDA_MAJOR < 11, "requires CUDA11 or above")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_graph_rng(self):
self.assertTrue(torch._C._jit_nvfuser_enabled())
size = 10000
a = torch.randn((size,), device="cuda", dtype=torch.float)
def t(x):
o = x + 1.0
o = torch.nn.functional.dropout(o, p=0.1)
o = o + 1.0
o = torch.nn.functional.dropout(o, p=0.1)
return o
t_jit = torch.jit.script(t)
for _ in range(3):
t_jit(a)
self.assertGraphContainsExactly(t_jit.graph_for(a), FUSION_GUARD, 1)
# Control (jitted, ungraphed)
torch.cuda.manual_seed(5)
eager_out = a.clone()
for _ in range(3):
eager_out = t_jit(eager_out)
graph_in = a.clone()
g = torch.cuda.CUDAGraph()
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
torch.cuda.manual_seed(5)
g.capture_begin()
graph_out = t_jit(graph_in)
g.capture_end()
torch.cuda.current_stream().wait_stream(s)
# g is now a jitted, graphed version of t.
# Runs a (jitted, graphed) -> (jitted, ungraphed) -> (jitted, graphed) sequence.
# The ops in the overall sequence should be the same as Control.
g.replay()
# graph_out is now filled with g's result. Use it as ungraphed input.
out = t_jit(graph_out)
graph_in.copy_(out)
g.replay()
# If replay() updated RNG state correctly, graph_out should now equal eager_out
self.assertEqual(graph_out, eager_out)
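    # Helper: compares a scripted BatchNorm2d module against an eager reference
    # across affine/track_running_stats/train combinations, checking outputs,
    # gradients and running statistics, and asserting fusion in both the forward
    # and backward graphs.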
def _test_batch_norm_impl_index_helper(self, batch, c, hw, affine=True, track_running_stats=True, train=True):
# enabling inlining to avoid counter increment in BN forward
torch._C._debug_set_autodiff_subgraph_inlining(True)
dtype = torch.float32
class MyModule(torch.nn.Module):
def __init__(self, num_features=10, affine=True, track_running_stats=True):
super(MyModule, self).__init__()
self.bn = torch.nn.BatchNorm2d(num_features,
1e-5,
affine=affine,
track_running_stats=track_running_stats).to(dtype=dtype)
def forward(self, x):
o = x * 1.0
o = self.bn(o)
return o
x = torch.randn(batch, c, hw, hw, dtype=torch.float, device="cuda").to(dtype=dtype).requires_grad_()
grad = torch.randint(-20, 20, (batch, c, hw, hw), device="cuda").to(dtype=dtype).div(-10)
my_module = MyModule(c, affine, track_running_stats).cuda()
ref_module = MyModule(c, affine, track_running_stats).cuda()
if not train:
my_module.eval()
ref_module.eval()
t_jit = torch.jit.script(my_module)
ref_module.load_state_dict(my_module.state_dict())
ref_x = x.detach().requires_grad_()
for i in range(0, 3):
jit_o = t_jit(x)
jit_o.backward(grad)
# TODO: remove this run?
o = ref_module(ref_x)
o.backward(grad)
has_affine = ref_module.bn.weight is not None
has_running_stats = ref_module.bn.running_mean is not None
if has_running_stats:
my_module.bn.running_mean.zero_()
my_module.bn.running_var.fill_(1.0)
ref_module.bn.running_mean.zero_()
ref_module.bn.running_var.fill_(1.0)
# Verify that when train is False, we don't have grad for weight/bias.
if has_affine and train:
my_module.bn.weight.grad.zero_()
my_module.bn.bias.grad.zero_()
ref_module.bn.weight.grad.zero_()
ref_module.bn.bias.grad.zero_()
x.grad.zero_()
ref_x.grad.zero_()
# real runs
jit_o = t_jit(x)
jit_o.backward(grad)
o = ref_module(ref_x)
o.backward(grad)
# assert forward graph fusion
self.assertGraphContainsExactly(t_jit.graph_for(x), FUSION_GUARD, 1, consider_subgraphs=True)
# assert backward graph fusion
bwd_graph = list(
list(t_jit.get_debug_state().execution_plans.values())[0].code.grad_executor_states()[0]
.execution_plans.values())[0].graph
self.assertGraphContainsExactly(bwd_graph, FUSION_GUARD, 1, consider_subgraphs=True)
self.assertTrue(self._compare("comparing output failed", jit_o, o, 1e-5))
self.assertTrue(self._compare("comparing input grad failed", x.grad, ref_x.grad, 1e-4))
        # TODO: switch to welford and reduce this to 1e-5
        # The 1e-3 tolerance looks bad, but we don't have welford in codegen, so
        # the numerics differ noticeably between the reference and codegen.
if has_affine and train:
self.assertTrue(self._compare("comparing weight grad failed",
my_module.bn.weight.grad,
ref_module.bn.weight.grad,
1e-3))
self.assertTrue(self._compare("comparing bias grad failed",
my_module.bn.bias.grad,
ref_module.bn.bias.grad,
1e-4))
if has_running_stats:
self.assertTrue(self._compare("comparing running_mean failed",
my_module.bn.running_mean,
ref_module.bn.running_mean,
1e-5))
self.assertTrue(self._compare("comparing running_var failed",
my_module.bn.running_var,
ref_module.bn.running_var,
1e-5))
@unittest.skipIf(True, "Requires further investigation")
@unittest.skipIf(is_pre_volta(), "reduction not supported in pre volta device")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_batch_norm_impl_index_correctness(self):
with torch.backends.cudnn.flags(enabled=True):
batch = [2, 7, 16]
channels = [4, 89, 19, 32]
hw = [1, 8, 17, 32]
# avoid tolerance failure in CI
torch.cuda.manual_seed_all(211)
# failing sizes (2, 1, 1, 1)
# failing sizes (2, 89, 8, 8) training False, track True, affine: False
for b, c, hw in itertools.product(batch, channels, hw):
setups = [
[True, True],
[False, False],
[True, False],
[False, True]]
for training_and_track, affine in itertools.product(setups, [True, False]):
training, track_running_stats = training_and_track
self._test_batch_norm_impl_index_helper(b, c, hw, affine, track_running_stats, training)
@unittest.skipIf(True, "PRs pending")
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_softplus_fuser(self):
def shifted_softplus(x: torch.Tensor, shift: float):
return functional.softplus(x) - shift
jitted = torch.jit.script(shifted_softplus)
inp = torch.randn(4, 2, dtype=torch.float32, device="cuda").requires_grad_()
inp_ref = inp.detach().clone().requires_grad_()
grad = torch.randn(4, 2, dtype=torch.float32, device="cuda")
aten_o = shifted_softplus(inp_ref, 0.693147)
aten_o.backward(grad)
aten_grad = inp_ref.grad
for i in range(3):
jit_o = jitted(inp, 0.693147)
inp.grad = None # avoid accumulation on grad
jit_o.backward(grad)
jit_grad = inp.grad
assert torch.allclose(jit_o, aten_o)
assert torch.allclose(jit_grad, aten_grad)
self.assertGraphContains(jitted.graph_for(inp, 0.693147), FUSION_GROUP, True)
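# Tests for toggling the nvFuser pass: the torch.jit.fuser('fuser2') context
# manager and the torch._C._jit_set_nvfuser_enabled registration API.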
class TestPassManagerCudaFuser(JitTestCase):
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING,
"Requires fusion optimization pass to be effective")
def test_context_manager_test(self):
x = torch.randn(4, 8, dtype=torch.float, device="cuda")
y = torch.randn(4, 8, dtype=torch.float, device="cuda")
with torch.jit.fuser('fuser2'):
with torch.jit.fuser('fuser2'):
def t1(x, y):
o = x + y
o = o + 2.0
return o
t_jit = torch.jit.script(t1)
t_jit(x, y)
t_jit(x, y)
self.assertGraphContains(t_jit.graph_for(x, y), FUSION_GUARD)
def t2(x, y):
o = x + y
o = o + 3.0
return o
t_jit_2 = torch.jit.script(t2)
t_jit_2(x, y)
t_jit_2(x, y)
self.assertGraphContains(t_jit_2.graph_for(x, y), FUSION_GUARD)
def t3(x, y):
o = x + y
o = o + 4.0
return o
t_jit_3 = torch.jit.script(t3)
t_jit_3(x, y)
t_jit_3(x, y)
self.assertGraphContainsExactly(t_jit_3.graph_for(x, y), FUSION_GUARD, 0)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_register_fuser(self):
self.assertFalse(torch._C._jit_set_nvfuser_enabled(True))
self.assertTrue(torch._C._jit_nvfuser_enabled())
self.assertTrue(torch._C._jit_set_nvfuser_enabled(True))
self.assertTrue(torch._C._jit_nvfuser_enabled())
self.assertTrue(torch._C._jit_set_nvfuser_enabled(False))
self.assertFalse(torch._C._jit_nvfuser_enabled())
if __name__ == '__main__':
run_tests()
|
[] |
[] |
[
"PYTORCH_NVFUSER_DISABLE_FALLBACK",
"PYTORCH_NVFUSER_DISABLE_FMA",
"PYTORCH_NVFUSER_JIT_OPT_LEVEL",
"PYTORCH_NVFUSER_DISABLE_RNG_UNROLL",
"PYTORCH_NVFUSER_DISABLE_FASTMATH",
"PYTORCH_NO_CUDA_MEMORY_CACHING"
] |
[]
|
["PYTORCH_NVFUSER_DISABLE_FALLBACK", "PYTORCH_NVFUSER_DISABLE_FMA", "PYTORCH_NVFUSER_JIT_OPT_LEVEL", "PYTORCH_NVFUSER_DISABLE_RNG_UNROLL", "PYTORCH_NVFUSER_DISABLE_FASTMATH", "PYTORCH_NO_CUDA_MEMORY_CACHING"]
|
python
| 6 | 0 | |
controllers/composable_controller.go
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"encoding/json"
"fmt"
"os"
"reflect"
"time"
"github.com/go-logr/logr"
ibmcloudv1alpha1 "github.com/ibm/composable/api/v1alpha1"
sdk "github.com/ibm/composable/sdk"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
discovery "k8s.io/client-go/discovery"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
const (
path = "path"
kind = "kind"
apiVersion = "apiVersion"
spec = "spec"
status = "status"
state = "state"
controllerName = "Composable-controller"
// FailedStatus composable status
FailedStatus = "Failed"
// PendingStatus - indicates that the Composable object is pending for something
PendingStatus = "Pending"
// OnlineStatus - indicates that Composable successfully created underlying objects
OnlineStatus = "Online"
)
// ComposableReconciler reconciles a Composable object
type composableReconciler struct {
client.Client
log logr.Logger
scheme *runtime.Scheme
controller controller.Controller
resolver sdk.ResolveObject
}
// ManagerSettableReconciler - a Reconciler that can be added to a Manager
type ManagerSettableReconciler interface {
reconcile.Reconciler
SetupWithManager(mgr ctrl.Manager) error
}
var _ ManagerSettableReconciler = &composableReconciler{}
// NewReconciler returns a composableReconciler wired to the manager's client and
// scheme, using a discovery-based Kubernetes resource resolver.
func NewReconciler(mgr ctrl.Manager) ManagerSettableReconciler {
cfg := mgr.GetConfig()
return &composableReconciler{
Client: mgr.GetClient(),
log: ctrl.Log.WithName("controllers").WithName("Composable"),
scheme: mgr.GetScheme(),
resolver: sdk.KubernetesResourceResolver{
Client: mgr.GetClient(),
ResourcesClient: discovery.NewDiscoveryClientForConfigOrDie(cfg),
},
}
}
func (r *composableReconciler) getController() controller.Controller {
return r.controller
}
func (r *composableReconciler) setController(controller controller.Controller) {
r.controller = controller
}
// Reconcile loop method
// +kubebuilder:rbac:groups=*,resources=*,verbs=*
// +kubebuilder:rbac:groups=ibmcloud.ibm.com,resources=composables/status,verbs=get;list;watch;create;update;patch;delete
func (r *composableReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
_ = context.Background()
_ = r.log.WithValues("composable", req.NamespacedName)
r.log.Info("Starting reconcile loop", "request", req)
defer r.log.Info("Finish reconcile loop", "request", req)
// Fetch the Composable instance
compInstance := &ibmcloudv1alpha1.Composable{}
err := r.Get(context.TODO(), req.NamespacedName, compInstance)
if err != nil {
if errors.IsNotFound(err) {
// Object not found, return.
// For additional cleanup logic use finalizers.
r.log.Info("Reconciled object is not found, return", "request", req)
return ctrl.Result{}, nil
}
// Error reading the object - requeue the request.
r.log.Error(err, "Get reconciled object returned", "object", req)
return ctrl.Result{}, err
}
status := ibmcloudv1alpha1.ComposableStatus{}
defer func() {
if len(status.Message) == 0 {
status.Message = time.Now().Format(time.RFC850)
}
// Set Composable object Status
if len(status.State) > 0 &&
((status.State != OnlineStatus && !reflect.DeepEqual(status, compInstance.Status)) ||
status.State == OnlineStatus && compInstance.Status.State != OnlineStatus) {
r.log.V(1).Info("Set status", "desired status", status, "object", req)
compInstance.Status.State = status.State
compInstance.Status.Message = status.Message
if err := r.Status().Update(context.Background(), compInstance); err != nil {
r.log.Info("Error in Update", "request", err.Error())
r.log.Error(err, "Update status", "desired status", status, "object", req, "compInstance", compInstance)
}
}
}()
// Validate the embedded template if Composable's admission control webhook is not running
if os.Getenv("ADMISSION_CONTROL") != "true" {
err := validateComposable(compInstance)
if err != nil {
status.State = FailedStatus
status.Message = "Request is malformed and failed validation. " + err.Error()
return ctrl.Result{}, nil
}
}
// If Status is not set, set it to Pending
if reflect.DeepEqual(compInstance.Status, ibmcloudv1alpha1.ComposableStatus{}) {
status.State = PendingStatus
status.Message = "Creating resource"
}
object, err := r.toJSONFromRaw(compInstance.Spec.Template)
if err != nil {
// we don't print the error, it was done in toJSONFromRaw
status.State = FailedStatus
status.Message = err.Error()
// we cannot return the error, because retries do not help
return ctrl.Result{}, nil
}
	updated, err := r.updateObjectNamespace(object, compInstance.Namespace)
	if err != nil {
		status.State = FailedStatus
		status.Message = err.Error()
		// the template namespace is invalid or metadata is missing; retries will not help
		return ctrl.Result{}, nil
	}
resource := &unstructured.Unstructured{}
resource.Object = make(map[string]interface{})
err = r.resolver.ResolveObject(context.TODO(), updated, &resource.Object)
if err != nil {
status.Message = err.Error()
status.State = FailedStatus
if sdk.IsRefNotFound(err) {
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
	// if createUnderlyingObject encounters errors, it will update the state
status.State = OnlineStatus
return ctrl.Result{}, r.createUnderlyingObject(*resource, compInstance, &status)
}
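// updateObjectNamespace checks that the template's metadata.namespace, when present,
// matches the Composable object's namespace, and defaults it to that namespace otherwise.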
func (r *composableReconciler) updateObjectNamespace(object interface{}, composableNamespace string) (interface{}, error) {
objMap := object.(map[string]interface{})
if _, ok := objMap[sdk.Metadata]; !ok {
err := fmt.Errorf("Failed: Template has no metadata section")
return object, err
}
// the underlying object should be created in the same namespace as the Composable object
if metadata, ok := objMap[sdk.Metadata].(map[string]interface{}); ok {
if ns, ok := metadata[sdk.Namespace]; ok {
if composableNamespace != ns {
err := fmt.Errorf("Failed: Template defines a wrong namespace %v", ns)
return object, err
}
} else {
objMap[sdk.Metadata].(map[string]interface{})[sdk.Namespace] = composableNamespace
r.log.V(1).Info("objMap: ", "is", objMap)
return objMap, nil
}
} else {
err := fmt.Errorf("Failed: Template has an ill-defined metadata section")
return object, err
}
return object, nil
}
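// createUnderlyingObject creates the resource described by the template and registers a watch on its kind, or updates the spec of an already existing instance; failures are recorded in the Composable status.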
func (r *composableReconciler) createUnderlyingObject(resource unstructured.Unstructured,
compInstance *ibmcloudv1alpha1.Composable,
status *ibmcloudv1alpha1.ComposableStatus) error {
name, err := getName(resource.Object)
if err != nil {
status.State = FailedStatus
status.Message = err.Error()
return nil
}
r.log.V(1).Info("Resource name is: "+name, "comName", compInstance.Name)
namespace, err := sdk.GetNamespace(resource.Object)
if err != nil {
status.State = FailedStatus
status.Message = err.Error()
return nil
}
r.log.V(1).Info("Resource namespace is: "+namespace, "comName", compInstance.Name)
apiversion, ok := resource.Object[apiVersion].(string)
if !ok {
err := fmt.Errorf("The template has no apiVersion")
r.log.Error(err, "", "template", resource.Object, "comName", compInstance.Name)
status.State = FailedStatus
status.Message = err.Error()
return nil
}
r.log.V(1).Info("Resource apiversion is: "+apiversion, "comName", compInstance.Name)
kind, ok := resource.Object[kind].(string)
if !ok {
err := fmt.Errorf("The template has no kind")
r.log.Error(err, "", "template", resource.Object, "comName", compInstance.Name)
status.State = FailedStatus
status.Message = err.Error()
return nil
}
r.log.V(1).Info("Resource kind is: " + kind)
if err := controllerutil.SetControllerReference(compInstance, &resource, r.scheme); err != nil {
r.log.Error(err, "SetControllerReference returned error", "resource", resource, "comName", compInstance.Name)
status.State = FailedStatus
status.Message = err.Error()
return nil
}
underlyingObj := &unstructured.Unstructured{}
underlyingObj.SetAPIVersion(apiversion)
underlyingObj.SetKind(kind)
namespaced := types.NamespacedName{Name: name, Namespace: namespace}
r.log.Info("Get underlying resource", "resource", namespaced, "kind", kind, "apiVersion", apiversion)
err = r.Get(context.TODO(), namespaced, underlyingObj)
if err != nil {
if errors.IsNotFound(err) {
r.log.Info("Creating new underlying resource", "resource", namespaced, "kind", kind, "apiVersion", apiversion)
err = r.Create(context.TODO(), &resource)
if err != nil {
r.log.Error(err, "Cannot create new resource", "resource", namespaced, "kind", kind, "apiVersion", apiversion)
status.State = FailedStatus
status.Message = err.Error()
return err
}
// add watcher
err = r.controller.Watch(&source.Kind{Type: underlyingObj}, &handler.EnqueueRequestForOwner{
IsController: true,
OwnerType: &ibmcloudv1alpha1.Composable{},
})
if err != nil {
r.log.Error(err, "Cannot add watcher", "resource", namespaced, "kind", kind, "apiVersion", apiversion)
status.State = FailedStatus
status.Message = err.Error()
return err
}
} else {
r.log.Error(err, "Cannot get resource", "resource", namespaced, "kind", kind, "apiVersion", apiversion)
status.State = FailedStatus
status.Message = err.Error()
return err
}
} else {
// Update the found object and write the result back if there are any changes
if !reflect.DeepEqual(resource.Object[spec], underlyingObj.Object[spec]) {
underlyingObj.Object[spec] = resource.Object[spec]
//r.log.Info("Updating underlying resource spec", "currentSpec", resource.Object[spec], "newSpec", underlyingObj.Object[spec], "resource", namespaced, "kind", kind, "apiVersion", apiversion)
err = r.Update(context.TODO(), underlyingObj)
if err != nil {
status.State = FailedStatus
status.Message = err.Error()
return err
}
}
}
return nil
}
// SetupWithManager adds this controller to the manager
func (r *composableReconciler) SetupWithManager(mgr ctrl.Manager) error {
c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
r.setController(c)
// Watch for changes to Composable
err = c.Watch(&source.Kind{Type: &ibmcloudv1alpha1.Composable{}}, &handler.EnqueueRequestForObject{})
if err != nil {
//klog.Errorf("c.Watch returned %v\n", err)
return err
}
return nil
}
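// toJSONFromRaw unmarshals the raw JSON template into a generic value.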
func (r *composableReconciler) toJSONFromRaw(content *runtime.RawExtension) (interface{}, error) {
var data interface{}
if err := json.Unmarshal(content.Raw, &data); err != nil {
r.log.Error(err, "json.Unmarshal error", "raw data", content.Raw)
return nil, err
}
return data, nil
}
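// getName returns metadata.name from the given object map.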
func getName(obj map[string]interface{}) (string, error) {
metadata := obj[sdk.Metadata].(map[string]interface{})
if name, ok := metadata[sdk.Name]; ok {
return name.(string), nil
}
return "", fmt.Errorf("Failed: Template does not contain name")
}
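// getState returns status.state from the given object map.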
func getState(obj map[string]interface{}) (string, error) {
if status, ok := obj[status].(map[string]interface{}); ok {
if state, ok := status[state]; ok {
return state.(string), nil
}
return "", fmt.Errorf("Failed: Composable doesn't contain status")
}
return "", fmt.Errorf("Failed: Composable doesn't contain state")
}
|
[
"\"ADMISSION_CONTROL\""
] |
[] |
[
"ADMISSION_CONTROL"
] |
[]
|
["ADMISSION_CONTROL"]
|
go
| 1 | 0 | |
junos/resource_application_test.go
|
package junos_test
import (
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
)
func TestAccJunosApplication_basic(t *testing.T) {
if os.Getenv("TESTACC_SWITCH") == "" {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
{
Config: testAccJunosApplicationConfigCreate(),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("junos_application.testacc_app", "protocol", "tcp"),
resource.TestCheckResourceAttr("junos_application.testacc_app", "destination_port", "22"),
),
},
{
Config: testAccJunosApplicationConfigUpdate(),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr("junos_application.testacc_app", "protocol", "tcp"),
resource.TestCheckResourceAttr("junos_application.testacc_app", "destination_port", "22"),
resource.TestCheckResourceAttr("junos_application.testacc_app", "source_port", "1024-65535"),
),
},
{
ResourceName: "junos_application.testacc_app",
ImportState: true,
ImportStateVerify: true,
},
},
})
}
}
func testAccJunosApplicationConfigCreate() string {
return `
resource "junos_application" "testacc_app" {
name = "testacc_app"
protocol = "tcp"
destination_port = 22
}
`
}
func testAccJunosApplicationConfigUpdate() string {
return `
resource "junos_application" "testacc_app" {
name = "testacc_app"
protocol = "tcp"
destination_port = "22"
source_port = "1024-65535"
}
`
}
|
[
"\"TESTACC_SWITCH\""
] |
[] |
[
"TESTACC_SWITCH"
] |
[]
|
["TESTACC_SWITCH"]
|
go
| 1 | 0 | |
train.py
|
#! /usr/bin/env python
"""
This script takes in a configuration file and produces the best model.
The configuration file is a json file and looks like this:
{
"model" : {
"architecture": "Full Yolo",
"input_size": 416,
"anchors": [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
"max_box_per_image": 10,
"labels": ["raccoon"]
},
"train": {
"train_image_folder": "/home/andy/data/raccoon_dataset/images/",
"train_annot_folder": "/home/andy/data/raccoon_dataset/anns/",
"train_times": 10,
"pretrained_weights": "",
"batch_size": 16,
"learning_rate": 1e-4,
"nb_epoch": 50,
"warmup_epochs": 3,
"object_scale": 5.0 ,
"no_object_scale": 1.0,
"coord_scale": 1.0,
"class_scale": 1.0,
"debug": true
},
"valid": {
"valid_image_folder": "",
"valid_annot_folder": "",
"valid_times": 1
}
}
"""
import argparse
import os
import numpy as np
from preprocessing import parse_annotation
from frontend import YOLO
import json
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
argparser = argparse.ArgumentParser(
description='Train and validate YOLO_v2 model on any dataset')
argparser.add_argument(
'-c',
'--conf',
help='path to configuration file')
def _main_(args):
config_path = args.conf
with open(config_path) as config_buffer:
config = json.loads(config_buffer.read())
###############################
# Parse the annotations
###############################
# parse annotations of the training set
train_imgs, train_labels = parse_annotation(config['train']['train_annot_folder'],
config['train']['train_image_folder'],
config['model']['labels'])
# parse annotations of the validation set, if any, otherwise split the training set
if os.path.exists(config['valid']['valid_annot_folder']):
valid_imgs, valid_labels = parse_annotation(config['valid']['valid_annot_folder'],
config['valid']['valid_image_folder'],
config['model']['labels'])
else:
train_valid_split = int(0.8*len(train_imgs))
np.random.shuffle(train_imgs)
valid_imgs = train_imgs[train_valid_split:]
train_imgs = train_imgs[:train_valid_split]
if len(config['model']['labels']) > 0:
overlap_labels = set(config['model']['labels']).intersection(set(train_labels.keys()))
print('Seen labels:\t' + str(train_labels))
print('Given labels:\t' + str(config['model']['labels']))
print('Overlap labels:\t' + str(overlap_labels))
if len(overlap_labels) < len(config['model']['labels']):
print('Some labels have no annotations! Please revise the list of labels in the config.json file!')
return
else:
print('No labels are provided. Train on all seen labels.')
config['model']['labels'] = list(train_labels.keys())
###############################
# Construct the model
###############################
yolo = YOLO(architecture = config['model']['architecture'],
input_size = config['model']['input_size'],
labels = config['model']['labels'],
max_box_per_image = config['model']['max_box_per_image'],
anchors = config['model']['anchors'])
###############################
# Load the pretrained weights (if any)
###############################
if os.path.exists(config['train']['pretrained_weights']):
print("Loading pre-trained weights in" + str(config['train']['pretrained_weights']))
yolo.load_weights(config['train']['pretrained_weights'])
###############################
# Start the training process
###############################
yolo.train(train_imgs = train_imgs,
valid_imgs = valid_imgs,
train_times = config['train']['train_times'],
valid_times = config['valid']['valid_times'],
nb_epoch = config['train']['nb_epoch'],
learning_rate = config['train']['learning_rate'],
batch_size = config['train']['batch_size'],
warmup_epochs = config['train']['warmup_epochs'],
object_scale = config['train']['object_scale'],
no_object_scale = config['train']['no_object_scale'],
coord_scale = config['train']['coord_scale'],
class_scale = config['train']['class_scale'],
saved_weights_name = config['train']['saved_weights_name'],
debug = config['train']['debug'])
if __name__ == '__main__':
args = argparser.parse_args()
_main_(args)
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
|
python
| 2 | 0 | |
python/test/testutil.py
|
#!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test utilities for the Google App Engine Pipeline API."""
# Code originally from:
# http://code.google.com/p/pubsubhubbub/source/browse/trunk/hub/testutil.py
import logging
import os
import sys
import tempfile
class TestSetupMixin(object):
TEST_APP_ID = 'my-app-id'
TEST_VERSION_ID = 'my-version.1234'
def setUp(self):
super(TestSetupMixin, self).setUp()
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import memcache
from google.appengine.api import queueinfo
from google.appengine.datastore import datastore_stub_util
from google.appengine.ext import testbed
from google.appengine.ext.testbed import TASKQUEUE_SERVICE_NAME
before_level = logging.getLogger().getEffectiveLevel()
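# Populate the App Engine runtime environment variables that the SDK stubs expect.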
os.environ['APPLICATION_ID'] = self.TEST_APP_ID
os.environ['CURRENT_VERSION_ID'] = self.TEST_VERSION_ID
os.environ['HTTP_HOST'] = '%s.appspot.com' % self.TEST_APP_ID
os.environ['DEFAULT_VERSION_HOSTNAME'] = os.environ['HTTP_HOST']
os.environ['CURRENT_MODULE_ID'] = 'foo-module'
try:
logging.getLogger().setLevel(100)
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.setup_env(app_id=self.TEST_APP_ID, overwrite=True)
self.testbed.init_memcache_stub()
hr_policy = datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=1)
self.testbed.init_datastore_v3_stub(consistency_policy=hr_policy)
self.testbed.init_taskqueue_stub()
root_path = os.path.realpath(os.path.dirname(__file__))
# Actually need to flush, even though we've reallocated. Maybe because the
# memcache stub's cache is at the module level, not the API stub?
memcache.flush_all()
finally:
logging.getLogger().setLevel(before_level)
define_queues = ['other']
taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')
taskqueue_stub.queue_yaml_parser = (
lambda x: queueinfo.LoadSingleQueue(
'queue:\n- name: default\n rate: 1/s\n' +
'\n'.join('- name: %s\n rate: 1/s' % name
for name in define_queues)))
def tearDown(self):
super(TestSetupMixin, self).tearDown()
self.testbed.deactivate()
|
[] |
[] |
[
"HTTP_HOST",
"APPLICATION_ID",
"DEFAULT_VERSION_HOSTNAME",
"CURRENT_VERSION_ID",
"CURRENT_MODULE_ID"
] |
[]
|
["HTTP_HOST", "APPLICATION_ID", "DEFAULT_VERSION_HOSTNAME", "CURRENT_VERSION_ID", "CURRENT_MODULE_ID"]
|
python
| 5 | 0 | |
redis_wrapper.go
|
package rmq
import (
"fmt"
"os"
rdebug "runtime/debug"
"strconv"
"strings"
"time"
"github.com/go-redis/redis"
)
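// RedisWrapper is a thin wrapper around *redis.Client whose methods report success as a boolean via checkErr.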
type RedisWrapper struct {
rawClient *redis.Client
}
func (wrapper RedisWrapper) Set(key string, value string, expiration time.Duration) bool {
return checkErr(wrapper.rawClient.Set(key, value, expiration).Err())
}
func (wrapper RedisWrapper) Del(key string) (affected int, ok bool) {
n, err := wrapper.rawClient.Del(key).Result()
ok = checkErr(err)
if !ok {
return 0, false
}
return int(n), ok
}
func (wrapper RedisWrapper) TTL(key string) (ttl time.Duration, ok bool) {
ttl, err := wrapper.rawClient.TTL(key).Result()
ok = checkErr(err)
if !ok {
return 0, false
}
return ttl, ok
}
func (wrapper RedisWrapper) LPush(key, value string) bool {
return checkErr(wrapper.rawClient.LPush(key, value).Err())
}
func (wrapper RedisWrapper) LLen(key string) (affected int, ok bool) {
n, err := wrapper.rawClient.LLen(key).Result()
ok = checkErr(err)
if !ok {
return 0, false
}
return int(n), ok
}
func (wrapper RedisWrapper) LRem(key string, count int, value string) (affected int, ok bool) {
n, err := wrapper.rawClient.LRem(key, int64(count), value).Result()
return int(n), checkErr(err)
}
func (wrapper RedisWrapper) LTrim(key string, start, stop int) {
checkErr(wrapper.rawClient.LTrim(key, int64(start), int64(stop)).Err())
}
func (wrapper RedisWrapper) RPopLPush(source, destination string) (value string, ok bool) {
value, err := wrapper.rawClient.RPopLPush(source, destination).Result()
return value, checkErr(err)
}
func (wrapper RedisWrapper) SAdd(key, value string) bool {
return checkErr(wrapper.rawClient.SAdd(key, value).Err())
}
func (wrapper RedisWrapper) SMembers(key string) []string {
members, err := wrapper.rawClient.SMembers(key).Result()
if ok := checkErr(err); !ok {
return []string{}
}
return members
}
func (wrapper RedisWrapper) SRem(key, value string) (affected int, ok bool) {
n, err := wrapper.rawClient.SRem(key, value).Result()
ok = checkErr(err)
if !ok {
return 0, false
}
return int(n), ok
}
func (wrapper RedisWrapper) FlushDb() {
wrapper.rawClient.FlushDb()
}
// checkErr returns true if there is no error, false if the error is redis.Nil, and logs the error and returns false in all other cases
func checkErr(err error) (ok bool) {
switch err {
case nil:
return true
case redis.Nil:
return false
default:
loggingJSON, _ := strconv.ParseBool(os.Getenv("loggingJSON"))
if loggingJSON {
stack := strings.Replace(strings.Replace(string(rdebug.Stack()), "\n", " ", -1), "\t", "", -1)
fmt.Printf(`{"timestamp":"%s","logger":"Quiq.rmq","level":"ERROR","message":"%s","stack":"%s"}`+"\n", time.Now().Format(time.RFC3339Nano), err, stack)
} else {
fmt.Printf("rmq redis error: %s\n", err)
}
return false
}
}
|
[
"\"loggingJSON\""
] |
[] |
[
"loggingJSON"
] |
[]
|
["loggingJSON"]
|
go
| 1 | 0 | |
projects/gateway/pkg/syncer/setup_syncer.go
|
package syncer
import (
"context"
"net/http"
"os"
"time"
"github.com/solo-io/gloo/projects/gateway/pkg/reconciler"
"go.uber.org/zap"
"github.com/solo-io/gloo/projects/gateway/pkg/services/k8sadmisssion"
"github.com/solo-io/gloo/projects/gateway/pkg/translator"
gatewayvalidation "github.com/solo-io/gloo/projects/gateway/pkg/validation"
"github.com/gogo/protobuf/types"
"github.com/solo-io/gloo/pkg/utils"
v1 "github.com/solo-io/gloo/projects/gateway/pkg/api/v1"
"github.com/solo-io/gloo/projects/gateway/pkg/defaults"
"github.com/solo-io/gloo/projects/gateway/pkg/propagator"
"github.com/solo-io/gloo/projects/gloo/pkg/api/grpc/validation"
gloov1 "github.com/solo-io/gloo/projects/gloo/pkg/api/v1"
"github.com/solo-io/gloo/projects/gloo/pkg/bootstrap"
gloodefaults "github.com/solo-io/gloo/projects/gloo/pkg/defaults"
"github.com/solo-io/go-utils/contextutils"
"github.com/solo-io/go-utils/errutils"
"github.com/solo-io/solo-kit/pkg/api/v1/clients"
"github.com/solo-io/solo-kit/pkg/api/v1/clients/kube"
"github.com/solo-io/solo-kit/pkg/api/v1/clients/memory"
"github.com/solo-io/solo-kit/pkg/api/v2/reporter"
"github.com/solo-io/solo-kit/pkg/errors"
"k8s.io/client-go/rest"
)
// TODO: switch AcceptAllResourcesByDefault to false after validation has been tested in user environments
var AcceptAllResourcesByDefault = true
// TODO: expose AllowMissingLinks as a setting
var AllowMissingLinks = true
func Setup(ctx context.Context, kubeCache kube.SharedCache, inMemoryCache memory.InMemoryResourceCache, settings *gloov1.Settings) error {
var (
cfg *rest.Config
)
consulClient, err := bootstrap.ConsulClientForSettings(ctx, settings)
if err != nil {
return err
}
params := bootstrap.NewConfigFactoryParams(
settings,
inMemoryCache,
kubeCache,
&cfg,
consulClient,
)
proxyFactory, err := bootstrap.ConfigFactoryForSettings(params, gloov1.ProxyCrd)
if err != nil {
return err
}
virtualServiceFactory, err := bootstrap.ConfigFactoryForSettings(params, v1.VirtualServiceCrd)
if err != nil {
return err
}
routeTableFactory, err := bootstrap.ConfigFactoryForSettings(params, v1.RouteTableCrd)
if err != nil {
return err
}
gatewayFactory, err := bootstrap.ConfigFactoryForSettings(params, v1.GatewayCrd)
if err != nil {
return err
}
refreshRate, err := types.DurationFromProto(settings.RefreshRate)
if err != nil {
return err
}
writeNamespace := settings.DiscoveryNamespace
if writeNamespace == "" {
writeNamespace = gloodefaults.GlooSystem
}
watchNamespaces := utils.ProcessWatchNamespaces(settings.WatchNamespaces, writeNamespace)
var validation *translator.ValidationOpts
validationCfg := settings.GetGateway().GetValidation()
if validationCfg != nil {
alwaysAcceptResources := AcceptAllResourcesByDefault
if alwaysAccept := validationCfg.AlwaysAccept; alwaysAccept != nil {
alwaysAcceptResources = alwaysAccept.GetValue()
}
allowMissingLinks := AllowMissingLinks
validation = &translator.ValidationOpts{
ProxyValidationServerAddress: validationCfg.GetProxyValidationServerAddr(),
ValidatingWebhookPort: defaults.ValidationWebhookBindPort,
ValidatingWebhookCertPath: validationCfg.GetValidationWebhookTlsCert(),
ValidatingWebhookKeyPath: validationCfg.GetValidationWebhookTlsKey(),
IgnoreProxyValidationFailure: validationCfg.GetIgnoreGlooValidationFailure(),
AlwaysAcceptResources: alwaysAcceptResources,
AllowMissingLinks: allowMissingLinks,
}
if validation.ProxyValidationServerAddress == "" {
validation.ProxyValidationServerAddress = defaults.GlooProxyValidationServerAddr
}
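// Allow the proxy validation server address to be overridden via the PROXY_VALIDATION_ADDR environment variable.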
if overrideAddr := os.Getenv("PROXY_VALIDATION_ADDR"); overrideAddr != "" {
validation.ProxyValidationServerAddress = overrideAddr
}
if validation.ValidatingWebhookCertPath == "" {
validation.ValidatingWebhookCertPath = defaults.ValidationWebhookTlsCertPath
}
if validation.ValidatingWebhookKeyPath == "" {
validation.ValidatingWebhookKeyPath = defaults.ValidationWebhookTlsKeyPath
}
} else {
if validationMustStart := os.Getenv("VALIDATION_MUST_START"); validationMustStart != "" && validationMustStart != "false" {
return errors.Errorf("VALIDATION_MUST_START was set to true, but no validation configuration was provided in the settings. "+
"Ensure the v1.Settings %v contains the spec.gateway.validation config", settings.GetMetadata().Ref())
}
}
opts := translator.Opts{
WriteNamespace: writeNamespace,
WatchNamespaces: watchNamespaces,
Gateways: gatewayFactory,
VirtualServices: virtualServiceFactory,
RouteTables: routeTableFactory,
Proxies: proxyFactory,
WatchOpts: clients.WatchOpts{
Ctx: ctx,
RefreshRate: refreshRate,
},
DevMode: true,
ReadGatewaysFromAllNamespaces: settings.GetGateway().GetReadGatewaysFromAllNamespaces(),
Validation: validation,
}
return RunGateway(opts)
}
func RunGateway(opts translator.Opts) error {
opts.WatchOpts = opts.WatchOpts.WithDefaults()
opts.WatchOpts.Ctx = contextutils.WithLogger(opts.WatchOpts.Ctx, "gateway")
ctx := opts.WatchOpts.Ctx
gatewayClient, err := v1.NewGatewayClient(opts.Gateways)
if err != nil {
return err
}
if err := gatewayClient.Register(); err != nil {
return err
}
virtualServiceClient, err := v1.NewVirtualServiceClient(opts.VirtualServices)
if err != nil {
return err
}
if err := virtualServiceClient.Register(); err != nil {
return err
}
routeTableClient, err := v1.NewRouteTableClient(opts.RouteTables)
if err != nil {
return err
}
if err := routeTableClient.Register(); err != nil {
return err
}
proxyClient, err := gloov1.NewProxyClient(opts.Proxies)
if err != nil {
return err
}
if err := proxyClient.Register(); err != nil {
return err
}
rpt := reporter.NewReporter("gateway", gatewayClient.BaseClient(), virtualServiceClient.BaseClient(), routeTableClient.BaseClient())
writeErrs := make(chan error)
prop := propagator.NewPropagator("gateway", gatewayClient, virtualServiceClient, proxyClient, writeErrs)
txlator := translator.NewDefaultTranslator(opts)
var (
// this constructor should be called within a lock
validationClient validation.ProxyValidationServiceClient
ignoreProxyValidationFailure bool
allowMissingLinks bool
)
// construct the channel that resyncs the API Translator loop
// when the validation server sends a notification.
// this tells Gateway that the validation snapshot has changed
notifications := make(<-chan struct{})
if opts.Validation != nil {
validationClient, err = gatewayvalidation.NewConnectionRefreshingValidationClient(
gatewayvalidation.RetryOnUnavailableClientConstructor(ctx, opts.Validation.ProxyValidationServerAddress),
)
if err != nil {
return errors.Wrapf(err, "failed to initialize grpc connection to validation server.")
}
notifications, err = gatewayvalidation.MakeNotificationChannel(ctx, validationClient)
if err != nil {
return errors.Wrapf(err, "failed to read notifications from stream")
}
ignoreProxyValidationFailure = opts.Validation.IgnoreProxyValidationFailure
allowMissingLinks = opts.Validation.AllowMissingLinks
}
emitter := v1.NewApiEmitterWithEmit(virtualServiceClient, routeTableClient, gatewayClient, notifications)
validationSyncer := gatewayvalidation.NewValidator(gatewayvalidation.NewValidatorConfig(
txlator,
validationClient,
opts.WriteNamespace,
ignoreProxyValidationFailure,
allowMissingLinks,
))
proxyReconciler := reconciler.NewProxyReconciler(validationClient, proxyClient)
translatorSyncer := NewTranslatorSyncer(
opts.WriteNamespace,
proxyClient,
proxyReconciler,
gatewayClient,
virtualServiceClient,
rpt,
prop,
txlator)
gatewaySyncers := v1.ApiSyncers{
translatorSyncer,
validationSyncer,
}
eventLoop := v1.NewApiEventLoop(emitter, gatewaySyncers)
eventLoopErrs, err := eventLoop.Run(opts.WatchNamespaces, opts.WatchOpts)
if err != nil {
return err
}
go errutils.AggregateErrs(ctx, writeErrs, eventLoopErrs, "event_loop")
logger := contextutils.LoggerFrom(ctx)
go func() {
for {
select {
case err := <-writeErrs:
logger.Errorf("error: %v", err)
case <-ctx.Done():
return
}
}
}()
validationServerErr := make(chan error, 1)
if opts.Validation != nil {
validationWebhook, err := k8sadmisssion.NewGatewayValidatingWebhook(
k8sadmisssion.NewWebhookConfig(
ctx,
validationSyncer,
opts.WatchNamespaces,
opts.Validation.ValidatingWebhookPort,
opts.Validation.ValidatingWebhookCertPath,
opts.Validation.ValidatingWebhookKeyPath,
opts.Validation.AlwaysAcceptResources,
),
)
if err != nil {
return errors.Wrapf(err, "creating validating webhook")
}
go func() {
// close out validation server when context is cancelled
<-ctx.Done()
validationWebhook.Close()
}()
go func() {
contextutils.LoggerFrom(ctx).Infow("starting gateway validation server",
zap.Int("port", opts.Validation.ValidatingWebhookPort),
zap.String("cert", opts.Validation.ValidatingWebhookCertPath),
zap.String("key", opts.Validation.ValidatingWebhookKeyPath),
)
if err := validationWebhook.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed {
select {
case validationServerErr <- err:
default:
logger.DPanicw("failed to start validation webhook server", zap.Error(err))
}
}
}()
}
// give the validation server 100ms to start
select {
case err := <-validationServerErr:
return errors.Wrapf(err, "failed to start validation webhook server")
case <-time.After(time.Millisecond * 100):
}
return nil
}
|
[
"\"PROXY_VALIDATION_ADDR\"",
"\"VALIDATION_MUST_START\""
] |
[] |
[
"VALIDATION_MUST_START",
"PROXY_VALIDATION_ADDR"
] |
[]
|
["VALIDATION_MUST_START", "PROXY_VALIDATION_ADDR"]
|
go
| 2 | 0 | |
cmd/main.go
|
/*
* MinIO Client (C) 2014, 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bufio"
"bytes"
"fmt"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/cheggaaa/pb"
"github.com/minio/cli"
"github.com/minio/mc/pkg/console"
"github.com/minio/mc/pkg/probe"
"github.com/minio/minio/pkg/words"
"github.com/pkg/profile"
"golang.org/x/crypto/ssh/terminal"
completeinstall "github.com/posener/complete/cmd/install"
)
var (
// global flags for mc.
mcFlags = []cli.Flag{}
)
// Help template for mc
var mcHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...]
COMMANDS:
{{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
{{end}}{{if .VisibleFlags}}
GLOBAL FLAGS:
{{range .VisibleFlags}}{{.}}
{{end}}{{end}}
VERSION:
` + ReleaseTag +
`{{ "\n"}}{{range $key, $value := ExtraInfo}}
{{$key}}:
{{$value}}
{{end}}`
// Main starts mc application
func Main(args []string) {
if len(args) > 1 {
switch args[1] {
case "mc", "-install", "-uninstall":
mainComplete()
return
}
}
// Enable profiling supported modes are [cpu, mem, block].
// ``MC_PROFILER`` supported options are [cpu, mem, block].
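// For example, running mc with MC_PROFILER=cpu writes a CPU profile under the mc profile directory on exit.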
switch os.Getenv("MC_PROFILER") {
case "cpu":
defer profile.Start(profile.CPUProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
case "mem":
defer profile.Start(profile.MemProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
case "block":
defer profile.Start(profile.BlockProfile, profile.ProfilePath(mustGetProfileDir())).Stop()
}
probe.Init() // Set project's root source path.
probe.SetAppInfo("Release-Tag", ReleaseTag)
probe.SetAppInfo("Commit", ShortCommitID)
// Fetch terminal size, if not available, automatically
// set globalQuiet to true.
if w, e := pb.GetTerminalWidth(); e != nil {
globalQuiet = true
} else {
globalTermWidth = w
}
// Set the mc app name.
appName := filepath.Base(args[0])
// Run the app - exit on error.
if err := registerApp(appName).Run(args); err != nil {
os.Exit(1)
}
}
// Function invoked when invalid command is passed.
func commandNotFound(ctx *cli.Context, command string) {
msg := fmt.Sprintf("`%s` is not a mc command. See `mc --help`.", command)
closestCommands := findClosestCommands(command)
if len(closestCommands) > 0 {
msg += fmt.Sprintf("\n\nDid you mean one of these?\n")
if len(closestCommands) == 1 {
cmd := closestCommands[0]
msg += fmt.Sprintf(" `%s`", cmd)
} else {
for _, cmd := range closestCommands {
msg += fmt.Sprintf(" `%s`\n", cmd)
}
}
}
fatalIf(errDummy().Trace(), msg)
}
// Check for sane config environment early on and gracefully report.
func checkConfig() {
// Refresh the config once.
loadMcConfig = loadMcConfigFactory()
// Ensures config file is sane.
config, err := loadMcConfig()
// Verify that the path is accessible before validating the config
fatalIf(err.Trace(mustGetMcConfigPath()), "Unable to access configuration file.")
// Validate and print error messages
ok, errMsgs := validateConfigFile(config)
if !ok {
var errorMsg bytes.Buffer
for index, errMsg := range errMsgs {
// Print at most 10 errors
if index > 10 {
break
}
errorMsg.WriteString(errMsg + "\n")
}
console.Fatal(errorMsg.String())
}
}
func migrate() {
// Fix broken config files if any.
fixConfig()
// Migrate config files if any.
migrateConfig()
// Migrate session files if any.
migrateSession()
// Migrate shared urls if any.
migrateShare()
}
// Get os/arch/platform specific information.
// Returns a map of current os/arch/platform/memstats.
func getSystemData() map[string]string {
host, e := os.Hostname()
fatalIf(probe.NewError(e), "Unable to determine the hostname.")
memstats := &runtime.MemStats{}
runtime.ReadMemStats(memstats)
mem := fmt.Sprintf("Used: %s | Allocated: %s | UsedHeap: %s | AllocatedHeap: %s",
pb.Format(int64(memstats.Alloc)).To(pb.U_BYTES),
pb.Format(int64(memstats.TotalAlloc)).To(pb.U_BYTES),
pb.Format(int64(memstats.HeapAlloc)).To(pb.U_BYTES),
pb.Format(int64(memstats.HeapSys)).To(pb.U_BYTES))
platform := fmt.Sprintf("Host: %s | OS: %s | Arch: %s", host, runtime.GOOS, runtime.GOARCH)
goruntime := fmt.Sprintf("Version: %s | CPUs: %s", runtime.Version(), strconv.Itoa(runtime.NumCPU()))
return map[string]string{
"PLATFORM": platform,
"RUNTIME": goruntime,
"MEM": mem,
}
}
// initMC - initialize 'mc'.
func initMC() {
// Check if mc config exists.
if !isMcConfigExists() {
err := saveMcConfig(newMcConfig())
fatalIf(err.Trace(), "Unable to save new mc config.")
if !globalQuiet && !globalJSON {
console.Infoln("Configuration written to `" + mustGetMcConfigPath() + "`. Please update your access credentials.")
}
}
// Check if mc session directory exists.
if !isSessionDirExists() {
fatalIf(createSessionDir().Trace(), "Unable to create session config directory.")
}
// Check if mc share directory exists.
if !isShareDirExists() {
initShareConfig()
}
// Check if certs dir exists
if !isCertsDirExists() {
fatalIf(createCertsDir().Trace(), "Unable to create `CAs` directory.")
}
// Check if CAs dir exists
if !isCAsDirExists() {
fatalIf(createCAsDir().Trace(), "Unable to create `CAs` directory.")
}
// Load all authority certificates present in CAs dir
loadRootCAs()
}
func installAutoCompletion(ctx *cli.Context) {
if ctx.Bool("no-autocompletion") || ctx.GlobalBool("no-autocompletion") {
return
}
if globalQuiet || globalJSON || !terminal.IsTerminal(int(os.Stdout.Fd())) {
return
}
if runtime.GOOS == "windows" {
return
}
if completeinstall.IsInstalled("mc") {
return
}
for {
fmt.Printf("Install mc auto-completion in your shell ? (y/n): ")
reader := bufio.NewReader(os.Stdin)
char, _, err := reader.ReadRune()
if err != nil {
continue
}
switch char {
case 'y', 'Y':
// Install mc completion, ignore any error for now
err := completeinstall.Install("mc")
if err != nil {
errorIf(probe.NewError(err), "Unable to install mc auto-completion.")
} else {
console.Infoln("Auto-completion installed! Kindly restart your shell to load it.")
}
fallthrough
case 'n', 'N':
return
}
}
}
func registerBefore(ctx *cli.Context) error {
// Check if mc was compiled using a supported version of Golang.
checkGoVersion()
// Set the config directory.
setMcConfigDir(ctx.GlobalString("config-dir"))
// Migrate any old version of config / state files to newer format.
migrate()
// Set global flags.
setGlobalsFromContext(ctx)
// Initialize default config files.
initMC()
// Check if config can be read.
checkConfig()
// Install shell completions
installAutoCompletion(ctx)
return nil
}
// findClosestCommands to match a given string with commands trie tree.
func findClosestCommands(command string) []string {
var closestCommands []string
for _, value := range commandsTree.PrefixMatch(command) {
closestCommands = append(closestCommands, value.(string))
}
sort.Strings(closestCommands)
// Suggest other close commands - allow missed, wrongly added and even transposed characters
for _, value := range commandsTree.Walk(commandsTree.Root()) {
if sort.SearchStrings(closestCommands, value.(string)) < len(closestCommands) {
continue
}
// 2 is arbitrary and represents the max allowed number of typed errors
if words.DamerauLevenshteinDistance(command, value.(string)) < 2 {
closestCommands = append(closestCommands, value.(string))
}
}
return closestCommands
}
// Check for updates and print a notification message
func checkUpdate(ctx *cli.Context) {
// Do not print update messages, if quiet flag is set.
if ctx.Bool("quiet") || ctx.GlobalBool("quiet") {
// Its OK to ignore any errors during doUpdate() here.
if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2 * time.Second); err == nil {
printMsg(updateMessage{
Status: "success",
Message: updateMsg,
})
} else {
printMsg(updateMessage{
Status: "success",
Message: prepareUpdateMessage("Run `mc update`", latestReleaseTime.Sub(currentReleaseTime)),
})
}
}
}
var appCmds = []cli.Command{
lsCmd,
mbCmd,
rbCmd,
catCmd,
headCmd,
pipeCmd,
shareCmd,
cpCmd,
mirrorCmd,
findCmd,
sqlCmd,
statCmd,
diffCmd,
rmCmd,
eventCmd,
watchCmd,
policyCmd,
adminCmd,
sessionCmd,
configCmd,
updateCmd,
versionCmd,
}
func registerApp(name string) *cli.App {
for _, cmd := range appCmds {
registerCmd(cmd)
}
cli.HelpFlag = cli.BoolFlag{
Name: "help, h",
Usage: "show help",
}
cli.BashCompletionFlag = cli.BoolFlag{
Name: "compgen",
Usage: "enables bash-completion for all commands and subcommands",
Hidden: true,
}
app := cli.NewApp()
app.Name = name
app.Action = func(ctx *cli.Context) {
if strings.HasPrefix(ReleaseTag, "RELEASE.") {
// Check for new updates from dl.min.io.
checkUpdate(ctx)
}
cli.ShowAppHelp(ctx)
}
app.Before = registerBefore
app.ExtraInfo = func() map[string]string {
if globalDebug {
return getSystemData()
}
return make(map[string]string)
}
app.HideHelpCommand = true
app.Usage = "MinIO Client for cloud storage and filesystems."
app.Commands = commands
app.Author = "MinIO, Inc."
app.Version = ReleaseTag
app.Flags = append(mcFlags, globalFlags...)
app.CustomAppHelpTemplate = mcHelpTemplate
app.CommandNotFound = commandNotFound // handler function declared above.
app.EnableBashCompletion = true
return app
}
// mustGetProfileDir returns the directory that the profile will be written to.
func mustGetProfileDir() string {
return filepath.Join(mustGetMcConfigDir(), globalProfileDir)
}
|
[
"\"MC_PROFILER\""
] |
[] |
[
"MC_PROFILER"
] |
[]
|
["MC_PROFILER"]
|
go
| 1 | 0 | |
internal/docker/utils/module_utils.go
|
package utils
import (
"errors"
"fmt"
"github.com/docker/docker/api/types"
"github.com/spf13/viper"
"os"
"strings"
)
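// BuildModuleEnvVars appends the internal token and the controller connection settings (read from environment variables) to the module's environment variable list.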
func BuildModuleEnvVars(vars *[]string) {
tokenVar := fmt.Sprintf("INTERNAL_TOKEN=%s", viper.GetString("internaltoken"))
*vars = append(*vars, tokenVar)
controllerVar := fmt.Sprintf("CONTROLLER_SVC_NAME=%s", os.Getenv("CONTROLLER_SVC_NAME"))
*vars = append(*vars, controllerVar)
controllerPortVar := fmt.Sprintf("CONTROLLER_PORT=%s", os.Getenv("CONTROLLER_PORT"))
*vars = append(*vars, controllerPortVar)
hostnameVar := fmt.Sprintf("HOST_DOMAIN=%s", os.Getenv("HOST_DOMAIN"))
*vars = append(*vars, hostnameVar)
}
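// AddModuleBindPaths prefixes every bind path with PROJECT_ROOT/<moduleId>.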
func AddModuleBindPaths(binds *[]string, moduleId string) {
projectRoot := os.Getenv("PROJECT_ROOT")
for i, bind := range *binds {
(*binds)[i] = fmt.Sprintf("%s/%s/%s", projectRoot, moduleId, bind)
}
}
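// AddModuleNetwork returns the name of the first network whose name ends with MODULE_NETWORK_NAME.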
func AddModuleNetwork(networks []types.NetworkResource) (string, error) {
for _, val := range networks {
if strings.HasSuffix(val.Name, os.Getenv("MODULE_NETWORK_NAME")) {
return val.Name, nil
}
}
return "", errors.New("couldn't extract name of network")
}
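// ValidImageRef reports whether the image reference starts with the configured DOCKER_PREFIX.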
func ValidImageRef(imageRef string) bool {
return strings.HasPrefix(imageRef, os.Getenv("DOCKER_PREFIX"))
}
|
[
"\"CONTROLLER_SVC_NAME\"",
"\"CONTROLLER_PORT\"",
"\"HOST_DOMAIN\"",
"\"PROJECT_ROOT\"",
"\"MODULE_NETWORK_NAME\"",
"\"DOCKER_PREFIX\""
] |
[] |
[
"CONTROLLER_SVC_NAME",
"CONTROLLER_PORT",
"HOST_DOMAIN",
"PROJECT_ROOT",
"MODULE_NETWORK_NAME",
"DOCKER_PREFIX"
] |
[]
|
["CONTROLLER_SVC_NAME", "CONTROLLER_PORT", "HOST_DOMAIN", "PROJECT_ROOT", "MODULE_NETWORK_NAME", "DOCKER_PREFIX"]
|
go
| 6 | 0 | |
buffer/in-vicinity-python/uno/uinit.py
|
#-*- coding : utf-8 -*-
# file : import uinit
import os
import sys
def import_uno():
# Add the URE_BOOTSTRAP environment variable #3
os.environ['URE_BOOTSTRAP'] = 'vnd.sun.star.pathname:c:\Program Files\OpenOffice.org 3\program\\fundamental.ini'
# Add the UNO_PATH environment variable #4
os.environ['UNO_PATH'] = 'c:\Program Files\OpenOffice.org 3\program\\'
# Add the PATH environment variable, but weed the duplicates first #5
new_paths_string = 'c:\Program Files\OpenOffice.org 3\\URE\\bin;c:\Program Files\OpenOffice.org 3\Basis\program;C:\WINDOWS\system32;C:\WINDOWS;C:\WINDOWS\System32\\Wbem;C:\Program Files\Common Files\Intuit\QBPOSSDKRuntime'
new_paths = new_paths_string.split(';')
existing_paths = os.environ['PATH'].split(';')
for path in new_paths:
if path not in existing_paths:
existing_paths.append(path)
os.environ['PATH'] = ';'.join(existing_paths)
# Add the uno location to PYTHONPATH #6
sys.path.append('C:\\Program Files\\OpenOffice.org 3\\Basis\\program')
return
''' converter helper functions '''
def getPdfProps():
props = []
prop = PropertyValue()
prop.Name = "FilterName"
prop.Value = "writer_pdf_Export"
props.append(prop)
return tuple(props)
def getHtmlProps():
props = [] # this is what gets passed
# build up the property entry
prop = PropertyValue()
prop.Name = "FilterName"
prop.Value = "HTML (StarWriter)"
# append it
props.append(prop)
# build up the property entry
prop = PropertyValue()
prop.Name = "FilterName"
prop.Value = "writer_html_Export"
# append it
props.append(prop)
return tuple(props)
import_uno()
import uno
from com.sun.star.beans import PropertyValue
|
[] |
[] |
[
"URE_BOOTSTRAP",
"UNO_PATH",
"PATH"
] |
[]
|
["URE_BOOTSTRAP", "UNO_PATH", "PATH"]
|
python
| 3 | 0 | |
pkg/wal/wal_test.go
|
package wal
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"reflect"
"strings"
"sync"
"testing"
"time"
"github.com/oneconcern/datamon/pkg/model"
gcsStorage "cloud.google.com/go/storage"
irand "github.com/oneconcern/datamon/internal/rand"
"github.com/oneconcern/datamon/pkg/storage"
"github.com/oneconcern/datamon/pkg/storage/gcs"
"github.com/oneconcern/datamon/pkg/storage/localfs"
"github.com/segmentio/ksuid"
"github.com/spf13/afero"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"google.golang.org/api/option"
)
const (
longPath = "this/is/a/long/path/to/an/object/the/object/is/under/this/path/list/with/prefix/please/"
mutable = "mutable"
wal = "wal"
payload = "payload"
attrError = "getAttrErrorTest"
putError = "putErrorTest"
touchError = "touchErrorTest"
)
func constStringWithIndex(i int) string {
return longPath + fmt.Sprint(i)
}
func randSleep() {
r := rand.Intn(200) //#nosec
time.Sleep(time.Duration(r)*time.Millisecond + 1) // min 1 ms
}
func setup(t testing.TB, numOfObjects int) (storage.Store, func()) {
ctx := context.Background()
bucket := "deleteme-wal-test-" + irand.LetterString(15)
log.Printf("Created bucket %s ", bucket)
client, err := gcsStorage.NewClient(context.TODO(), option.WithScopes(gcsStorage.ScopeFullControl))
require.NoError(t, err)
err = client.Bucket(bucket).Create(ctx, "onec-co", nil)
require.NoError(t, err, "Failed to create bucket:"+bucket)
gcs, err := gcs.New(context.TODO(), bucket, "") // Use GOOGLE_APPLICATION_CREDENTIALS env variable
require.NoError(t, err, "failed to create gcs client")
wg := sync.WaitGroup{}
create := func(i int, wg *sync.WaitGroup) {
err = gcs.Put(ctx, constStringWithIndex(i), bytes.NewBufferString(constStringWithIndex(i)), storage.NoOverWrite)
require.NoError(t, err, "Index at: "+fmt.Sprint(i))
wg.Done()
}
for i := 0; i < numOfObjects; i++ {
// Use path as payload
wg.Add(1)
go create(i, &wg)
}
wg.Wait()
cleanup := func() {
delete := func(key string, wg *sync.WaitGroup) {
err = gcs.Delete(ctx, key)
require.NoError(t, err, "failed to delete:"+key)
wg.Done()
}
wg := sync.WaitGroup{}
for i := 0; i < numOfObjects; i++ {
wg.Add(1)
delete(constStringWithIndex(i), &wg)
}
wg.Wait()
// Delete any keys created outside of setup at the end of test.
var keys []string
keys, err = gcs.Keys(ctx)
for _, k := range keys {
wg.Add(1)
delete(k, &wg)
}
wg.Wait()
log.Printf("Delete bucket %s ", bucket)
err = client.Bucket(bucket).Delete(ctx)
require.NoError(t, err, "Failed to delete bucket:"+bucket)
}
return gcs, cleanup
}
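// mustGetTestLogger returns a zap development logger when the DEBUG_TEST environment variable is set, and a no-op logger otherwise.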
func mustGetTestLogger(t *testing.T) *zap.Logger {
if isDebug := os.Getenv("DEBUG_TEST"); isDebug != "" {
l, err := zap.NewDevelopment()
require.NoError(t, err)
return l
}
return zap.NewNop()
}
func TestNewWAL1(t *testing.T) {
t.Parallel()
type args struct {
mutableStore storage.Store
walStore storage.Store
options []Option
}
l := mustGetTestLogger(t)
s1 := localfs.New(afero.NewOsFs())
s2 := localfs.New(afero.NewBasePathFs(afero.NewOsFs(), "base"))
tests := []struct {
name string
args args
want *WAL
}{
{
name: "all options",
args: args{
mutableStore: s1,
walStore: s2,
options: []Option{MaxConcurrency(11), TokenGeneratorPath("path")},
},
want: &WAL{
mutableStore: s1,
walStore: s2,
l: l,
maxConcurrency: 11,
connectionControl: make(chan struct{}, 11),
tokenGeneratorPath: "path",
},
},
{
name: "path",
args: args{
mutableStore: s1,
walStore: s2,
options: []Option{TokenGeneratorPath("path")},
},
want: &WAL{
mutableStore: s1,
walStore: s2,
l: l,
maxConcurrency: maxConcurrency,
tokenGeneratorPath: "path",
},
},
{
name: "concurrency ",
args: args{
mutableStore: s1,
walStore: s2,
options: []Option{MaxConcurrency(11)},
},
want: &WAL{
mutableStore: s1,
walStore: s2,
l: l,
maxConcurrency: 11,
tokenGeneratorPath: model.TokenGeneratorPath,
},
},
{
name: "default",
args: args{
mutableStore: s1,
walStore: s2,
options: []Option{},
},
want: &WAL{
mutableStore: s1,
walStore: s2,
l: l,
maxConcurrency: maxConcurrency,
tokenGeneratorPath: model.TokenGeneratorPath,
},
},
}
for _, test := range tests {
tt := test
tt.args.options = append(tt.args.options, Logger(l))
t.Run(tt.name, func(t *testing.T) {
if got := New(tt.args.mutableStore, tt.args.walStore, tt.args.options...); !(reflect.DeepEqual(got.maxConcurrency, tt.want.maxConcurrency) ||
reflect.DeepEqual(got.mutableStore, tt.want.mutableStore) ||
reflect.DeepEqual(got.walStore, tt.want.walStore) ||
reflect.DeepEqual(got.l, tt.want.l)) {
t.Errorf("New() = %v, want %v", got, tt.want)
}
})
}
}
func TestWAL_GetToken(t *testing.T) {
t.Parallel()
// Run against the real backend.
mutableStore, cleanupMutable := setup(t, 0)
defer cleanupMutable()
walStore, cleanupWal := setup(t, 0)
defer cleanupWal()
_ = mutableStore.Put(context.Background(), model.TokenGeneratorPath, strings.NewReader(""), storage.OverWrite)
l := mustGetTestLogger(t)
type fields struct {
mutableStore storage.Store
tokenGeneratorPath string
walStore storage.Store
maxConcurrency int
connectionControl chan struct{}
l *zap.Logger
}
type args struct {
ctx context.Context
}
tests := []struct {
name string
fields fields
args args
wantErr bool
}{
{
name: "Get a token",
fields: fields{
mutableStore: mutableStore,
tokenGeneratorPath: model.TokenGeneratorPath,
walStore: walStore,
maxConcurrency: maxConcurrency,
connectionControl: make(chan struct{}),
l: l,
},
args: args{
ctx: context.Background(),
},
wantErr: false,
},
{
name: "Get a second token",
fields: fields{
mutableStore: mutableStore,
tokenGeneratorPath: model.TokenGeneratorPath,
walStore: walStore,
maxConcurrency: maxConcurrency,
connectionControl: make(chan struct{}),
l: l,
},
args: args{
ctx: context.Background(),
},
wantErr: false,
},
{
name: "Get a third token",
fields: fields{
mutableStore: mutableStore,
tokenGeneratorPath: model.TokenGeneratorPath,
walStore: walStore,
maxConcurrency: maxConcurrency,
connectionControl: make(chan struct{}),
l: l,
},
args: args{
ctx: context.Background(),
},
wantErr: false,
},
}
token1 := ""
token2 := ""
for _, test := range tests {
tt := test
t.Run(tt.name, func(t *testing.T) {
var err error
token2 = token1
w := &WAL{
mutableStore: tt.fields.mutableStore,
tokenGeneratorPath: tt.fields.tokenGeneratorPath,
walStore: tt.fields.walStore,
maxConcurrency: tt.fields.maxConcurrency,
connectionControl: tt.fields.connectionControl,
l: tt.fields.l,
}
token1, err = w.getToken(tt.args.ctx)
if (err != nil) != tt.wantErr {
t.Errorf("getToken() error = %v, wantErr %v", err, tt.wantErr)
return
}
if token1 == "" {
t.Errorf("getToken() error = %v", err)
}
if token2 != "" {
ks1, err := ksuid.Parse(token1)
require.NoError(t, err, "Got error while converting token 1 to ksuid: %s", token1)
ks2, err := ksuid.Parse(token2)
require.NoError(t, err, "Got error while converting token2 to ksuid: %s", token2)
time1 := ks1.Time()
time2 := ks2.Time()
diff := time1.Sub(time2)
var second float64
require.Greater(t, diff.Seconds(), second) // KSUID is at the granularity of seconds.
}
time.Sleep(1 * time.Second) // KSUID is at the granularity of seconds.
})
}
}
type mockMutableStoreTestAdd struct {
storage.Store
createTime time.Time
updateTime time.Time
mutex sync.Mutex
storeType string
failGetAttr bool
failTouch bool
failPut bool
}
func (m *mockMutableStoreTestAdd) String() string {
panic("implement me")
}
func (m *mockMutableStoreTestAdd) Has(context.Context, string) (bool, error) {
panic("implement me")
}
func (m *mockMutableStoreTestAdd) Get(context.Context, string) (io.ReadCloser, error) {
panic("implement me")
}
func (m *mockMutableStoreTestAdd) GetAttr(context.Context, string) (storage.Attributes, error) {
if m.failGetAttr {
return storage.Attributes{}, fmt.Errorf(attrError)
}
if m.storeType != mutable {
return storage.Attributes{}, fmt.Errorf("getattr expected only on mutable store")
}
return storage.Attributes{
Created: m.createTime,
Updated: m.updateTime,
Owner: "",
}, nil
}
func (m *mockMutableStoreTestAdd) GetAt(context.Context, string) (io.ReaderAt, error) {
panic("implement me")
}
func (m *mockMutableStoreTestAdd) Touch(_ context.Context, path string) error {
if m.failTouch {
return fmt.Errorf(touchError)
}
if m.storeType != mutable {
return fmt.Errorf("touch expected only on mutable store")
}
if path != model.TokenGeneratorPath {
return fmt.Errorf("expected path: %s", model.TokenGeneratorPath)
}
m.mutex.Lock()
defer m.mutex.Unlock()
oldTime := m.updateTime
m.updateTime = time.Now() // Assume stable local clock
if m.updateTime.Before(oldTime) {
panic("local wall clock not stable")
}
return nil
}
func (m *mockMutableStoreTestAdd) Put(_ context.Context, key string, reader io.Reader, overwrite bool) error {
if m.failPut {
return fmt.Errorf(putError)
}
if m.storeType != wal {
return fmt.Errorf("put expected only on wal store")
}
_, err := ksuid.Parse(key)
if err != nil {
return err
}
b, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
if payload != string(b) {
return fmt.Errorf("payload does not match: %s", string(b))
}
if overwrite == storage.OverWrite {
return fmt.Errorf("no overwrites expected")
}
return nil
}
func (m *mockMutableStoreTestAdd) Delete(context.Context, string) error {
panic("implement me")
}
func (m *mockMutableStoreTestAdd) Keys(context.Context) ([]string, error) {
panic("implement me")
}
func (m *mockMutableStoreTestAdd) Clear(context.Context) error {
panic("implement me")
}
func (m *mockMutableStoreTestAdd) KeysPrefix(ctx context.Context, pageToken string, prefix string, delimiter string, count int) ([]string, string, error) {
panic("implement me")
}
func TestWAL_Add(t *testing.T) {
t.Parallel()
type fields struct {
mutableStore storage.Store
tokenGeneratorPath string
walStore storage.Store
maxConcurrency int
connectionControl chan struct{}
l *zap.Logger
}
type args struct {
ctx context.Context
p string
}
l := mustGetTestLogger(t)
tests := []struct {
name string
fields fields
args args
wantErr bool
validateError func(err error) bool
errString string
}{
{
name: "add-success",
fields: fields{
mutableStore: &mockMutableStoreTestAdd{
createTime: time.Now(),
updateTime: time.Now(),
mutex: sync.Mutex{},
storeType: "mutable",
},
tokenGeneratorPath: model.TokenGeneratorPath,
walStore: &mockMutableStoreTestAdd{
createTime: time.Now(),
updateTime: time.Now(),
mutex: sync.Mutex{},
storeType: "wal",
},
maxConcurrency: 0,
connectionControl: nil,
l: l,
},
args: args{
ctx: nil,
p: payload,
},
wantErr: false,
},
{
name: "add-failure-attr",
fields: fields{
mutableStore: &mockMutableStoreTestAdd{
createTime: time.Now(),
updateTime: time.Now(),
mutex: sync.Mutex{},
storeType: "mutable",
failGetAttr: true,
},
tokenGeneratorPath: model.TokenGeneratorPath,
walStore: &mockMutableStoreTestAdd{
createTime: time.Now(),
updateTime: time.Now(),
mutex: sync.Mutex{},
storeType: "wal",
},
maxConcurrency: 0,
connectionControl: nil,
l: l,
},
args: args{
ctx: nil,
p: payload,
},
wantErr: true,
validateError: func(err error) bool {
return strings.Contains(err.Error(), attrError)
},
},
{
name: "add-failure-touch",
fields: fields{
mutableStore: &mockMutableStoreTestAdd{
createTime: time.Now(),
updateTime: time.Now(),
mutex: sync.Mutex{},
storeType: "mutable",
failTouch: true,
},
tokenGeneratorPath: model.TokenGeneratorPath,
walStore: &mockMutableStoreTestAdd{
createTime: time.Now(),
updateTime: time.Now(),
mutex: sync.Mutex{},
storeType: "wal",
},
maxConcurrency: 0,
connectionControl: nil,
l: l,
},
args: args{
ctx: nil,
p: payload,
},
wantErr: true,
validateError: func(err error) bool {
return strings.Contains(err.Error(), touchError)
},
},
{
name: "add-failure-put",
fields: fields{
mutableStore: &mockMutableStoreTestAdd{
createTime: time.Now(),
updateTime: time.Now(),
mutex: sync.Mutex{},
storeType: "mutable",
},
tokenGeneratorPath: model.TokenGeneratorPath,
walStore: &mockMutableStoreTestAdd{
createTime: time.Now(),
updateTime: time.Now(),
mutex: sync.Mutex{},
storeType: "wal",
failPut: true,
},
maxConcurrency: 0,
connectionControl: nil,
l: l,
},
args: args{
ctx: nil,
p: payload,
},
wantErr: true,
validateError: func(err error) bool {
return strings.Contains(err.Error(), putError)
},
},
}
for _, test := range tests {
tt := test
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
w := &WAL{
mutableStore: tt.fields.mutableStore,
tokenGeneratorPath: tt.fields.tokenGeneratorPath,
walStore: tt.fields.walStore,
maxConcurrency: tt.fields.maxConcurrency,
connectionControl: tt.fields.connectionControl,
l: tt.fields.l,
}
if _, err := w.Add(tt.args.ctx, tt.args.p); (err != nil) != tt.wantErr {
t.Errorf("Add() error = %v, wantErr %v", err, tt.wantErr)
} else if err != nil {
if !tt.validateError(err) {
t.Errorf("Error validation failed")
}
}
})
}
}
type mockMutableStoreTestListEntries struct {
storage.Store
keys []string
}
func (m *mockMutableStoreTestListEntries) generateLexicallySortedKeys(count int) {
m.keys = []string{}
t := time.Now()
a := t.Add(-1 * time.Hour)
key, _ := ksuid.NewRandomWithTime(a)
m.keys = append(m.keys, key.String())
delta := time.Hour
for i := 0; i < count-1; i++ {
delta += time.Hour
a = t.Add(delta)
key, _ := ksuid.NewRandomWithTime(a)
m.keys = append(m.keys, key.String())
}
}
func (m *mockMutableStoreTestListEntries) String() string {
panic("implement me")
}
func (m *mockMutableStoreTestListEntries) Has(context.Context, string) (bool, error) {
panic("implement me")
}
type rc struct {
s string
}
func (r *rc) Close() error {
return nil
}
func (r *rc) Read(p []byte) (n int, err error) {
e := model.Entry{
Token: r.s,
Payload: r.s,
}
b, err := model.MarshalWAL(&e)
if err != nil {
return 0, err
}
c := copy(p, b)
return c, io.EOF
}
func (m *mockMutableStoreTestListEntries) Get(_ context.Context, key string) (io.ReadCloser, error) {
randSleep()
return &rc{
s: key,
}, nil
}
func (m *mockMutableStoreTestListEntries) GetAttr(context.Context, string) (storage.Attributes, error) {
panic("implement me")
}
func (m *mockMutableStoreTestListEntries) GetAt(context.Context, string) (io.ReaderAt, error) {
panic("implement me")
}
func (m *mockMutableStoreTestListEntries) Touch(_ context.Context, path string) error {
panic("implement me")
}
func (m *mockMutableStoreTestListEntries) Put(_ context.Context, _ string, _ io.Reader, _ bool) error {
panic("implement me")
}
func (m *mockMutableStoreTestListEntries) Delete(context.Context, string) error {
panic("implement me")
}
func (m *mockMutableStoreTestListEntries) Keys(context.Context) ([]string, error) {
panic("implement me")
}
func (m *mockMutableStoreTestListEntries) Clear(context.Context) error {
panic("implement me")
}
func (m *mockMutableStoreTestListEntries) KeysPrefix(ctx context.Context,
pageToken string, prefix string, delimiter string, count int) ([]string, string, error) {
var keys []string
var next string
var found bool
i := 0
randSleep()
for {
if i == len(m.keys)-1 {
found = true
} else {
c := strings.Compare(m.keys[i+1], pageToken)
if !found && c >= 0 {
found = true
}
}
if !found {
i++
continue
}
keys = append(keys, m.keys[i])
if i == len(m.keys)-1 {
next = ""
return keys, next, nil
}
next = m.keys[i+1]
i++
if count == len(keys) {
return keys, next, nil
}
}
}
func TestWAL_ListEntries(t *testing.T) {
t.Parallel()
tests := []struct {
name string
keyCount int
expectError bool
errorValidation func(err error) bool
setupParams func(keys []string) (expected string, start string, max int, next string)
maxConnections int
}{
{
name: "List with defaults",
keyCount: 2048,
maxConnections: 1024,
setupParams: func(keys []string) (expected string, start string, max int, next string) {
return keys[0], keys[0], len(keys), keys[1000]
},
},
{
name: "List with lower max than total keys",
keyCount: 2048,
maxConnections: 1024,
setupParams: func(keys []string) (expected string, start string, max int, next string) {
return keys[0], keys[0], 900, keys[900]
},
},
{
name: "List with defaults, max concurrency at 1",
keyCount: 100,
maxConnections: 1,
setupParams: func(keys []string) (expected string, start string, max int, next string) {
return keys[0], keys[0], len(keys), ""
},
},
{
name: "List ksuid between 2 keys with ksuid reduced",
keyCount: 100,
maxConnections: 10,
setupParams: func(keys []string) (expected string, start string, max int, next string) {
k, _ := ksuid.Parse(keys[2])
s, _ := ksuid.NewRandomWithTime(k.Time().Add(-10 * time.Second))
return keys[1], s.String(), 2, keys[3]
},
},
{
name: "List ksuid between 2 keys with ksuid as is",
keyCount: 100,
maxConnections: 10,
setupParams: func(keys []string) (expected string, start string, max int, next string) {
return keys[1], keys[2], 2, keys[3]
},
},
{
name: "List 0",
keyCount: 100,
maxConnections: 10,
setupParams: func(keys []string) (expected string, start string, max int, next string) {
return keys[1], keys[2], 0, keys[0]
},
expectError: true,
errorValidation: func(err error) bool {
return strings.Contains(err.Error(), "max")
},
},
}
l := mustGetTestLogger(t)
for _, test := range tests {
tt := test
t.Run(tt.name, func(t *testing.T) {
t.Parallel()
s := mockMutableStoreTestListEntries{}
s.generateLexicallySortedKeys(tt.keyCount)
first, start, max, nextExpected := tt.setupParams(s.keys)
w := WAL{
walStore: &s,
connectionControl: make(chan struct{}, tt.maxConnections),
l: l,
}
keys, nextActual, err := w.ListEntries(context.Background(), start, max)
if err != nil {
if !tt.expectError {
t.Errorf("ListEntries() error = %v", err)
return
}
if !tt.errorValidation(err) {
t.Errorf("Failed to validate error")
}
return
}
if first != keys[0].Token {
t.Errorf("incorrect first entry. Actual:%s, expected:%s", keys[0].Token, first)
}
l.Debug("Next token", zap.String("actual", nextActual), zap.String("expected", nextExpected))
if nextActual != nextExpected {
t.Errorf("failed to get the correct next token, actual: %s, expected:%s", nextActual, nextExpected)
}
l.Debug("counts", zap.Int("max", max), zap.Int("length", len(keys)))
if max >= maxEntriesPerList {
if len(s.keys) >= maxEntriesPerList && len(keys) != maxEntriesPerList {
t.Errorf("maxEnteriesPerList should be the number of keys retured. Actual: %d, len:%d, Expected:%d", len(keys), len(s.keys), maxEntriesPerList)
}
if len(s.keys) < maxEntriesPerList && len(keys) != len(s.keys) {
t.Errorf("Number of returned keys should be the length of all keys. Actual: %d, len:%d, Expected:%d", len(keys), len(s.keys), maxEntriesPerList)
}
} else if max > 0 {
if (max < len(s.keys) && len(keys) != max) || (len(keys) > len(s.keys)) {
t.Errorf("total number of keys is incorrect. len: %d, actual:%d, max: %d", len(s.keys), len(keys), max)
}
}
})
}
}
|
[
"\"DEBUG_TEST\""
] |
[] |
[
"DEBUG_TEST"
] |
[]
|
["DEBUG_TEST"]
|
go
| 1 | 0 | |
dm/master/bootstrap_test.go
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package master
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/golang/mock/gomock"
. "github.com/pingcap/check"
filter "github.com/pingcap/tidb-tools/pkg/binlog-filter"
"github.com/pingcap/dm/dm/config"
"github.com/pingcap/dm/dm/pb"
"github.com/pingcap/dm/dm/pbmock"
tcontext "github.com/pingcap/dm/pkg/context"
"github.com/pingcap/dm/pkg/log"
"github.com/pingcap/dm/pkg/terror"
)
const (
// do not forget to update this path if the file is removed/renamed.
subTaskSampleFile = "../worker/subtask.toml"
)
func (t *testMaster) TestCollectSourceConfigFilesV1Import(c *C) {
s := testDefaultMasterServer(c)
defer s.Close()
s.cfg.V1SourcesPath = c.MkDir()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tctx := tcontext.NewContext(ctx, log.L())
// no source file exist.
cfgs, err := s.collectSourceConfigFilesV1Import(tctx)
c.Assert(err, IsNil)
c.Assert(cfgs, HasLen, 0)
host := os.Getenv("MYSQL_HOST")
if host == "" {
host = "127.0.0.1"
}
port, _ := strconv.Atoi(os.Getenv("MYSQL_PORT"))
if port == 0 {
port = 3306
}
user := os.Getenv("MYSQL_USER")
if user == "" {
user = "root"
}
password := os.Getenv("MYSQL_PSWD")
// load a valid source file.
cfg1 := config.NewSourceConfig()
// ensure empty maps do not become nil after marshal/unmarshal
cfg1.From.Session = map[string]string{}
cfg1.Tracer = map[string]interface{}{}
cfg1.Filters = []*filter.BinlogEventRule{}
c.Assert(cfg1.LoadFromFile("./source.yaml"), IsNil)
cfg1.From.Host = host
cfg1.From.Port = port
cfg1.From.User = user
cfg1.From.Password = password
cfg2 := cfg1.Clone()
cfg2.SourceID = "mysql-replica-02"
// write into source files.
data1, err := cfg1.Yaml()
c.Assert(err, IsNil)
c.Assert(ioutil.WriteFile(filepath.Join(s.cfg.V1SourcesPath, "source1.yaml"), []byte(data1), 0644), IsNil)
data2, err := cfg2.Yaml()
c.Assert(err, IsNil)
c.Assert(ioutil.WriteFile(filepath.Join(s.cfg.V1SourcesPath, "source2.yaml"), []byte(data2), 0644), IsNil)
// collect again, two configs exist.
cfgs, err = s.collectSourceConfigFilesV1Import(tctx)
c.Assert(err, IsNil)
c.Assert(cfgs, HasLen, 2)
c.Assert(cfgs[cfg1.SourceID], DeepEquals, *cfg1)
c.Assert(cfgs[cfg2.SourceID], DeepEquals, *cfg2)
// put an invalid source file.
c.Assert(ioutil.WriteFile(filepath.Join(s.cfg.V1SourcesPath, "invalid.yaml"), []byte("invalid-source-data"), 0644), IsNil)
cfgs, err = s.collectSourceConfigFilesV1Import(tctx)
c.Assert(terror.ErrConfigYamlTransform.Equal(err), IsTrue)
c.Assert(cfgs, HasLen, 0)
}
func (t *testMaster) TestWaitWorkersReadyV1Import(c *C) {
oldWaitWorkerV1Timeout := waitWorkerV1Timeout
defer func() {
waitWorkerV1Timeout = oldWaitWorkerV1Timeout
}()
waitWorkerV1Timeout = 5 * time.Second
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tctx := tcontext.NewContext(ctx, log.L())
s := testDefaultMasterServer(c)
defer s.Close()
s.cfg.V1SourcesPath = c.MkDir()
c.Assert(s.scheduler.Start(ctx, etcdTestCli), IsNil)
cfg1 := config.NewSourceConfig()
c.Assert(cfg1.LoadFromFile("./source.yaml"), IsNil)
cfg2 := cfg1.Clone()
cfg2.SourceID = "mysql-replica-02"
cfgs := map[string]config.SourceConfig{
cfg1.SourceID: *cfg1,
cfg2.SourceID: *cfg2,
}
// no worker registered, timeout.
err := s.waitWorkersReadyV1Import(tctx, cfgs)
c.Assert(err, ErrorMatches, ".*wait for DM-worker instances timeout.*")
// register one worker.
req1 := &pb.RegisterWorkerRequest{
Name: "worker-1",
Address: "127.0.0.1:8262",
}
resp1, err := s.RegisterWorker(ctx, req1)
c.Assert(err, IsNil)
c.Assert(resp1.Result, IsTrue)
// still times out because not enough workers are registered.
err = s.waitWorkersReadyV1Import(tctx, cfgs)
c.Assert(err, ErrorMatches, ".*wait for DM-worker instances timeout.*")
// register another worker.
go func() {
time.Sleep(1500 * time.Millisecond)
req2 := &pb.RegisterWorkerRequest{
Name: "worker-2",
Address: "127.0.0.1:8263",
}
resp2, err2 := s.RegisterWorker(ctx, req2)
c.Assert(err2, IsNil)
c.Assert(resp2.Result, IsTrue)
}()
err = s.waitWorkersReadyV1Import(tctx, cfgs)
c.Assert(err, IsNil)
}
func (t *testMaster) TestSubtaskCfgsStagesV1Import(c *C) {
var (
worker1Name = "worker-1"
worker1Addr = "127.0.0.1:8262"
worker2Name = "worker-2"
worker2Addr = "127.0.0.1:8263"
taskName1 = "task-1"
taskName2 = "task-2"
sourceID1 = "mysql-replica-01"
sourceID2 = "mysql-replica-02"
)
cfg11 := config.NewSubTaskConfig()
c.Assert(cfg11.DecodeFile(subTaskSampleFile, true), IsNil)
cfg11.Dir = "./dump_data"
cfg11.ChunkFilesize = "64"
cfg11.Name = taskName1
cfg11.SourceID = sourceID1
c.Assert(cfg11.Adjust(true), IsNil) // adjust again after manually modifying some items.
data11, err := cfg11.Toml()
c.Assert(err, IsNil)
data11 = strings.ReplaceAll(data11, `chunk-filesize = "64"`, `chunk-filesize = 64`) // different type between v1.0.x and v2.0.x.
cfg12, err := cfg11.Clone()
c.Assert(err, IsNil)
cfg12.SourceID = sourceID2
data12, err := cfg12.Toml()
c.Assert(err, IsNil)
data12 = strings.ReplaceAll(data12, `chunk-filesize = "64"`, `chunk-filesize = 64`)
cfg21, err := cfg11.Clone()
c.Assert(err, IsNil)
cfg21.Dir = "./dump_data"
cfg21.Name = taskName2
c.Assert(cfg21.Adjust(true), IsNil)
data21, err := cfg21.Toml()
c.Assert(err, IsNil)
data21 = strings.ReplaceAll(data21, `chunk-filesize = "64"`, `chunk-filesize = 64`)
cfg22, err := cfg21.Clone()
c.Assert(err, IsNil)
cfg22.SourceID = sourceID2
data22, err := cfg22.Toml()
c.Assert(err, IsNil)
data22 = strings.ReplaceAll(data22, `chunk-filesize = "64"`, `chunk-filesize = 64`)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tctx := tcontext.NewContext(ctx, log.L())
s := testDefaultMasterServer(c)
defer s.Close()
s.cfg.V1SourcesPath = c.MkDir()
c.Assert(s.scheduler.Start(ctx, etcdTestCli), IsNil)
// no workers exist, so there are no configs or stages to fetch.
cfgs, stages, err := s.getSubtaskCfgsStagesV1Import(tctx)
c.Assert(err, IsNil)
c.Assert(cfgs, HasLen, 0)
c.Assert(stages, HasLen, 0)
ctrl := gomock.NewController(c)
defer ctrl.Finish()
mockWCli1 := pbmock.NewMockWorkerClient(ctrl)
mockWCli2 := pbmock.NewMockWorkerClient(ctrl)
c.Assert(s.scheduler.AddWorker(worker1Name, worker1Addr), IsNil)
c.Assert(s.scheduler.AddWorker(worker2Name, worker2Addr), IsNil)
s.scheduler.SetWorkerClientForTest(worker1Name, newMockRPCClient(mockWCli1))
s.scheduler.SetWorkerClientForTest(worker2Name, newMockRPCClient(mockWCli2))
mockWCli1.EXPECT().OperateV1Meta(
gomock.Any(),
&pb.OperateV1MetaRequest{
Op: pb.V1MetaOp_GetV1Meta,
},
).Return(&pb.OperateV1MetaResponse{
Result: true,
Meta: map[string]*pb.V1SubTaskMeta{
taskName1: {
Op: pb.TaskOp_Start,
Stage: pb.Stage_Running,
Name: taskName1,
Task: []byte(data11),
},
taskName2: {
Op: pb.TaskOp_Pause,
Stage: pb.Stage_Paused,
Name: taskName2,
Task: []byte(data21),
},
},
}, nil)
mockWCli2.EXPECT().OperateV1Meta(
gomock.Any(),
&pb.OperateV1MetaRequest{
Op: pb.V1MetaOp_GetV1Meta,
},
).Return(&pb.OperateV1MetaResponse{
Result: true,
Meta: map[string]*pb.V1SubTaskMeta{
taskName1: {
Op: pb.TaskOp_Resume,
Stage: pb.Stage_Running,
Name: taskName1,
Task: []byte(data12),
},
taskName2: {
Op: pb.TaskOp_Start,
Stage: pb.Stage_Running,
Name: taskName2,
Task: []byte(data22),
},
},
}, nil)
// all workers return valid config and stage.
cfgs, stages, err = s.getSubtaskCfgsStagesV1Import(tctx)
c.Assert(err, IsNil)
c.Assert(cfgs, HasLen, 2)
c.Assert(stages, HasLen, 2)
c.Assert(cfgs[taskName1], HasLen, 2)
c.Assert(cfgs[taskName2], HasLen, 2)
c.Assert(cfgs[taskName1][sourceID1], DeepEquals, *cfg11)
c.Assert(cfgs[taskName1][sourceID2], DeepEquals, *cfg12)
c.Assert(cfgs[taskName2][sourceID1], DeepEquals, *cfg21)
c.Assert(cfgs[taskName2][sourceID2], DeepEquals, *cfg22)
c.Assert(stages[taskName1], HasLen, 2)
c.Assert(stages[taskName2], HasLen, 2)
c.Assert(stages[taskName1][sourceID1], Equals, pb.Stage_Running)
c.Assert(stages[taskName1][sourceID2], Equals, pb.Stage_Running)
c.Assert(stages[taskName2][sourceID1], Equals, pb.Stage_Paused)
c.Assert(stages[taskName2][sourceID2], Equals, pb.Stage_Running)
// one of the workers returns an invalid config.
mockWCli1.EXPECT().OperateV1Meta(
gomock.Any(),
&pb.OperateV1MetaRequest{
Op: pb.V1MetaOp_GetV1Meta,
},
).Return(&pb.OperateV1MetaResponse{
Result: true,
Meta: map[string]*pb.V1SubTaskMeta{
taskName1: {
Op: pb.TaskOp_Start,
Stage: pb.Stage_Running,
Name: taskName1,
Task: []byte(data11),
},
taskName2: {
Op: pb.TaskOp_Pause,
Stage: pb.Stage_Paused,
Name: taskName2,
Task: []byte(data21),
},
},
}, nil)
mockWCli2.EXPECT().OperateV1Meta(
gomock.Any(),
&pb.OperateV1MetaRequest{
Op: pb.V1MetaOp_GetV1Meta,
},
).Return(&pb.OperateV1MetaResponse{
Result: true,
Meta: map[string]*pb.V1SubTaskMeta{
taskName1: {
Op: pb.TaskOp_Resume,
Stage: pb.Stage_Running,
Name: taskName1,
Task: []byte("invalid subtask data"),
},
taskName2: {
Op: pb.TaskOp_Start,
Stage: pb.Stage_Running,
Name: taskName2,
Task: []byte(data22),
},
},
}, nil)
cfgs, stages, err = s.getSubtaskCfgsStagesV1Import(tctx)
c.Assert(err, ErrorMatches, ".*fail to get subtask config and stage.*")
c.Assert(cfgs, HasLen, 0)
c.Assert(stages, HasLen, 0)
}
|
[
"\"MYSQL_HOST\"",
"\"MYSQL_PORT\"",
"\"MYSQL_USER\"",
"\"MYSQL_PSWD\""
] |
[] |
[
"MYSQL_PORT",
"MYSQL_USER",
"MYSQL_PSWD",
"MYSQL_HOST"
] |
[]
|
["MYSQL_PORT", "MYSQL_USER", "MYSQL_PSWD", "MYSQL_HOST"]
|
go
| 4 | 0 | |
workflows/pipe-common/pipeline/autoscaling/azureprovider.py
|
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import functools
import os
import sys
import uuid
from random import randint
import re
from azure.common.client_factory import get_client_from_auth_file
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.compute import ComputeManagementClient
from azure.mgmt.network import NetworkManagementClient
from msrestazure.azure_exceptions import CloudError
from cloudprovider import AbstractInstanceProvider, LIMIT_EXCEEDED_ERROR_MASSAGE, LIMIT_EXCEEDED_EXIT_CODE
import utils
VM_NAME_PREFIX = "az-"
UUID_LENGTH = 16
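# A low-priority VM name is the scale set name ("az-" + 16 hex characters) followed by a 6-character instance suffix; group 1 captures the scale set name.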
LOW_PRIORITY_INSTANCE_ID_TEMPLATE = '(az-[a-z0-9]{16})[0-9A-Z]{6}'
def azure_resource_type_cmp(r1, r2):
if str(r1.type).split('/')[-1].startswith("virtualMachine"):
return -1
elif str(r1.type).split('/')[-1] == "networkInterfaces" and not str(r2.type).split('/')[-1].startswith("virtualMachine"):
return -1
return 0
class AzureInstanceProvider(AbstractInstanceProvider):
def __init__(self, zone):
self.zone = zone
self.resource_client = get_client_from_auth_file(ResourceManagementClient)
self.network_client = get_client_from_auth_file(NetworkManagementClient)
self.compute_client = get_client_from_auth_file(ComputeManagementClient)
self.resource_group_name = os.environ["AZURE_RESOURCE_GROUP"]
def run_instance(self, is_spot, bid_price, ins_type, ins_hdd, ins_img, ins_key, run_id, kms_encyr_key_id,
num_rep, time_rep, kube_ip, kubeadm_token):
try:
ins_key = utils.read_ssh_key(ins_key)
swap_size = utils.get_swap_size(self.zone, ins_type, is_spot, "AZURE")
user_data_script = utils.get_user_data_script(self.zone, ins_type, ins_img, kube_ip, kubeadm_token,
swap_size)
instance_name = "az-" + uuid.uuid4().hex[0:16]
if not is_spot:
self.__create_public_ip_address(instance_name, run_id)
self.__create_nic(instance_name, run_id)
return self.__create_vm(instance_name, run_id, ins_type, ins_img, ins_hdd, user_data_script,
ins_key, "pipeline", swap_size)
else:
return self.__create_low_priority_vm(instance_name, run_id, ins_type, ins_img, ins_hdd,
user_data_script, ins_key, "pipeline", swap_size)
except Exception as e:
self.__delete_all_by_run_id(run_id)
raise RuntimeError(e)
def verify_run_id(self, run_id):
utils.pipe_log('Checking if instance already exists for RunID {}'.format(run_id))
vm_name = None
private_ip = None
for resource in self.resource_client.resources.list(filter="tagName eq 'Name' and tagValue eq '" + run_id + "'"):
if str(resource.type).split('/')[-1] == "virtualMachines":
vm_name = resource.name
private_ip = self.network_client.network_interfaces \
.get(self.resource_group_name, vm_name + '-nic').ip_configurations[0].private_ip_address
break
if str(resource.type).split('/')[-1] == "virtualMachineScaleSet":
scale_set_name = resource.name
vm_name, private_ip = self.__get_instance_name_and_private_ip_from_vmss(scale_set_name)
break
return vm_name, private_ip
def get_instance_names(self, ins_id):
return ins_id, ins_id
def find_instance(self, run_id):
for resource in self.resource_client.resources.list(filter="tagName eq 'Name' and tagValue eq '" + run_id + "'"):
if str(resource.type).split('/')[-1] == "virtualMachines":
return resource.name
elif str(resource.type).split('/')[-1] == "virtualMachineScaleSets":
instance_name, _ = self.__get_instance_name_and_private_ip_from_vmss(resource.name)
return instance_name
return None
def find_nodes_with_run_id(self, run_id):
for resource in self.resource_client.resources.list(filter="tagName eq 'Name' and tagValue eq '" + run_id + "'"):
if str(resource.type).split('/')[-1] == "virtualMachines":
return [resource.name]
elif str(resource.type).split('/')[-1] == "virtualMachineScaleSets":
return self.generate_scale_set_vm_names(resource.name)
return []
def generate_scale_set_vm_names(self, scale_set_name):
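# VMSS appends a zero-padded 6-character hex instance suffix to the scale set name; pre-generate the first 15 candidate VM names.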
return [scale_set_name + '%0*x' % (6, x) for x in range(0, 15)]
def check_instance(self, ins_id, run_id, num_rep, time_rep):
pass
def terminate_instance(self, ins_id):
low_priority_search = re.search(LOW_PRIORITY_INSTANCE_ID_TEMPLATE, ins_id)
if low_priority_search:
res_name = low_priority_search.group(1)
service = self.compute_client.virtual_machine_scale_sets
else:
res_name = ins_id  # regular VMs are addressed directly by their instance ID/name
service = self.compute_client.virtual_machines
resource = service.get(self.resource_group_name, res_name)
if resource and 'Name' in resource.tags:
self.__delete_all_by_run_id(resource.tags['Name'])
else:
service.delete(self.resource_group_name, ins_id).wait()
def terminate_instance_by_ip_or_name(self, node_internal_ip, node_name):
low_priority_search = re.search(LOW_PRIORITY_INSTANCE_ID_TEMPLATE, node_name)
if low_priority_search:
# because we set computer_name_prefix in the nodeup script,
# we know it is the same as the scale set name, so extract it
scale_set_name = low_priority_search.group(1)
info = self.compute_client.virtual_machine_scale_sets.get(self.resource_group_name, scale_set_name)
else:
info = self.compute_client.virtual_machines.get(self.resource_group_name, node_name)
if info is not None and "Name" in info.tags:
resources = []
for resource in self.resource_client.resources.list(filter="tagName eq 'Name' and tagValue eq '" + info.tags["Name"] + "'"):
resources.append(resource)
# sort resources so that the VM and NIC are deleted first, because they have attached resources (disks and IPs)
resources.sort(key=functools.cmp_to_key(azure_resource_type_cmp))
for resource in resources:
self.resource_client.resources.delete(
resource_group_name=resource.id.split('/')[4],
resource_provider_namespace=resource.id.split('/')[6],
parent_resource_path='',
resource_type=str(resource.type).split('/')[-1],
resource_name=resource.name,
api_version=self.__resolve_azure_api(resource),
parameters=resource
).wait()
def find_and_tag_instance(self, old_id, new_id):
ins_id = None
for resource in self.resource_client.resources.list(
filter="tagName eq 'Name' and tagValue eq '" + old_id + "'"):
resource_group = resource.id.split('/')[4]
resource_type = str(resource.type).split('/')[-1]
if resource_type == "virtualMachines":
ins_id = resource.name
resource = self.compute_client.virtual_machines.get(resource_group, resource.name)
resource.tags["Name"] = new_id
self.compute_client.virtual_machines.create_or_update(resource_group, resource.name, resource)
elif resource_type == "virtualMachineScaleSets":
resource = self.compute_client.virtual_machine_scale_sets.get(resource_group, resource.name)
resource.tags["Name"] = new_id
self.compute_client.virtual_machine_scale_sets.create_or_update(resource_group, resource.name, resource)
ins_id, _ = self.__get_instance_name_and_private_ip_from_vmss(resource.name)
elif resource_type == "networkInterfaces":
resource = self.network_client.network_interfaces.get(resource_group, resource.name)
resource.tags["Name"] = new_id
self.network_client.network_interfaces.create_or_update(resource_group, resource.name, resource)
elif resource_type == "publicIPAddresses":
resource = self.network_client.public_ip_addresses.get(resource_group, resource.name)
resource.tags["Name"] = new_id
self.network_client.public_ip_addresses.create_or_update(resource_group, resource.name, resource)
elif resource_type == "disks":
resource = self.compute_client.disks.get(resource_group, resource.name)
resource.tags["Name"] = new_id
self.compute_client.disks.create_or_update(resource_group, resource.name, resource)
if ins_id is not None:
return ins_id
else:
raise RuntimeError("Failed to find instance {}".format(old_id))
def __create_public_ip_address(self, instance_name, run_id):
public_ip_address_params = {
'location': self.zone,
'public_ip_allocation_method': 'Dynamic',
'dns_settings': {
'domain_name_label': instance_name
},
'tags': AzureInstanceProvider.get_tags(run_id)
}
creation_result = self.network_client.public_ip_addresses.create_or_update(
self.resource_group_name,
instance_name + '-ip',
public_ip_address_params
)
return creation_result.result()
def __create_nic(self, instance_name, run_id):
public_ip_address = self.network_client.public_ip_addresses.get(
self.resource_group_name,
instance_name + '-ip'
)
subnet_info = self.__get_subnet_info()
security_group_info = self.__get_security_group_info()
nic_params = {
'location': self.zone,
'ipConfigurations': [{
'name': 'IPConfig',
'publicIpAddress': public_ip_address,
'subnet': {
'id': subnet_info.id
}
}],
"networkSecurityGroup": {
'id': security_group_info.id
},
'tags': AzureInstanceProvider.get_tags(run_id)
}
creation_result = self.network_client.network_interfaces.create_or_update(
self.resource_group_name,
instance_name + '-nic',
nic_params
)
return creation_result.result()
def __get_security_group_info(self):
security_groups = utils.get_security_groups(self.zone)
if len(security_groups) != 1:
raise AssertionError("Please specify only one security group!")
resource_group, secur_grp = AzureInstanceProvider.get_res_grp_and_res_name_from_string(security_groups[0], 'networkSecurityGroups')
security_group_info = self.network_client.network_security_groups.get(resource_group, secur_grp)
return security_group_info
def __get_subnet_info(self):
allowed_networks = utils.get_networks_config(self.zone)
if allowed_networks and len(allowed_networks) > 0:
az_num = randint(0, len(allowed_networks) - 1)
az_name = allowed_networks.items()[az_num][0]
subnet_id = allowed_networks.items()[az_num][1]
resource_group, network = AzureInstanceProvider.get_res_grp_and_res_name_from_string(az_name, 'virtualNetworks')
subnet = AzureInstanceProvider.get_subnet_name_from_id(subnet_id)
utils.pipe_log('- Networks list found, subnet {} in VNET {} will be used'.format(subnet_id, az_name))
else:
utils.pipe_log('- Networks list NOT found, trying to find network from region in the same resource group...')
resource_group, network, subnet = self.get_any_network_from_location(self.zone)
utils.pipe_log('- Network found, subnet {} in VNET {} will be used'.format(subnet, network))
if not resource_group or not network or not subnet:
raise RuntimeError(
"No networks with subnet found for location: {} in resourceGroup: {}".format(self.zone, self.resource_group_name))
subnet_info = self.network_client.subnets.get(resource_group, network, subnet)
return subnet_info
def __get_disk_type(self, instance_type):
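# Use Premium_LRS when the VM SKU advertises premium IO support in this region, otherwise StandardSSD_LRS.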
disk_type = None
for sku in self.compute_client.resource_skus.list():
if sku.locations[0].lower() == self.zone.lower() and sku.resource_type.lower() == "virtualmachines" \
and sku.name.lower() == instance_type.lower():
for capability in sku.capabilities:
if capability.name.lower() == "premiumio":
disk_type = "Premium_LRS" if capability.value.lower() == "true" else "StandardSSD_LRS"
break
return disk_type
def __create_vm(self, instance_name, run_id, instance_type, instance_image, disk, user_data_script,
ssh_pub_key, user, swap_size):
nic = self.network_client.network_interfaces.get(
self.resource_group_name,
instance_name + '-nic'
)
resource_group, image_name = AzureInstanceProvider.get_res_grp_and_res_name_from_string(instance_image, 'images')
image = self.compute_client.images.get(
resource_group,
image_name
)
storage_profile = self.__get_storage_profile(disk, image, instance_type,
instance_name=instance_name, swap_size=swap_size)
vm_parameters = {
'location': self.zone,
'os_profile': self.__get_os_profile(instance_name, ssh_pub_key, user, user_data_script, 'computer_name'),
'hardware_profile': {
'vm_size': instance_type
},
'storage_profile': storage_profile,
'network_profile': {
'network_interfaces': [{
'id': nic.id
}]
},
'tags': AzureInstanceProvider.get_tags(run_id)
}
self.__create_node_resource(self.compute_client.virtual_machines, instance_name, vm_parameters)
private_ip = self.network_client.network_interfaces.get(
self.resource_group_name, instance_name + '-nic').ip_configurations[0].private_ip_address
return instance_name, private_ip
def __create_low_priority_vm(self, scale_set_name, run_id, instance_type, instance_image,
disk, user_data_script, ssh_pub_key, user, swap_size):
resource_group, image_name = AzureInstanceProvider.get_res_grp_and_res_name_from_string(instance_image, 'images')
image = self.compute_client.images.get(resource_group, image_name)
subnet_info = self.__get_subnet_info()
security_group_info = self.__get_security_group_info()
service = self.compute_client.virtual_machine_scale_sets
vmss_parameters = {
"location": self.zone,
"sku": {
"name": instance_type,
"capacity": "1"
},
"upgradePolicy": {
"mode": "Manual",
"automaticOSUpgrade": False
},
"properties": {
"overprovision": False,
"virtualMachineProfile": {
'priority': 'Low',
'evictionPolicy': 'delete',
'os_profile': self.__get_os_profile(scale_set_name, ssh_pub_key, user, user_data_script, 'computer_name_prefix'),
'storage_profile': self.__get_storage_profile(disk, image, instance_type, swap_size=swap_size),
"network_profile": {
"networkInterfaceConfigurations": [
{
"name": scale_set_name + "-nic",
"properties": {
"primary": True,
"networkSecurityGroup": {
"id": security_group_info.id
},
'dns_settings': {
'domain_name_label': scale_set_name
},
"ipConfigurations": [
{
"name": scale_set_name + "-ip",
"publicIPAddressConfiguration": {
"name": scale_set_name + "-publicip"
},
"properties": {
"subnet": {
"id": subnet_info.id
}
}
}
]
}
}
]
}
}
},
'tags': AzureInstanceProvider.get_tags(run_id)
}
self.__create_node_resource(service, scale_set_name, vmss_parameters)
return self.__get_instance_name_and_private_ip_from_vmss(scale_set_name)
def __create_node_resource(self, service, instance_name, node_parameters):
try:
creation_result = service.create_or_update(
self.resource_group_name,
instance_name,
node_parameters
)
creation_result.result()
except CloudError as client_error:
self.__delete_all_by_run_id(node_parameters['tags']['Name'])
error_message = client_error.__str__()
if 'OperationNotAllowed' in error_message or 'ResourceQuotaExceeded' in error_message:
utils.pipe_log_warn(LIMIT_EXCEEDED_ERROR_MASSAGE)
sys.exit(LIMIT_EXCEEDED_EXIT_CODE)
else:
raise client_error
def __get_os_profile(self, instance_name, ssh_pub_key, user, user_data_script, computer_name_parameter):
profile = {
computer_name_parameter: instance_name,
'admin_username': user,
"linuxConfiguration": {
"ssh": {
"publicKeys": [
{
"path": "/home/" + user + "/.ssh/authorized_keys",
"key_data": "{key}".format(key=ssh_pub_key)
}
]
},
"disablePasswordAuthentication": True,
},
"custom_data": base64.b64encode(user_data_script)
}
return profile
def __get_storage_profile(self, disk, image, instance_type, instance_name=None, swap_size=None):
disk_type = self.__get_disk_type(instance_type)
disk_name = None if instance_name is None else instance_name + "-data"
disk_lun = 62
data_disks = [self.__get_data_disk(disk, disk_type, disk_lun, disk_name=disk_name)]
if swap_size is not None and swap_size > 0:
swap_name = None if instance_name is None else instance_name + "-swap"
data_disks.append(self.__get_data_disk(swap_size, disk_type, disk_lun + 1, disk_name=swap_name))
return {
'image_reference': {
'id': image.id
},
"osDisk": {
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": disk_type
},
"createOption": "FromImage"
},
"dataDisks": data_disks
}
def __get_data_disk(self, size, disk_type, lun, disk_name=None):
disk = {
"diskSizeGB": size,
"lun": lun,
"createOption": "Empty",
"managedDisk": {
"storageAccountType": disk_type
}
}
if disk_name is not None:
disk["name"] = disk_name
return disk
def __get_instance_name_and_private_ip_from_vmss(self, scale_set_name):
vm_vmss_id = None
for vm in self.compute_client.virtual_machine_scale_set_vms.list(self.resource_group_name, scale_set_name):
vm_vmss_id = vm.instance_id
break
if vm_vmss_id is None:
raise RuntimeError('Failed to find instance in ScaleSet: {}. Seems that instance was preempted.'.format(scale_set_name))
instance_name = self.compute_client.virtual_machine_scale_set_vms \
.get_instance_view(self.resource_group_name, scale_set_name, vm_vmss_id) \
.additional_properties["computerName"]
private_ip = self.network_client.network_interfaces. \
get_virtual_machine_scale_set_ip_configuration(self.resource_group_name, scale_set_name, vm_vmss_id,
scale_set_name + "-nic", scale_set_name + "-ip") \
.private_ip_address
return instance_name, private_ip
def __resolve_azure_api(self, resource):
""" This method retrieves the latest non-preview api version for
the given resource (unless the preview version is the only available
api version) """
provider = self.resource_client.providers.get(resource.id.split('/')[6])
rt = next((t for t in provider.resource_types
if t.resource_type == '/'.join(resource.type.split('/')[1:])), None)
if rt and 'api_versions' in rt.__dict__:
api_version = [v for v in rt.__dict__['api_versions'] if 'preview' not in v.lower()]
return api_version[0] if api_version else rt.__dict__['api_versions'][0]
def __delete_all_by_run_id(self, run_id):
resources = []
resources.extend(self.resource_client.resources.list(filter="tagName eq 'Name' and tagValue eq '" + run_id + "'"))
# sort resources so that the VM and NIC are deleted first,
# because they have attached resources (disks and IPs)
resources.sort(key=functools.cmp_to_key(azure_resource_type_cmp))
vm_name = resources[0].name if str(resources[0].type).split('/')[-1].startswith('virtualMachine') else resources[0].name[0:len(VM_NAME_PREFIX) + UUID_LENGTH]
if str(resources[0].type).split('/')[-1] == 'virtualMachines':
self.__detach_disks_and_nic(vm_name)
for resource in resources:
self.resource_client.resources.delete(
resource_group_name=resource.id.split('/')[4],
resource_provider_namespace=resource.id.split('/')[6],
parent_resource_path='',
resource_type=str(resource.type).split('/')[-1],
resource_name=resource.name,
api_version=self.__resolve_azure_api(resource),
parameters=resource
).wait()
def __detach_disks_and_nic(self, vm_name):
self.compute_client.virtual_machines.delete(self.resource_group_name, vm_name).wait()
try:
nic = self.network_client.network_interfaces.get(self.resource_group_name, vm_name + '-nic')
nic.ip_configurations[0].public_ip_address = None
self.network_client.network_interfaces.create_or_update(self.resource_group_name, vm_name + '-nic', nic).wait()
except Exception as e:
print e
def get_any_network_from_location(self, location):
resource_group, network, subnet = None, None, None
for vnet in self.resource_client.resources.list(filter="resourceType eq 'Microsoft.Network/virtualNetworks' "
"and location eq '{}' "
"and resourceGroup eq '{}'".format(location, self.resource_group_name)):
resource_group, network = AzureInstanceProvider.get_res_grp_and_res_name_from_string(vnet.id, 'virtualNetworks')
break
if not resource_group or not network:
return resource_group, network, subnet
for subnet_res in self.network_client.subnets.list(resource_group, network):
subnet = AzureInstanceProvider.get_subnet_name_from_id(subnet_res.id)
break
return resource_group, network, subnet
@staticmethod
def get_subnet_name_from_id(subnet_id):
if "/" not in subnet_id:
return subnet_id
subnet_params = subnet_id.split("/")
# according to /subscriptions/<sub>/resourceGroups/<res_grp>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>
if len(subnet_params) == 11 and subnet_params[3] == "resourceGroups" and subnet_params[9] == "subnets":
return subnet_params[10]
else:
raise RuntimeError("Subnet dont match form of the Azure ID "
"/subscriptions/<sub>/resourceGroups/<res_grp>/providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>: {}".format(subnet_id))
@staticmethod
def get_res_grp_and_res_name_from_string(resource_id, resource_type):
resource_params = resource_id.split("/")
if len(resource_params) == 2:
resource_group, resource = resource_params[0], resource_params[1]
# according to full ID form: /subscriptions/<sub-id>/resourceGroups/<res-grp>/providers/Microsoft.Compute/images/<image>
elif len(resource_params) == 9 and resource_params[3] == 'resourceGroups' and resource_params[7] == resource_type:
resource_group, resource = resource_params[4], resource_params[8]
else:
raise RuntimeError(
"Resource parameter doesn't match to Azure resource name convention: <resource_group>/<resource_name>"
" or full resource id: /subscriptions/<sub-id>/resourceGroups/<res-grp>/providers/Microsoft.Compute/<type>/<name>. "
"Node Up process will be stopped.")
return resource_group, resource
@staticmethod
def resource_tags():
tags = {}
_, config_tags = utils.load_cloud_config()
if config_tags is None:
return tags
for key, value in config_tags.iteritems():
tags.update({key: value})
return tags
@staticmethod
def run_id_tag(run_id):
return {
'Name': run_id,
}
@staticmethod
def get_tags(run_id):
tags = AzureInstanceProvider.run_id_tag(run_id)
res_tags = AzureInstanceProvider.resource_tags()
if res_tags:
tags.update(res_tags)
return tags
|
[] |
[] |
[
"AZURE_RESOURCE_GROUP"
] |
[]
|
["AZURE_RESOURCE_GROUP"]
|
python
| 1 | 0 | |
cmd/gateway-main.go
|
/*
* Minio Cloud Storage, (C) 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"github.com/gorilla/mux"
"github.com/minio/cli"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/certs"
)
func init() {
logger.Init(GOPATH, GOROOT)
logger.RegisterUIError(fmtError)
}
var (
gatewayCmd = cli.Command{
Name: "gateway",
Usage: "start object storage gateway",
Flags: append(serverFlags, globalFlags...),
HideHelpCommand: true,
}
)
// RegisterGatewayCommand registers a new command for gateway.
func RegisterGatewayCommand(cmd cli.Command) error {
cmd.Flags = append(append(cmd.Flags, append(cmd.Flags, serverFlags...)...), globalFlags...)
gatewayCmd.Subcommands = append(gatewayCmd.Subcommands, cmd)
return nil
}
// ParseGatewayEndpoint - Return endpoint.
func ParseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) {
schemeSpecified := len(strings.Split(arg, "://")) > 1
if !schemeSpecified {
// Default connection will be "secure".
arg = "https://" + arg
}
u, err := url.Parse(arg)
if err != nil {
return "", false, err
}
switch u.Scheme {
case "http":
return u.Host, false, nil
case "https":
return u.Host, true, nil
default:
return "", false, fmt.Errorf("Unrecognized scheme %s", u.Scheme)
}
}
// ValidateGatewayArguments - Validate gateway arguments.
func ValidateGatewayArguments(serverAddr, endpointAddr string) error {
if err := CheckLocalServerAddr(serverAddr); err != nil {
return err
}
if endpointAddr != "" {
// Reject the endpoint if it points to the gateway handler itself.
sameTarget, err := sameLocalAddrs(endpointAddr, serverAddr)
if err != nil {
return err
}
if sameTarget {
return fmt.Errorf("endpoint points to the local gateway")
}
}
return nil
}
// StartGateway - handler for 'minio gateway <name>'.
func StartGateway(ctx *cli.Context, gw Gateway) {
if gw == nil {
logger.FatalIf(errUnexpected, "Gateway implementation not initialized")
}
// Disable logging until gateway initialization is complete; any
// error during initialization will be shown as a fatal message.
logger.Disable = true
// Validate if we have access, secret set through environment.
gatewayName := gw.Name()
if ctx.Args().First() == "help" {
cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
}
// Get "json" flag from command line argument and
// enable json and quiet modes if the json flag is turned on.
jsonFlag := ctx.IsSet("json") || ctx.GlobalIsSet("json")
if jsonFlag {
logger.EnableJSON()
}
// Get quiet flag from command line argument.
quietFlag := ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if quietFlag {
logger.EnableQuiet()
}
// Fetch address option
gatewayAddr := ctx.GlobalString("address")
if gatewayAddr == ":"+globalMinioPort {
gatewayAddr = ctx.String("address")
}
// Handle common command args.
handleCommonCmdArgs(ctx)
// Get port to listen on from gateway address
globalMinioHost, globalMinioPort = mustSplitHostPort(gatewayAddr)
// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
// to the IPv6 address, i.e. minio will start listening on IPv6 whereas another
// (non-)minio process is listening on IPv4 of given port.
// To avoid this error situation we check for port availability.
logger.FatalIf(checkPortAvailability(globalMinioPort), "Unable to start the gateway")
// Create certs path.
logger.FatalIf(createConfigDir(), "Unable to create configuration directories")
// Check and load TLS certificates.
var err error
globalPublicCerts, globalTLSCerts, globalIsSSL, err = getTLSConfig()
logger.FatalIf(err, "Invalid TLS certificate file")
// Check and load Root CAs.
globalRootCAs, err = getRootCAs(getCADir())
logger.FatalIf(err, "Failed to read root CAs (%v)", err)
// Handle common env vars.
handleCommonEnvVars()
// Validate if we have access, secret set through environment.
if !globalIsEnvCreds {
logger.Fatal(uiErrEnvCredentialsMissingGateway(nil), "Unable to start gateway")
}
// Set system resources to maximum.
logger.LogIf(context.Background(), setMaxResources())
initNSLock(false) // Enable local namespace lock.
router := mux.NewRouter().SkipClean(true)
if globalEtcdClient != nil {
// Enable STS router if etcd is enabled.
registerSTSRouter(router)
// Enable admin router if etcd is enabled.
registerAdminRouter(router)
}
// Add healthcheck router
registerHealthCheckRouter(router)
// Add server metrics router
registerMetricsRouter(router)
// Register web router when it's enabled.
if globalIsBrowserEnabled {
logger.FatalIf(registerWebRouter(router), "Unable to configure web browser")
}
// Add API router.
registerAPIRouter(router)
// Dummy endpoint representing gateway instance.
globalEndpoints = []Endpoint{{
URL: &url.URL{Path: "/minio/gateway"},
IsLocal: true,
}}
// Initialize Admin Peers.
initGlobalAdminPeers(globalEndpoints)
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
globalHTTPServer = xhttp.NewServer([]string{gatewayAddr}, criticalErrorHandler{registerHandlers(router, globalHandlers...)}, getCert)
globalHTTPServer.UpdateBytesReadFunc = globalConnStats.incInputBytes
globalHTTPServer.UpdateBytesWrittenFunc = globalConnStats.incOutputBytes
go func() {
globalHTTPServerErrorCh <- globalHTTPServer.Start()
}()
signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)
newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential())
if err != nil {
// Stop watching for any certificate changes.
globalTLSCerts.Stop()
globalHTTPServer.Shutdown()
logger.FatalIf(err, "Unable to initialize gateway backend")
}
// Create a new config system.
globalConfigSys = NewConfigSys()
if globalEtcdClient != nil && gatewayName == "nas" {
// Initialize server config.
_ = globalConfigSys.Init(newObject)
} else {
// Initialize server config.
srvCfg := newServerConfig()
// Override any values from ENVs.
srvCfg.loadFromEnvs()
// Load values to cached global values.
srvCfg.loadToCachedConfigs()
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
}
// Load logger subsystem
loadLoggers()
// This is only to uniquely identify each gateway deployment.
globalDeploymentID = os.Getenv("MINIO_GATEWAY_DEPLOYMENT_ID")
var cacheConfig = globalServerConfig.GetCacheConfig()
if len(cacheConfig.Drives) > 0 {
var err error
// initialize the new disk cache objects.
globalCacheObjectAPI, err = newServerCacheObjects(cacheConfig)
logger.FatalIf(err, "Unable to initialize disk caching")
}
// Re-enable logging
logger.Disable = false
// Create new IAM system.
globalIAMSys = NewIAMSys()
if globalEtcdClient != nil {
// Initialize IAM sys.
_ = globalIAMSys.Init(newObject)
}
// Create new policy system.
globalPolicySys = NewPolicySys()
// Initialize policy system.
go globalPolicySys.Init(newObject)
// Create new notification system.
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
if globalEtcdClient != nil && newObject.IsNotificationSupported() {
_ = globalNotificationSys.Init(newObject)
}
if globalAutoEncryption && !newObject.IsEncryptionSupported() {
logger.Fatal(errors.New("Invalid KMS configuration"), "auto-encryption is enabled but gateway does not support encryption")
}
// Once endpoints are finalized, initialize the new object api.
globalObjLayerMutex.Lock()
globalObjectAPI = newObject
globalObjLayerMutex.Unlock()
// Prints the formatted startup message once object layer is initialized.
if !quietFlag {
mode := globalMinioModeGatewayPrefix + gatewayName
// Check update mode.
checkUpdate(mode)
// Print a warning message if gateway is not ready for production before the startup banner.
if !gw.Production() {
logger.StartupMessage(colorYellow(" *** Warning: Not Ready for Production ***"))
}
// Print gateway startup message.
printGatewayStartupMessage(getAPIEndpoints(), gatewayName)
}
handleSignals()
}
|
[
"\"MINIO_GATEWAY_DEPLOYMENT_ID\""
] |
[] |
[
"MINIO_GATEWAY_DEPLOYMENT_ID"
] |
[]
|
["MINIO_GATEWAY_DEPLOYMENT_ID"]
|
go
| 1 | 0 | |
cmd/ruby/functions_framework/main.go
|
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Implements ruby/functions_framework buildpack.
// The functions_framework buildpack sets up the execution environment to
// run the Ruby Functions Framework. The framework itself, with its converter,
// is always installed as a dependency.
package main
import (
"os"
"github.com/GoogleCloudPlatform/buildpacks/pkg/env"
gcp "github.com/GoogleCloudPlatform/buildpacks/pkg/gcpbuildpack"
"github.com/buildpack/libbuildpack/layers"
)
const (
layerName = "functions-framework"
)
func main() {
gcp.Main(detectFn, buildFn)
}
func detectFn(ctx *gcp.Context) error {
if _, ok := os.LookupEnv(env.FunctionTarget); ok {
ctx.OptIn("%s set", env.FunctionTarget)
}
// TODO(b/154846199): For compatibility with GCF; this will be removed later.
if os.Getenv("CNB_STACK_ID") != "google" {
if _, ok := os.LookupEnv(env.FunctionTargetLaunch); ok {
ctx.OptIn("%s set", env.FunctionTargetLaunch)
}
}
ctx.OptOut("%s not set", env.FunctionTarget)
return nil
}
func buildFn(ctx *gcp.Context) error {
if err := validateSource(ctx); err != nil {
return err
}
// The framework has been installed with the dependencies, so this layer is
// used only for env vars.
l := ctx.Layer(layerName)
ctx.WriteMetadata(l, nil, layers.Launch)
ctx.SetFunctionsEnvVars(l)
// Verify that the framework is installed and ready.
// TODO(b/156038129): Implement a --verify flag in the functions framework
// that also checks the actual function for readiness.
cmd := []string{"bundle", "exec", "functions-framework", "--help"}
if _, err := ctx.ExecWithErr(cmd, gcp.WithUserAttribution); err != nil {
return gcp.UserErrorf("unable to execute functions-framework; please ensure the functions_framework gem is in your Gemfile")
}
ctx.AddWebProcess([]string{"bundle", "exec", "functions-framework"})
return nil
}
func validateSource(ctx *gcp.Context) error {
// Fail if the default|custom source file doesn't exist, otherwise the app will fail at runtime but still build here.
fnSource, ok := os.LookupEnv(env.FunctionSource)
if ok && !ctx.FileExists(fnSource) {
return gcp.UserErrorf("%s specified file '%s' but it does not exist", env.FunctionSource, fnSource)
}
return nil
}
|
[
"\"CNB_STACK_ID\""
] |
[] |
[
"CNB_STACK_ID"
] |
[]
|
["CNB_STACK_ID"]
|
go
| 1 | 0 | |
pingdom/pingdom.go
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/url"
"os"
"regexp"
"strconv"
"strings"
pingdom "github.com/russellcardullo/go-pingdom/pingdom"
yaml "gopkg.in/yaml.v2"
)
// PingdomCheckDefaults represents the default values
type PingdomCheckDefaults struct {
TimeoutMS int `yaml:"timeout_ms"`
ResolutionMinutes int `yaml:"resolution_minutes"`
}
// PingdomCheck represents an individual check
type PingdomCheck struct {
URL string `yaml:"url"`
TimeoutMS int `yaml:"timeout_ms"`
ResolutionMinutes int `yaml:"resolution_minutes"`
Teams []string `yaml:"teams"`
Tags []string `yaml:"tags"`
Integrations []string `yaml:"integrations"`
NotifyWhenRestored bool `yaml:"notify_when_restored"`
}
// PingdomChecks represents the YAML config structure
type PingdomChecks struct {
UniqueTag string `yaml:"unique_tag"`
Defaults PingdomCheckDefaults `yaml:"defaults"`
Integrations []struct {
Name string `yaml:"name"`
ID int `yaml:"id"`
} `yaml:"integrations"`
Checks []PingdomCheck `yaml:"checks"`
}
func decomposeURL(input string) (encryption bool, hostname string, path string, err error) {
u, err := url.Parse(input)
if err != nil {
return false, "", "", err
}
encryption = u.Scheme == "https"
hostname = u.Hostname()
// We specifically avoid using the URL parser for the path component,
// relying on a regular expression instead. This is because the Go parser will
// convert `%2f` characters into `/` characters in the path, but the GitLab
// API (somewhat unusually) treats these as different.
//
// For this reason, we avoid the URL parser and rely on a regular expression
// See https://gitlab.com/gitlab-com/runbooks/merge_requests/1063#note_166398758
// for more details
var unparsedURLPathMatcher = regexp.MustCompile(`^https?://[^/]+(/.*)?$`)
matches := unparsedURLPathMatcher.FindStringSubmatch(input)
if len(matches) != 2 {
return false, "", "", fmt.Errorf("Unable to parse URL path: %v", input)
}
path = matches[1]
return encryption, hostname, path, nil
}
func (c PingdomCheck) name() string {
return fmt.Sprintf("check:%v", c.URL)
}
func (c PingdomCheck) getCheck(config PingdomChecks, teamMap map[string]pingdom.TeamResponse, integrationIDMap map[string]int) pingdom.Check {
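// Resolve effective settings: per-check values override config defaults, which in turn fall back to a 5000 ms timeout and 5 minute resolution.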
timeoutMS := c.TimeoutMS
if timeoutMS == 0 {
timeoutMS = config.Defaults.TimeoutMS
}
if timeoutMS == 0 {
timeoutMS = 5000
}
resolutionMinutes := c.ResolutionMinutes
if resolutionMinutes == 0 {
resolutionMinutes = config.Defaults.ResolutionMinutes
}
if resolutionMinutes == 0 {
resolutionMinutes = 5
}
teamIds := []int{}
for _, v := range c.Teams {
team, ok := teamMap[v]
if !ok {
log.Fatalf("Unable to find team %v", v)
}
teamID, err := strconv.Atoi(team.ID)
if err != nil {
log.Fatalf("TeamID is not an integer: %s", team.ID)
}
teamIds = append(teamIds, teamID)
}
integrationIDs := []int{}
for _, v := range c.Integrations {
integrationID, ok := integrationIDMap[v]
if !ok {
log.Fatalf("Unable to find integration %v", v)
}
integrationIDs = append(integrationIDs, integrationID)
}
tags := []string{config.UniqueTag}
for _, v := range c.Tags {
if v != "" {
tags = append(tags, v)
}
}
encryption, hostname, path, err := decomposeURL(c.URL)
if err != nil {
log.Fatalf("unable to parse URL: %v", err)
}
tagCSV := strings.Join(tags, ",")
return &pingdom.HttpCheck{
Name: c.name(),
Hostname: hostname,
Url: path,
Encryption: encryption,
Resolution: resolutionMinutes,
ResponseTimeThreshold: timeoutMS,
Tags: tagCSV,
TeamIds: teamIds,
IntegrationIds: integrationIDs,
NotifyWhenBackup: c.NotifyWhenRestored,
}
}
func findChecksForRemoval(configMap map[string]PingdomCheck, deployedChecks map[string]pingdom.CheckResponse) []pingdom.CheckResponse {
var result []pingdom.CheckResponse
for k, v := range deployedChecks {
if _, ok := configMap[k]; !ok {
result = append(result, v)
}
}
return result
}
func findChecksForUpdate(configMap map[string]PingdomCheck, deployedChecks map[string]pingdom.CheckResponse) []pingdom.CheckResponse {
var result []pingdom.CheckResponse
for k, v := range deployedChecks {
if _, ok := configMap[k]; ok {
result = append(result, v)
}
}
return result
}
func findChecksForInsertion(configMap map[string]PingdomCheck, deployedChecks map[string]pingdom.CheckResponse) []PingdomCheck {
var result []PingdomCheck
for _, v := range configMap {
_, present := deployedChecks[v.name()]
if !present {
log.Printf("%v has not been deployed: %v", v.name(), deployedChecks)
result = append(result, v)
}
}
return result
}
type pingdomCheckUpdater interface {
insert(name string, check pingdom.Check) error
update(id int, name string, check pingdom.Check) error
delete(id int, name string) error
}
type dryRunUpdater struct{}
func (c dryRunUpdater) insert(name string, check pingdom.Check) error {
log.Printf("dry-run: will create: %s", name)
return nil
}
func (c dryRunUpdater) update(id int, name string, check pingdom.Check) error {
log.Printf("dry-run: will update: %s (%s)", name, urlForPingdomCheck(id))
return nil
}
func (c dryRunUpdater) delete(id int, name string) error {
log.Printf("dry-run: will delete: %s (%s)", name, urlForPingdomCheck(id))
return nil
}
type executingUpdater struct {
client *pingdom.Client
}
func (c executingUpdater) insert(name string, check pingdom.Check) error {
log.Printf("execute: creating check: %s", name)
response, err := c.client.Checks.Create(check)
if err != nil {
return err
}
log.Println("execute: created check:", response)
return nil
}
func (c executingUpdater) update(id int, name string, check pingdom.Check) error {
log.Printf("execute: updating check: %s (%s)", name, urlForPingdomCheck(id))
response, err := c.client.Checks.Update(id, check)
if err != nil {
return err
}
log.Println("execute: updated check:", response)
return nil
}
func (c executingUpdater) delete(id int, name string) error {
log.Printf("execute: deleting check: %s (%s)", name, urlForPingdomCheck(id))
response, err := c.client.Checks.Delete(id)
if err != nil {
return err
}
log.Println("execute: deleted check:", response)
return nil
}
func validateResolutionMinutes(value int, checkName string) {
switch value {
case 1, 5, 15, 30, 60:
return
}
log.Fatalf("invalid value %v for `ResolutionMinutes` in %v. Allowed values are [1,5,15,30,60].", value, checkName)
}
func validateDefaults(defaults PingdomCheckDefaults) {
if defaults.ResolutionMinutes != 0 {
validateResolutionMinutes(defaults.ResolutionMinutes, "defaults")
}
}
func validateCheck(check PingdomCheck) {
if check.ResolutionMinutes != 0 {
validateResolutionMinutes(check.ResolutionMinutes, check.name())
}
}
func urlForPingdomCheck(id int) string {
return fmt.Sprintf("https://my.pingdom.com/newchecks/checks#check=%d", id)
}
var (
configurationFile = flag.String("config", "pingdom.yml", "Configuration File")
dryRun = flag.Bool("dry-run", false, "Enable dry-run mode")
)
func newClient() (*pingdom.Client, error) {
username := os.Getenv("PINGDOM_USERNAME")
password := os.Getenv("PINGDOM_PASSWORD")
appkey := os.Getenv("PINGDOM_APPKEY")
accountEmail := os.Getenv("PINGDOM_ACCOUNT_EMAIL")
if username == "" || password == "" || appkey == "" || accountEmail == "" {
return nil, fmt.Errorf("please configure the PINGDOM_USERNAME, PINGDOM_PASSWORD, PINGDOM_APPKEY, PINGDOM_ACCOUNT_EMAIL environment variables")
}
client := pingdom.NewMultiUserClient(username, password, appkey, accountEmail)
return client, nil
}
func main() {
flag.Parse()
yamlFile, err := ioutil.ReadFile(*configurationFile)
if err != nil {
log.Fatalf("unable to parse configuration %v: %v", *configurationFile, err)
}
var configuration PingdomChecks
err = yaml.Unmarshal(yamlFile, &configuration)
if err != nil {
log.Fatalf("unable to parse configuration %v: %v", *configurationFile, err)
}
validateDefaults(configuration.Defaults)
configMap := make(map[string]PingdomCheck)
for _, v := range configuration.Checks {
validateCheck(v)
configMap[v.name()] = v
}
integrationIDMap := make(map[string]int)
for _, v := range configuration.Integrations {
integrationIDMap[v.Name] = v.ID
}
client, err := newClient()
if err != nil {
log.Fatalf("unable to connect: %v", err)
}
var updater pingdomCheckUpdater
if *dryRun {
updater = dryRunUpdater{}
} else {
updater = executingUpdater{client: client}
}
teams, err := client.Teams.List()
if err != nil {
log.Fatalf("unable to list teams: %v", err)
}
teamMap := make(map[string]pingdom.TeamResponse)
for _, v := range teams {
teamMap[v.Name] = v
}
checks, err := client.Checks.List()
if err != nil {
log.Fatalf("unable to list checks: %v", err)
}
deployedChecks := make(map[string]pingdom.CheckResponse)
for _, v := range checks {
if strings.HasPrefix(v.Name, "check:") {
deployedChecks[v.Name] = v
}
}
forRemoval := findChecksForRemoval(configMap, deployedChecks)
forUpdate := findChecksForUpdate(configMap, deployedChecks)
forInsertion := findChecksForInsertion(configMap, deployedChecks)
// Do the inserts
for _, v := range forInsertion {
err := updater.insert(v.name(), v.getCheck(configuration, teamMap, integrationIDMap))
if err != nil {
log.Fatalf("insert failed: %v", err)
}
}
// Do the updates
for _, update := range forUpdate {
v, ok := configMap[update.Name]
if !ok {
log.Fatalf("Unable to lookup %s", update.Name)
}
err := updater.update(update.ID, v.name(), v.getCheck(configuration, teamMap, integrationIDMap))
if err != nil {
log.Fatalf("update failed: %v", err)
}
}
// Do the deletions
for _, d := range forRemoval {
err := updater.delete(d.ID, d.Name)
if err != nil {
log.Fatalf("delete failed: %v", err)
}
}
}
|
[
"\"PINGDOM_USERNAME\"",
"\"PINGDOM_PASSWORD\"",
"\"PINGDOM_APPKEY\"",
"\"PINGDOM_ACCOUNT_EMAIL\""
] |
[] |
[
"PINGDOM_APPKEY",
"PINGDOM_ACCOUNT_EMAIL",
"PINGDOM_PASSWORD",
"PINGDOM_USERNAME"
] |
[]
|
["PINGDOM_APPKEY", "PINGDOM_ACCOUNT_EMAIL", "PINGDOM_PASSWORD", "PINGDOM_USERNAME"]
|
go
| 4 | 0 | |
tickeys/config.py
|
import ConfigParser
import os
from logger import logger
class Configer():
"""docstring for Configer"""
def __init__(self, *arg):
try:
os.chdir(os.path.dirname(__file__))
except Exception:
pass
self.config_path = os.environ["HOME"] + "/.tickeys/tickeys.conf"
self.cf = ConfigParser.ConfigParser()
self.read_config()
def init_config(self):
self.style = 'mechanical'
self.volume = 1.0
self.pitch = 1.0
self.lang = 'en_US'
self.autostart = False
self.save_config()
def read_config(self):
try:
if not os.path.exists(self.config_path):
self.init_config()
else:
self.cf.read(self.config_path)
self.volume = self.cf.getfloat('options', 'volume')
self.pitch = self.cf.getfloat('options', 'pitch')
self.style = self.cf.get('options', 'style')
self.autostart = self.cf.get('options', 'autostart')
self.lang = self.cf.get('options', 'lang')
except Exception, e:
self.init_config()
logger.debug(e)
def save_config(self):
if not self.cf.sections():
self.cf.add_section('options')
self.cf.set('options', 'volume', self.volume)
self.cf.set('options', 'pitch', self.pitch)
self.cf.set('options', 'style', self.style)
self.cf.set('options', 'lang', self.lang)
self.cf.set('options', 'autostart', self.autostart)
with open(self.config_path, 'w') as f:
self.cf.write(f)
@property
def volume(self):
return self.volume
@property
def pitch(self):
return self.pitch
@property
def style(self):
return self.style
@property
def lang(self):
return self.lang
@property
def autostart(self):
return self.autostart
configer = Configer()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
supervisor/supervisor_suite_test.go
|
package supervisor_test
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"os"
"testing"
)
func TestSupervisor(t *testing.T) {
RegisterFailHandler(Fail)
wd, err := os.Getwd()
if err != nil {
panic(err)
}
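// Append the repo's ../bin directory to PATH so the specs can find the locally built binaries.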
os.Setenv("PATH", fmt.Sprintf("%s:%s/../bin", os.Getenv("PATH"), wd))
RunSpecs(t, "Supervisor Test Suite")
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
docs/conf.py
|
"""Sphinx docs build configuration file."""
import os
import inspect
import hcrystalball
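# Directory containing this conf.py, resolved against the current working directory.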
__location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe())))
extensions = [
"sphinx_automodapi.automodapi",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.ifconfig",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"nbsphinx",
"sphinx_gallery.load_style",
]
numpydoc_show_class_members = False
templates_path = ["_templates"]
source_suffix = [".rst"]
default_role = "py:obj"
master_doc = "index"
# General information about the project.
project = "hcrystalball"
copyright = "2020, Pavel, Michal, Jan, Attila and others[tm]"
version = hcrystalball.__version__
release = version
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
pygments_style = "sphinx"
html_theme = "sphinx_rtd_theme"
html_theme_options = {
# 'sidebar_width': '300px',
# 'page_width': '1000px'
}
html_logo = "_static/hcrystal_ball_logo_white.svg"
html_static_path = ["_static"]
html_context = {
"css_files": ["_static/theme_overrides.css"],
}
htmlhelp_basename = "hcrystalball-doc"
intersphinx_mapping = {
"sphinx": ("https://www.sphinx-doc.org/en/master", None),
"python": ("https://docs.python.org/3/", None),
"matplotlib": ("https://matplotlib.org", None),
"numpy": ("https://numpy.org/doc/stable/", None),
"sklearn": ("https://scikit-learn.org/stable", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference", None),
"statsmodels": ("https://www.statsmodels.org/dev", None),
"pmdarima": ("http://alkaline-ml.com/pmdarima/", None),
}
autosummary_generate = True
automodsumm_inherited_members = True
# Handling notebook execution
nbsphinx_execute = os.getenv("NBSPHINX_EXECUTE", "auto")
|
[] |
[] |
[
"NBSPHINX_EXECUTE"
] |
[]
|
["NBSPHINX_EXECUTE"]
|
python
| 1 | 0 | |
tests/unit_tests/test_pycaprio.py
|
import os
import pytest
from pycaprio import Pycaprio
from pycaprio.core.exceptions import ConfigurationNotProvided
@pytest.mark.parametrize('host, auth', [(None, None),
('', (None, None)),
('host', (None, None)),
('', ('test', 'test'))
])
def test_pycaprio_no_host_raises_exception(host, auth):
with pytest.raises(ConfigurationNotProvided):
Pycaprio(host, auth)
def test_pycaprio_gets_host_from_os_env():
os.environ['INCEPTION_HOST'] = 'host'
try:
Pycaprio(authentication=('a', 'b'))
except Exception:
pytest.fail()
def test_pycaprio_gets_auth_from_os_env():
os.environ['INCEPTION_USERNAME'] = 'test'
os.environ['INCEPTION_PASSWORD'] = 'passwd'
try:
Pycaprio(inception_host="host")
except Exception:
pytest.fail()
|
[] |
[] |
[
"INCEPTION_HOST",
"INCEPTION_USERNAME",
"INCEPTION_PASSWORD"
] |
[]
|
["INCEPTION_HOST", "INCEPTION_USERNAME", "INCEPTION_PASSWORD"]
|
python
| 3 | 0 | |
env/lib/python3.8/site-packages/plotly/validators/volume/slices/y/_show.py
|
import _plotly_utils.basevalidators
class ShowValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(self, plotly_name="show", parent_name="volume.slices.y", **kwargs):
super(ShowValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
mne/datasets/utils.py
|
# Authors: Alexandre Gramfort <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
# Denis Engemann <[email protected]>
# Stefan Appelhoff <[email protected]>
# License: BSD Style.
from collections import OrderedDict
import os
import os.path as op
import shutil
import tarfile
import stat
import sys
import zipfile
import tempfile
from distutils.version import LooseVersion
import numpy as np
from ._fsaverage.base import fetch_fsaverage
from .. import __version__ as mne_version
from ..label import read_labels_from_annot, Label, write_labels_to_annot
from ..utils import (get_config, set_config, _fetch_file, logger, warn,
verbose, get_subjects_dir, hashfunc, _pl)
from ..utils.docs import docdict
from ..externals.doccer import docformat
_data_path_doc = """Get path to local copy of {name} dataset.
Parameters
----------
path : None | str
Location of where to look for the {name} dataset.
If None, the environment variable or config parameter
``{conf}`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the {name} dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the {name} dataset even if a local copy exists.
update_path : bool | None
If True, set the ``{conf}`` in mne-python
config to the given path. If None, the user is prompted.
download : bool
If False and the {name} dataset has not been downloaded yet,
it will not be downloaded and the path will be returned as
'' (empty string). This is mostly used for debugging purposes
and can be safely ignored by most users.
%(verbose)s
Returns
-------
path : str
Path to {name} dataset directory.
"""
_data_path_doc = docformat(_data_path_doc, docdict)
_version_doc = """Get version of the local {name} dataset.
Returns
-------
version : str | None
Version of the {name} local dataset, or None if the dataset
does not exist locally.
"""
_bst_license_text = """
License
-------
This tutorial dataset (EEG and MRI data) remains a property of the MEG Lab,
McConnell Brain Imaging Center, Montreal Neurological Institute,
McGill University, Canada. Its use and transfer outside the Brainstorm
tutorial, e.g. for research purposes, is prohibited without written consent
from the MEG Lab.
If you reference this dataset in your publications, please:
1) acknowledge its authors: Elizabeth Bock, Esther Florin, Francois Tadel
and Sylvain Baillet, and
2) cite Brainstorm as indicated on the website:
http://neuroimage.usc.edu/brainstorm
For questions, please contact Francois Tadel ([email protected]).
"""
_hcp_mmp_license_text = """
License
-------
I request access to data collected by the Washington University - University
of Minnesota Consortium of the Human Connectome Project (WU-Minn HCP), and
I agree to the following:
1. I will not attempt to establish the identity of or attempt to contact any
of the included human subjects.
2. I understand that under no circumstances will the code that would link
these data to Protected Health Information be given to me, nor will any
additional information about individual human subjects be released to me
under these Open Access Data Use Terms.
3. I will comply with all relevant rules and regulations imposed by my
institution. This may mean that I need my research to be approved or
declared exempt by a committee that oversees research on human subjects,
e.g. my IRB or Ethics Committee. The released HCP data are not considered
de-identified, insofar as certain combinations of HCP Restricted Data
(available through a separate process) might allow identification of
individuals. Different committees operate under different national, state
and local laws and may interpret regulations differently, so it is
important to ask about this. If needed and upon request, the HCP will
provide a certificate stating that you have accepted the HCP Open Access
Data Use Terms.
4. I may redistribute original WU-Minn HCP Open Access data and any derived
data as long as the data are redistributed under these same Data Use Terms.
5. I will acknowledge the use of WU-Minn HCP data and data derived from
WU-Minn HCP data when publicly presenting any results or algorithms
that benefitted from their use.
1. Papers, book chapters, books, posters, oral presentations, and all
other printed and digital presentations of results derived from HCP
data should contain the following wording in the acknowledgments
section: "Data were provided [in part] by the Human Connectome
Project, WU-Minn Consortium (Principal Investigators: David Van Essen
and Kamil Ugurbil; 1U54MH091657) funded by the 16 NIH Institutes and
Centers that support the NIH Blueprint for Neuroscience Research; and
by the McDonnell Center for Systems Neuroscience at Washington
University."
2. Authors of publications or presentations using WU-Minn HCP data
should cite relevant publications describing the methods used by the
HCP to acquire and process the data. The specific publications that
are appropriate to cite in any given study will depend on what HCP
data were used and for what purposes. An annotated and appropriately
up-to-date list of publications that may warrant consideration is
available at http://www.humanconnectome.org/about/acknowledgehcp.html
3. The WU-Minn HCP Consortium as a whole should not be included as an
author of publications or presentations if this authorship would be
based solely on the use of WU-Minn HCP data.
6. Failure to abide by these guidelines will result in termination of my
privileges to access WU-Minn HCP data.
"""
def _dataset_version(path, name):
"""Get the version of the dataset."""
ver_fname = op.join(path, 'version.txt')
if op.exists(ver_fname):
with open(ver_fname, 'r') as fid:
version = fid.readline().strip() # version is on first line
else:
# Sample dataset versioning was introduced after 0.3
# SPM dataset was introduced with 0.7
version = '0.3' if name == 'sample' else '0.7'
return version
def _get_path(path, key, name):
"""Get a dataset path."""
# 1. Input
if path is not None:
if not isinstance(path, str):
raise ValueError('path must be a string or None')
return path
# 2. get_config(key)
# 3. get_config('MNE_DATA')
path = get_config(key, get_config('MNE_DATA'))
if path is not None:
return path
# 4. ~/mne_data (but use a fake home during testing so we don't
# unnecessarily create ~/mne_data)
logger.info('Using default location ~/mne_data for %s...' % name)
path = op.join(os.getenv('_MNE_FAKE_HOME_DIR',
op.expanduser("~")), 'mne_data')
if not op.exists(path):
logger.info('Creating ~/mne_data')
try:
os.mkdir(path)
except OSError:
raise OSError("User does not have write permissions "
"at '%s', try giving the path as an "
"argument to data_path() where user has "
"write permissions, for ex:data_path"
"('/home/xyz/me2/')" % (path))
return path
def _do_path_update(path, update_path, key, name):
"""Update path."""
path = op.abspath(path)
identical = get_config(key, '', use_env=False) == path
if not identical:
if update_path is None:
update_path = True
if '--update-dataset-path' in sys.argv:
answer = 'y'
else:
msg = ('Do you want to set the path:\n %s\nas the default '
'%s dataset path in the mne-python config [y]/n? '
% (path, name))
answer = input(msg)
if answer.lower() == 'n':
update_path = False
if update_path:
set_config(key, path, set_env=False)
return path
def _data_path(path=None, force_update=False, update_path=True, download=True,
name=None, check_version=False, return_version=False,
archive_name=None):
"""Aux function."""
key = {
'fake': 'MNE_DATASETS_FAKE_PATH',
'misc': 'MNE_DATASETS_MISC_PATH',
'sample': 'MNE_DATASETS_SAMPLE_PATH',
'spm': 'MNE_DATASETS_SPM_FACE_PATH',
'somato': 'MNE_DATASETS_SOMATO_PATH',
'brainstorm': 'MNE_DATASETS_BRAINSTORM_PATH',
'testing': 'MNE_DATASETS_TESTING_PATH',
'multimodal': 'MNE_DATASETS_MULTIMODAL_PATH',
'fnirs_motor': 'MNE_DATASETS_FNIRS_MOTOR_PATH',
'opm': 'MNE_DATASETS_OPM_PATH',
'visual_92_categories': 'MNE_DATASETS_VISUAL_92_CATEGORIES_PATH',
'kiloword': 'MNE_DATASETS_KILOWORD_PATH',
'mtrf': 'MNE_DATASETS_MTRF_PATH',
'fieldtrip_cmc': 'MNE_DATASETS_FIELDTRIP_CMC_PATH',
'phantom_4dbti': 'MNE_DATASETS_PHANTOM_4DBTI_PATH',
'limo': 'MNE_DATASETS_LIMO_PATH',
'refmeg_noise': 'MNE_DATASETS_REFMEG_NOISE_PATH',
}[name]
path = _get_path(path, key, name)
# To update the testing or misc dataset, push commits, then make a new
# release on GitHub. Then update the "releases" variable:
releases = dict(testing='0.103', misc='0.6')
# And also update the "md5_hashes['testing']" variable below.
# To update any other dataset, update the data archive itself (upload
# an updated version) and update the md5 hash.
# try to match url->archive_name->folder_name
urls = dict( # the URLs to use
brainstorm=dict(
bst_auditory='https://osf.io/5t9n8/download?version=1',
bst_phantom_ctf='https://osf.io/sxr8y/download?version=1',
bst_phantom_elekta='https://osf.io/dpcku/download?version=1',
bst_raw='https://osf.io/9675n/download?version=2',
bst_resting='https://osf.io/m7bd3/download?version=3'),
fake='https://github.com/mne-tools/mne-testing-data/raw/master/'
'datasets/foo.tgz',
misc='https://codeload.github.com/mne-tools/mne-misc-data/'
'tar.gz/%s' % releases['misc'],
sample='https://osf.io/86qa2/download?version=5',
somato='https://osf.io/tp4sg/download?version=6',
spm='https://osf.io/je4s8/download?version=2',
testing='https://codeload.github.com/mne-tools/mne-testing-data/'
'tar.gz/%s' % releases['testing'],
multimodal='https://ndownloader.figshare.com/files/5999598',
fnirs_motor='https://osf.io/dj3eh/download?version=1',
opm='https://osf.io/p6ae7/download?version=2',
visual_92_categories=[
'https://osf.io/8ejrs/download?version=1',
'https://osf.io/t4yjp/download?version=1'],
mtrf='https://osf.io/h85s2/download?version=1',
kiloword='https://osf.io/qkvf9/download?version=1',
fieldtrip_cmc='https://osf.io/j9b6s/download?version=1',
phantom_4dbti='https://osf.io/v2brw/download?version=2',
refmeg_noise='https://osf.io/drt6v/download?version=1',
)
# filename of the resulting downloaded archive (only needed if the URL
# name does not match resulting filename)
archive_names = dict(
fieldtrip_cmc='SubjectCMC.zip',
kiloword='MNE-kiloword-data.tar.gz',
misc='mne-misc-data-%s.tar.gz' % releases['misc'],
mtrf='mTRF_1.5.zip',
multimodal='MNE-multimodal-data.tar.gz',
fnirs_motor='MNE-fNIRS-motor-data.tgz',
opm='MNE-OPM-data.tar.gz',
sample='MNE-sample-data-processed.tar.gz',
somato='MNE-somato-data.tar.gz',
spm='MNE-spm-face.tar.gz',
testing='mne-testing-data-%s.tar.gz' % releases['testing'],
visual_92_categories=['MNE-visual_92_categories-data-part1.tar.gz',
'MNE-visual_92_categories-data-part2.tar.gz'],
phantom_4dbti='MNE-phantom-4DBTi.zip',
refmeg_noise='sample_reference_MEG_noise-raw.zip'
)
# original folder names that get extracted (only needed if the
# archive does not extract the right folder name; e.g., usually GitHub)
folder_origs = dict( # not listed means None (no need to move)
misc='mne-misc-data-%s' % releases['misc'],
testing='mne-testing-data-%s' % releases['testing'],
)
# finally, where we want them to extract to (only needed if the folder name
# is not the same as the last bit of the archive name without the file
# extension)
folder_names = dict(
brainstorm='MNE-brainstorm-data',
fake='foo',
misc='MNE-misc-data',
mtrf='mTRF_1.5',
sample='MNE-sample-data',
testing='MNE-testing-data',
visual_92_categories='MNE-visual_92_categories-data',
fieldtrip_cmc='MNE-fieldtrip_cmc-data',
phantom_4dbti='MNE-phantom-4DBTi',
refmeg_noise='MNE-refmeg-noise-data'
)
md5_hashes = dict(
brainstorm=dict(
bst_auditory='fa371a889a5688258896bfa29dd1700b',
bst_phantom_ctf='80819cb7f5b92d1a5289db3fb6acb33c',
bst_phantom_elekta='1badccbe17998d18cc373526e86a7aaf',
bst_raw='fa2efaaec3f3d462b319bc24898f440c',
bst_resting='70fc7bf9c3b97c4f2eab6260ee4a0430'),
fake='3194e9f7b46039bb050a74f3e1ae9908',
misc='e00808c3b05123059e2cf49ff276e919',
sample='12b75d1cb7df9dfb4ad73ed82f61094f',
somato='ea825966c0a1e9b2f84e3826c5500161',
spm='9f43f67150e3b694b523a21eb929ea75',
testing='f7753da1b277d5ccf79da83b0854bcb7',
multimodal='26ec847ae9ab80f58f204d09e2c08367',
fnirs_motor='c4935d19ddab35422a69f3326a01fef8',
opm='370ad1dcfd5c47e029e692c85358a374',
visual_92_categories=['74f50bbeb65740903eadc229c9fa759f',
'203410a98afc9df9ae8ba9f933370e20'],
kiloword='3a124170795abbd2e48aae8727e719a8',
mtrf='273a390ebbc48da2c3184b01a82e4636',
fieldtrip_cmc='6f9fd6520f9a66e20994423808d2528c',
phantom_4dbti='938a601440f3ffa780d20a17bae039ff',
refmeg_noise='779fecd890d98b73a4832e717d7c7c45'
)
assert set(md5_hashes.keys()) == set(urls.keys())
url = urls[name]
hash_ = md5_hashes[name]
folder_orig = folder_origs.get(name, None)
if name == 'brainstorm':
assert archive_name is not None
url = [url[archive_name.split('.')[0]]]
folder_path = [op.join(path, folder_names[name],
archive_name.split('.')[0])]
hash_ = [hash_[archive_name.split('.')[0]]]
archive_name = [archive_name]
else:
url = [url] if not isinstance(url, list) else url
hash_ = [hash_] if not isinstance(hash_, list) else hash_
archive_name = archive_names.get(name)
if archive_name is None:
archive_name = [u.split('/')[-1] for u in url]
if not isinstance(archive_name, list):
archive_name = [archive_name]
folder_path = [op.join(path, folder_names.get(name, a.split('.')[0]))
for a in archive_name]
if not isinstance(folder_orig, list):
folder_orig = [folder_orig] * len(url)
folder_path = [op.abspath(f) for f in folder_path]
assert hash_ is not None
assert all(isinstance(x, list) for x in (url, archive_name, hash_,
folder_path))
assert len(url) == len(archive_name) == len(hash_) == len(folder_path)
logger.debug('URL: %s' % (url,))
logger.debug('archive_name: %s' % (archive_name,))
logger.debug('hash: %s' % (hash_,))
logger.debug('folder_path: %s' % (folder_path,))
need_download = any(not op.exists(f) for f in folder_path)
if need_download and not download:
return ''
if need_download or force_update:
logger.debug('Downloading: need_download=%s, force_update=%s'
% (need_download, force_update))
for f in folder_path:
logger.debug(' Exists: %s: %s' % (f, op.exists(f)))
if name == 'brainstorm':
if '--accept-brainstorm-license' in sys.argv:
answer = 'y'
else:
answer = input('%sAgree (y/[n])? ' % _bst_license_text)
if answer.lower() != 'y':
raise RuntimeError('You must agree to the license to use this '
'dataset')
assert len(url) == len(hash_)
assert len(url) == len(archive_name)
assert len(url) == len(folder_orig)
assert len(url) == len(folder_path)
assert len(url) > 0
# 1. Get all the archives
full_name = list()
for u, an, h, fo in zip(url, archive_name, hash_, folder_orig):
remove_archive, full = _download(path, u, an, h)
full_name.append(full)
del archive_name
# 2. Extract all of the files
remove_dir = True
for u, fp, an, h, fo in zip(url, folder_path, full_name, hash_,
folder_orig):
_extract(path, name, fp, an, fo, remove_dir)
remove_dir = False # only do on first iteration
# 3. Remove all of the archives
if remove_archive:
for an in full_name:
os.remove(op.join(path, an))
logger.info('Successfully extracted to: %s' % folder_path)
_do_path_update(path, update_path, key, name)
path = folder_path[0]
# compare the version of the dataset and mne
data_version = _dataset_version(path, name)
# 0.7 < 0.7.git should be False, therefore strip
if check_version and (LooseVersion(data_version) <
LooseVersion(mne_version.strip('.git'))):
warn('The {name} dataset (version {current}) is older than '
'mne-python (version {newest}). If the examples fail, '
'you may need to update the {name} dataset by using '
'mne.datasets.{name}.data_path(force_update=True)'.format(
name=name, current=data_version, newest=mne_version))
return (path, data_version) if return_version else path
def _download(path, url, archive_name, hash_, hash_type='md5'):
"""Download and extract an archive, completing the filename."""
martinos_path = '/cluster/fusion/sample_data/' + archive_name
neurospin_path = '/neurospin/tmp/gramfort/' + archive_name
remove_archive = False
if op.exists(martinos_path):
full_name = martinos_path
elif op.exists(neurospin_path):
full_name = neurospin_path
else:
full_name = op.join(path, archive_name)
remove_archive = True
fetch_archive = True
if op.exists(full_name):
logger.info('Archive exists (%s), checking hash %s.'
% (archive_name, hash_,))
fetch_archive = False
if hashfunc(full_name, hash_type=hash_type) != hash_:
if input('Archive already exists but the hash does not match: '
'%s\nOverwrite (y/[n])?'
% (archive_name,)).lower() == 'y':
os.remove(full_name)
fetch_archive = True
if fetch_archive:
logger.info('Downloading archive %s to %s' % (archive_name, path))
_fetch_file(url, full_name, print_destination=False,
hash_=hash_, hash_type=hash_type)
return remove_archive, full_name
def _extract(path, name, folder_path, archive_name, folder_orig, remove_dir):
if op.exists(folder_path) and remove_dir:
logger.info('Removing old directory: %s' % (folder_path,))
def onerror(func, path, exc_info):
"""Deal with access errors (e.g. testing dataset read-only)."""
# Is the error an access error ?
do = False
if not os.access(path, os.W_OK):
perm = os.stat(path).st_mode | stat.S_IWUSR
os.chmod(path, perm)
do = True
if not os.access(op.dirname(path), os.W_OK):
dir_perm = (os.stat(op.dirname(path)).st_mode |
stat.S_IWUSR)
os.chmod(op.dirname(path), dir_perm)
do = True
if do:
func(path)
else:
raise exc_info[1]
shutil.rmtree(folder_path, onerror=onerror)
logger.info('Decompressing the archive: %s' % archive_name)
logger.info('(please be patient, this can take some time)')
if name == 'fieldtrip_cmc':
extract_path = folder_path
elif name == 'brainstorm':
extract_path = op.join(*op.split(folder_path)[:-1])
else:
extract_path = path
if archive_name.endswith('.zip'):
with zipfile.ZipFile(archive_name, 'r') as ff:
ff.extractall(extract_path)
else:
if archive_name.endswith('.bz2'):
ext = 'bz2'
else:
ext = 'gz'
with tarfile.open(archive_name, 'r:%s' % ext) as tf:
tf.extractall(path=extract_path)
if folder_orig is not None:
shutil.move(op.join(path, folder_orig), folder_path)
def _get_version(name):
"""Get a dataset version."""
if not has_dataset(name):
return None
if name.startswith('brainstorm'):
name, archive_name = name.split('.')
else:
archive_name = None
return _data_path(name=name, archive_name=archive_name,
return_version=True)[1]
def has_dataset(name):
"""Check for dataset presence.
Parameters
----------
name : str
The dataset name.
For brainstorm datasets, should be formatted like
"brainstorm.bst_raw".
Returns
-------
has : bool
True if the dataset is present.
"""
name = 'spm' if name == 'spm_face' else name
if name.startswith('brainstorm'):
name, archive_name = name.split('.')
endswith = archive_name
else:
archive_name = None
# XXX eventually should be refactored with data_path
endswith = {
'fieldtrip_cmc': 'MNE-fieldtrip_cmc-data',
'fake': 'foo',
'misc': 'MNE-misc-data',
'sample': 'MNE-sample-data',
'somato': 'MNE-somato-data',
'spm': 'MNE-spm-face',
'multimodal': 'MNE-multimodal-data',
'fnirs_motor': 'MNE-fNIRS-motor-data',
'opm': 'MNE-OPM-data',
'testing': 'MNE-testing-data',
'visual_92_categories': 'MNE-visual_92_categories-data',
'kiloword': 'MNE-kiloword-data',
'phantom_4dbti': 'MNE-phantom-4DBTi',
'mtrf': 'mTRF_1.5',
'refmeg_noise': 'MNE-refmeg-noise-data'
}[name]
dp = _data_path(download=False, name=name, check_version=False,
archive_name=archive_name)
return dp.endswith(endswith)
@verbose
def _download_all_example_data(verbose=True):
"""Download all datasets used in examples and tutorials."""
# This function is designed primarily to be used by CircleCI, to:
#
# 1. Streamline data downloading
# 2. Make CircleCI fail early (rather than later) if some necessary data
# cannot be retrieved.
# 3. Avoid download statuses and timing biases in rendered examples.
#
# verbose=True by default so we get nice status messages.
# Consider adding datasets from here to CircleCI for PR-auto-build
from . import (sample, testing, misc, spm_face, somato, brainstorm,
eegbci, multimodal, opm, hf_sef, mtrf, fieldtrip_cmc,
kiloword, phantom_4dbti, sleep_physionet, limo,
fnirs_motor, refmeg_noise)
sample_path = sample.data_path()
testing.data_path()
misc.data_path()
spm_face.data_path()
somato.data_path()
hf_sef.data_path()
multimodal.data_path()
fnirs_motor.data_path()
opm.data_path()
mtrf.data_path()
fieldtrip_cmc.data_path()
kiloword.data_path()
phantom_4dbti.data_path()
refmeg_noise.data_path()
sys.argv += ['--accept-brainstorm-license']
try:
brainstorm.bst_raw.data_path()
brainstorm.bst_auditory.data_path()
brainstorm.bst_resting.data_path()
brainstorm.bst_phantom_elekta.data_path()
brainstorm.bst_phantom_ctf.data_path()
finally:
sys.argv.pop(-1)
eegbci.load_data(1, [6, 10, 14], update_path=True)
for subj in range(4):
eegbci.load_data(subj + 1, runs=[3], update_path=True)
sleep_physionet.age.fetch_data(subjects=[0, 1], recording=[1],
update_path=True)
# If the user has SUBJECTS_DIR, respect it, if not, set it to the EEG one
# (probably on CircleCI, or otherwise advanced user)
fetch_fsaverage(None)
sys.argv += ['--accept-hcpmmp-license']
try:
fetch_hcp_mmp_parcellation(subjects_dir=sample_path + '/subjects')
finally:
sys.argv.pop(-1)
limo.load_data(subject=1, update_path=True)
@verbose
def fetch_aparc_sub_parcellation(subjects_dir=None, verbose=None):
"""Fetch the modified subdivided aparc parcellation.
This will download and install the subdivided aparc parcellation [1]_ files for
FreeSurfer's fsaverage to the specified directory.
Parameters
----------
subjects_dir : str | None
The subjects directory to use. The file will be placed in
``subjects_dir + '/fsaverage/label'``.
%(verbose)s
References
----------
.. [1] Khan S et al. (2018) Maturation trajectories of cortical
resting-state networks depend on the mediating frequency band.
Neuroimage 174 57-68.
""" # noqa: E501
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
destination = op.join(subjects_dir, 'fsaverage', 'label')
urls = dict(lh='https://osf.io/p92yb/download',
rh='https://osf.io/4kxny/download')
hashes = dict(lh='9e4d8d6b90242b7e4b0145353436ef77',
rh='dd6464db8e7762d969fc1d8087cd211b')
for hemi in ('lh', 'rh'):
fname = op.join(destination, '%s.aparc_sub.annot' % hemi)
if not op.isfile(fname):
_fetch_file(urls[hemi], fname, hash_=hashes[hemi])
@verbose
def fetch_hcp_mmp_parcellation(subjects_dir=None, combine=True, verbose=None):
"""Fetch the HCP-MMP parcellation.
This will download and install the HCP-MMP parcellation
:footcite:`GlasserEtAl2016` files for FreeSurfer's fsaverage
:footcite:`Mills2016` to the specified directory.
Parameters
----------
subjects_dir : str | None
The subjects directory to use. The file will be placed in
``subjects_dir + '/fsaverage/label'``.
combine : bool
If True, also produce the combined/reduced set of 23 labels per
hemisphere as ``HCPMMP1_combined.annot``
:footcite:`GlasserEtAl2016supp`.
%(verbose)s
Notes
-----
Use of this parcellation is subject to terms of use on the
`HCP-MMP webpage <https://balsa.wustl.edu/WN56>`_.
References
----------
.. footbibliography::
"""
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
destination = op.join(subjects_dir, 'fsaverage', 'label')
fnames = [op.join(destination, '%s.HCPMMP1.annot' % hemi)
for hemi in ('lh', 'rh')]
urls = dict(lh='https://ndownloader.figshare.com/files/5528816',
rh='https://ndownloader.figshare.com/files/5528819')
hashes = dict(lh='46a102b59b2fb1bb4bd62d51bf02e975',
rh='75e96b331940227bbcb07c1c791c2463')
if not all(op.isfile(fname) for fname in fnames):
if '--accept-hcpmmp-license' in sys.argv:
answer = 'y'
else:
answer = input('%s\nAgree (y/[n])? ' % _hcp_mmp_license_text)
if answer.lower() != 'y':
raise RuntimeError('You must agree to the license to use this '
'dataset')
for hemi, fname in zip(('lh', 'rh'), fnames):
if not op.isfile(fname):
_fetch_file(urls[hemi], fname, hash_=hashes[hemi])
if combine:
fnames = [op.join(destination, '%s.HCPMMP1_combined.annot' % hemi)
for hemi in ('lh', 'rh')]
if all(op.isfile(fname) for fname in fnames):
return
# otherwise, let's make them
logger.info('Creating combined labels')
groups = OrderedDict([
('Primary Visual Cortex (V1)',
('V1',)),
('Early Visual Cortex',
('V2', 'V3', 'V4')),
('Dorsal Stream Visual Cortex',
('V3A', 'V3B', 'V6', 'V6A', 'V7', 'IPS1')),
('Ventral Stream Visual Cortex',
('V8', 'VVC', 'PIT', 'FFC', 'VMV1', 'VMV2', 'VMV3')),
('MT+ Complex and Neighboring Visual Areas',
('V3CD', 'LO1', 'LO2', 'LO3', 'V4t', 'FST', 'MT', 'MST', 'PH')),
('Somatosensory and Motor Cortex',
('4', '3a', '3b', '1', '2')),
('Paracentral Lobular and Mid Cingulate Cortex',
('24dd', '24dv', '6mp', '6ma', 'SCEF', '5m', '5L', '5mv',)),
('Premotor Cortex',
('55b', '6d', '6a', 'FEF', '6v', '6r', 'PEF')),
('Posterior Opercular Cortex',
('43', 'FOP1', 'OP4', 'OP1', 'OP2-3', 'PFcm')),
('Early Auditory Cortex',
('A1', 'LBelt', 'MBelt', 'PBelt', 'RI')),
('Auditory Association Cortex',
('A4', 'A5', 'STSdp', 'STSda', 'STSvp', 'STSva', 'STGa', 'TA2',)),
('Insular and Frontal Opercular Cortex',
('52', 'PI', 'Ig', 'PoI1', 'PoI2', 'FOP2', 'FOP3',
'MI', 'AVI', 'AAIC', 'Pir', 'FOP4', 'FOP5')),
('Medial Temporal Cortex',
('H', 'PreS', 'EC', 'PeEc', 'PHA1', 'PHA2', 'PHA3',)),
('Lateral Temporal Cortex',
('PHT', 'TE1p', 'TE1m', 'TE1a', 'TE2p', 'TE2a',
'TGv', 'TGd', 'TF',)),
('Temporo-Parieto-Occipital Junction',
('TPOJ1', 'TPOJ2', 'TPOJ3', 'STV', 'PSL',)),
('Superior Parietal Cortex',
('LIPv', 'LIPd', 'VIP', 'AIP', 'MIP',
'7PC', '7AL', '7Am', '7PL', '7Pm',)),
('Inferior Parietal Cortex',
('PGp', 'PGs', 'PGi', 'PFm', 'PF', 'PFt', 'PFop',
'IP0', 'IP1', 'IP2',)),
('Posterior Cingulate Cortex',
('DVT', 'ProS', 'POS1', 'POS2', 'RSC', 'v23ab', 'd23ab',
'31pv', '31pd', '31a', '23d', '23c', 'PCV', '7m',)),
('Anterior Cingulate and Medial Prefrontal Cortex',
('33pr', 'p24pr', 'a24pr', 'p24', 'a24', 'p32pr', 'a32pr', 'd32',
'p32', 's32', '8BM', '9m', '10v', '10r', '25',)),
('Orbital and Polar Frontal Cortex',
('47s', '47m', 'a47r', '11l', '13l',
'a10p', 'p10p', '10pp', '10d', 'OFC', 'pOFC',)),
('Inferior Frontal Cortex',
('44', '45', 'IFJp', 'IFJa', 'IFSp', 'IFSa', '47l', 'p47r',)),
('DorsoLateral Prefrontal Cortex',
('8C', '8Av', 'i6-8', 's6-8', 'SFL', '8BL', '9p', '9a', '8Ad',
'p9-46v', 'a9-46v', '46', '9-46d',)),
('???',
('???',))])
assert len(groups) == 23
labels_out = list()
for hemi in ('lh', 'rh'):
labels = read_labels_from_annot('fsaverage', 'HCPMMP1', hemi=hemi,
subjects_dir=subjects_dir,
sort=False)
label_names = [
'???' if label.name.startswith('???') else
label.name.split('_')[1] for label in labels]
used = np.zeros(len(labels), bool)
for key, want in groups.items():
assert '\t' not in key
these_labels = [li for li, label_name in enumerate(label_names)
if label_name in want]
assert not used[these_labels].any()
assert len(these_labels) == len(want)
used[these_labels] = True
these_labels = [labels[li] for li in these_labels]
# take a weighted average to get the color
# (here color == task activation)
w = np.array([len(label.vertices) for label in these_labels])
w = w / float(w.sum())
color = np.dot(w, [label.color for label in these_labels])
these_labels = sum(these_labels,
Label([], subject='fsaverage', hemi=hemi))
these_labels.name = key
these_labels.color = color
labels_out.append(these_labels)
assert used.all()
assert len(labels_out) == 46
for hemi, side in (('lh', 'left'), ('rh', 'right')):
table_name = './%s.fsaverage164.label.gii' % (side,)
write_labels_to_annot(labels_out, 'fsaverage', 'HCPMMP1_combined',
hemi=hemi, subjects_dir=subjects_dir,
sort=False, table_name=table_name)
def _manifest_check_download(manifest_path, destination, url, hash_):
with open(manifest_path, 'r') as fid:
names = [name.strip() for name in fid.readlines()]
need = list()
for name in names:
if not op.isfile(op.join(destination, name)):
need.append(name)
logger.info('%d file%s missing from %s in %s'
% (len(need), _pl(need), manifest_path, destination))
if len(need) > 0:
with tempfile.TemporaryDirectory() as path:
logger.info('Downloading missing files remotely')
fname_path = op.join(path, 'temp.zip')
_fetch_file(url, fname_path, hash_=hash_)
logger.info('Extracting missing file%s' % (_pl(need),))
with zipfile.ZipFile(fname_path, 'r') as ff:
members = set(f for f in ff.namelist() if not f.endswith('/'))
missing = sorted(members.symmetric_difference(set(names)))
if len(missing):
raise RuntimeError('Zip file did not have correct names:'
'\n%s' % ('\n'.join(missing)))
for name in need:
ff.extract(name, path=destination)
logger.info('Successfully extracted %d file%s'
% (len(need), _pl(need)))
|
[] |
[] |
[
"_MNE_FAKE_HOME_DIR"
] |
[]
|
["_MNE_FAKE_HOME_DIR"]
|
python
| 1 | 0 | |
tests/test_hgvs_variantmapper_gcp.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import pprint
import re
import sys
import os
import unittest
if sys.version_info < (3, ):
import unicodecsv as csv
else:
import csv
import pytest
from hgvs.exceptions import HGVSError
import hgvs.dataproviders.uta
import hgvs.parser
import hgvs.sequencevariant
import hgvs.variantmapper
from support import CACHE
def gxp_file_reader(fn):
rdr = csv.DictReader(open(fn, "r"), delimiter=str("\t"))
for rec in rdr:
if rec["id"].startswith("#"):
continue
yield rec
@pytest.mark.mapping
class Test_VariantMapper(unittest.TestCase):
def setUp(self):
self.hdp = hgvs.dataproviders.uta.connect(mode=os.environ.get("HGVS_CACHE_MODE", "run"), cache=CACHE)
self.hm = hgvs.variantmapper.VariantMapper(self.hdp)
self.hp = hgvs.parser.Parser()
# ZCCHC3 -- one exon, + strand
# reece@[local]/uta_dev=> select hgnc,alt_strand,n_exons,tx_ac,alt_ac,s_cigars,cds_start_i,cds_end_i from bermuda.bermuda_data_mv where tx_ac = "NM_033089.6";
# ┌────────┬────────────┬─────────┬─────────────┬──────────────┬─────────────┬─────────────┬───────────┐
# │ hgnc │ alt_strand │ n_exons │ tx_ac │ alt_ac │ s_cigars │ cds_start_i │ cds_end_i │
# ├────────┼────────────┼─────────┼─────────────┼──────────────┼─────────────┼─────────────┼───────────┤
# │ ZCCHC3 │ 1 │ 1 │ NM_033089.6 │ NC_000020.10 │ 484=3I2275= │ 24 │ 1236 │
# └────────┴────────────┴─────────┴─────────────┴──────────────┴─────────────┴─────────────┴───────────┘
def test_ZCCHC3_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/ZCCHC3-dbSNP.tsv"):
self._test_gxp_mapping(rec)
# ORAI1 -- two exons, + strand
# reece@[local]/uta_dev=> select hgnc,alt_strand,n_exons,tx_ac,alt_ac,s_cigars,cds_start_i,cds_end_i from bermuda.bermuda_data_mv where tx_ac = "NM_032790.3";
# ┌───────┬────────────┬─────────┬─────────────┬──────────────┬──────────────────┬─────────────┬───────────┐
# │ hgnc │ alt_strand │ n_exons │ tx_ac │ alt_ac │ s_cigars │ cds_start_i │ cds_end_i │
# ├───────┼────────────┼─────────┼─────────────┼──────────────┼──────────────────┼─────────────┼───────────┤
# │ ORAI1 │ 1 │ 2 │ NM_032790.3 │ NC_000012.11 │ 319=6I177=;1000= │ 193 │ 1099 │
# └───────┴────────────┴─────────┴─────────────┴──────────────┴──────────────────┴─────────────┴───────────┘
def test_ORAI1_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/ORAI1-dbSNP.tsv"):
self._test_gxp_mapping(rec)
# FOLR3 -- multiple exons, + strand
# reece@[local]/uta_dev=> select hgnc,alt_strand,n_exons,tx_ac,alt_ac,s_cigars,cds_start_i,cds_end_i from bermuda.bermuda_data_mv where tx_ac = "NM_000804.2";
# ┌───────┬────────────┬─────────┬─────────────┬─────────────┬──────────────────────────────┬─────────────┬───────────┐
# │ hgnc │ alt_strand │ n_exons │ tx_ac │ alt_ac │ s_cigars │ cds_start_i │ cds_end_i │
# ├───────┼────────────┼─────────┼─────────────┼─────────────┼──────────────────────────────┼─────────────┼───────────┤
# │ FOLR3 │ 1 │ 5 │ NM_000804.2 │ NC_000011.9 │ 44=;174=;150=2D37=;136=;304= │ 50 │ 788 │
# └───────┴────────────┴─────────┴─────────────┴─────────────┴──────────────────────────────┴─────────────┴───────────┘
def test_FOLR3_dbSNP(self):
# TODO: CORE-158: g-to-c mapped insertions have incorrect interval bounds
for rec in gxp_file_reader("tests/data/gcp/FOLR3-dbSNP.tsv"):
self._test_gxp_mapping(rec)
# ADRA2B -- one exon, - strand
# reece@[local]/uta_dev=> select hgnc,alt_strand,n_exons,tx_ac,alt_ac,s_cigars,cds_start_i,cds_end_i from bermuda.bermuda_data_mv where tx_ac = "NM_000682.5";
# ┌────────┬────────────┬─────────┬─────────────┬──────────────┬─────────────┬─────────────┬───────────┐
# │ hgnc │ alt_strand │ n_exons │ tx_ac │ alt_ac │ s_cigars │ cds_start_i │ cds_end_i │
# ├────────┼────────────┼─────────┼─────────────┼──────────────┼─────────────┼─────────────┼───────────┤
# │ ADRA2B │ -1 │ 1 │ NM_000682.5 │ NC_000002.11 │ 891=9D2375= │ 0 │ 1353 │
# └────────┴────────────┴─────────┴─────────────┴──────────────┴─────────────┴─────────────┴───────────┘
def test_ADRA2B_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/ADRA2B-dbSNP.tsv"):
self._test_gxp_mapping(rec)
# JRK -- multiple exons, - strand
# reece@[local]/uta_dev=> select hgnc,alt_strand,n_exons,tx_ac,alt_ac,s_cigars,cds_start_i,cds_end_i from bermuda.bermuda_data_mv where tx_ac = "NM_001077527.1";
# ┌──────┬────────────┬─────────┬────────────────┬──────────────┬───────────────────────┬─────────────┬───────────┐
# │ hgnc │ alt_strand │ n_exons │ tx_ac │ alt_ac │ s_cigars │ cds_start_i │ cds_end_i │
# ├──────┼────────────┼─────────┼────────────────┼──────────────┼───────────────────────┼─────────────┼───────────┤
# │ JRK │ -1 │ 3 │ NM_001077527.1 │ NC_000008.10 │ 52=;1844=2I199=;1483= │ 514 │ 2185 │
# └──────┴────────────┴─────────┴────────────────┴──────────────┴───────────────────────┴─────────────┴───────────┘
def test_JRK_dbSNP(self):
# TODO: CORE-157: del26 on -1 strands gets reverse complemented as del62
for rec in gxp_file_reader("tests/data/gcp/JRK-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_NEFL_dbSNP(self):
for rec in gxp_file_reader("tests/data/gcp/NEFL-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_DNAH11_hgmd(self):
for rec in gxp_file_reader("tests/data/gcp/DNAH11-HGMD.tsv"):
self._test_gxp_mapping(rec)
def test_DNAH11_dbSNP_NM_003777(self):
for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP-NM_003777.tsv"):
self._test_gxp_mapping(rec)
def test_DNAH11_dbSNP_NM_001277115(self):
for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP-NM_001277115.tsv"):
self._test_gxp_mapping(rec)
@pytest.mark.regression
def test_regression(self):
for rec in gxp_file_reader("tests/data/gcp/regression.tsv"):
self._test_gxp_mapping(rec)
@pytest.mark.extra
def test_DNAH11_dbSNP_full(self):
for rec in gxp_file_reader("tests/data/gcp/DNAH11-dbSNP.tsv"):
self._test_gxp_mapping(rec)
def test_real(self):
for rec in gxp_file_reader("tests/data/gcp/real.tsv"):
self._test_gxp_mapping(rec)
def test_noncoding(self):
for rec in gxp_file_reader("tests/data/gcp/noncoding.tsv"):
self._test_gxp_mapping(rec)
def _test_gxp_mapping(self, rec):
"""given one record (row) of g, c/n/r, and p (optional) test variants, map
g->c/n/r, c/n/r->g, and c->p and verify equivalence
"""
def _rm_del_seq(vs):
return re.sub(r"del\w+ins", "delins", vs)
var_g = self.hp.parse_hgvs_variant(rec["HGVSg"])
var_x = self.hp.parse_hgvs_variant(rec["HGVSc"])
var_p = self.hp.parse_hgvs_variant(rec["HGVSp"]) if rec["HGVSp"] is not None and rec["HGVSp"] != "" else None
# g -> x
if var_x.type == "c":
var_x_test = self.hm.g_to_c(var_g, var_x.ac)
elif var_x.type == "n":
var_x_test = self.hm.g_to_n(var_g, var_x.ac)
self.assertEquals(
_rm_del_seq(str(var_x)),
_rm_del_seq(str(var_x_test)),
msg="%s != %s (%s; HGVSg=%s)" % (str(var_x_test), str(var_x), rec["id"], rec["HGVSg"]))
# c,n -> g
if var_x.type == "c":
var_g_test = self.hm.c_to_g(var_x, var_g.ac)
elif var_x.type == "n":
var_g_test = self.hm.n_to_g(var_x, var_g.ac)
self.assertEquals(
_rm_del_seq(str(var_g)),
_rm_del_seq(str(var_g_test)),
msg="%s != %s (%s; HGVSc=%s)" % (str(var_g_test), str(var_g), rec["id"], rec["HGVSc"]))
if var_p is not None:
# c -> p
hgvs_p_exp = str(var_p)
var_p_test = self.hm.c_to_p(var_x, var_p.ac)
if var_p.posedit and not var_p.posedit.uncertain:
# if expected value isn't uncertain, strip uncertain from test
var_p_test.posedit.uncertain = False
hgvs_p_test = str(var_p_test)
if re.search("Ter$", hgvs_p_exp):
# if expected value doesn't have a count, strip it from the test
hgvs_p_test = re.sub("Ter\d+$", "Ter", hgvs_p_test)
self.assertEquals(hgvs_p_exp, hgvs_p_test, msg="%s != %s (%s)" % (hgvs_p_exp, hgvs_p_test, rec["id"]))
if __name__ == "__main__":
unittest.main()
# <LICENSE>
# Copyright 2013-2015 HGVS Contributors (https://github.com/biocommons/hgvs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# </LICENSE>
|
[] |
[] |
[
"HGVS_CACHE_MODE"
] |
[]
|
["HGVS_CACHE_MODE"]
|
python
| 1 | 0 | |
interop/server/main.go
|
package main
import (
"crypto/tls"
"fmt"
"log"
"net"
"net/http"
"os"
"github.com/lucas-clemente/quic-go"
"github.com/lucas-clemente/quic-go/http3"
"github.com/lucas-clemente/quic-go/internal/testdata"
"github.com/lucas-clemente/quic-go/interop/http09"
)
var tlsConf *tls.Config
func main() {
logFile, err := os.Create("/logs/log.txt")
if err != nil {
fmt.Printf("Could not create log file: %s\n", err.Error())
os.Exit(1)
}
defer logFile.Close()
log.SetOutput(logFile)
keyLog, err := os.Create("/logs/keylogfile.txt")
if err != nil {
fmt.Printf("Could not create key log file: %s\n", err.Error())
os.Exit(1)
}
defer keyLog.Close()
testcase := os.Getenv("TESTCASE")
// a quic.Config that doesn't do a Retry
quicConf := &quic.Config{
AcceptToken: func(_ net.Addr, _ *quic.Token) bool { return true },
}
tlsConf = testdata.GetTLSConfig()
tlsConf.KeyLogWriter = keyLog
switch testcase {
case "versionnegotiation", "handshake", "transfer", "resumption", "multiconnect":
err = runHTTP09Server(quicConf)
case "retry":
// By default, quic-go performs a Retry on every incoming connection.
quicConf.AcceptToken = nil
err = runHTTP09Server(quicConf)
case "http3":
err = runHTTP3Server(quicConf)
default:
fmt.Printf("unsupported test case: %s\n", testcase)
os.Exit(127)
}
if err != nil {
fmt.Printf("Error running server: %s\n", err.Error())
os.Exit(1)
}
}
func runHTTP09Server(quicConf *quic.Config) error {
server := http09.Server{
Server: &http.Server{
Addr: "0.0.0.0:443",
TLSConfig: tlsConf,
},
QuicConfig: quicConf,
}
http.DefaultServeMux.Handle("/", http.FileServer(http.Dir("/www")))
return server.ListenAndServe()
}
func runHTTP3Server(quicConf *quic.Config) error {
server := http3.Server{
Server: &http.Server{
Addr: "0.0.0.0:443",
TLSConfig: tlsConf,
},
QuicConfig: quicConf,
}
http.DefaultServeMux.Handle("/", http.FileServer(http.Dir("/www")))
return server.ListenAndServe()
}
|
[
"\"TESTCASE\""
] |
[] |
[
"TESTCASE"
] |
[]
|
["TESTCASE"]
|
go
| 1 | 0 | |
autobahn_test.go
|
//go:build !js
// +build !js
package websocket_test
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"os"
"os/exec"
"strconv"
"strings"
"testing"
"time"
"github.com/zjhmale/websocket"
"github.com/zjhmale/websocket/internal/errd"
"github.com/zjhmale/websocket/internal/test/assert"
"github.com/zjhmale/websocket/internal/test/wstest"
)
var excludedAutobahnCases = []string{
// We skip the UTF-8 handling tests as there isn't any reason to reject invalid UTF-8, just
// more performance overhead.
"6.*", "7.5.1",
// We skip the tests related to requestMaxWindowBits as that is unimplemented due
// to limitations in compress/flate. See https://github.com/golang/go/issues/3155
// Same with klauspost/compress which doesn't allow adjusting the sliding window size.
"13.3.*", "13.4.*", "13.5.*", "13.6.*",
}
var autobahnCases = []string{"*"}
func TestAutobahn(t *testing.T) {
t.Parallel()
if os.Getenv("AUTOBAHN_TEST") == "" {
t.SkipNow()
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
defer cancel()
wstestURL, closeFn, err := wstestClientServer(ctx)
assert.Success(t, err)
defer closeFn()
err = waitWS(ctx, wstestURL)
assert.Success(t, err)
cases, err := wstestCaseCount(ctx, wstestURL)
assert.Success(t, err)
t.Run("cases", func(t *testing.T) {
for i := 1; i <= cases; i++ {
i := i
t.Run("", func(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5)
defer cancel()
c, _, err := websocket.Dial(ctx, fmt.Sprintf(wstestURL+"/runCase?case=%v&agent=main", i), nil)
assert.Success(t, err)
err = wstest.EchoLoop(ctx, c)
t.Logf("echoLoop: %v", err)
})
}
})
c, _, err := websocket.Dial(ctx, wstestURL+"/updateReports?agent=main", nil)
assert.Success(t, err)
c.Close(websocket.StatusNormalClosure, "")
checkWSTestIndex(t, "./ci/out/wstestClientReports/index.json")
}
func waitWS(ctx context.Context, url string) error {
ctx, cancel := context.WithTimeout(ctx, time.Second*5)
defer cancel()
for ctx.Err() == nil {
c, _, err := websocket.Dial(ctx, url, nil)
if err != nil {
continue
}
c.Close(websocket.StatusNormalClosure, "")
return nil
}
return ctx.Err()
}
func wstestClientServer(ctx context.Context) (url string, closeFn func(), err error) {
serverAddr, err := unusedListenAddr()
if err != nil {
return "", nil, err
}
url = "ws://" + serverAddr
specFile, err := tempJSONFile(map[string]interface{}{
"url": url,
"outdir": "ci/out/wstestClientReports",
"cases": autobahnCases,
"exclude-cases": excludedAutobahnCases,
})
if err != nil {
return "", nil, fmt.Errorf("failed to write spec: %w", err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Minute*15)
defer func() {
if err != nil {
cancel()
}
}()
args := []string{"--mode", "fuzzingserver", "--spec", specFile,
// Disables some server that runs as part of fuzzingserver mode.
// See https://github.com/crossbario/autobahn-testsuite/blob/058db3a36b7c3a1edf68c282307c6b899ca4857f/autobahntestsuite/autobahntestsuite/wstest.py#L124
"--webport=0",
}
wstest := exec.CommandContext(ctx, "wstest", args...)
err = wstest.Start()
if err != nil {
return "", nil, fmt.Errorf("failed to start wstest: %w", err)
}
return url, func() {
wstest.Process.Kill()
}, nil
}
func wstestCaseCount(ctx context.Context, url string) (cases int, err error) {
defer errd.Wrap(&err, "failed to get case count")
c, _, err := websocket.Dial(ctx, url+"/getCaseCount", nil)
if err != nil {
return 0, err
}
defer c.Close(websocket.StatusInternalError, "")
_, r, err := c.Reader(ctx)
if err != nil {
return 0, err
}
b, err := ioutil.ReadAll(r)
if err != nil {
return 0, err
}
cases, err = strconv.Atoi(string(b))
if err != nil {
return 0, err
}
c.Close(websocket.StatusNormalClosure, "")
return cases, nil
}
func checkWSTestIndex(t *testing.T, path string) {
wstestOut, err := ioutil.ReadFile(path)
assert.Success(t, err)
var indexJSON map[string]map[string]struct {
Behavior string `json:"behavior"`
BehaviorClose string `json:"behaviorClose"`
}
err = json.Unmarshal(wstestOut, &indexJSON)
assert.Success(t, err)
for _, tests := range indexJSON {
for test, result := range tests {
t.Run(test, func(t *testing.T) {
switch result.BehaviorClose {
case "OK", "INFORMATIONAL":
default:
t.Errorf("bad close behaviour")
}
switch result.Behavior {
case "OK", "NON-STRICT", "INFORMATIONAL":
default:
t.Errorf("failed")
}
})
}
}
if t.Failed() {
htmlPath := strings.Replace(path, ".json", ".html", 1)
t.Errorf("detected autobahn violation, see %q", htmlPath)
}
}
func unusedListenAddr() (_ string, err error) {
defer errd.Wrap(&err, "failed to get unused listen address")
l, err := net.Listen("tcp", "localhost:0")
if err != nil {
return "", err
}
l.Close()
return l.Addr().String(), nil
}
func tempJSONFile(v interface{}) (string, error) {
f, err := ioutil.TempFile("", "temp.json")
if err != nil {
return "", fmt.Errorf("temp file: %w", err)
}
defer f.Close()
e := json.NewEncoder(f)
e.SetIndent("", "\t")
err = e.Encode(v)
if err != nil {
return "", fmt.Errorf("json encode: %w", err)
}
err = f.Close()
if err != nil {
return "", fmt.Errorf("close temp file: %w", err)
}
return f.Name(), nil
}
|
[
"\"AUTOBAHN_TEST\""
] |
[] |
[
"AUTOBAHN_TEST"
] |
[]
|
["AUTOBAHN_TEST"]
|
go
| 1 | 0 | |
pkg/tests/end_to_end_tests/main_test.go
|
// This file and its contents are licensed under the Apache License 2.0.
// Please see the included NOTICE for copyright information and
// LICENSE for a copy of the license.
package end_to_end_tests
import (
"context"
"flag"
"fmt"
"io"
"os"
"path/filepath"
"testing"
"time"
"github.com/docker/go-connections/nat"
"github.com/jackc/pgx/v4"
"github.com/jackc/pgx/v4/pgxpool"
"github.com/testcontainers/testcontainers-go"
"github.com/timescale/promscale/pkg/internal/testhelpers"
"github.com/timescale/promscale/pkg/log"
"github.com/timescale/promscale/pkg/pgmodel"
"github.com/timescale/promscale/pkg/pgmodel/common/extension"
ingstr "github.com/timescale/promscale/pkg/pgmodel/ingestor"
"github.com/timescale/promscale/pkg/prompb"
tput "github.com/timescale/promscale/pkg/util/throughput"
"github.com/timescale/promscale/pkg/version"
_ "github.com/jackc/pgx/v4/stdlib"
)
var (
testDatabase = flag.String("database", "tmp_db_timescale_migrate_test", "database to run integration tests on")
updateGoldenFiles = flag.Bool("update", false, "update the golden files of this test")
useDocker = flag.Bool("use-docker", true, "start database using a docker container")
useTimescaleDB = flag.Bool("use-timescaledb", true, "use TimescaleDB")
// TODO (james): Replace hardcoded value
timescaleDockerImage = flag.String("timescale-docker-image", "ghcr.io/timescale/dev_promscale_extension:master-ts2-pg14", "TimescaleDB docker image to run tests against")
useMultinode = flag.Bool("use-multinode", false, "use TimescaleDB Multinode")
useTimescaleDBNightly = flag.Bool("use-timescaledb-nightly", false, "use TimescaleDB nightly images")
printLogs = flag.Bool("print-logs", false, "print TimescaleDB logs")
extendedTest = flag.Bool("extended-test", false, "run extended testing dataset and PromQL queries")
logLevel = flag.String("log-level", "debug", "Logging level")
pgContainer testcontainers.Container
pgContainerTestDataDir string
promContainer testcontainers.Container
promHost string
promPort nat.Port
// We need a different environment for testing exemplars. This is because the behaviour of exemplars and that of samples
// is very different in Prometheus; in a nutshell, exemplars are WAL-only. The moment a block is created, exemplars are lost.
// Hence, we need an isolated environment for testing exemplars. In the future, once exemplars become block compatible,
// we can remove this separation.
promExemplarContainer testcontainers.Container
promExemplarHost string
promExemplarPort nat.Port
// testOptions expects setExtensionState() to be called before using its value.
testOptions testhelpers.TestOptions
// timeseriesWithExemplars is a singleton instance for testing generated timeseries for exemplars based E2E tests.
// The caller must ensure it is used by copy only.
timeseriesWithExemplars []prompb.TimeSeries
)
func init() {
tput.InitWatcher(time.Second)
if err := os.Setenv("IS_TEST", "true"); err != nil {
// Test dependent behaviours call initializing a module more than once.
// An example of this is duplicate metrics registry in createAndRegister.
// This helps avoid such situations.
panic(err)
}
}
// setExtensionState sets the value of testOptions based on the input flags.
func setExtensionState() {
if *useTimescaleDB {
testOptions.UseTimescaleDB()
}
if *useTimescaleDBNightly {
testOptions.UseTimescaleNightly()
}
if *useMultinode {
testOptions.UseMultinode()
*useTimescaleDB = true
}
testOptions.SetTimescaleDockerImage(*timescaleDockerImage)
}
type testResult struct {
code int
}
func (t *testResult) Failed() bool {
return t.code != 0
}
func TestMain(m *testing.M) {
res := &testResult{1}
func() {
flag.Parse()
ctx := context.Background()
err := log.Init(log.Config{
Level: *logLevel,
})
if err != nil {
panic(err)
}
if !testing.Short() && *useDocker {
var err error
var closer io.Closer
setExtensionState()
pgContainerTestDataDir = generatePGTestDirFiles()
pgContainer, closer, err = testhelpers.StartPGContainer(
ctx,
res,
testOptions,
pgContainerTestDataDir,
*printLogs,
)
if err != nil {
fmt.Println("Error setting up container", err)
os.Exit(1)
}
_, storagePath, err := generatePrometheusWAL(false)
if err != nil {
fmt.Println("Error creating WAL file", err)
os.Exit(1)
}
promContainer, promHost, promPort, err = testhelpers.StartPromContainer(storagePath, ctx)
if err != nil {
fmt.Println("Error setting up container", err)
os.Exit(1)
}
var storageExemplarPath string
timeseriesWithExemplars, storageExemplarPath, err = generatePrometheusWAL(true)
if err != nil {
fmt.Println("Error creating WAL with exemplar file", err)
os.Exit(1)
}
promExemplarContainer, promExemplarHost, promExemplarPort, err = testhelpers.StartPromContainer(storageExemplarPath, ctx)
defer func() {
if err != nil {
panic(err)
}
if closer != nil {
_ = closer.Close()
}
err = promContainer.Terminate(ctx)
if err != nil {
panic(err)
}
}()
}
res.code = m.Run()
}()
os.Exit(res.code)
}
func attachDataNode2(t testing.TB, DBName string, connectURL string) {
db, err := pgx.Connect(context.Background(), testhelpers.PgConnectURL(DBName, testhelpers.Superuser))
if err != nil {
t.Fatal(err)
}
err = testhelpers.AddDataNode2(db, DBName)
if err != nil {
t.Fatal(err)
}
if err = db.Close(context.Background()); err != nil {
t.Fatal(err)
}
}
func addPromNode(t testing.TB, pool *pgxpool.Pool, attachExisting bool) {
_, err := pool.Exec(context.Background(), "CALL prom_api.add_prom_node('dn1', $1);", attachExisting)
if err != nil {
t.Fatal(err)
}
}
func withDB(t testing.TB, DBName string, f func(db *pgxpool.Pool, t testing.TB)) {
withDBAttachNode(t, DBName, true, nil, f)
}
/* When testing with multinode always add data node 2 after installing the extension, as that tests a strictly harder case */
func withDBAttachNode(t testing.TB, DBName string, attachExisting bool, beforeAddNode func(db *pgxpool.Pool, t testing.TB), afterAddNode func(db *pgxpool.Pool, t testing.TB)) {
testhelpers.WithDB(t, DBName, testhelpers.NoSuperuser, true, testOptions, func(_ *pgxpool.Pool, t testing.TB, connectURL string) {
performMigrate(t, connectURL)
if beforeAddNode != nil {
if !*useMultinode {
t.Fatal("Shouldn't be using beforeAddNode unless testing multinode")
}
func() {
pool, err := pgxpool.Connect(context.Background(), connectURL)
if err != nil {
t.Fatal(err)
}
defer pool.Close()
beforeAddNode(pool, t)
}()
}
if *useMultinode {
//add data node 2 /after/ install
attachDataNode2(t, DBName, connectURL)
}
// need to get a new pool after the Migrate to catch any GUC changes made during Migrate
pool, err := pgxpool.Connect(context.Background(), connectURL)
if err != nil {
t.Fatal(err)
}
defer pool.Close()
if *useMultinode {
//add prom node using the prom user (not superuser)
addPromNode(t, pool, attachExisting)
}
afterAddNode(pool, t)
})
}
func performMigrate(t testing.TB, connectURL string) {
extOptions := extension.ExtensionMigrateOptions{Install: true, Upgrade: true, UpgradePreRelease: true}
if *useTimescaleDB {
migrateURL := connectURL
err := extension.InstallUpgradeTimescaleDBExtensions(migrateURL, extOptions)
if err != nil {
t.Fatal(err)
}
}
migratePool, err := pgxpool.Connect(context.Background(), connectURL)
if err != nil {
t.Fatal(err)
}
defer migratePool.Close()
conn, err := migratePool.Acquire(context.Background())
if err != nil {
t.Fatal(err)
}
defer conn.Release()
err = pgmodel.Migrate(conn.Conn(), pgmodel.VersionInfo{Version: version.Promscale, CommitHash: "azxtestcommit"}, nil, extOptions)
if err != nil {
t.Fatal(err)
}
// after the migration, the promscale roles will exist
// we need to grant the prom_admin role to the prom user
testhelpers.MakePromUserPromAdmin(t, conn.Conn().Config().Database)
}
func generatePGTestDirFiles() string {
tmpDir, err := testhelpers.TempDir("testdata")
if err != nil {
log.Fatal(err)
}
err = os.Chmod(tmpDir, 0777)
if err != nil {
log.Fatal(err)
}
err = os.Mkdir(filepath.Join(tmpDir, "sql"), 0777)
if err != nil {
log.Fatal(err)
}
err = os.Mkdir(filepath.Join(tmpDir, "out"), 0777)
if err != nil {
log.Fatal(err)
}
//Mkdir obeys umask. have to force
err = os.Chmod(filepath.Join(tmpDir, "out"), 0777)
if err != nil {
log.Fatal(err)
}
files, err := filepath.Glob("../testdata/sql/*")
if err != nil {
log.Fatal(err)
}
for _, file := range files {
err = copyFile(file, filepath.Join(tmpDir, "sql", filepath.Base(file)))
if err != nil {
log.Fatal(err)
}
}
return tmpDir
}
func copyFile(src string, dest string) error {
sourceFile, err := os.Open(src)
if err != nil {
return err
}
defer sourceFile.Close()
newFile, err := os.Create(dest)
if err != nil {
return err
}
defer newFile.Close()
_, err = io.Copy(newFile, sourceFile)
if err != nil {
return err
}
return nil
}
// newWriteRequestWithTs returns a new *prompb.WriteRequest from the pool and applies ts to it if ts is not nil.
func newWriteRequestWithTs(ts []prompb.TimeSeries) *prompb.WriteRequest {
wr := ingstr.NewWriteRequest()
if ts != nil {
wr.Timeseries = ts
}
return wr
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
gclient.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Meta checkout manager supporting both Subversion and GIT."""
# Files
# .gclient : Current client configuration, written by 'config' command.
# Format is a Python script defining 'solutions', a list whose
# entries each are maps binding the strings "name" and "url"
# to strings specifying the name and location of the client
# module, as well as "custom_deps" to a map similar to the
# deps section of the DEPS file below, as well as
# "custom_hooks" to a list similar to the hooks sections of
# the DEPS file below (an example .gclient is shown after this list).
# .gclient_entries : A cache constructed by 'update' command. Format is a
# Python script defining 'entries', a list of the names
# of all modules in the client
# <module>/DEPS : Python script defining var 'deps' as a map from each
# requisite submodule name to a URL where it can be found (via
# one SCM)
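#
# Example .gclient file (an illustrative sketch only; the solution name and URL
# are placeholders, echoing the CHROMIUM_SRC_URL default used further below):
# solutions = [
# { "name" : "src",
# "url" : "https://chromium.googlesource.com/chromium/src.git",
# "custom_deps" : {},
# "custom_hooks" : [],
# },
# ]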
#
# Hooks
# .gclient and DEPS files may optionally contain a list named "hooks" to
# allow custom actions to be performed based on files that have changed in the
# working copy as a result of a "sync"/"update" or "revert" operation. This
# can be prevented by using --nohooks (hooks run by default). Hooks can also
# be forced to run with the "runhooks" operation. If "sync" is run with
# --force, all known but not suppressed hooks will run regardless of the state
# of the working copy.
#
# Each item in a "hooks" list is a dict, containing these two keys:
# "pattern" The associated value is a string containing a regular
# expression. When a file whose pathname matches the expression
# is checked out, updated, or reverted, the hook's "action" will
# run.
# "action" A list describing a command to run along with its arguments, if
# any. An action command will run at most one time per gclient
# invocation, regardless of how many files matched the pattern.
# The action is executed in the same directory as the .gclient
# file. If the first item in the list is the string "python",
# the current Python interpreter (sys.executable) will be used
# to run the command. If the list contains string
# "$matching_files" it will be removed from the list and the list
# will be extended by the list of matching files.
# "name" An optional string specifying the group to which a hook belongs
# for overriding and organizing.
#
# Example:
# hooks = [
# { "pattern": "\\.(gif|jpe?g|pr0n|png)$",
# "action": ["python", "image_indexer.py", "--all"]},
# { "pattern": ".",
# "name": "gyp",
# "action": ["python", "src/build/gyp_chromium"]},
# ]
#
# Pre-DEPS Hooks
# DEPS files may optionally contain a list named "pre_deps_hooks". These are
# the same as normal hooks, except that they run before the DEPS are
# processed. Pre-DEPS run with "sync" and "revert" unless the --noprehooks
# flag is used.
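#
# Example (illustrative; "prepare_checkout.py" is a placeholder script name):
# pre_deps_hooks = [
#   { "action": ["python", "src/tools/prepare_checkout.py"] },
# ]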
#
# Specifying a target OS
# An optional key named "target_os" may be added to a gclient file to specify
# one or more additional operating systems that should be considered when
# processing the deps_os dict of a DEPS file.
#
# Example:
# target_os = [ "android" ]
#
# If the "target_os_only" key is also present and true, then *only* the
# operating systems listed in "target_os" will be used.
#
# Example:
# target_os = [ "ios" ]
# target_os_only = True
from __future__ import print_function
__version__ = '0.7'
import ast
import copy
import json
import logging
import optparse
import os
import platform
import posixpath
import pprint
import re
import sys
import time
import urllib
import urlparse
import fix_encoding
import gclient_scm
import gclient_utils
import git_cache
from third_party.repo.progress import Progress
import subcommand
import subprocess2
import setup_color
CHROMIUM_SRC_URL = 'https://chromium.googlesource.com/chromium/src.git'
def ast_dict_index(dnode, key):
"""Search an ast.Dict for the argument key, and return its index."""
idx = [i for i in range(len(dnode.keys)) if (
type(dnode.keys[i]) is ast.Str and dnode.keys[i].s == key)]
if not idx:
return -1
elif len(idx) > 1:
raise gclient_utils.Error('Multiple dict entries with same key in AST')
return idx[-1]
def ast2str(node, indent=0):
"""Return a pretty-printed rendition of an ast.Node."""
t = type(node)
if t is ast.Module:
return '\n'.join([ast2str(x, indent) for x in node.body])
elif t is ast.Assign:
return ((' ' * indent) +
' = '.join([ast2str(x) for x in node.targets] +
[ast2str(node.value, indent)]) + '\n')
elif t is ast.Name:
return node.id
elif t is ast.List:
if not node.elts:
return '[]'
elif len(node.elts) == 1:
return '[' + ast2str(node.elts[0], indent) + ']'
return ('[\n' + (' ' * (indent + 1)) +
(',\n' + (' ' * (indent + 1))).join(
[ast2str(x, indent + 1) for x in node.elts]) +
'\n' + (' ' * indent) + ']')
elif t is ast.Dict:
if not node.keys:
return '{}'
elif len(node.keys) == 1:
return '{%s: %s}' % (ast2str(node.keys[0]),
ast2str(node.values[0], indent + 1))
return ('{\n' + (' ' * (indent + 1)) +
(',\n' + (' ' * (indent + 1))).join(
['%s: %s' % (ast2str(node.keys[i]),
ast2str(node.values[i], indent + 1))
for i in range(len(node.keys))]) +
'\n' + (' ' * indent) + '}')
elif t is ast.Str:
return "'%s'" % node.s
else:
raise gclient_utils.Error("Unexpected AST node at line %d, column %d: %s"
% (node.lineno, node.col_offset, t))
class GClientKeywords(object):
class FromImpl(object):
"""Used to implement the From() syntax."""
def __init__(self, module_name, sub_target_name=None):
"""module_name is the dep module we want to include from. It can also be
the name of a subdirectory to include from.
sub_target_name is an optional parameter if the module name in the other
DEPS file is different. E.g., you might want to map src/net to net."""
self.module_name = module_name
self.sub_target_name = sub_target_name
def __str__(self):
return 'From(%s, %s)' % (repr(self.module_name),
repr(self.sub_target_name))
class FileImpl(object):
"""Used to implement the File('') syntax which lets you sync a single file
from an SVN repo."""
def __init__(self, file_location):
self.file_location = file_location
def __str__(self):
return 'File("%s")' % self.file_location
def GetPath(self):
return os.path.split(self.file_location)[0]
def GetFilename(self):
rev_tokens = self.file_location.split('@')
return os.path.split(rev_tokens[0])[1]
def GetRevision(self):
rev_tokens = self.file_location.split('@')
if len(rev_tokens) > 1:
return rev_tokens[1]
return None
class VarImpl(object):
def __init__(self, custom_vars, local_scope):
self._custom_vars = custom_vars
self._local_scope = local_scope
def Lookup(self, var_name):
"""Implements the Var syntax."""
if var_name in self._custom_vars:
return self._custom_vars[var_name]
elif var_name in self._local_scope.get("vars", {}):
return self._local_scope["vars"][var_name]
raise gclient_utils.Error("Var is not defined: %s" % var_name)
class DependencySettings(GClientKeywords):
"""Immutable configuration settings."""
def __init__(
self, parent, url, safesync_url, managed, custom_deps, custom_vars,
custom_hooks, deps_file, should_process):
GClientKeywords.__init__(self)
# These are not mutable:
self._parent = parent
self._safesync_url = safesync_url
self._deps_file = deps_file
self._url = url
# 'managed' determines whether or not this dependency is synced/updated by
# gclient after gclient checks it out initially. The difference between
# 'managed' and 'should_process' is that the user specifies 'managed' via
# the --unmanaged command-line flag or a .gclient config, where
# 'should_process' is dynamically set by gclient if it goes over its
# recursion limit and controls gclient's behavior so it does not misbehave.
self._managed = managed
self._should_process = should_process
# This is a mutable value which has the list of 'target_os' OSes listed in
# the current deps file.
self.local_target_os = None
# These are only set in .gclient and not in DEPS files.
self._custom_vars = custom_vars or {}
self._custom_deps = custom_deps or {}
self._custom_hooks = custom_hooks or []
# TODO(iannucci): Remove this when all masters are correctly substituting
# the new blink url.
if (self._custom_vars.get('webkit_trunk', '') ==
'svn://svn-mirror.golo.chromium.org/webkit-readonly/trunk'):
new_url = 'svn://svn-mirror.golo.chromium.org/blink/trunk'
print('Overwriting Var("webkit_trunk") with %s' % new_url)
self._custom_vars['webkit_trunk'] = new_url
# Post process the url to remove trailing slashes.
if isinstance(self._url, basestring):
# urls are sometimes incorrectly written as proto://host/path/@rev. Replace
# it to proto://host/path@rev.
self._url = self._url.replace('/@', '@')
elif not isinstance(self._url,
(self.FromImpl, self.FileImpl, None.__class__)):
raise gclient_utils.Error(
('dependency url must be either a string, None, '
'File() or From() instead of %s') % self._url.__class__.__name__)
# Make any deps_file path platform-appropriate.
for sep in ['/', '\\']:
self._deps_file = self._deps_file.replace(sep, os.sep)
@property
def deps_file(self):
return self._deps_file
@property
def managed(self):
return self._managed
@property
def parent(self):
return self._parent
@property
def root(self):
"""Returns the root node, a GClient object."""
if not self.parent:
# This line is to signal pylint that it could be a GClient instance.
return self or GClient(None, None)
return self.parent.root
@property
def safesync_url(self):
return self._safesync_url
@property
def should_process(self):
"""True if this dependency should be processed, i.e. checked out."""
return self._should_process
@property
def custom_vars(self):
return self._custom_vars.copy()
@property
def custom_deps(self):
return self._custom_deps.copy()
@property
def custom_hooks(self):
return self._custom_hooks[:]
@property
def url(self):
return self._url
@property
def target_os(self):
if self.local_target_os is not None:
return tuple(set(self.local_target_os).union(self.parent.target_os))
else:
return self.parent.target_os
def get_custom_deps(self, name, url):
"""Returns a custom deps if applicable."""
if self.parent:
url = self.parent.get_custom_deps(name, url)
# None is a valid return value to disable a dependency.
return self.custom_deps.get(name, url)
class Dependency(gclient_utils.WorkItem, DependencySettings):
"""Object that represents a dependency checkout."""
def __init__(self, parent, name, url, safesync_url, managed, custom_deps,
custom_vars, custom_hooks, deps_file, should_process):
gclient_utils.WorkItem.__init__(self, name)
DependencySettings.__init__(
self, parent, url, safesync_url, managed, custom_deps, custom_vars,
custom_hooks, deps_file, should_process)
# This is in both .gclient and DEPS files:
self._deps_hooks = []
self._pre_deps_hooks = []
# Calculates properties:
self._parsed_url = None
self._dependencies = []
# A cache of the files affected by the current operation, necessary for
# hooks.
self._file_list = []
# List of host names from which dependencies are allowed.
# Default is an empty set, meaning it is unspecified in the DEPS file, and
# hence all hosts will be allowed. A non-empty set means a whitelist of hosts.
# allowed_hosts var is scoped to its DEPS file, and so it isn't recursive.
self._allowed_hosts = frozenset()
# If it is not set to True, the dependency wasn't processed for its child
# dependency, i.e. its DEPS wasn't read.
self._deps_parsed = False
# This dependency has been processed, i.e. checked out
self._processed = False
# This dependency had its pre-DEPS hooks run
self._pre_deps_hooks_ran = False
# This dependency had its hook run
self._hooks_ran = False
# This is the scm used to checkout self.url. It may be used by dependencies
# to get the datetime of the revision we checked out.
self._used_scm = None
self._used_revision = None
# The actual revision we ended up getting, or None if that information is
# unavailable
self._got_revision = None
# This is a mutable value that overrides the normal recursion limit for this
# dependency. It is read from the actual DEPS file so cannot be set on
# class instantiation.
self.recursion_override = None
# recursedeps is a mutable value that selectively overrides the default
# 'no recursion' setting on a dep-by-dep basis. It will replace
# recursion_override.
#
# It will be a dictionary of {deps_name: {"deps_file": depfile_name}} or
# None.
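# Illustrative: a DEPS entry of recursedeps = ['src/foo'] is stored here as
# {'src/foo': {'deps_file': 'DEPS'}}, assuming this dependency's own deps_file
# is 'DEPS' ('src/foo' is a placeholder name).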
self.recursedeps = None
# This is inherited from WorkItem. We want the URL to be a resource.
if url and isinstance(url, basestring):
# The url is usually given to gclient either as https://blah@123
# or just https://blah. The @123 portion is irrelevant.
self.resources.append(url.split('@')[0])
if not self.name and self.parent:
raise gclient_utils.Error('Dependency without name')
@property
def requirements(self):
"""Calculate the list of requirements."""
requirements = set()
# self.parent is implicitly a requirement. This will be recursive by
# definition.
if self.parent and self.parent.name:
requirements.add(self.parent.name)
# For a tree with at least 2 levels*, the leaf node needs to depend
# on the level higher up in an orderly way.
# This becomes messy for >2 depth as the DEPS file format is a dictionary,
# thus unsorted, while the .gclient format is a list thus sorted.
#
# * _recursion_limit is hard coded 2 and there is no hope to change this
# value.
#
# Interestingly enough, the following condition only works in the case we
# want: self is a 2nd level node. 3rd level nodes wouldn't need this since
# they already have their parent as a requirement.
if self.parent and self.parent.parent and not self.parent.parent.parent:
requirements |= set(i.name for i in self.root.dependencies if i.name)
if isinstance(self.url, self.FromImpl):
requirements.add(self.url.module_name)
if self.name:
requirements |= set(
obj.name for obj in self.root.subtree(False)
if (obj is not self
and obj.name and
self.name.startswith(posixpath.join(obj.name, ''))))
requirements = tuple(sorted(requirements))
logging.info('Dependency(%s).requirements = %s' % (self.name, requirements))
return requirements
@property
def try_recursedeps(self):
"""Returns False if recursion_override is ever specified."""
if self.recursion_override is not None:
return False
return self.parent.try_recursedeps
@property
def recursion_limit(self):
"""Returns > 0 if this dependency is not too recursed to be processed."""
# We continue to support the absence of recursedeps until tools and DEPS
# using recursion_override are updated.
if self.try_recursedeps and self.parent.recursedeps != None:
if self.name in self.parent.recursedeps:
return 1
if self.recursion_override is not None:
return self.recursion_override
return max(self.parent.recursion_limit - 1, 0)
def verify_validity(self):
"""Verifies that this Dependency is fine to add as a child of another one.
Returns True if this entry should be added, False if it is a duplicate of
another entry.
"""
logging.info('Dependency(%s).verify_validity()' % self.name)
if self.name in [s.name for s in self.parent.dependencies]:
raise gclient_utils.Error(
'The same name "%s" appears multiple times in the deps section' %
self.name)
if not self.should_process:
# Return early, no need to set requirements.
return True
# This requires a full tree traversal with locks.
siblings = [d for d in self.root.subtree(False) if d.name == self.name]
for sibling in siblings:
self_url = self.LateOverride(self.url)
sibling_url = sibling.LateOverride(sibling.url)
# Allow to have only one to be None or ''.
if self_url != sibling_url and bool(self_url) == bool(sibling_url):
raise gclient_utils.Error(
('Dependency %s specified more than once:\n'
' %s [%s]\n'
'vs\n'
' %s [%s]') % (
self.name,
sibling.hierarchy(),
sibling_url,
self.hierarchy(),
self_url))
# In theory we could keep it as a shadow of the other one. In
# practice, simply ignore it.
logging.warn('Won\'t process duplicate dependency %s' % sibling)
return False
return True
def LateOverride(self, url):
"""Resolves the parsed url from url.
Manages From() keyword accordingly. Do not touch self.parsed_url nor
self.url because it may be called with other urls due to From()."""
assert self.parsed_url == None or not self.should_process, self.parsed_url
parsed_url = self.get_custom_deps(self.name, url)
if parsed_url != url:
logging.info(
'Dependency(%s).LateOverride(%s) -> %s' %
(self.name, url, parsed_url))
return parsed_url
if isinstance(url, self.FromImpl):
# Requires tree traversal.
ref = [
dep for dep in self.root.subtree(True) if url.module_name == dep.name
]
if not ref:
raise gclient_utils.Error('Failed to find one reference to %s. %s' % (
url.module_name, ref))
# It may happen that len(ref) > 1 but it's no big deal.
ref = ref[0]
sub_target = url.sub_target_name or self.name
found_deps = [d for d in ref.dependencies if d.name == sub_target]
if len(found_deps) != 1:
raise gclient_utils.Error(
'Couldn\'t find %s in %s, referenced by %s (parent: %s)\n%s' % (
sub_target, ref.name, self.name, self.parent.name,
str(self.root)))
# Call LateOverride() again.
found_dep = found_deps[0]
parsed_url = found_dep.LateOverride(found_dep.url)
logging.info(
'Dependency(%s).LateOverride(%s) -> %s (From)' %
(self.name, url, parsed_url))
return parsed_url
if isinstance(url, basestring):
parsed_url = urlparse.urlparse(url)
if (not parsed_url[0] and
not re.match(r'^\w+\@[\w\.-]+\:[\w\/]+', parsed_url[2])):
# A relative url. Fetch the real base.
path = parsed_url[2]
if not path.startswith('/'):
raise gclient_utils.Error(
'relative DEPS entry \'%s\' must begin with a slash' % url)
# Create a scm just to query the full url.
parent_url = self.parent.parsed_url
if isinstance(parent_url, self.FileImpl):
parent_url = parent_url.file_location
scm = gclient_scm.CreateSCM(
parent_url, self.root.root_dir, None, self.outbuf)
parsed_url = scm.FullUrlForRelativeUrl(url)
else:
parsed_url = url
logging.info(
'Dependency(%s).LateOverride(%s) -> %s' %
(self.name, url, parsed_url))
return parsed_url
if isinstance(url, self.FileImpl):
logging.info(
'Dependency(%s).LateOverride(%s) -> %s (File)' %
(self.name, url, url))
return url
if url is None:
logging.info(
'Dependency(%s).LateOverride(%s) -> %s' % (self.name, url, url))
return url
raise gclient_utils.Error('Unknown url type')
@staticmethod
def MergeWithOsDeps(deps, deps_os, target_os_list):
"""Returns a new "deps" structure that is the deps sent in updated
with information from deps_os (the deps_os section of the DEPS
file) that matches the list of target os."""
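# Illustrative example (placeholder names/urls): with
#   deps           = {'src/a': 'url_a'}
#   deps_os        = {'win': {'src/a': None, 'src/b': 'url_b'}}
#   target_os_list = ['win']
# the result is {'src/a': None, 'src/b': 'url_b'}; a None value overrides the
# default and effectively disables that dep for the listed os.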
os_overrides = {}
for the_target_os in target_os_list:
the_target_os_deps = deps_os.get(the_target_os, {})
for os_dep_key, os_dep_value in the_target_os_deps.iteritems():
overrides = os_overrides.setdefault(os_dep_key, [])
overrides.append((the_target_os, os_dep_value))
# If any os didn't specify a value (we have fewer value entries
# than in the os list), then it wants to use the default value.
for os_dep_key, os_dep_value in os_overrides.iteritems():
if len(os_dep_value) != len(target_os_list):
# Record the default value too so that we don't accidentally
# set it to None or miss a conflicting DEPS.
if os_dep_key in deps:
os_dep_value.append(('default', deps[os_dep_key]))
target_os_deps = {}
for os_dep_key, os_dep_value in os_overrides.iteritems():
# os_dep_value is a list of (os, value) pairs.
possible_values = set(x[1] for x in os_dep_value if x[1] is not None)
if not possible_values:
target_os_deps[os_dep_key] = None
else:
if len(possible_values) > 1:
# It would be possible to abort here but it would be
# unfortunate if we end up preventing any kind of checkout.
logging.error('Conflicting dependencies for %s: %s. (target_os=%s)',
os_dep_key, os_dep_value, target_os_list)
# Sorting to get the same result every time in case of conflicts.
target_os_deps[os_dep_key] = sorted(possible_values)[0]
new_deps = deps.copy()
new_deps.update(target_os_deps)
return new_deps
def ParseDepsFile(self):
"""Parses the DEPS file for this dependency."""
assert not self.deps_parsed
assert not self.dependencies
deps_content = None
use_strict = False
# First try to locate the configured deps file. If it's missing, fallback
# to DEPS.
deps_files = [self.deps_file]
if 'DEPS' not in deps_files:
deps_files.append('DEPS')
for deps_file in deps_files:
filepath = os.path.join(self.root.root_dir, self.name, deps_file)
if os.path.isfile(filepath):
logging.info(
'ParseDepsFile(%s): %s file found at %s', self.name, deps_file,
filepath)
break
logging.info(
'ParseDepsFile(%s): No %s file found at %s', self.name, deps_file,
filepath)
if os.path.isfile(filepath):
deps_content = gclient_utils.FileRead(filepath)
logging.debug('ParseDepsFile(%s) read:\n%s', self.name, deps_content)
use_strict = 'use strict' in deps_content.splitlines()[0]
local_scope = {}
if deps_content:
# One thing is unintuitive, vars = {} must happen before Var() use.
var = self.VarImpl(self.custom_vars, local_scope)
if use_strict:
logging.info(
'ParseDepsFile(%s): Strict Mode Enabled', self.name)
global_scope = {
'__builtins__': {'None': None},
'Var': var.Lookup,
'deps_os': {},
}
else:
global_scope = {
'File': self.FileImpl,
'From': self.FromImpl,
'Var': var.Lookup,
'deps_os': {},
}
# Eval the content.
try:
exec(deps_content, global_scope, local_scope)
except SyntaxError as e:
gclient_utils.SyntaxErrorToError(filepath, e)
if use_strict:
for key, val in local_scope.iteritems():
if not isinstance(val, (dict, list, tuple, str)):
raise gclient_utils.Error(
'ParseDepsFile(%s): Strict mode disallows %r -> %r' %
(self.name, key, val))
deps = local_scope.get('deps', {})
if 'recursion' in local_scope:
self.recursion_override = local_scope.get('recursion')
logging.warning(
'Setting %s recursion to %d.', self.name, self.recursion_limit)
self.recursedeps = None
if 'recursedeps' in local_scope:
self.recursedeps = {}
for ent in local_scope['recursedeps']:
if isinstance(ent, basestring):
self.recursedeps[ent] = {"deps_file": self.deps_file}
else: # (depname, depsfilename)
self.recursedeps[ent[0]] = {"deps_file": ent[1]}
logging.warning('Found recursedeps %r.', repr(self.recursedeps))
# If present, save 'target_os' in the local_target_os property.
if 'target_os' in local_scope:
self.local_target_os = local_scope['target_os']
# load os specific dependencies if defined. these dependencies may
# override or extend the values defined by the 'deps' member.
target_os_list = self.target_os
if 'deps_os' in local_scope and target_os_list:
deps = self.MergeWithOsDeps(deps, local_scope['deps_os'], target_os_list)
# If a line is in custom_deps, but not in the solution, we want to append
# this line to the solution.
for d in self.custom_deps:
if d not in deps:
deps[d] = self.custom_deps[d]
# If use_relative_paths is set in the DEPS file, regenerate
# the dictionary using paths relative to the directory containing
# the DEPS file. Also update recursedeps if use_relative_paths is
# enabled.
use_relative_paths = local_scope.get('use_relative_paths', False)
if use_relative_paths:
logging.warning('use_relative_paths enabled.')
rel_deps = {}
for d, url in deps.items():
# normpath is required to allow DEPS to use .. in their
# dependency local path.
rel_deps[os.path.normpath(os.path.join(self.name, d))] = url
logging.warning('Updating deps by prepending %s.', self.name)
deps = rel_deps
# Update recursedeps if it's set.
if self.recursedeps is not None:
logging.warning('Updating recursedeps by prepending %s.', self.name)
rel_deps = {}
for depname, options in self.recursedeps.iteritems():
rel_deps[os.path.normpath(os.path.join(self.name, depname))] = options
self.recursedeps = rel_deps
if 'allowed_hosts' in local_scope:
try:
self._allowed_hosts = frozenset(local_scope.get('allowed_hosts'))
except TypeError: # raised if non-iterable
pass
if not self._allowed_hosts:
logging.warning("allowed_hosts is specified but empty %s",
self._allowed_hosts)
raise gclient_utils.Error(
'ParseDepsFile(%s): allowed_hosts must be absent '
'or a non-empty iterable' % self.name)
# Convert the deps into real Dependency.
deps_to_add = []
for name, url in deps.iteritems():
should_process = self.recursion_limit and self.should_process
deps_file = self.deps_file
if self.recursedeps is not None:
ent = self.recursedeps.get(name)
if ent is not None:
deps_file = ent['deps_file']
deps_to_add.append(Dependency(
self, name, url, None, None, None, self.custom_vars, None,
deps_file, should_process))
deps_to_add.sort(key=lambda x: x.name)
# override named sets of hooks by the custom hooks
hooks_to_run = []
hook_names_to_suppress = [c.get('name', '') for c in self.custom_hooks]
for hook in local_scope.get('hooks', []):
if hook.get('name', '') not in hook_names_to_suppress:
hooks_to_run.append(hook)
# add the replacements and any additions
for hook in self.custom_hooks:
if 'action' in hook:
hooks_to_run.append(hook)
self._pre_deps_hooks = [self.GetHookAction(hook, []) for hook in
local_scope.get('pre_deps_hooks', [])]
self.add_dependencies_and_close(deps_to_add, hooks_to_run)
logging.info('ParseDepsFile(%s) done' % self.name)
def add_dependencies_and_close(self, deps_to_add, hooks):
"""Adds the dependencies, hooks and mark the parsing as done."""
for dep in deps_to_add:
if dep.verify_validity():
self.add_dependency(dep)
self._mark_as_parsed(hooks)
def maybeGetParentRevision(self, command, options, parsed_url, parent):
"""Uses revision/timestamp of parent if no explicit revision was specified.
If we are performing an update and --transitive is set, use
- the parent's revision if 'self.url' is in the same repository
- the parent's timestamp otherwise
to update 'self.url'. The used revision/timestamp will be set in
'options.revision'.
If we have an explicit revision do nothing.
"""
if command == 'update' and options.transitive and not options.revision:
_, revision = gclient_utils.SplitUrlRevision(parsed_url)
if not revision:
options.revision = getattr(parent, '_used_revision', None)
if (options.revision and
not gclient_utils.IsDateRevision(options.revision)):
assert self.parent and self.parent.used_scm
# If this dependency is in the same repository as the parent, its url will
# start with a slash. If so we take the parent revision instead of
# its timestamp.
# (The timestamps of commits in google code are broken -- which can
# result in dependencies to be checked out at the wrong revision)
if self.url.startswith('/'):
if options.verbose:
print('Using parent\'s revision %s since we are in the same '
'repository.' % options.revision)
else:
parent_revision_date = self.parent.used_scm.GetRevisionDate(
options.revision)
options.revision = gclient_utils.MakeDateRevision(
parent_revision_date)
if options.verbose:
print('Using parent\'s revision date %s since we are in a '
'different repository.' % options.revision)
def findDepsFromNotAllowedHosts(self):
"""Returns a list of depenecies from not allowed hosts.
If allowed_hosts is not set, allows all hosts and returns empty list.
"""
if not self._allowed_hosts:
return []
bad_deps = []
for dep in self._dependencies:
# Don't enforce this for custom_deps.
if dep.name in self._custom_deps:
continue
if isinstance(dep.url, basestring):
parsed_url = urlparse.urlparse(dep.url)
if parsed_url.netloc and parsed_url.netloc not in self._allowed_hosts:
bad_deps.append(dep)
return bad_deps
# Arguments number differs from overridden method
# pylint: disable=W0221
def run(self, revision_overrides, command, args, work_queue, options):
"""Runs |command| then parse the DEPS file."""
logging.info('Dependency(%s).run()' % self.name)
assert self._file_list == []
if not self.should_process:
return
# When running runhooks, there's no need to consult the SCM.
# All known hooks are expected to run unconditionally regardless of working
# copy state, so skip the SCM status check.
run_scm = command not in ('runhooks', 'recurse', None)
parsed_url = self.LateOverride(self.url)
file_list = [] if not options.nohooks else None
revision_override = revision_overrides.pop(self.name, None)
if run_scm and parsed_url:
if isinstance(parsed_url, self.FileImpl):
# Special support for single-file checkout.
if not command in (None, 'cleanup', 'diff', 'pack', 'status'):
# Sadly, pylint doesn't realize that parsed_url is of FileImpl.
# pylint: disable=E1103
options.revision = parsed_url.GetRevision()
self._used_scm = gclient_scm.SVNWrapper(
parsed_url.GetPath(), self.root.root_dir, self.name,
out_cb=work_queue.out_cb)
self._used_scm.RunCommand('updatesingle',
options, args + [parsed_url.GetFilename()], file_list)
else:
# Create a shallow copy to mutate revision.
options = copy.copy(options)
options.revision = revision_override
self.maybeGetParentRevision(
command, options, parsed_url, self.parent)
self._used_revision = options.revision
self._used_scm = gclient_scm.CreateSCM(
parsed_url, self.root.root_dir, self.name, self.outbuf,
out_cb=work_queue.out_cb)
self._got_revision = self._used_scm.RunCommand(command, options, args,
file_list)
if file_list:
file_list = [os.path.join(self.name, f.strip()) for f in file_list]
# TODO(phajdan.jr): We should know exactly when the paths are absolute.
# Convert all absolute paths to relative.
for i in range(len(file_list or [])):
# It depends on the command being executed (like runhooks vs sync).
if not os.path.isabs(file_list[i]):
continue
prefix = os.path.commonprefix(
[self.root.root_dir.lower(), file_list[i].lower()])
file_list[i] = file_list[i][len(prefix):]
# Strip any leading path separators.
while file_list[i].startswith(('\\', '/')):
file_list[i] = file_list[i][1:]
# Always parse the DEPS file.
self.ParseDepsFile()
self._run_is_done(file_list or [], parsed_url)
if command in ('update', 'revert') and not options.noprehooks:
self.RunPreDepsHooks()
if self.recursion_limit:
# Parse the dependencies of this dependency.
for s in self.dependencies:
work_queue.enqueue(s)
if command == 'recurse':
if not isinstance(parsed_url, self.FileImpl):
# Skip file only checkout.
scm = gclient_scm.GetScmName(parsed_url)
if not options.scm or scm in options.scm:
cwd = os.path.normpath(os.path.join(self.root.root_dir, self.name))
# Pass in the SCM type as an env variable. Make sure we don't put
# unicode strings in the environment.
env = os.environ.copy()
if scm:
env['GCLIENT_SCM'] = str(scm)
if parsed_url:
env['GCLIENT_URL'] = str(parsed_url)
env['GCLIENT_DEP_PATH'] = str(self.name)
if options.prepend_dir and scm == 'git':
print_stdout = False
def filter_fn(line):
"""Git-specific path marshaling. It is optimized for git-grep."""
def mod_path(git_pathspec):
match = re.match('^(\\S+?:)?([^\0]+)$', git_pathspec)
modified_path = os.path.join(self.name, match.group(2))
branch = match.group(1) or ''
return '%s%s' % (branch, modified_path)
match = re.match('^Binary file ([^\0]+) matches$', line)
if match:
print('Binary file %s matches\n' % mod_path(match.group(1)))
return
items = line.split('\0')
if len(items) == 2 and items[1]:
print('%s : %s' % (mod_path(items[0]), items[1]))
elif len(items) >= 2:
# Multiple null bytes or a single trailing null byte indicate
# git is likely displaying filenames only (such as with -l)
print('\n'.join(mod_path(path) for path in items if path))
else:
print(line)
else:
print_stdout = True
filter_fn = None
if parsed_url is None:
print('Skipped omitted dependency %s' % cwd, file=sys.stderr)
elif os.path.isdir(cwd):
try:
gclient_utils.CheckCallAndFilter(
args, cwd=cwd, env=env, print_stdout=print_stdout,
filter_fn=filter_fn,
)
except subprocess2.CalledProcessError:
if not options.ignore:
raise
else:
print('Skipped missing %s' % cwd, file=sys.stderr)
@gclient_utils.lockedmethod
def _run_is_done(self, file_list, parsed_url):
# Both these are kept for hooks that are run as a separate tree traversal.
self._file_list = file_list
self._parsed_url = parsed_url
self._processed = True
@staticmethod
def GetHookAction(hook_dict, matching_file_list):
"""Turns a parsed 'hook' dict into an executable command."""
logging.debug(hook_dict)
logging.debug(matching_file_list)
command = hook_dict['action'][:]
if command[0] == 'python':
# If the hook specified "python" as the first item, the action is a
# Python script. Run it by starting a new copy of the same
# interpreter.
command[0] = sys.executable
if '$matching_files' in command:
splice_index = command.index('$matching_files')
command[splice_index:splice_index + 1] = matching_file_list
return command
def GetHooks(self, options):
"""Evaluates all hooks, and return them in a flat list.
RunOnDeps() must have been called before to load the DEPS.
"""
result = []
if not self.should_process or not self.recursion_limit:
# Don't run the hook when it is above recursion_limit.
return result
# If "--force" was specified, run all hooks regardless of what files have
# changed.
if self.deps_hooks:
# TODO(maruel): If the user is using git or git-svn, then we don't know
# what files have changed so we always run all hooks. It'd be nice to fix
# that.
if (options.force or
isinstance(self.parsed_url, self.FileImpl) or
gclient_scm.GetScmName(self.parsed_url) in ('git', None) or
os.path.isdir(os.path.join(self.root.root_dir, self.name, '.git'))):
for hook_dict in self.deps_hooks:
result.append(self.GetHookAction(hook_dict, []))
else:
# Run hooks on the basis of whether the files from the gclient operation
# match each hook's pattern.
for hook_dict in self.deps_hooks:
pattern = re.compile(hook_dict['pattern'])
matching_file_list = [
f for f in self.file_list_and_children if pattern.search(f)
]
if matching_file_list:
result.append(self.GetHookAction(hook_dict, matching_file_list))
for s in self.dependencies:
result.extend(s.GetHooks(options))
return result
def RunHooksRecursively(self, options):
assert self.hooks_ran == False
self._hooks_ran = True
for hook in self.GetHooks(options):
try:
start_time = time.time()
gclient_utils.CheckCallAndFilterAndHeader(
hook, cwd=self.root.root_dir, always=True)
except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
# Use a discrete exit status code of 2 to indicate that a hook action
# failed. Users of this script may wish to treat hook action failures
# differently from VC failures.
print('Error: %s' % str(e), file=sys.stderr)
sys.exit(2)
finally:
elapsed_time = time.time() - start_time
if elapsed_time > 10:
print("Hook '%s' took %.2f secs" % (
gclient_utils.CommandToStr(hook), elapsed_time))
def RunPreDepsHooks(self):
assert self.processed
assert self.deps_parsed
assert not self.pre_deps_hooks_ran
assert not self.hooks_ran
for s in self.dependencies:
assert not s.processed
self._pre_deps_hooks_ran = True
for hook in self.pre_deps_hooks:
try:
start_time = time.time()
gclient_utils.CheckCallAndFilterAndHeader(
hook, cwd=self.root.root_dir, always=True)
except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
# Use a discrete exit status code of 2 to indicate that a hook action
# failed. Users of this script may wish to treat hook action failures
# differently from VC failures.
print('Error: %s' % str(e), file=sys.stderr)
sys.exit(2)
finally:
elapsed_time = time.time() - start_time
if elapsed_time > 10:
print("Hook '%s' took %.2f secs" % (
gclient_utils.CommandToStr(hook), elapsed_time))
def subtree(self, include_all):
"""Breadth first recursion excluding root node."""
dependencies = self.dependencies
for d in dependencies:
if d.should_process or include_all:
yield d
for d in dependencies:
for i in d.subtree(include_all):
yield i
def depth_first_tree(self):
"""Depth-first recursion including the root node."""
yield self
for i in self.dependencies:
for j in i.depth_first_tree():
if j.should_process:
yield j
@gclient_utils.lockedmethod
def add_dependency(self, new_dep):
self._dependencies.append(new_dep)
@gclient_utils.lockedmethod
def _mark_as_parsed(self, new_hooks):
self._deps_hooks.extend(new_hooks)
self._deps_parsed = True
@property
@gclient_utils.lockedmethod
def dependencies(self):
return tuple(self._dependencies)
@property
@gclient_utils.lockedmethod
def deps_hooks(self):
return tuple(self._deps_hooks)
@property
@gclient_utils.lockedmethod
def pre_deps_hooks(self):
return tuple(self._pre_deps_hooks)
@property
@gclient_utils.lockedmethod
def parsed_url(self):
return self._parsed_url
@property
@gclient_utils.lockedmethod
def deps_parsed(self):
"""This is purely for debugging purposes. It's not used anywhere."""
return self._deps_parsed
@property
@gclient_utils.lockedmethod
def processed(self):
return self._processed
@property
@gclient_utils.lockedmethod
def pre_deps_hooks_ran(self):
return self._pre_deps_hooks_ran
@property
@gclient_utils.lockedmethod
def hooks_ran(self):
return self._hooks_ran
@property
@gclient_utils.lockedmethod
def allowed_hosts(self):
return self._allowed_hosts
@property
@gclient_utils.lockedmethod
def file_list(self):
return tuple(self._file_list)
@property
def used_scm(self):
"""SCMWrapper instance for this dependency or None if not processed yet."""
return self._used_scm
@property
@gclient_utils.lockedmethod
def got_revision(self):
return self._got_revision
@property
def file_list_and_children(self):
result = list(self.file_list)
for d in self.dependencies:
result.extend(d.file_list_and_children)
return tuple(result)
def __str__(self):
out = []
for i in ('name', 'url', 'parsed_url', 'safesync_url', 'custom_deps',
'custom_vars', 'deps_hooks', 'file_list', 'should_process',
'processed', 'hooks_ran', 'deps_parsed', 'requirements',
'allowed_hosts'):
# First try the native property if it exists.
if hasattr(self, '_' + i):
value = getattr(self, '_' + i, False)
else:
value = getattr(self, i, False)
if value:
out.append('%s: %s' % (i, value))
for d in self.dependencies:
out.extend([' ' + x for x in str(d).splitlines()])
out.append('')
return '\n'.join(out)
def __repr__(self):
return '%s: %s' % (self.name, self.url)
def hierarchy(self):
"""Returns a human-readable hierarchical reference to a Dependency."""
out = '%s(%s)' % (self.name, self.url)
i = self.parent
while i and i.name:
out = '%s(%s) -> %s' % (i.name, i.url, out)
i = i.parent
return out
class GClient(Dependency):
"""Object that represent a gclient checkout. A tree of Dependency(), one per
solution or DEPS entry."""
DEPS_OS_CHOICES = {
"win32": "win",
"win": "win",
"cygwin": "win",
"darwin": "mac",
"mac": "mac",
"unix": "unix",
"linux": "unix",
"linux2": "unix",
"linux3": "unix",
"android": "android",
}
DEFAULT_CLIENT_FILE_TEXT = ("""\
solutions = [
{ "name" : "%(solution_name)s",
"url" : "%(solution_url)s",
"deps_file" : "%(deps_file)s",
"managed" : %(managed)s,
"custom_deps" : {
},
"safesync_url": "%(safesync_url)s",
},
]
cache_dir = %(cache_dir)r
""")
DEFAULT_SNAPSHOT_SOLUTION_TEXT = ("""\
{ "name" : "%(solution_name)s",
"url" : "%(solution_url)s",
"deps_file" : "%(deps_file)s",
"managed" : %(managed)s,
"custom_deps" : {
%(solution_deps)s },
"safesync_url": "%(safesync_url)s",
},
""")
DEFAULT_SNAPSHOT_FILE_TEXT = ("""\
# Snapshot generated with gclient revinfo --snapshot
solutions = [
%(solution_list)s]
""")
def __init__(self, root_dir, options):
# Do not change previous behavior. Only solution level and immediate DEPS
# are processed.
self._recursion_limit = 2
Dependency.__init__(self, None, None, None, None, True, None, None, None,
'unused', True)
self._options = options
if options.deps_os:
enforced_os = options.deps_os.split(',')
else:
enforced_os = [self.DEPS_OS_CHOICES.get(sys.platform, 'unix')]
if 'all' in enforced_os:
enforced_os = self.DEPS_OS_CHOICES.itervalues()
self._enforced_os = tuple(set(enforced_os))
self._root_dir = root_dir
self.config_content = None
def _CheckConfig(self):
"""Verify that the config matches the state of the existing checked-out
solutions."""
for dep in self.dependencies:
if dep.managed and dep.url:
scm = gclient_scm.CreateSCM(
dep.url, self.root_dir, dep.name, self.outbuf)
actual_url = scm.GetActualRemoteURL(self._options)
if actual_url and not scm.DoesRemoteURLMatch(self._options):
mirror = scm.GetCacheMirror()
if mirror:
mirror_string = '%s (exists=%s)' % (mirror.mirror_path,
mirror.exists())
else:
mirror_string = 'not used'
raise gclient_utils.Error('''
Your .gclient file seems to be broken. The requested URL is different from what
is actually checked out in %(checkout_path)s.
The .gclient file contains:
URL: %(expected_url)s (%(expected_scm)s)
Cache mirror: %(mirror_string)s
The local checkout in %(checkout_path)s reports:
%(actual_url)s (%(actual_scm)s)
You should ensure that the URL listed in .gclient is correct and either change
it or fix the checkout. If you're managing your own git checkout in
%(checkout_path)s but the URL in .gclient is for an svn repository, you probably
want to set 'managed': False in .gclient.
''' % {'checkout_path': os.path.join(self.root_dir, dep.name),
'expected_url': dep.url,
'expected_scm': gclient_scm.GetScmName(dep.url),
'mirror_string' : mirror_string,
'actual_url': actual_url,
'actual_scm': gclient_scm.GetScmName(actual_url)})
def SetConfig(self, content):
assert not self.dependencies
config_dict = {}
self.config_content = content
try:
exec(content, config_dict)
except SyntaxError as e:
gclient_utils.SyntaxErrorToError('.gclient', e)
# Append any target OS that is not already being enforced to the tuple.
target_os = config_dict.get('target_os', [])
if config_dict.get('target_os_only', False):
self._enforced_os = tuple(set(target_os))
else:
self._enforced_os = tuple(set(self._enforced_os).union(target_os))
cache_dir = config_dict.get('cache_dir')
if cache_dir:
cache_dir = os.path.join(self.root_dir, cache_dir)
cache_dir = os.path.abspath(cache_dir)
# If running on a bot, force break any stale git cache locks.
if os.path.exists(cache_dir) and os.environ.get('CHROME_HEADLESS'):
subprocess2.check_call(['git', 'cache', 'unlock', '--cache-dir',
cache_dir, '--force', '--all'])
gclient_scm.GitWrapper.cache_dir = cache_dir
git_cache.Mirror.SetCachePath(cache_dir)
if not target_os and config_dict.get('target_os_only', False):
raise gclient_utils.Error('Can\'t use target_os_only if target_os is '
'not specified')
deps_to_add = []
for s in config_dict.get('solutions', []):
try:
deps_to_add.append(Dependency(
self, s['name'], s['url'],
s.get('safesync_url', None),
s.get('managed', True),
s.get('custom_deps', {}),
s.get('custom_vars', {}),
s.get('custom_hooks', []),
s.get('deps_file', 'DEPS'),
True))
except KeyError:
raise gclient_utils.Error('Invalid .gclient file. Solution is '
'incomplete: %s' % s)
self.add_dependencies_and_close(deps_to_add, config_dict.get('hooks', []))
logging.info('SetConfig() done')
def SaveConfig(self):
gclient_utils.FileWrite(os.path.join(self.root_dir,
self._options.config_filename),
self.config_content)
def MigrateConfigToGit(self, path, options):
svn_url_re = re.compile('^(https?://src\.chromium\.org/svn|'
'svn://svn\.chromium\.org/chrome)/'
'(trunk|branches/[^/]+)/src')
old_git_re = re.compile('^(https?://git\.chromium\.org|'
'ssh://([a-zA-Z_][a-zA-Z0-9_-]*@)?'
'gerrit\.chromium\.org(:2941[89])?)/'
'chromium/src\.git')
# Scan existing .gclient file for obsolete settings. It would be simpler
# to traverse self.dependencies, but working with the AST allows the code to
# dump an updated .gclient file that preserves the ordering of the original.
a = ast.parse(self.config_content, options.config_filename, 'exec')
modified = False
solutions = [elem for elem in a.body if 'solutions' in
[target.id for target in elem.targets]]
if not solutions:
return self
solutions = solutions[-1]
for solution in solutions.value.elts:
# Check for obsolete URL's
url_idx = ast_dict_index(solution, 'url')
if url_idx == -1:
continue
url_val = solution.values[url_idx]
if type(url_val) is not ast.Str:
continue
if (svn_url_re.match(url_val.s.strip())):
raise gclient_utils.Error(
"""
The chromium code repository has migrated completely to git.
Your SVN-based checkout is now obsolete; you need to create a brand-new
git checkout by following these instructions:
http://www.chromium.org/developers/how-tos/get-the-code
""")
if (old_git_re.match(url_val.s.strip())):
url_val.s = CHROMIUM_SRC_URL
modified = True
# Ensure deps_file is set to .DEPS.git. We enforce this here to smooth
# over switching between pre-git-migration and post-git-migration
# revisions.
# - For pre-migration revisions, .DEPS.git must be explicitly set.
# - For post-migration revisions, .DEPS.git is not present, so gclient
# will correctly fall back to DEPS.
if url_val.s == CHROMIUM_SRC_URL:
deps_file_idx = ast_dict_index(solution, 'deps_file')
if deps_file_idx != -1:
continue
solution.keys.append(ast.Str('deps_file'))
solution.values.append(ast.Str('.DEPS.git'))
modified = True
if not modified:
return self
print(
"""
WARNING: gclient detected an obsolete setting in your %s file. The file has
been automagically updated. The previous version is available at %s.old.
""" % (options.config_filename, options.config_filename))
# Replace existing .gclient with the updated version.
# Return a new GClient instance based on the new content.
new_content = ast2str(a)
dot_gclient_fn = os.path.join(path, options.config_filename)
try:
os.rename(dot_gclient_fn, dot_gclient_fn + '.old')
except OSError:
pass
with open(dot_gclient_fn, 'w') as fh:
fh.write(new_content)
client = GClient(path, options)
client.SetConfig(new_content)
return client
@staticmethod
def LoadCurrentConfig(options):
"""Searches for and loads a .gclient file relative to the current working
dir. Returns a GClient object."""
if options.spec:
client = GClient('.', options)
client.SetConfig(options.spec)
else:
if options.verbose:
print('Looking for %s starting from %s\n' % (
options.config_filename, os.getcwd()))
path = gclient_utils.FindGclientRoot(os.getcwd(), options.config_filename)
if not path:
return None
client = GClient(path, options)
client.SetConfig(gclient_utils.FileRead(
os.path.join(path, options.config_filename)))
client = client.MigrateConfigToGit(path, options)
if (options.revisions and
len(client.dependencies) > 1 and
any('@' not in r for r in options.revisions)):
print(
('You must specify the full solution name like --revision %s@%s\n'
'when you have multiple solutions setup in your .gclient file.\n'
'Other solutions present are: %s.') % (
client.dependencies[0].name,
options.revisions[0],
', '.join(s.name for s in client.dependencies[1:])),
file=sys.stderr)
return client
def SetDefaultConfig(self, solution_name, deps_file, solution_url,
safesync_url, managed=True, cache_dir=None):
self.SetConfig(self.DEFAULT_CLIENT_FILE_TEXT % {
'solution_name': solution_name,
'solution_url': solution_url,
'deps_file': deps_file,
'safesync_url' : safesync_url,
'managed': managed,
'cache_dir': cache_dir,
})
def _SaveEntries(self):
"""Creates a .gclient_entries file to record the list of unique checkouts.
The .gclient_entries file lives in the same directory as .gclient.
"""
# Sometimes pprint.pformat will use {', sometimes it'll use { ' ... It
# makes testing a bit too fun.
result = 'entries = {\n'
for entry in self.root.subtree(False):
# Skip over File() dependencies as we can't version them.
if not isinstance(entry.parsed_url, self.FileImpl):
result += ' %s: %s,\n' % (pprint.pformat(entry.name),
pprint.pformat(entry.parsed_url))
result += '}\n'
file_path = os.path.join(self.root_dir, self._options.entries_filename)
logging.debug(result)
gclient_utils.FileWrite(file_path, result)
def _ReadEntries(self):
"""Read the .gclient_entries file for the given client.
Returns:
A sequence of solution names, which will be empty if the entries
file hasn't been created yet.
"""
scope = {}
filename = os.path.join(self.root_dir, self._options.entries_filename)
if not os.path.exists(filename):
return {}
try:
exec(gclient_utils.FileRead(filename), scope)
except SyntaxError as e:
gclient_utils.SyntaxErrorToError(filename, e)
return scope['entries']
def _EnforceRevisions(self):
"""Checks for revision overrides."""
revision_overrides = {}
if self._options.head:
return revision_overrides
# Do not check safesync_url if one or more --revision flags are specified.
if not self._options.revisions:
for s in self.dependencies:
if not s.managed:
self._options.revisions.append('%s@unmanaged' % s.name)
elif s.safesync_url:
self._ApplySafeSyncRev(dep=s)
if not self._options.revisions:
return revision_overrides
solutions_names = [s.name for s in self.dependencies]
index = 0
for revision in self._options.revisions:
if not '@' in revision:
# Support for --revision 123
revision = '%s@%s' % (solutions_names[index], revision)
name, rev = revision.split('@', 1)
revision_overrides[name] = rev
index += 1
return revision_overrides
def _ApplySafeSyncRev(self, dep):
"""Finds a valid revision from the content of the safesync_url and apply it
by appending revisions to the revision list. Throws if revision appears to
be invalid for the given |dep|."""
assert len(dep.safesync_url) > 0
handle = urllib.urlopen(dep.safesync_url)
rev = handle.read().strip()
handle.close()
if not rev:
raise gclient_utils.Error(
'It appears your safesync_url (%s) is not working properly\n'
'(as it returned an empty response). Check your config.' %
dep.safesync_url)
scm = gclient_scm.CreateSCM(
dep.url, dep.root.root_dir, dep.name, self.outbuf)
safe_rev = scm.GetUsableRev(rev, self._options)
if self._options.verbose:
print('Using safesync_url revision: %s.\n' % safe_rev)
self._options.revisions.append('%s@%s' % (dep.name, safe_rev))
def RunOnDeps(self, command, args, ignore_requirements=False, progress=True):
"""Runs a command on each dependency in a client and its dependencies.
Args:
command: The command to use (e.g., 'status' or 'diff')
args: list of str - extra arguments to add to the command line.
"""
if not self.dependencies:
raise gclient_utils.Error('No solution specified')
revision_overrides = {}
# It's unnecessary to check for revision overrides for 'recurse'.
# Save a few seconds by not calling _EnforceRevisions() in that case.
if command not in ('diff', 'recurse', 'runhooks', 'status', 'revert'):
self._CheckConfig()
revision_overrides = self._EnforceRevisions()
pm = None
# Disable progress for non-tty stdout.
if (setup_color.IS_TTY and not self._options.verbose and progress):
if command in ('update', 'revert'):
pm = Progress('Syncing projects', 1)
elif command == 'recurse':
pm = Progress(' '.join(args), 1)
work_queue = gclient_utils.ExecutionQueue(
self._options.jobs, pm, ignore_requirements=ignore_requirements,
verbose=self._options.verbose)
for s in self.dependencies:
work_queue.enqueue(s)
work_queue.flush(revision_overrides, command, args, options=self._options)
if revision_overrides:
print('Please fix your script, having invalid --revision flags will soon '
'be considered an error.', file=sys.stderr)
# Once all the dependencies have been processed, it's now safe to run the
# hooks.
if not self._options.nohooks:
self.RunHooksRecursively(self._options)
if command == 'update':
# Notify the user if there is an orphaned entry in their working copy.
# Only delete the directory if there are no changes in it, and
# delete_unversioned_trees is set to true.
entries = [i.name for i in self.root.subtree(False) if i.url]
full_entries = [os.path.join(self.root_dir, e.replace('/', os.path.sep))
for e in entries]
for entry, prev_url in self._ReadEntries().iteritems():
if not prev_url:
# entry must have been overridden via .gclient custom_deps
continue
# Fix path separator on Windows.
entry_fixed = entry.replace('/', os.path.sep)
e_dir = os.path.join(self.root_dir, entry_fixed)
# Use entry and not entry_fixed there.
if (entry not in entries and
(not any(path.startswith(entry + '/') for path in entries)) and
os.path.exists(e_dir)):
# The entry has been removed from DEPS.
scm = gclient_scm.CreateSCM(
prev_url, self.root_dir, entry_fixed, self.outbuf)
# Check to see if this directory is now part of a higher-up checkout.
# The directory might be part of a git OR svn checkout.
scm_root = None
scm_class = None
for scm_class in (gclient_scm.scm.GIT, gclient_scm.scm.SVN):
try:
scm_root = scm_class.GetCheckoutRoot(scm.checkout_path)
except subprocess2.CalledProcessError:
pass
if scm_root:
break
else:
logging.warning('Could not find checkout root for %s. Unable to '
'determine whether it is part of a higher-level '
'checkout, so not removing.' % entry)
continue
# This is to handle the case of third_party/WebKit migrating from
# being a DEPS entry to being part of the main project.
# If the subproject is a Git project, we need to remove its .git
# folder. Otherwise git operations on that folder will have different
# effects depending on the current working directory.
if scm_class == gclient_scm.scm.GIT and (
os.path.abspath(scm_root) == os.path.abspath(e_dir)):
e_par_dir = os.path.join(e_dir, os.pardir)
if scm_class.IsInsideWorkTree(e_par_dir):
par_scm_root = scm_class.GetCheckoutRoot(e_par_dir)
# rel_e_dir : relative path of entry w.r.t. its parent repo.
rel_e_dir = os.path.relpath(e_dir, par_scm_root)
if scm_class.IsDirectoryVersioned(par_scm_root, rel_e_dir):
save_dir = scm.GetGitBackupDirPath()
# Remove any eventual stale backup dir for the same project.
if os.path.exists(save_dir):
gclient_utils.rmtree(save_dir)
os.rename(os.path.join(e_dir, '.git'), save_dir)
# When switching between the two states (entry/ is a subproject
# -> entry/ is part of the outer project), it is very likely
# that some files are changed in the checkout, unless we are
# jumping *exactly* across the commit which changed just DEPS.
# In such case we want to cleanup any eventual stale files
# (coming from the old subproject) in order to end up with a
# clean checkout.
scm_class.CleanupDir(par_scm_root, rel_e_dir)
assert not os.path.exists(os.path.join(e_dir, '.git'))
print(('\nWARNING: \'%s\' has been moved from DEPS to a higher '
'level checkout. The git folder containing all the local'
' branches has been saved to %s.\n'
'If you don\'t care about its state you can safely '
'remove that folder to free up space.') %
(entry, save_dir))
continue
if scm_root in full_entries:
logging.info('%s is part of a higher level checkout, not removing',
scm.GetCheckoutRoot())
continue
file_list = []
scm.status(self._options, [], file_list)
modified_files = file_list != []
if (not self._options.delete_unversioned_trees or
(modified_files and not self._options.force)):
# There are modified files in this entry. Keep warning until
# removed.
print(('\nWARNING: \'%s\' is no longer part of this client. '
'It is recommended that you manually remove it.\n') %
entry_fixed)
else:
# Delete the entry
print('\n________ deleting \'%s\' in \'%s\'' % (
entry_fixed, self.root_dir))
gclient_utils.rmtree(e_dir)
# record the current list of entries for next time
self._SaveEntries()
return 0
def PrintRevInfo(self):
if not self.dependencies:
raise gclient_utils.Error('No solution specified')
# Load all the settings.
work_queue = gclient_utils.ExecutionQueue(
self._options.jobs, None, False, verbose=self._options.verbose)
for s in self.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=self._options)
def GetURLAndRev(dep):
"""Returns the revision-qualified SCM url for a Dependency."""
if dep.parsed_url is None:
return None
if isinstance(dep.parsed_url, self.FileImpl):
original_url = dep.parsed_url.file_location
else:
original_url = dep.parsed_url
url, _ = gclient_utils.SplitUrlRevision(original_url)
scm = gclient_scm.CreateSCM(
original_url, self.root_dir, dep.name, self.outbuf)
if not os.path.isdir(scm.checkout_path):
return None
return '%s@%s' % (url, scm.revinfo(self._options, [], None))
if self._options.snapshot:
new_gclient = ''
# First level at .gclient
for d in self.dependencies:
entries = {}
def GrabDeps(dep):
"""Recursively grab dependencies."""
for d in dep.dependencies:
entries[d.name] = GetURLAndRev(d)
GrabDeps(d)
GrabDeps(d)
custom_deps = []
for k in sorted(entries.keys()):
if entries[k]:
# Quotes aren't escaped...
custom_deps.append(' \"%s\": \'%s\',\n' % (k, entries[k]))
else:
custom_deps.append(' \"%s\": None,\n' % k)
new_gclient += self.DEFAULT_SNAPSHOT_SOLUTION_TEXT % {
'solution_name': d.name,
'solution_url': d.url,
'deps_file': d.deps_file,
'safesync_url' : d.safesync_url or '',
'managed': d.managed,
'solution_deps': ''.join(custom_deps),
}
# Print the snapshot configuration file
print(self.DEFAULT_SNAPSHOT_FILE_TEXT % {'solution_list': new_gclient})
else:
entries = {}
for d in self.root.subtree(False):
if self._options.actual:
entries[d.name] = GetURLAndRev(d)
else:
entries[d.name] = d.parsed_url
keys = sorted(entries.keys())
for x in keys:
print('%s: %s' % (x, entries[x]))
logging.info(str(self))
def ParseDepsFile(self):
"""No DEPS to parse for a .gclient file."""
raise gclient_utils.Error('Internal error')
def PrintLocationAndContents(self):
# Print out the .gclient file. This is longer than if we just printed the
# client dict, but more legible, and it might contain helpful comments.
print('Loaded .gclient config in %s:\n%s' % (
self.root_dir, self.config_content))
@property
def root_dir(self):
"""Root directory of gclient checkout."""
return self._root_dir
@property
def enforced_os(self):
"""What deps_os entries that are to be parsed."""
return self._enforced_os
@property
def recursion_limit(self):
"""How recursive can each dependencies in DEPS file can load DEPS file."""
return self._recursion_limit
@property
def try_recursedeps(self):
"""Whether to attempt using recursedeps-style recursion processing."""
return True
@property
def target_os(self):
return self._enforced_os
#### gclient commands.
def CMDcleanup(parser, args):
"""Cleans up all working copies.
Mostly svn-specific. Simply runs 'svn cleanup' for each module.
"""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
return client.RunOnDeps('cleanup', args)
@subcommand.usage('[command] [args ...]')
def CMDrecurse(parser, args):
"""Operates [command args ...] on all the dependencies.
Runs a shell command on all entries.
Sets the GCLIENT_DEP_PATH environment variable as the dep's relative location to
root directory of the checkout.
"""
# Stop parsing at the first non-arg so that these go through to the command
parser.disable_interspersed_args()
parser.add_option('-s', '--scm', action='append', default=[],
help='Choose scm types to operate upon.')
parser.add_option('-i', '--ignore', action='store_true',
help='Ignore non-zero return codes from subcommands.')
parser.add_option('--prepend-dir', action='store_true',
help='Prepend relative dir for use with git <cmd> --null.')
parser.add_option('--no-progress', action='store_true',
help='Disable progress bar that shows sub-command updates')
options, args = parser.parse_args(args)
if not args:
print('Need to supply a command!', file=sys.stderr)
return 1
root_and_entries = gclient_utils.GetGClientRootAndEntries()
if not root_and_entries:
print(
'You need to run gclient sync at least once to use \'recurse\'.\n'
'This is because .gclient_entries needs to exist and be up to date.',
file=sys.stderr)
return 1
# Normalize options.scm to a set()
scm_set = set()
for scm in options.scm:
scm_set.update(scm.split(','))
options.scm = scm_set
options.nohooks = True
client = GClient.LoadCurrentConfig(options)
return client.RunOnDeps('recurse', args, ignore_requirements=True,
progress=not options.no_progress)
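# Illustrative usage sketch (hypothetical invocations, not from the original
# source): recurse runs an arbitrary shell command in every checked-out
# dependency and exposes GCLIENT_DEP_PATH to it, e.g.
#
#   gclient recurse --scm=git git status
#   gclient recurse sh -c 'echo "$GCLIENT_DEP_PATH"'
#
# Both assume a configured .gclient and an up-to-date .gclient_entries,
# i.e. 'gclient sync' has been run at least once.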
@subcommand.usage('[args ...]')
def CMDfetch(parser, args):
"""Fetches upstream commits for all modules.
Completely git-specific. Simply runs 'git fetch [args ...]' for each module.
"""
(options, args) = parser.parse_args(args)
return CMDrecurse(OptionParser(), [
'--jobs=%d' % options.jobs, '--scm=git', 'git', 'fetch'] + args)
def CMDgrep(parser, args):
"""Greps through git repos managed by gclient.
Runs 'git grep [args...]' for each module.
"""
# We can't use optparse because it will try to parse arguments sent
# to git grep and throw an error. :-(
if not args or re.match('(-h|--help)$', args[0]):
print(
'Usage: gclient grep [-j <N>] git-grep-args...\n\n'
'Example: "gclient grep -j10 -A2 RefCountedBase" runs\n"git grep '
'-A2 RefCountedBase" on each of gclient\'s git\nrepos with up to '
'10 jobs.\n\nBonus: page output by appending "|& less -FRSX" to the'
' end of your query.',
file=sys.stderr)
return 1
jobs_arg = ['--jobs=1']
if re.match(r'(-j|--jobs=)\d+$', args[0]):
jobs_arg, args = args[:1], args[1:]
elif re.match(r'(-j|--jobs)$', args[0]):
jobs_arg, args = args[:2], args[2:]
return CMDrecurse(
parser,
jobs_arg + ['--ignore', '--prepend-dir', '--no-progress', '--scm=git',
'git', 'grep', '--null', '--color=Always'] + args)
def CMDroot(parser, args):
"""Outputs the solution root (or current dir if there isn't one)."""
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if client:
print(os.path.abspath(client.root_dir))
else:
print(os.path.abspath('.'))
@subcommand.usage('[url] [safesync url]')
def CMDconfig(parser, args):
"""Creates a .gclient file in the current directory.
This specifies the configuration for further commands. After update/sync,
top-level DEPS files in each module are read to determine dependent
modules to operate on as well. If optional [url] parameter is
provided, then configuration is read from a specified Subversion server
URL.
"""
# We do a little dance with the --gclientfile option. 'gclient config' is the
# only command where it's acceptable to have both '--gclientfile' and '--spec'
# arguments. So, we temporarily stash any --gclientfile parameter into
# options.output_config_file until after the (gclientfile xor spec) error
# check.
parser.remove_option('--gclientfile')
parser.add_option('--gclientfile', dest='output_config_file',
help='Specify an alternate .gclient file')
parser.add_option('--name',
help='overrides the default name for the solution')
parser.add_option('--deps-file', default='DEPS',
                    help='overrides the default name for the DEPS file for '
                         'the main solutions and all sub-dependencies')
parser.add_option('--unmanaged', action='store_true', default=False,
help='overrides the default behavior to make it possible '
'to have the main solution untouched by gclient '
'(gclient will check out unmanaged dependencies but '
'will never sync them)')
parser.add_option('--cache-dir',
help='(git only) Cache all git repos into this dir and do '
'shared clones from the cache, instead of cloning '
'directly from the remote. (experimental)')
parser.set_defaults(config_filename=None)
(options, args) = parser.parse_args(args)
if options.output_config_file:
setattr(options, 'config_filename', getattr(options, 'output_config_file'))
if ((options.spec and args) or len(args) > 2 or
(not options.spec and not args)):
parser.error('Inconsistent arguments. Use either --spec or one or 2 args')
client = GClient('.', options)
if options.spec:
client.SetConfig(options.spec)
else:
base_url = args[0].rstrip('/')
if not options.name:
name = base_url.split('/')[-1]
if name.endswith('.git'):
name = name[:-4]
else:
# specify an alternate relpath for the given URL.
name = options.name
if not os.path.abspath(os.path.join(os.getcwd(), name)).startswith(
os.getcwd()):
parser.error('Do not pass a relative path for --name.')
if any(x in ('..', '.', '/', '\\') for x in name.split(os.sep)):
parser.error('Do not include relative path components in --name.')
deps_file = options.deps_file
safesync_url = ''
if len(args) > 1:
safesync_url = args[1]
client.SetDefaultConfig(name, deps_file, base_url, safesync_url,
managed=not options.unmanaged,
cache_dir=options.cache_dir)
client.SaveConfig()
return 0
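# Illustrative sketch (hypothetical URL, approximate layout): running
#
#   gclient config https://example.googlesource.com/foo.git
#
# typically writes a .gclient file in the current directory similar to:
#
#   solutions = [
#     { "name"        : "foo",
#       "url"         : "https://example.googlesource.com/foo.git",
#       "deps_file"   : "DEPS",
#       "managed"     : True,
#       "custom_deps" : {},
#       "safesync_url": "",
#     },
#   ]
#
# The exact template comes from SetDefaultConfig()/SaveConfig() defined earlier
# in this file, so treat the layout above as an approximation.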
@subcommand.epilog("""Example:
gclient pack > patch.txt
    generate simple patch for configured client and dependencies
""")
def CMDpack(parser, args):
"""Generates a patch which can be applied at the root of the tree.
Internally, runs 'svn diff'/'git diff' on each checked out module and
dependencies, and performs minimal postprocessing of the output. The
resulting patch is printed to stdout and can be applied to a freshly
checked out tree via 'patch -p0 < patchfile'.
"""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.remove_option('--jobs')
(options, args) = parser.parse_args(args)
# Force jobs to 1 so the stdout is not annotated with the thread ids
options.jobs = 1
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
return client.RunOnDeps('pack', args)
def CMDstatus(parser, args):
"""Shows modification status for every dependencies."""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
return client.RunOnDeps('status', args)
@subcommand.epilog("""Examples:
gclient sync
update files from SCM according to current configuration,
*for modules which have changed since last update or sync*
gclient sync --force
update files from SCM according to current configuration, for
all modules (useful for recovering files deleted from local copy)
gclient sync --revision src@31000
update src directory to r31000
JSON output format:
If the --output-json option is specified, the following document structure will
be emitted to the provided file. 'null' entries may occur for subprojects which
are present in the gclient solution, but were not processed (due to custom_deps,
os_deps, etc.)
{
"solutions" : {
"<name>": { # <name> is the posix-normalized path to the solution.
"revision": [<svn rev int>|<git id hex string>|null],
"scm": ["svn"|"git"|null],
}
}
}
""")
def CMDsync(parser, args):
"""Checkout/update all modules."""
parser.add_option('-f', '--force', action='store_true',
help='force update even for unchanged modules')
parser.add_option('-n', '--nohooks', action='store_true',
help='don\'t run hooks after the update is complete')
parser.add_option('-p', '--noprehooks', action='store_true',
help='don\'t run pre-DEPS hooks', default=False)
parser.add_option('-r', '--revision', action='append',
dest='revisions', metavar='REV', default=[],
help='Enforces revision/hash for the solutions with the '
'format src@rev. The src@ part is optional and can be '
'skipped. -r can be used multiple times when .gclient '
'has multiple solutions configured and will work even '
'if the src@ part is skipped. Note that specifying '
'--revision means your safesync_url gets ignored.')
parser.add_option('--with_branch_heads', action='store_true',
help='Clone git "branch_heads" refspecs in addition to '
'the default refspecs. This adds about 1/2GB to a '
'full checkout. (git only)')
parser.add_option('--with_tags', action='store_true',
help='Clone git tags in addition to the default refspecs.')
parser.add_option('-t', '--transitive', action='store_true',
help='When a revision is specified (in the DEPS file or '
'with the command-line flag), transitively update '
'the dependencies to the date of the given revision. '
'Only supported for SVN repositories.')
parser.add_option('-H', '--head', action='store_true',
help='skips any safesync_urls specified in '
'configured solutions and sync to head instead')
parser.add_option('-D', '--delete_unversioned_trees', action='store_true',
help='Deletes from the working copy any dependencies that '
'have been removed since the last sync, as long as '
'there are no local modifications. When used with '
'--force, such dependencies are removed even if they '
'have local modifications. When used with --reset, '
'all untracked directories are removed from the '
'working copy, excluding those which are explicitly '
'ignored in the repository.')
parser.add_option('-R', '--reset', action='store_true',
help='resets any local changes before updating (git only)')
parser.add_option('-M', '--merge', action='store_true',
help='merge upstream changes instead of trying to '
'fast-forward or rebase')
parser.add_option('-A', '--auto_rebase', action='store_true',
help='Automatically rebase repositories against local '
'checkout during update (git only).')
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('-m', '--manually_grab_svn_rev', action='store_true',
help='Skip svn up whenever possible by requesting '
'actual HEAD revision from the repository')
parser.add_option('--upstream', action='store_true',
help='Make repo state match upstream branch.')
parser.add_option('--output-json',
help='Output a json document to this path containing '
'summary information about the sync.')
parser.add_option('--no-history', action='store_true',
help='GIT ONLY - Reduces the size/time of the checkout at '
'the cost of no history. Requires Git 1.9+')
parser.add_option('--shallow', action='store_true',
help='GIT ONLY - Do a shallow clone into the cache dir. '
'Requires Git 1.9+')
parser.add_option('--no_bootstrap', '--no-bootstrap',
action='store_true',
help='Don\'t bootstrap from Google Storage.')
parser.add_option('--ignore_locks', action='store_true',
help='GIT ONLY - Ignore cache locks.')
parser.add_option('--break_repo_locks', action='store_true',
help='GIT ONLY - Forcibly remove repo locks (e.g. '
'index.lock). This should only be used if you know for '
'certain that this invocation of gclient is the only '
'thing operating on the git repos (e.g. on a bot).')
parser.add_option('--lock_timeout', type='int', default=5000,
help='GIT ONLY - Deadline (in seconds) to wait for git '
'cache lock to become available. Default is %default.')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.revisions and options.head:
# TODO(maruel): Make it a parser.error if it doesn't break any builder.
print('Warning: you cannot use both --head and --revision')
if options.verbose:
client.PrintLocationAndContents()
ret = client.RunOnDeps('update', args)
if options.output_json:
slns = {}
for d in client.subtree(True):
normed = d.name.replace('\\', '/').rstrip('/') + '/'
slns[normed] = {
'revision': d.got_revision,
'scm': d.used_scm.name if d.used_scm else None,
'url': str(d.url) if d.url else None,
}
with open(options.output_json, 'wb') as f:
json.dump({'solutions': slns}, f)
return ret
CMDupdate = CMDsync
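# Illustrative sketch of the --output-json document described in the epilog
# above (hypothetical revision and URL values, git checkout assumed):
#
#   {
#     "solutions": {
#       "src/": {
#         "revision": "0123456789abcdef0123456789abcdef01234567",
#         "scm": "git",
#         "url": "https://example.googlesource.com/src.git"
#       }
#     }
#   }
#
# Keys are the posix-normalized solution paths ending in '/', matching the
# normalization in CMDsync above.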
def CMDdiff(parser, args):
"""Displays local diff for every dependencies."""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
return client.RunOnDeps('diff', args)
def CMDrevert(parser, args):
"""Reverts all modifications in every dependencies.
That's the nuclear option to get back to a 'clean' state. It removes anything
that shows up in svn status."""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('-n', '--nohooks', action='store_true',
help='don\'t run hooks after the revert is complete')
parser.add_option('-p', '--noprehooks', action='store_true',
help='don\'t run pre-DEPS hooks', default=False)
parser.add_option('--upstream', action='store_true',
help='Make repo state match upstream branch.')
parser.add_option('--break_repo_locks', action='store_true',
help='GIT ONLY - Forcibly remove repo locks (e.g. '
'index.lock). This should only be used if you know for '
'certain that this invocation of gclient is the only '
'thing operating on the git repos (e.g. on a bot).')
(options, args) = parser.parse_args(args)
# --force is implied.
options.force = True
options.reset = False
options.delete_unversioned_trees = False
options.merge = False
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
return client.RunOnDeps('revert', args)
def CMDrunhooks(parser, args):
"""Runs hooks for files that have been modified in the local working copy."""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('-f', '--force', action='store_true', default=True,
help='Deprecated. No effect.')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
if options.verbose:
client.PrintLocationAndContents()
options.force = True
options.nohooks = False
return client.RunOnDeps('runhooks', args)
def CMDrevinfo(parser, args):
"""Outputs revision info mapping for the client and its dependencies.
This allows the capture of an overall 'revision' for the source tree that
can be used to reproduce the same tree in the future. It is only useful for
  'unpinned dependencies', i.e. DEPS/deps references without an svn revision
number or a git hash. A git branch name isn't 'pinned' since the actual
commit can change.
"""
parser.add_option('--deps', dest='deps_os', metavar='OS_LIST',
help='override deps for the specified (comma-separated) '
'platform(s); \'all\' will process all deps_os '
'references')
parser.add_option('-a', '--actual', action='store_true',
help='gets the actual checked out revisions instead of the '
'ones specified in the DEPS and .gclient files')
parser.add_option('-s', '--snapshot', action='store_true',
help='creates a snapshot .gclient file of the current '
'version of all repositories to reproduce the tree, '
'implies -a')
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
client.PrintRevInfo()
return 0
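# Illustrative output sketch (hypothetical names and revisions): without -s,
# revinfo prints one 'name: url' pair per dependency, e.g.
#
#   src: https://example.googlesource.com/src.git
#   src/third_party/foo: https://example.googlesource.com/foo.git@deadbeef
#
# With --actual the URL carries the actually checked-out revision; with
# --snapshot a complete .gclient snapshot is printed instead (see
# GClient.PrintRevInfo above).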
def CMDhookinfo(parser, args):
"""Outputs the hooks that would be run by `gclient runhooks`."""
(options, args) = parser.parse_args(args)
options.force = True
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
client.RunOnDeps(None, [])
print('; '.join(' '.join(hook) for hook in client.GetHooks(options)))
return 0
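# Illustrative output sketch (hypothetical hook commands): each hook is printed
# with its arguments space-joined, and hooks are separated by '; ', e.g.
#
#   python src/build/landmines.py; python src/tools/clang/scripts/update.py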
def CMDverify(parser, args):
"""Verifies the DEPS file deps are only from allowed_hosts."""
(options, args) = parser.parse_args(args)
client = GClient.LoadCurrentConfig(options)
if not client:
raise gclient_utils.Error('client not configured; see \'gclient config\'')
client.RunOnDeps(None, [])
# Look at each first-level dependency of this gclient only.
for dep in client.dependencies:
bad_deps = dep.findDepsFromNotAllowedHosts()
if not bad_deps:
continue
print("There are deps from not allowed hosts in file %s" % dep.deps_file)
for bad_dep in bad_deps:
print("\t%s at %s" % (bad_dep.name, bad_dep.url))
print("allowed_hosts:", ', '.join(dep.allowed_hosts))
sys.stdout.flush()
raise gclient_utils.Error(
'dependencies from disallowed hosts; check your DEPS file.')
return 0
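# Illustrative DEPS fragment (hypothetical host and paths) of the kind this
# command checks: every dep must come from a host listed in allowed_hosts.
#
#   allowed_hosts = ['chromium.googlesource.com']
#   deps = {
#     'src/third_party/foo':
#         'https://chromium.googlesource.com/external/foo.git@<revision>',
#   }
#
# A dep hosted elsewhere is reported via findDepsFromNotAllowedHosts() and
# makes this command raise gclient_utils.Error.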
class OptionParser(optparse.OptionParser):
gclientfile_default = os.environ.get('GCLIENT_FILE', '.gclient')
def __init__(self, **kwargs):
optparse.OptionParser.__init__(
self, version='%prog ' + __version__, **kwargs)
# Some arm boards have issues with parallel sync.
if platform.machine().startswith('arm'):
jobs = 1
else:
jobs = max(8, gclient_utils.NumLocalCpus())
# cmp: 2013/06/19
# Temporary workaround to lower bot-load on SVN server.
# Bypassed if a bot_update flag is detected.
if (os.environ.get('CHROME_HEADLESS') == '1' and
not os.path.exists('update.flag')):
jobs = 1
self.add_option(
'-j', '--jobs', default=jobs, type='int',
help='Specify how many SCM commands can run in parallel; defaults to '
'%default on this machine')
self.add_option(
'-v', '--verbose', action='count', default=0,
help='Produces additional output for diagnostics. Can be used up to '
'three times for more logging info.')
self.add_option(
'--gclientfile', dest='config_filename',
help='Specify an alternate %s file' % self.gclientfile_default)
self.add_option(
'--spec',
help='create a gclient file containing the provided string. Due to '
'Cygwin/Python brokenness, it can\'t contain any newlines.')
self.add_option(
'--no-nag-max', default=False, action='store_true',
help='Ignored for backwards compatibility.')
def parse_args(self, args=None, values=None):
"""Integrates standard options processing."""
options, args = optparse.OptionParser.parse_args(self, args, values)
levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]
logging.basicConfig(
level=levels[min(options.verbose, len(levels) - 1)],
format='%(module)s(%(lineno)d) %(funcName)s:%(message)s')
if options.config_filename and options.spec:
      self.error('Cannot specify both --gclientfile and --spec')
if (options.config_filename and
options.config_filename != os.path.basename(options.config_filename)):
self.error('--gclientfile target must be a filename, not a path')
if not options.config_filename:
options.config_filename = self.gclientfile_default
options.entries_filename = options.config_filename + '_entries'
if options.jobs < 1:
self.error('--jobs must be 1 or higher')
# These hacks need to die.
if not hasattr(options, 'revisions'):
# GClient.RunOnDeps expects it even if not applicable.
options.revisions = []
if not hasattr(options, 'head'):
options.head = None
if not hasattr(options, 'nohooks'):
options.nohooks = True
if not hasattr(options, 'noprehooks'):
options.noprehooks = True
if not hasattr(options, 'deps_os'):
options.deps_os = None
if not hasattr(options, 'manually_grab_svn_rev'):
options.manually_grab_svn_rev = None
if not hasattr(options, 'force'):
options.force = None
return (options, args)
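# Note on the verbosity handling above (derived from the levels list, no new
# behavior): the default log level is ERROR and each -v steps it down one
# notch, capped at DEBUG:
#
#   gclient sync        -> logging.ERROR
#   gclient sync -v     -> logging.WARNING
#   gclient sync -vv    -> logging.INFO
#   gclient sync -vvv   -> logging.DEBUG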
def disable_buffering():
# Make stdout auto-flush so buildbot doesn't kill us during lengthy
  # operations. Python has a strong tendency to buffer sys.stdout.
sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
# Make stdout annotated with the thread ids.
sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout)
def main(argv):
"""Doesn't parse the arguments here, just find the right subcommand to
execute."""
if sys.hexversion < 0x02060000:
print(
'\nYour python version %s is unsupported, please upgrade.\n' %
sys.version.split(' ', 1)[0],
file=sys.stderr)
return 2
if not sys.executable:
print(
        '\nPython cannot find the location of its own executable.\n',
file=sys.stderr)
return 2
fix_encoding.fix_encoding()
disable_buffering()
setup_color.init()
dispatcher = subcommand.CommandDispatcher(__name__)
try:
return dispatcher.execute(OptionParser(), argv)
except KeyboardInterrupt:
gclient_utils.GClientChildren.KillAllRemainingChildren()
raise
except (gclient_utils.Error, subprocess2.CalledProcessError) as e:
print('Error: %s' % str(e), file=sys.stderr)
return 1
finally:
gclient_utils.PrintWarnings()
return 0
if '__main__' == __name__:
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
sys.stderr.write('interrupted\n')
sys.exit(1)
# vim: ts=2:sw=2:tw=80:et:
| [] | [] | ["CHROME_HEADLESS", "GCLIENT_FILE"] | [] | ["CHROME_HEADLESS", "GCLIENT_FILE"] | python | 2 | 0 |