filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0-0, nullable) | sentence (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
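Below is a minimal sketch of how one record with these columns might be inspected, assuming the rows that follow are exported as JSON Lines with one object per line; the file name rows.jsonl and the loop are illustrative only, not part of the dataset.

import json

# Hypothetical export file: one JSON object per row, using the column names above.
with open("rows.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        # "constargjson" holds a JSON array of the constant environment-variable
        # names extracted from "content" (e.g. ["BUNDLE_BENTOML_VERSION"]).
        names = json.loads(row["constargjson"]) if row["constargjson"] else []
        print(row["filename"], row["lang"], names)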
tests/conftest.py
|
import functools
import glob
import inspect
import os
import imageio
import numpy as np
import pytest
from bentoml.yatai.client import YataiClient
from tests.bento_service_examples.example_bento_service import ExampleBentoService
def pytest_configure():
'''
global constants for tests
'''
# async request client
async def assert_request(
method,
url,
headers=None,
data=None,
timeout=None,
assert_status=None,
assert_data=None,
):
if assert_status is None:
assert_status = 200
import aiohttp
try:
async with aiohttp.ClientSession() as sess:
async with sess.request(
method, url, data=data, headers=headers, timeout=timeout
) as r:
r_body = await r.read()
except RuntimeError:
# the event loop has been closed due to previous task failed, ignore
return
if callable(assert_status):
assert assert_status(r.status), f"{r.status} {r_body}"
else:
assert r.status == assert_status, f"{r.status} {r_body}"
if assert_data is not None:
if callable(assert_data):
assert assert_data(r_body), r_body
else:
assert r_body == assert_data
pytest.assert_request = assert_request
# dataframe json orients
pytest.DF_ORIENTS = {
'split',
'records',
'index',
'columns',
'values',
# 'table', # TODO(bojiang)
}
pytest.DF_AUTO_ORIENTS = {
'records',
'columns',
}
def _since_version(ver: str):
def _wrapper(func):
if not inspect.iscoroutinefunction(func):
@functools.wraps(func)
def _wrapped(*args, **kwargs):
from packaging import version
bundle_ver = os.environ.get("BUNDLE_BENTOML_VERSION")
if bundle_ver and version.parse(bundle_ver) < version.parse(ver):
pytest.skip()
return func(*args, **kwargs)
else:
@functools.wraps(func)
async def _wrapped(*args, **kwargs):
from packaging import version
bundle_ver = os.environ.get("BUNDLE_BENTOML_VERSION")
if bundle_ver and version.parse(bundle_ver) < version.parse(ver):
pytest.skip()
return await func(*args, **kwargs)
return _wrapped
return _wrapper
pytest.since_bentoml_version = _since_version
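# Illustrative usage of the marker registered above (a hypothetical test, not part
# of this conftest): tests decorated this way are skipped when the bundle under
# test was built with an older BentoML version than the one given.
#
#     @pytest.since_bentoml_version("0.13.0")
#     def test_new_inference_api(bento_service):
#         ...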
def pytest_addoption(parser):
parser.addoption("--batch-request", action="store_false")
@pytest.fixture()
def is_batch_request(pytestconfig):
return pytestconfig.getoption("batch_request")
@pytest.fixture()
def bin_file(tmpdir):
bin_file_ = tmpdir.join("bin_file.bin")
with open(bin_file_, "wb") as of:
of.write("â".encode('gb18030'))
return str(bin_file_)
@pytest.fixture()
def bin_files(tmpdir):
for i in range(10):
bin_file_ = tmpdir.join(f"{i}.bin")
with open(bin_file_, "wb") as of:
of.write(f"â{i}".encode('gb18030'))
return sorted(glob.glob(str(tmpdir.join("*.bin"))))
@pytest.fixture()
def unicode_file(tmpdir):
bin_file_ = tmpdir.join("bin_file.unicode")
with open(bin_file_, "wb") as of:
of.write("â".encode('utf-8'))
return str(bin_file_)
@pytest.fixture()
def unicode_files(tmpdir):
for i in range(10):
bin_file_ = tmpdir.join(f"{i}.list.unicode")
with open(bin_file_, "wb") as of:
of.write(f"â{i}".encode('utf-8'))
return sorted(glob.glob(str(tmpdir.join("*.list.unicode"))))
@pytest.fixture()
def img_file(tmpdir):
img_file_ = tmpdir.join("test_img.jpg")
imageio.imwrite(str(img_file_), np.zeros((10, 10)))
return str(img_file_)
@pytest.fixture()
def img_files(tmpdir):
for i in range(10):
img_file_ = tmpdir.join(f"{i}.list.jpg")
imageio.imwrite(str(img_file_), np.zeros((10, 10)))
return sorted(glob.glob(str(tmpdir.join("*.list.jpg"))))
@pytest.fixture()
def json_file(tmpdir):
json_file_ = tmpdir.join("test.json")
with open(json_file_, "w") as of:
of.write('{"name": "kaith", "game": "morrowind"}')
return str(json_file_)
@pytest.fixture()
def json_files(tmpdir):
for i in range(10):
file_ = tmpdir.join(f"{i}.list.json")
with open(file_, "w") as of:
of.write('{"i": %d, "name": "kaith", "game": "morrowind"}' % i)
return sorted(glob.glob(str(tmpdir.join("*.list.json"))))
class TestModel(object):
def predict_dataframe(self, df):
return df["col1"] * 2
def predict_image(self, input_datas):
for input_data in input_datas:
assert input_data is not None
return [input_data.shape for input_data in input_datas]
def predict_multi_images(self, original, compared):
return (original == compared).all()
def predict_json(self, input_jsons):
assert input_jsons
return [{"ok": True}] * len(input_jsons)
@pytest.fixture()
def example_bento_service_class():
# When the ExampleBentoService gets saved and loaded again in a test, the two class
# attributes below get set to the loaded BentoService class. Reset them here so they
# do not affect other tests
ExampleBentoService._bento_service_bundle_path = None
ExampleBentoService._bento_service_bundle_version = None
return ExampleBentoService
@pytest.fixture()
def bento_service(example_bento_service_class): # pylint:disable=redefined-outer-name
"""Create a new ExampleBentoService
"""
test_model = TestModel()
test_svc = example_bento_service_class()
test_svc.pack('model', test_model)
return test_svc
@pytest.fixture()
def bento_bundle_path(bento_service): # pylint:disable=redefined-outer-name
"""Create a new ExampleBentoService, saved it to tmpdir, and return full saved_path
"""
saved_path = bento_service.save()
yield saved_path
delete_saved_bento_service(bento_service.name, bento_service.version)
def delete_saved_bento_service(name, version):
yc = YataiClient()
yc.repository.delete(f'{name}:{version}')
|
[] |
[] |
[
"BUNDLE_BENTOML_VERSION"
] |
[]
|
["BUNDLE_BENTOML_VERSION"]
|
python
| 1 | 0 | |
cari-pakar web/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "caripakar.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/util.go
|
//
// Copyright 2022 Red Hat, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pkg
import (
"context"
"flag"
"os"
"path/filepath"
"strings"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/homedir"
)
var (
kubeconfig *string
)
func ParseFlags() {
if home := homedir.HomeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
flag.Parse()
}
func GetClusterConfigPath() string {
return *kubeconfig
}
func IsOutSideClusterConfig() bool {
isOutSideClusterConfig := os.Getenv("OUTSIDE_CLUSTER")
return strings.ToLower(isOutSideClusterConfig) == "true"
}
func IsNamespaceInDeletingState(clientset *kubernetes.Clientset, namespaceName string) (bool, error) {
namespace, err := clientset.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return true, nil
}
return false, err
}
if !namespace.ObjectMeta.DeletionTimestamp.IsZero() {
return true, nil
}
return false, nil
}
|
[
"\"OUTSIDE_CLUSTER\""
] |
[] |
[
"OUTSIDE_CLUSTER"
] |
[]
|
["OUTSIDE_CLUSTER"]
|
go
| 1 | 0 | |
recipes/libbasisu/all/conanfile.py
|
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class LibBasisUniversalConan(ConanFile):
name = "libbasisu"
description = "Basis Universal Supercompressed GPU Texture Codec"
homepage = "https://github.com/BinomialLLC/basis_universal"
topics = ("conan", "basis", "textures", "compression")
url = "https://github.com/conan-io/conan-center-index"
license = "Apache-2.0"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {
"fPIC": [True, False],
"shared": [True, False],
"use_sse4": [True, False],
"with_zstd": [True, False],
"custom_iterator_debug_level": [True, False]
}
default_options = {
"fPIC": True,
"shared": False,
"use_sse4": True,
"with_zstd": True,
"custom_iterator_debug_level": False
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.compiler != "Visual Studio":
del self.options.custom_iterator_debug_level
def _minimum_compiler_version(self) -> bool:
return {
"Visual Studio": "15",
"gcc": "5.4",
"clang": "3.9",
"apple-clang": "10"
}
def validate(self):
min_version = self._minimum_compiler_version().get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
elif tools.Version(self.settings.compiler.version) < min_version:
raise ConanInvalidConfiguration("{} {} does not support compiler with version {} {}, minimum supported compiler version is {} ".format(self.name, self.version, self.settings.compiler, self.settings.compiler.version, min_version))
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def configure(self):
if self.options.shared:
del self.options.fPIC
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["SSE4"] = self.options.use_sse4
self._cmake.definitions["ZSTD"] = self.options.with_zstd
self._cmake.definitions["BASISU_NO_ITERATOR_DEBUG_LEVEL"] = not self.options.get_safe("custom_iterator_debug_level", default=self.default_options["custom_iterator_debug_level"])
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("*.h", dst=os.path.join("include", self.name, "transcoder"), src=os.path.join(self._source_subfolder, "transcoder"))
self.copy("*.h", dst=os.path.join("include", self.name, "encoder"), src=os.path.join(self._source_subfolder, "encoder"))
self.copy(pattern="*.a", dst="lib", keep_path=False)
self.copy(pattern="*.so", dst="lib", keep_path=False)
self.copy(pattern="*.dylib*", dst="lib", keep_path=False)
self.copy(pattern="*.lib", dst="lib", keep_path=False)
self.copy(pattern="*.dll", dst="bin", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["cmake_find_package"] = self.name
self.cpp_info.names["cmake_find_package_multi"] = self.name
self.cpp_info.includedirs = ["include", os.path.join("include", self.name)]
if self.settings.os == "Linux":
self.cpp_info.system_libs = ["m", "pthread"]
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
drivers/node/aws/aws.go
|
package aws
import (
"fmt"
"os"
"strings"
"time"
aws_pkg "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ssm"
"github.com/portworx/sched-ops/task"
"github.com/portworx/torpedo/drivers/node"
)
const (
// DriverName is the name of the aws driver
DriverName = "aws"
)
type aws struct {
node.Driver
session *session.Session
credentials *credentials.Credentials
config *aws_pkg.Config
region string
svc *ec2.EC2
svcSsm *ssm.SSM
instances []*ec2.Instance
}
func (a *aws) String() string {
return DriverName
}
func (a *aws) Init(nodeOpts node.InitOptions) error {
var err error
sess := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
}))
creds := credentials.NewEnvCredentials()
a.credentials = creds
a.region = os.Getenv("AWS_REGION")
if a.region == "" {
return fmt.Errorf("Env AWS_REGION not found")
}
config := &aws_pkg.Config{Region: aws_pkg.String(a.region)}
config.WithCredentials(creds)
a.config = config
svc := ec2.New(sess, config)
a.svc = svc
a.svcSsm = ssm.New(sess, aws_pkg.NewConfig().WithRegion(a.region))
a.session = sess
instances, err := a.getAllInstances()
if err != nil {
return err
}
a.instances = instances
nodes := node.GetWorkerNodes()
for _, n := range nodes {
if err := a.TestConnection(n, node.ConnectionOpts{
Timeout: 1 * time.Minute,
TimeBeforeRetry: 10 * time.Second,
}); err != nil {
return &node.ErrFailedToTestConnection{
Node: n,
Cause: err.Error(),
}
}
}
return nil
}
func (a *aws) TestConnection(n node.Node, options node.ConnectionOpts) error {
var err error
instanceID, err := a.getNodeIDByPrivAddr(n)
if err != nil {
return &node.ErrFailedToTestConnection{
Node: n,
Cause: fmt.Sprintf("failed to get instance ID for connection due to: %v", err),
}
}
command := "uptime"
param := make(map[string][]*string)
param["commands"] = []*string{
aws_pkg.String(command),
}
sendCommandInput := &ssm.SendCommandInput{
Comment: aws_pkg.String(command),
DocumentName: aws_pkg.String("AWS-RunShellScript"),
Parameters: param,
InstanceIds: []*string{
aws_pkg.String(instanceID),
},
}
sendCommandOutput, err := a.svcSsm.SendCommand(sendCommandInput)
if err != nil {
return &node.ErrFailedToTestConnection{
Node: n,
Cause: fmt.Sprintf("failed to send command to instance %s: %v", instanceID, err),
}
}
if sendCommandOutput.Command == nil || sendCommandOutput.Command.CommandId == nil {
return fmt.Errorf("No command returned after sending command to %s", instanceID)
}
listCmdsInput := &ssm.ListCommandInvocationsInput{
CommandId: sendCommandOutput.Command.CommandId,
}
t := func() (interface{}, bool, error) {
return "", true, a.connect(n, listCmdsInput)
}
if _, err := task.DoRetryWithTimeout(t, options.Timeout, options.TimeBeforeRetry); err != nil {
return &node.ErrFailedToTestConnection{
Node: n,
Cause: err.Error(),
}
}
return err
}
func (a *aws) connect(n node.Node, listCmdsInput *ssm.ListCommandInvocationsInput) error {
var status string
listCmdInvsOutput, _ := a.svcSsm.ListCommandInvocations(listCmdsInput)
for _, cmd := range listCmdInvsOutput.CommandInvocations {
status = strings.TrimSpace(*cmd.StatusDetails)
if status == "Success" {
return nil
}
}
return &node.ErrFailedToTestConnection{
Node: n,
Cause: fmt.Sprintf("Failed to connect. Command status is %s", status),
}
}
func (a *aws) RebootNode(n node.Node, options node.RebootNodeOpts) error {
var err error
instanceID, err := a.getNodeIDByPrivAddr(n)
if err != nil {
return &node.ErrFailedToRebootNode{
Node: n,
Cause: fmt.Sprintf("failed to get instance ID due to: %v", err),
}
}
//Reboot the instance by its InstanceID
rebootInput := &ec2.RebootInstancesInput{
InstanceIds: []*string{
aws_pkg.String(instanceID),
},
}
_, err = a.svc.RebootInstances(rebootInput)
if err != nil {
return &node.ErrFailedToRebootNode{
Node: n,
Cause: fmt.Sprintf("failed to reboot instance due to: %v", err),
}
}
return nil
}
func (a *aws) ShutdownNode(n node.Node, options node.ShutdownNodeOpts) error {
return nil
}
// TODO add AWS implementation for this
func (a *aws) FindFiles(path string, n node.Node, options node.FindOpts) (string, error) {
return "", nil
}
// TODO implement for AWS
func (a *aws) Systemctl(n node.Node, service string, options node.SystemctlOpts) error {
return nil
}
func (a *aws) getAllInstances() ([]*ec2.Instance, error) {
instances := []*ec2.Instance{}
params := &ec2.DescribeInstancesInput{}
resp, err := a.svc.DescribeInstances(params)
if err != nil {
return instances, fmt.Errorf("there was an error listing instances in %s. Error: %q", a.region, err.Error())
}
reservations := resp.Reservations
for _, resv := range reservations {
for _, ins := range resv.Instances {
instances = append(instances, ins)
}
}
return instances, err
}
func (a *aws) getNodeIDByPrivAddr(n node.Node) (string, error) {
for _, i := range a.instances {
for _, addr := range n.Addresses {
if aws_pkg.StringValue(i.PrivateIpAddress) == addr {
return aws_pkg.StringValue(i.InstanceId), nil
}
}
}
return "", fmt.Errorf("Failed to get instanceID of %s by privateIP", n.Name)
}
func init() {
a := &aws{
Driver: node.NotSupportedDriver,
}
node.Register(DriverName, a)
}
|
[
"\"AWS_REGION\""
] |
[] |
[
"AWS_REGION"
] |
[]
|
["AWS_REGION"]
|
go
| 1 | 0 | |
config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
# SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
try:
SECRET_KEY = open("/run/secrets/SECRET_KEY", "r").read().strip()
except Exception:
SECRET_KEY = 'donotknow'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
|
[] |
[] |
[
"SECRET_KEY",
"DATABASE_URL"
] |
[]
|
["SECRET_KEY", "DATABASE_URL"]
|
python
| 2 | 0 | |
pkg/manager/boot/boot_manager_test.go
|
package bootmanager
import (
"io"
"net/http"
"os"
"sync"
"testing"
"time"
"github.com/labstack/echo/v4"
"github.com/IacopoMelani/Go-Starter-Project/pkg/helpers/request"
durationdata "github.com/IacopoMelani/Go-Starter-Project/pkg/models/duration_data"
"github.com/subosito/gotenv"
)
// DurationDataTest -
type DurationDataTest struct{}
var ddt *durationdata.DurationData
var onceUser sync.Once
// GeDurationDataTest - Returns the DurationData instance for users
func GeDurationDataTest() *durationdata.DurationData {
onceUser.Do(func() {
ddt = new(durationdata.DurationData)
ddt.SetDurationDataInterface(DurationDataTest{})
ddt.SetTimeToRefresh(1)
ddt.Daemon()
})
return ddt
}
// EncodeQueryString -
func (u DurationDataTest) EncodeQueryString(req *http.Request) {}
// GetBody -
func (u DurationDataTest) GetBody() io.Reader {
return nil
}
// GetMethod -
func (u DurationDataTest) GetMethod() string {
return "GET"
}
// GetURL -
func (u DurationDataTest) GetURL() string {
return "https://randomuser.me/api/"
}
// HandlerData -
func (u DurationDataTest) HandlerData() (interface{}, error) {
content, err := request.GetRemoteData(u)
return content, err
}
func loadEnv() error {
if err := gotenv.Load("./../../../.env"); err != nil {
return err
}
return nil
}
func TestBootManager(t *testing.T) {
if err := loadEnv(); err != nil {
t.Fatal(err.Error())
}
conn := os.Getenv("STRING_CONNECTION")
port := ":8889"
bm := GetBootManager()
bm.SetAppPort(port)
bm.SetConnectionSting(conn)
bm.SetDriverSQL(os.Getenv("SQL_DRIVER"))
bm.RegisterDDataProc(GeDurationDataTest)
bm.RegisterProc(func() {
println("hello world!")
})
bm.UseEchoLogger()
bm.UseEchoRecover()
bm.RegisterEchoRoutes(func(e *echo.Echo) {
e.GET("/", func(c echo.Context) error { return nil })
})
go bm.StartApp()
time.Sleep(1 * time.Second)
bm.RegisterDDataProc(GeDurationDataTest)
bm.RegisterProc(func() {
println("hello world!")
})
}
|
[
"\"STRING_CONNECTION\"",
"\"SQL_DRIVER\""
] |
[] |
[
"STRING_CONNECTION",
"SQL_DRIVER"
] |
[]
|
["STRING_CONNECTION", "SQL_DRIVER"]
|
go
| 2 | 0 | |
my_ad_project/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'my_ad.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py
|
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_analyze_conversation_app.py
DESCRIPTION:
This sample demonstrates how to analyze a user query for intents and entities using a deepstack project.
For more info about how to set up a CLU deepstack project, see the README.
USAGE:
python sample_analyze_conversation_app.py
Set the environment variables with your own values before running the sample:
1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource.
2) AZURE_CONVERSATIONS_KEY - your CLU API key.
3) AZURE_CONVERSATIONS_PROJECT - the name of your CLU conversations project.
"""
def sample_analyze_conversation_app():
# [START analyze_conversation_app]
# import libraries
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.conversations import ConversationAnalysisClient
from azure.ai.language.conversations.models import AnalyzeConversationOptions
# get secrets
conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"]
conv_key = os.environ["AZURE_CONVERSATIONS_KEY"]
conv_project = os.environ["AZURE_CONVERSATIONS_PROJECT"]
# prepare data
query = "One california maki please."
input = AnalyzeConversationOptions(
query=query
)
# analyze query
client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key))
with client:
result = client.analyze_conversations(
input,
project_name=conv_project,
deployment_name='production'
)
# view result
print("query: {}".format(result.query))
print("project kind: {}\n".format(result.prediction.project_kind))
print("view top intent:")
print("top intent: {}".format(result.prediction.top_intent))
print("\tcategory: {}".format(result.prediction.intents[0].category))
print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score))
print("view entities:")
for entity in result.prediction.entities:
print("\tcategory: {}".format(entity.category))
print("\ttext: {}".format(entity.text))
print("\tconfidence score: {}".format(entity.confidence_score))
# [END analyze_conversation_app]
if __name__ == '__main__':
sample_analyze_conversation_app()
|
[] |
[] |
[
"AZURE_CONVERSATIONS_ENDPOINT",
"AZURE_CONVERSATIONS_KEY",
"AZURE_CONVERSATIONS_PROJECT"
] |
[]
|
["AZURE_CONVERSATIONS_ENDPOINT", "AZURE_CONVERSATIONS_KEY", "AZURE_CONVERSATIONS_PROJECT"]
|
python
| 3 | 0 | |
mach/database/up.go
|
package main
import (
"log"
"fmt"
"github.com/golang-migrate/migrate/v4"
_ "github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/joho/godotenv"
"os"
)
func main() {
err := godotenv.Load("../.env")
if err != nil {
log.Fatal("Error loading .env file")
}
m, err := migrate.New("file://migrations", os.Getenv("DB_CONNECTION_STRING"))
if err != nil {
log.Fatal(err)
}
if err := m.Up(); err != nil {
log.Fatal(err)
}
fmt.Println("Migration 'UP' finished")
}
|
[
"\"DB_CONNECTION_STRING\""
] |
[] |
[
"DB_CONNECTION_STRING"
] |
[]
|
["DB_CONNECTION_STRING"]
|
go
| 1 | 0 | |
jsonrpc/client_test.go
|
package jsonrpc
import (
"github.com/stretchr/testify/assert"
"os"
"testing"
)
var testurl = os.Getenv("TESTURL")
// newTestClient is used to get a client for testing purposes
func newTestClient() (*Client, error) {
c, err := NewClient(testurl)
if err != nil {
return nil, err
}
return c, nil
}
func Test_NewClient(t *testing.T) {
assert.NotPanics(t, func() {
_, err := NewClient(testurl)
if err != nil {
panic(err)
}
})
}
|
[
"\"TESTURL\""
] |
[] |
[
"TESTURL"
] |
[]
|
["TESTURL"]
|
go
| 1 | 0 | |
pgmpy/utils/optimizer.py
|
import warnings
from math import isclose
try: # pragma: no cover
import torch
optim = torch.optim
except ImportError: # pragma: no cover
optim = None
def pinverse(t):
"""
Computes the pseudo-inverse of a matrix using SVD.
Parameters
----------
t: torch.tensor
The matrix whose inverse is to be calculated.
Returns
-------
torch.tensor: Inverse of the matrix `t`.
"""
u, s, v = t.svd()
t_inv = v @ torch.diag(torch.where(s != 0, 1 / s, s)) @ u.t()
return t_inv
def optimize(
loss_fn, params={}, loss_args={}, opt="adam", max_iter=10000, exit_delta=1e-4
):
"""
Generic function to optimize loss functions.
Parameters
----------
loss_fn: Function
The function to optimize. It must return a torch.Tensor object.
params: dict {str: torch.Tensor}
The parameters which need to be optimized along with their initial values. The
dictionary should be of the form: {variable name: initial value}
loss_args: dict {str: torch.Tensor}
Extra parameters which loss function needs to compute the loss.
opt: str | Instance of torch.optim.Optimizer
The optimizer to use. Should either be an instance of torch.optim or a str.
When str is given initializes the optimizer with default parameters.
If str the options are:
1. Adadelta: Adadelta algorithm (Ref: https://arxiv.org/abs/1212.5701)
2. Adagrad: Adagrad algorithm (Ref: http://jmlr.org/papers/v12/duchi11a.html)
3. Adam: Adam algorithm (Ref: https://arxiv.org/abs/1412.6980)
4. SparseAdam: Lazy version of Adam. Suitable for sparse tensors.
5. Adamax: Adamax algorithm (variant of Adam based on infinity norm)
6. ASGD: Averaged Stochastic Gradient Descent (Ref: https://dl.acm.org/citation.cfm?id=131098)
7. LBFGS: L-BFGS Algorithm
8. RMSprop: RMSprop Algorithm (Ref: https://arxiv.org/abs/1308.0850v5)
9. Rprop: Resilient Backpropagation Algorithm
10. SGD: Stochastic Gradient Descent.
max_iter: int (default: 10000)
The maximum number of iterations to run the optimization for.
exit_delta: float
The optimization exit criterion. When the change in loss between iterations is less than
`exit_delta`, the optimizer returns the values.
Returns
-------
dict: The values that were given in params in the same format.
Examples
--------
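A minimal sketch (illustrative only, assuming torch is installed): minimize the
squared norm of a single parameter tensor.
>>> import torch
>>> x = torch.tensor([1.0, 1.0], requires_grad=True)
>>> loss_fn = lambda params, loss_args: (params["x"] ** 2).sum()
>>> result = optimize(loss_fn, params={"x": x}, opt="adam", max_iter=1000)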
"""
# TODO: Add option to modify the optimizers.
init_loss = float("inf")
if isinstance(opt, str):
opt_dict = {
"adadelta": optim.Adadelta,
"adagrad": optim.Adagrad,
"adam": optim.Adam,
"sparseadam": optim.SparseAdam,
"adamax": optim.Adamax,
"asgd": optim.ASGD,
"lbfgs": optim.LBFGS,
"rmsprop": optim.RMSprop,
"rprop": optim.Rprop,
"sgd": optim.SGD,
}
opt = opt_dict[opt.lower()](params.values())
for t in range(max_iter):
def closure():
opt.zero_grad()
loss = loss_fn(params, loss_args)
loss.backward()
return loss
opt.step(closure=closure)
if isclose(init_loss, closure().item(), abs_tol=exit_delta):
warnings.warn(
"Converged after {iterations} iterations.".format(iterations=t)
)
return params
else:
init_loss = closure().item()
warnings.warn(
"""Couldn't converge after {iterations} iterations. Try increasing max_iter or change
optimizer parameters""".format(
iterations=max_iter
)
)
return params
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
django_site/settings.py
|
"""
Django settings for django_site project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
from google.oauth2 import service_account
import json
import django_heroku
# Custom variables
#DJANGO_SETTINGS_MODULE='testtinymce.settings'
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("DJANGO_SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = (os.environ.get("DEBUG_VALUE") == "True")
ALLOWED_HOSTS = ['tejas-django-site.herokuapp.com']
if DEBUG:
ALLOWED_HOSTS += ['localhost']
# Application definition
INSTALLED_APPS = [
'bio.apps.BioConfig',
'blog.apps.BlogConfig',
'ckeditor',
'ckeditor_uploader',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Django-storages configuration for Google Cloud Storage
gs_service_account_credentials = json.loads(os.environ.get("GCS_SERVICE_ACCOUNT_CREDENTIALS"))
GS_CREDENTIALS = service_account.Credentials.from_service_account_info(gs_service_account_credentials)
DEFAULT_FILE_STORAGE = 'storages.backends.gcloud.GoogleCloudStorage'
if DEBUG:
GS_BUCKET_NAME = os.environ.get("GCS_TEST_BUCKET_NAME")
else:
GS_BUCKET_NAME = os.environ.get("GCS_BUCKET_NAME")
STATICFILES_STORAGE = 'storages.backends.gcloud.GoogleCloudStorage'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # For Heroku
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
#STATICFILES_DIRS = (
# os.path.join(BASE_DIR, 'static'),
#) # For Heroku
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# CKEditor Config
CKEDITOR_UPLOAD_PATH = "uploads/"
# Heroku config
django_heroku.settings(locals())
|
[] |
[] |
[
"DEBUG_VALUE",
"DJANGO_SECRET_KEY",
"GCS_TEST_BUCKET_NAME",
"GCS_BUCKET_NAME",
"GCS_SERVICE_ACCOUNT_CREDENTIALS"
] |
[]
|
["DEBUG_VALUE", "DJANGO_SECRET_KEY", "GCS_TEST_BUCKET_NAME", "GCS_BUCKET_NAME", "GCS_SERVICE_ACCOUNT_CREDENTIALS"]
|
python
| 5 | 0 | |
internal/app/driver/redis_test.go
|
// Copyright 2018 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package driver
import (
"fmt"
"github.com/nbio/st"
"github.com/spf13/viper"
"os"
"strconv"
"testing"
)
// init setup stuff
func init() {
basePath := fmt.Sprintf("%s/src/github.com/clivern/beaver", os.Getenv("GOPATH"))
configFile := fmt.Sprintf("%s/%s", basePath, "config.test.yml")
viper.SetConfigFile(configFile)
err := viper.ReadInConfig()
if err != nil {
panic(fmt.Sprintf(
"Error while loading config file [%s]: %s",
configFile,
err.Error(),
))
}
os.Setenv("BeaverBasePath", fmt.Sprintf("%s/", basePath))
os.Setenv("PORT", strconv.Itoa(viper.GetInt("app.port")))
}
// TestRedisDriver test cases
func TestRedisDriver(t *testing.T) {
driver := NewRedisDriver()
ok, err := driver.Connect()
st.Expect(t, ok, true)
st.Expect(t, err, nil)
ok, err = driver.Ping()
st.Expect(t, ok, true)
st.Expect(t, err, nil)
// Do Clean
driver.Del("app_name")
driver.HTruncate("configs")
count, err := driver.Del("app_name")
st.Expect(t, int(count), 0)
st.Expect(t, err, nil)
ok, err = driver.Set("app_name", "Beaver", 0)
st.Expect(t, ok, true)
st.Expect(t, err, nil)
ok, err = driver.Exists("app_name")
st.Expect(t, ok, true)
st.Expect(t, err, nil)
value, err := driver.Get("app_name")
st.Expect(t, value, "Beaver")
st.Expect(t, err, nil)
count, err = driver.HDel("configs", "app_name")
st.Expect(t, int(count), 0)
st.Expect(t, err, nil)
ok, err = driver.HSet("configs", "app_name", "Beaver")
st.Expect(t, ok, true)
st.Expect(t, err, nil)
ok, err = driver.HExists("configs", "app_name")
st.Expect(t, ok, true)
st.Expect(t, err, nil)
value, err = driver.HGet("configs", "app_name")
st.Expect(t, value, "Beaver")
st.Expect(t, err, nil)
count, err = driver.HLen("configs")
st.Expect(t, int(count), 1)
st.Expect(t, err, nil)
count, err = driver.HDel("configs", "app_name")
st.Expect(t, int(count), 1)
st.Expect(t, err, nil)
count, err = driver.HTruncate("configs")
st.Expect(t, int(count), 0)
st.Expect(t, err, nil)
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
community-content/pytorch_image_classification_distributed_data_parallel_training_with_vertex_sdk/trainer/task.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Main program for PyTorch distributed training.
Adapted from: https://github.com/narumiruna/pytorch-distributed-example
"""
import argparse
import os
import shutil
import torch
from torch import distributed
from torch.nn.parallel import DistributedDataParallel
from torch.utils import data
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets, transforms
def parse_args():
parser = argparse.ArgumentParser()
# Using environment variables for Cloud Storage directories
# see more details in https://cloud.google.com/vertex-ai/docs/training/code-requirements
parser.add_argument(
'--model-dir', default=os.getenv('AIP_MODEL_DIR'), type=str,
help='a Cloud Storage URI of a directory intended for saving model artifacts')
parser.add_argument(
'--tensorboard-log-dir', default=os.getenv('AIP_TENSORBOARD_LOG_DIR'), type=str,
help='a Cloud Storage URI of a directory intended for saving TensorBoard')
parser.add_argument(
'--checkpoint-dir', default=os.getenv('AIP_CHECKPOINT_DIR'), type=str,
help='a Cloud Storage URI of a directory intended for saving checkpoints')
parser.add_argument(
'--backend', type=str, default='gloo',
help='Use the `nccl` backend for distributed GPU training. '
'Use the `gloo` backend for distributed CPU training.')
parser.add_argument(
'--init-method', type=str, default='env://',
help='URL specifying how to initialize the package.')
parser.add_argument(
'--world-size', type=int, default=os.environ.get('WORLD_SIZE', 1),
help='The total number of nodes in the cluster. '
'This variable has the same value on every node.')
parser.add_argument(
'--rank', type=int, default=os.environ.get('RANK', 0),
help='A unique identifier for each node. '
'On the master worker, this is set to 0. '
'On each worker, it is set to a different value from 1 to WORLD_SIZE - 1.')
parser.add_argument(
'--epochs', type=int, default=20)
parser.add_argument(
'--no-cuda', action='store_true')
parser.add_argument(
'-lr', '--learning-rate', type=float, default=1e-3)
parser.add_argument(
'--batch-size', type=int, default=128)
parser.add_argument(
'--local-mode', action='store_true', help='use local mode when running on your local machine')
args = parser.parse_args()
return args
def makedirs(model_dir):
if os.path.exists(model_dir) and os.path.isdir(model_dir):
shutil.rmtree(model_dir)
os.makedirs(model_dir)
return
def distributed_is_initialized():
if distributed.is_available():
if distributed.is_initialized():
return True
return False
class Average(object):
def __init__(self):
self.sum = 0
self.count = 0
def __str__(self):
return '{:.6f}'.format(self.average)
@property
def average(self):
return self.sum / self.count
def update(self, value, number):
self.sum += value * number
self.count += number
class Accuracy(object):
def __init__(self):
self.correct = 0
self.count = 0
def __str__(self):
return '{:.2f}%'.format(self.accuracy * 100)
@property
def accuracy(self):
return self.correct / self.count
@torch.no_grad()
def update(self, output, target):
pred = output.argmax(dim=1)
correct = pred.eq(target).sum().item()
self.correct += correct
self.count += output.size(0)
class Net(torch.nn.Module):
def __init__(self, device):
super(Net, self).__init__()
self.fc = torch.nn.Linear(784, 10).to(device)
def forward(self, x):
return self.fc(x.view(x.size(0), -1))
class MNISTDataLoader(data.DataLoader):
def __init__(self, root, batch_size, train=True):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
])
dataset = datasets.MNIST(root, train=train, transform=transform, download=True)
sampler = None
if train and distributed_is_initialized():
sampler = data.DistributedSampler(dataset)
super(MNISTDataLoader, self).__init__(
dataset,
batch_size=batch_size,
shuffle=(sampler is None),
sampler=sampler,
)
class Trainer(object):
def __init__(self,
model,
optimizer,
train_loader,
test_loader,
device,
model_name,
checkpoint_path
):
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.test_loader = test_loader
self.device = device
self.model_name = model_name
self.checkpoint_path = checkpoint_path
def save(self, model_dir):
model_path = os.path.join(model_dir, self.model_name)
torch.save(self.model.state_dict(), model_path)
def fit(self, epochs, is_chief, writer):
for epoch in range(1, epochs + 1):
print('Epoch: {}, Training ...'.format(epoch))
train_loss, train_acc = self.train()
if is_chief:
test_loss, test_acc = self.evaluate()
writer.add_scalar('Loss/train', train_loss.average, epoch)
writer.add_scalar('Loss/test', test_loss.average, epoch)
writer.add_scalar('Accuracy/train', train_acc.accuracy, epoch)
writer.add_scalar('Accuracy/test', test_acc.accuracy, epoch)
torch.save(self.model.state_dict(), self.checkpoint_path)
print(
'Epoch: {}/{},'.format(epoch, epochs),
'train loss: {}, train acc: {},'.format(train_loss, train_acc),
'test loss: {}, test acc: {}.'.format(test_loss, test_acc),
)
def train(self):
self.model.train()
train_loss = Average()
train_acc = Accuracy()
for data, target in self.train_loader:
data = data.to(self.device)
target = target.to(self.device)
output = self.model(data)
loss = torch.nn.functional.cross_entropy(output, target)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
train_loss.update(loss.item(), data.size(0))
train_acc.update(output, target)
return train_loss, train_acc
@torch.no_grad()
def evaluate(self):
self.model.eval()
test_loss = Average()
test_acc = Accuracy()
for data, target in self.test_loader:
data = data.to(self.device)
target = target.to(self.device)
output = self.model(data)
loss = torch.nn.functional.cross_entropy(output, target)
test_loss.update(loss.item(), data.size(0))
test_acc.update(output, target)
return test_loss, test_acc
def main():
args = parse_args()
local_data_dir = './tmp/data'
local_model_dir = './tmp/model'
local_tensorboard_log_dir = './tmp/logs'
local_checkpoint_dir = './tmp/checkpoints'
model_dir = args.model_dir or local_model_dir
tensorboard_log_dir = args.tensorboard_log_dir or local_tensorboard_log_dir
checkpoint_dir = args.checkpoint_dir or local_checkpoint_dir
gs_prefix = 'gs://'
gcsfuse_prefix = '/gcs/'
if model_dir and model_dir.startswith(gs_prefix):
model_dir = model_dir.replace(gs_prefix, gcsfuse_prefix)
if tensorboard_log_dir and tensorboard_log_dir.startswith(gs_prefix):
tensorboard_log_dir = tensorboard_log_dir.replace(gs_prefix, gcsfuse_prefix)
if checkpoint_dir and checkpoint_dir.startswith(gs_prefix):
checkpoint_dir = checkpoint_dir.replace(gs_prefix, gcsfuse_prefix)
writer = SummaryWriter(tensorboard_log_dir)
is_chief = args.rank == 0
if is_chief:
makedirs(checkpoint_dir)
print(f'Checkpoints will be saved to {checkpoint_dir}')
checkpoint_path = os.path.join(checkpoint_dir, 'checkpoint.pt')
print(f'checkpoint_path is {checkpoint_path}')
if args.world_size > 1:
print('Initializing distributed backend with {} nodes'.format(args.world_size))
distributed.init_process_group(
backend=args.backend,
init_method=args.init_method,
world_size=args.world_size,
rank=args.rank,
)
print(f'[{os.getpid()}]: '
f'world_size = {distributed.get_world_size()}, '
f'rank = {distributed.get_rank()}, '
f'backend={distributed.get_backend()} \n', end='')
if torch.cuda.is_available() and not args.no_cuda:
device = torch.device('cuda:{}'.format(args.rank))
else:
device = torch.device('cpu')
model = Net(device=device)
if distributed_is_initialized():
model.to(device)
model = DistributedDataParallel(model)
if is_chief:
# All processes should see same parameters as they all start from same
# random parameters and gradients are synchronized in backward passes.
# Therefore, saving it in one process is sufficient.
torch.save(model.state_dict(), checkpoint_path)
print(f'Initial chief checkpoint is saved to {checkpoint_path}')
# Use a barrier() to make sure that process 1 loads the model after process
# 0 saves it.
if distributed_is_initialized():
distributed.barrier()
# configure map_location properly
model.load_state_dict(torch.load(checkpoint_path, map_location=device))
print(f'Initial chief checkpoint is saved to {checkpoint_path} with map_location {device}')
else:
model.load_state_dict(torch.load(checkpoint_path))
print(f'Initial chief checkpoint is loaded from {checkpoint_path}')
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
train_loader = MNISTDataLoader(
local_data_dir, args.batch_size, train=True)
test_loader = MNISTDataLoader(
local_data_dir, args.batch_size, train=False)
trainer = Trainer(
model=model,
optimizer=optimizer,
train_loader=train_loader,
test_loader=test_loader,
device=device,
model_name='mnist.pt',
checkpoint_path=checkpoint_path,
)
trainer.fit(args.epochs, is_chief, writer)
if model_dir == local_model_dir:
makedirs(model_dir)
trainer.save(model_dir)
print(f'Model is saved to {model_dir}')
print(f'Tensorboard logs are saved to: {tensorboard_log_dir}')
writer.close()
if is_chief:
os.remove(checkpoint_path)
if distributed_is_initialized():
distributed.destroy_process_group()
return
if __name__ == '__main__':
main()
|
[] |
[] |
[
"AIP_CHECKPOINT_DIR",
"AIP_TENSORBOARD_LOG_DIR",
"RANK",
"AIP_MODEL_DIR",
"WORLD_SIZE"
] |
[]
|
["AIP_CHECKPOINT_DIR", "AIP_TENSORBOARD_LOG_DIR", "RANK", "AIP_MODEL_DIR", "WORLD_SIZE"]
|
python
| 5 | 0 | |
bars.py
|
import matplotlib.pyplot as plt
import numpy as np
import os
import get_dc_data
# Differential figure.
casedata = get_dc_data.retrieve(download=False)
f2 = plt.figure(figsize=(6,4))
plt.suptitle("COVID-19 Data Summary, District of Columbia ",
fontweight="bold")
plt.title("github.com/reidac/covid19-curve-dc", style="oblique")
plt.xlabel("Days since March 8, 2020")
plt.ylabel("Increments")
inclen = len(casedata.positive)-1
total_incs = [casedata.positive[i+1]-casedata.positive[i] for i in range(inclen)]
pos_incs = [casedata.deaths[i+1]-casedata.deaths[i] for i in range(inclen)]
# recov_incs = [casedata.recovered[i+1]-casedata.recovered[i] for i in range(inclen)]
plt.bar(casedata.x[1:],total_incs,color='b',width=1.0)
plt.bar(casedata.x[1:],pos_incs,color='r',width=1.0)
# plt.bar(casedata.x[:-1]+0.4,recov_incs,color='g',width=0.4)
plt.legend(labels=['Positives','Deaths'])
if "FIG_PATH" in os.environ:
fig_path = os.environ['FIG_PATH']
else:
fig_path = "."
plt.savefig("{0}/us_dc_bars.png".format(fig_path),dpi=300,bbox_inches="tight")
print("Bar graph of case and death increments vs. date for the District of Columbia.")
|
[] |
[] |
[
"FIG_PATH"
] |
[]
|
["FIG_PATH"]
|
python
| 1 | 0 | |
script/simulate.py
|
execfile("../script/algorithms.py")
algorithms = ["MFU"]
algos = {}
algos["MFU"] = {}
algos["MFU"]["name"] = "MFU"
algos["MFU"]["function"] = "mfu_map"
metrics = {}
metrics["recall"] = {}
metrics["recall"]["name"] = "recall"
metrics["recall"]["function"] = globals()["evaluation_recall"]
metrics["averagePrecision"] = {}
metrics["averagePrecision"]["name"] = "averagePrecision"
metrics["averagePrecision"]["function"] = globals()["evaluation_ap"]
metrics["reciprocalRank"] = {}
metrics["reciprocalRank"]["name"] = "reciprocalRank"
metrics["reciprocalRank"]["function"] = globals()["evaluation_rr"]
context = {} #todo context selection
conf = {}
conf["eval"] = {}
conf["eval"]["listOfMetrics"] = ["recall","reciprocalRank","averagePrecision"]
conf["eval"]["metrics"] = metrics
conf["eval"]["listOfNs"] = [1,2,5,10]
conf["eval"]["minTestItems"] = 5
conf["eval"]["eventType"] = "opens" #installations, opens
import os
execfile("../script/utils.py")
eventsPath = os.environ["YAHOO_DATA"]
conf["eval"]["resultsFile"] = eventsPath + "/results.txt"
splitedRdd = sc.textFile(eventsPath + "/splitedData")
splitedRdd = splitedRdd.map(parseContextData2).filter(lambda row: len(row[1][1]) >= conf["eval"]["minTestItems"])
for algorithm in algorithms:
conf["algo"] = algos[algorithm]
algoFun = globals()[conf["algo"]["function"]]
resultsRdd = splitedRdd.map(lambda row : algoFun(row,conf))
results = fetchResults(resultsRdd, conf)
writeHeader = not(os.path.isfile(conf["eval"]["resultsFile"]))
with open(conf["eval"]["resultsFile"], "a") as myfile:
if writeHeader:
myfile.write("algorithm,metric,n,value\n")
for result in results:
myfile.write(",".join([conf["algo"]["name"],result[0],str(result[1]),str(result[2])]))
myfile.write("\n")
|
[] |
[] |
[
"YAHOO_DATA"
] |
[]
|
["YAHOO_DATA"]
|
python
| 1 | 0 | |
toolset/travis/travis_diff.py
|
#!/usr/bin/env python
# @file: toolset/travis/travis_diff.py
# @author: Nate Brady
#
# @description: This script is only for use within Travis-CI. It is meant to
# look through the commit history and determine whether or not the current
# framework test directory needs to be run. It compares the state of the PR branch
# against the target branch.
#
# Any changes found in the toolset/* directory other than continuous/*, travis/* and
# scaffolding/* will cause all tests to be run.
#
# The following commands can be put in commit messages to affect which tests will run:
#
# [ci skip] - Provided by Travis. Travis won't trigger any builds.
# [ci run-all] - This will force all tests to run.
# [ci fw-only Java/gemini JavaScript/nodejs] - Ensures that only Java/gemini and
# JavaScript/nodejs tests are run despite the detected changes.
# [ci fw Java/gemini] - Forces Java/gemini to run in addition to detected changes.
# [ci lang-only Java C++] - Ensures that only Java and C++ run despite detected changes.
# [ci lang Java C++] - Forces Java and C++ tests to run in addition to detected changes.
#
# If only a single test within a language group is forced to run, none of the other tests
# in that language group will run.
#
# IMPORTANT: the [ci *] commands must be added to every commit message. We do not look at
# previous commit messages. Make sure to keep your PR branch up-to-date with the target
# branch to avoid running unwanted tests.
import subprocess
import os
import re
def fw_found_in_changes(test, changes_output):
return re.search(
r"frameworks/" + re.escape(test) + "/",
changes_output, re.M)
# Cleans up diffing and grep output and into an array of strings
def clean_output(output):
return os.linesep.join([s for s in output.splitlines() if s])
def quit_diffing():
if len(run_tests):
print("travis-run-tests {!s}".format(" ".join(set(run_tests))))
else:
print("No tests to run.")
exit(0)
is_PR = (os.getenv("TRAVIS_PULL_REQUEST") != "false")
last_commit = ""
if is_PR:
print('I am testing a pull request')
last_commit = subprocess.check_output(
"git rev-list -n 1 FETCH_HEAD^2", shell=True).rstrip('\n')
# https://stackoverflow.com/questions/25071579/list-all-files-changed-in-a-pull-request-in-git-github
changes = clean_output(
subprocess.check_output([
'bash', '-c',
'git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD master)'
]))
print("Determining what to run based on the following file changes: \n{!s}"
.format('\n'.join(changes.split('\n')[0:10])))
if len(changes.split('\n')) > 10:
print("Too many files to show.")
# COMMIT MESSAGES:
# Before any complicated diffing, check for forced runs from the commit message
# Use -2 because travis now inserts a merge commit as the last commit
last_commit_msg = subprocess.check_output(
["bash", "-c", "git log --format=%B -n 1 {!s}".format(last_commit)])
print("Parsing commit message for travis commands: {!s}"
.format(last_commit_msg))
test_dirs = []
run_tests = []
# Break the test env variable down into test directories
if os.getenv("TESTLANG"):
dir = "frameworks/" + os.getenv("TESTLANG") + "/"
test_dirs = map(lambda x: os.getenv("TESTLANG") + "/" + x,
filter(lambda x: os.path.isdir(dir + x), os.listdir(dir)))
elif os.getenv("TESTDIR"):
test_dirs = os.getenv("TESTDIR").split(' ')
# Forced full run
if re.search(r'\[ci run-all\]', last_commit_msg, re.M):
print("All tests have been forced to run from the commit message.")
run_tests = test_dirs
quit_diffing()
# Forced *fw-only* specific tests
if re.search(r'\[ci fw-only .+\]', last_commit_msg, re.M):
tests = re.findall(r'\[ci fw-only (.+)\]', last_commit_msg, re.M)[0].strip().split(' ')
for test in tests:
if test in test_dirs:
print("{!s} has been forced to run from the commit message.".format(test))
run_tests.append(test)
# quit here because we're using "only"
quit_diffing()
# Forced *lang-only* specific tests
if re.search(r'\[ci lang-only .+\]', last_commit_msg, re.M):
langs = re.findall(r'\[ci lang-only (.+)\]', last_commit_msg, re.M)[0].strip().split(' ')
for test in test_dirs:
for lang in langs:
if test.startswith(lang + "/"):
print("{!s} has been forced to run from the commit message.".format(test))
run_tests.append(test)
# quit here because we're using "only"
quit_diffing()
# Forced framework run in addition to other tests
if re.search(r'\[ci fw .+\]', last_commit_msg, re.M):
tests = re.findall(r'\[ci fw (.+)\]', last_commit_msg, re.M)[0].strip().split(' ')
for test in tests:
if test in test_dirs:
print("{!s} has been forced to run from the commit message.".format(test))
run_tests.append(test)
# Forced lang run in addition to other running tests
if re.search(r'\[ci lang .+\]', last_commit_msg, re.M):
langs = re.findall(r'\[ci lang (.+)\]', last_commit_msg, re.M)[0].strip().split(' ')
for test in test_dirs:
for lang in langs:
if test.startswith(lang + "/"):
print("{!s} has been forced to run from the commit message.".format(test))
run_tests.append(test)
# Ignore travis and docker directory changes
# Also for now, ignore the old linux setup folders, as we don't want to
# trigger a full run as we remove old fw_depends scripts. [ci run-all] will
# still work if it's needed.
if re.search(r'^toolset\/(?!(travis\/|continuous\/|scaffolding\/))|^tfb|^Dockerfile', changes, re.M) is not None:
print("Found changes to core toolset. Running all tests.")
run_tests = test_dirs
quit_diffing()
for test in test_dirs:
if fw_found_in_changes(test, changes):
print("Found changes that affect {!s}".format(test))
run_tests.append(test)
quit_diffing()
|
[] |
[] |
[
"TESTLANG",
"TRAVIS_PULL_REQUEST",
"TESTDIR"
] |
[]
|
["TESTLANG", "TRAVIS_PULL_REQUEST", "TESTDIR"]
|
python
| 3 | 0 | |
internal/db/pool.go
|
package db
import (
"context"
"fmt"
"os"
"github.com/jackc/pgx/v4/pgxpool"
)
var pool *pgxpool.Pool
func Init(ctx context.Context) (func(), error) {
err := initPool(ctx, os.Getenv("DATABASE_URL"))
if err != nil {
return nil, err
}
fn := func() {
pool.Close()
}
return fn, nil
}
func initPool(ctx context.Context, url string) error {
p, err := pgxpool.Connect(ctx, url)
if err != nil {
return fmt.Errorf("could not connect to database: %w", err)
}
pool = p
return nil
}
|
[
"\"DATABASE_URL\""
] |
[] |
[
"DATABASE_URL"
] |
[]
|
["DATABASE_URL"]
|
go
| 1 | 0 | |
examples/get_feature/main.go
|
package main
import (
"context"
"fmt"
"log"
"os"
"github.com/grokify/mogo/config"
"github.com/grokify/mogo/fmt/fmtutil"
"github.com/grokify/mogo/io/ioutilmore"
"github.com/jessevdk/go-flags"
"github.com/grokify/go-aha/ahautil"
)
type Options struct {
EnvFile string `short:"e" long:"env" description:"Env filepath"`
FeatureId string `short:"f" long:"feature" description:"Feature" required:"true"`
}
func main() {
opts := Options{}
_, err := flags.Parse(&opts)
if err != nil {
log.Fatal(err)
}
err = config.LoadDotEnvSkipEmpty(opts.EnvFile, os.Getenv("ENV_PATH"))
if err != nil {
log.Fatal(err)
}
fmt.Println(os.Getenv("AHA_ACCOUNT"))
fmt.Println(os.Getenv("AHA_API_KEY"))
apis := ahautil.NewClientAPIs(os.Getenv("AHA_ACCOUNT"), os.Getenv("AHA_API_KEY"))
info, resp, err := apis.APIClient.FeaturesApi.GetFeature(
context.Background(),
opts.FeatureId)
if err != nil {
log.Fatal(err)
}
if resp.StatusCode >= 300 {
log.Fatal(fmt.Sprintf("Status Code [%v]\n", resp.StatusCode))
}
fmtutil.PrintJSON(info)
fmt.Println("DONE")
}
func WriteFile(fileName string, data interface{}) {
err := ioutilmore.WriteFileJSON(fileName, data, 0644, "", " ")
if err != nil {
log.Fatal(err)
}
fmt.Printf("WROTE %v\n", fileName)
}
|
[
"\"ENV_PATH\"",
"\"AHA_ACCOUNT\"",
"\"AHA_API_KEY\"",
"\"AHA_ACCOUNT\"",
"\"AHA_API_KEY\""
] |
[] |
[
"ENV_PATH",
"AHA_API_KEY",
"AHA_ACCOUNT"
] |
[]
|
["ENV_PATH", "AHA_API_KEY", "AHA_ACCOUNT"]
|
go
| 3 | 0 | |
cli-infra/httputils/httputils.go
|
package httputils
import (
"fmt"
"io"
"net/http"
"os"
)
var (
E2EApiToken = os.Getenv("E2E_API_TOKEN")
E2EApiKey = os.Getenv("E2E_API_KEY")
E2EApiBaseUrl = os.Getenv("E2E_API_BASE_URL")
)
// ExecuteHttpRequest executes an HTTP request and returns the response bytes to the caller. It is then up to the caller
// to decide what to do with those bytes.
func ExecuteHttpRequest(httpMethod string, apiEndpoint string, queryParam map[string]string, reqBody io.Reader) ([]byte, error) {
fullUrl := fmt.Sprintf("%s/%s?apikey=%s", E2EApiBaseUrl, apiEndpoint, E2EApiKey)
queryString := ""
for k, v := range queryParam {
queryString += fmt.Sprintf("&%s=%s", k, v)
}
fullUrl = fullUrl + queryString
newRequest, err := http.NewRequest(httpMethod, fullUrl, reqBody)
if err != nil {
fmt.Printf("An error occured while trying to process this %s request to %s. Details: %+v", httpMethod, fullUrl, err)
return nil, err
}
newRequest.Header.Add("Authorization", fmt.Sprintf("Bearer %s", E2EApiToken))
if reqBody != nil {
newRequest.Header.Add("Content-Type", "application/json")
}
client := http.Client{}
res, err := client.Do(newRequest)
if err != nil || res.StatusCode >= 400 {
errMsg := fmt.Sprintf("an error occured while trying to execute an HTTP %s request to %s/%s, status code: %s.", httpMethod, E2EApiBaseUrl, apiEndpoint, res.Status)
if err != nil {
errMsg = errMsg + fmt.Sprintf("Details: %+v", err)
}
return nil, fmt.Errorf(errMsg)
}
var resBytes []byte
defer res.Body.Close()
resBytes, err = io.ReadAll(res.Body)
if err != nil {
fmt.Printf("An error occured while reading response body. %+v", err)
return nil, err
}
return resBytes, nil
}
|
[
"\"E2E_API_TOKEN\"",
"\"E2E_API_KEY\"",
"\"E2E_API_BASE_URL\""
] |
[] |
[
"E2E_API_BASE_URL",
"E2E_API_TOKEN",
"E2E_API_KEY"
] |
[]
|
["E2E_API_BASE_URL", "E2E_API_TOKEN", "E2E_API_KEY"]
|
go
| 3 | 0 | |
src/jeffchenseo/settings.py
|
"""
Django settings for jeffchenseo project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
ROOT_DIR = environ.Path(__file__) - 2
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Setting up environment variables
env = environ.Env()
environ.Env.read_env(env_file=ROOT_DIR('.env'))
# .env file, should load only in development environment
READ_DOT_ENV_FILE = env.bool('DJANGO_READ_DOT_ENV_FILE', default=False)
if READ_DOT_ENV_FILE:
# Operating system environment variables take precedence over variables defined
# in the .env file; values from the .env file are only used when the variable is
# not already set in the environment.
env_file = str(ROOT_DIR.path('.env'))
print('Loading : {}'.format(env_file))
env.read_env(env_file)
print('The .env file has been loaded. See base.py for more information')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['jeffchenseo-dev.us-west-2.elasticbeanstalk.com',
'127.0.0.1',
'localhost',
'.jeffchenseo.com']
# Application definition
DJANGO_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps', # Add in ability to add sitemaps
'django.contrib.sites' # Sites framework for django
]
THIRD_PARTY_APPS = [
'storages', # App for S3
'bootstrap4', # Bootstrap plugin
'debug_toolbar', # Debugging tool
'django_elasticsearch_dsl', # Elasticsearch plugin
'crispy_forms', # Makes forms look cool
'send_email', # Sends emails
'raven.contrib.django.raven_compat', # Error reporting
'export_action' # Export data to CSVs
]
LOCAL_APPS = [
'jeffchenseo',
'votes'
]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'htmlmin.middleware.HtmlMinifyMiddleware',
'htmlmin.middleware.MarkRequestMiddleware',
]
ROOT_URLCONF = 'jeffchenseo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'jeffchenseo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if 'RDS_DB_NAME' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'jeffchenseo',
'USER': 'jeffchen',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Pacific'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images) All Custom Below
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, "..", "www", "static")
MEDIA_ROOT = os.path.join(BASE_DIR, "..", "www", "media")
# AWS Connections
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = 'elasticbeanstalk-us-west-2-928248191884'
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
AWS_LOCATION = 'static'
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'jeffchenseo.storage_backends.StaticStorage'
STATIC_URL = '/static/'
DEFAULT_FILE_STORAGE = 'jeffchenseo.storage_backends.MediaStorage'
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = '/media/'
AWS_HEADERS = { # see http://developer.yahoo.com/performance/rules.html#expires
'Expires': 'Thu, 31 Dec 2099 20:00:00 GMT',
'Cache-Control': 'max-age=94608000',
}
SITE_ID = 1
# Debug Toolbar Settings
INTERNAL_IPS = [
'127.0.0.1',
]
# Elasticsearch
ELASTICSEARCH_DSL = {
'default': {
'hosts': 'https://search-jeffchenseo-fsfpr6cxqth4fcp3ujkxk3ljyq.us-west-2.es.amazonaws.com'
},
}
# Send Email
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('MAILGUN_ACCESS_KEY')
MAILGUN_SERVER_NAME = 'email.jeffchenseo.com'
# Crispy Forms
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# Sentry
RAVEN_CONFIG = {
'dsn': 'https://9f31b4c4a93c41adbe1f600eae839740:[email protected]/238311',
}
# This is your actual Project ID and Write Key
import keen
keen.project_id = env('KEEN_PROJECT_ID')
keen.write_key = env('KEEN_WRITE_KEY')
# AWS Signing
import sys, hashlib, hmac
# Key derivation functions. See:
# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
def sign(key, msg):
return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest()
def getSignatureKey(key, date_stamp, regionName, serviceName):
kDate = sign(('AWS4' + key).encode('utf-8'), date_stamp)
kRegion = sign(kDate, regionName)
kService = sign(kRegion, serviceName)
kSigning = sign(kService, 'aws4_request')
return kSigning
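# Illustrative sketch (not part of the original settings): deriving a SigV4
# signing key with getSignatureKey for a request signed on 2019-01-01 against
# S3 in us-west-2. The date, region, and service values are assumptions.
#   signing_key = getSignatureKey(AWS_SECRET_ACCESS_KEY, '20190101', 'us-west-2', 's3')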
# Import Local Development Settings
try:
from jeffchenseo.local_settings import *
except ImportError:
pass
|
[] |
[] |
[
"RDS_PASSWORD",
"RDS_DB_NAME",
"RDS_USERNAME",
"RDS_PORT",
"RDS_HOSTNAME"
] |
[]
|
["RDS_PASSWORD", "RDS_DB_NAME", "RDS_USERNAME", "RDS_PORT", "RDS_HOSTNAME"]
|
python
| 5 | 0 | |
I-ViT/train_scnn_pos.py
|
import argparse
import os
from tqdm import tqdm
import time
import datetime
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import tensorboardX
import numpy as np
from load_dataset import load_prcc_dataset_scnn_pos
import vit
import my_transform as T
from vit.utils import (
adjust_learning_rate,
cross_entropy_with_label_smoothing,
accuracy,
save_model,
load_model,
resume_model,
)
best_val_acc = 0.0
best_test_acc = 0.0
def parse_args():
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument(
'--dataset', default='cifar', help='Dataset names.'
)
parser.add_argument('--local_rank', type=int, default = -1)
parser.add_argument('--num_heads', type=int, default = 12)
parser.add_argument('--hidden_dim', type=int, default = 128)
parser.add_argument('--num_layers', type=int, default = 12)
parser.add_argument('--img_size', type=int, default = 2000)
parser.add_argument('--nuclues_size', type=int, default = 64)
parser.add_argument('--crop_path', default = '../dataset/crops', help='path to crop_nuclues')
parser.add_argument(
'--num_classes',
type=int,
default=2,
help='The number of classes in the dataset.',
)
parser.add_argument(
'--train_dirs',
default='../dataset/trainset.txt',
help='path to training data',
)
parser.add_argument(
'--val_dirs',
default='../dataset/validset.txt',
help='path to validation data',
)
parser.add_argument(
'--test_dirs',
default='../dataset/testset.txt',
help='path to test data',
)
parser.add_argument(
'--num_nuclei',
type=int,
default=500,
help='The max number of nuclei to use.',
)
parser.add_argument(
'--batch_size',
type=int,
default=1,
help='input batch size for training',
)
parser.add_argument(
'--val_batch_size',
type=int,
default=1,
help='input batch size for val',
)
parser.add_argument(
'--num_workers',
type=int,
default=4,
help='number of worker processes for data loading',
)
parser.add_argument(
"--color_jitter",
action='store_true',
default=False,
help="To apply color augmentation or not.",
)
parser.add_argument('--model', default='VitsCNN_pos', help='Model names.')
parser.add_argument(
'--epochs', type=int, default=100, help='number of epochs to train'
)
parser.add_argument(
'--test_epochs',
type=int,
default=2,
help='interval in epochs between evaluations on the val/test sets',
)
parser.add_argument(
'--save_epochs',
type=int,
default=1,
help='interval in epochs between checkpoint saves',
)
parser.add_argument('--optim', default='sgd', help='Optimizer name.')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate')
parser.add_argument(
'--warmup_epochs',
type=float,
default=5,
help='number of warmup epochs',
)
parser.add_argument(
'--momentum', type=float, default=0.9, help='SGD momentum'
)
parser.add_argument(
'--weight_decay', type=float, default=0.00008, help='weight decay'
)
parser.add_argument(
"--label_smoothing",
action='store_true',
default=False,
help="To use label smoothing or not.",
)
parser.add_argument(
'--nesterov',
action='store_true',
default=False,
help='To use nesterov or not.',
)
parser.add_argument(
'--work_dirs', default='./work_dirs', help='path to work dirs'
)
parser.add_argument(
'--name', default='scnn_newdata_N2000_h64_L1_head2', help='the name of work_dir'
)
parser.add_argument(
'--no_cuda',
action='store_true',
default=False,
help='disables CUDA training',
)
parser.add_argument(
'--lr_scheduler',
type=str,
default="linear",
choices=["linear", "cosine"],
help='how to schedule learning rate',
)
parser.add_argument(
'--test_model', type=int, default=-1, help="Test model's epochs"
)
parser.add_argument(
'--resume', action='store_true', default=False, help='Resume training'
)
parser.add_argument(
'--gpu_id', default='4', type=str, help='id(s) for CUDA_VISIBLE_DEVICES'
)
args = parser.parse_args()
if not os.path.exists(args.work_dirs):
os.system('mkdir -p {}'.format(args.work_dirs))
args.log_dir = os.path.join(args.work_dirs, 'log')
if not os.path.exists(args.log_dir):
os.system('mkdir -p {}'.format(args.log_dir))
args.log_dir = os.path.join(args.log_dir, args.name)
args.work_dirs = os.path.join(args.work_dirs, args.name)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
return args
def get_transform(train):
transforms = []
if train:
transforms.append(T.RandomHorizontalFlip(0.5))
transforms.append(T.RandomRotation((-5,5)))
transforms.append(T.ToTensor2())
transforms.append(T.Normalize(mean=[0.681, 0.486, 0.630], std=[0.213, 0.256, 0.196]))
return T.Compose(transforms)
def build_dataset_scnn(train_files, val_files, test_files, N, nuclues_size,crop_path):
train_data = load_prcc_dataset_scnn_pos(train_files, transform = get_transform, train = True, N = N, nuclues_size = nuclues_size, crop_path = crop_path)
train_loader = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.num_workers, pin_memory=True)
val_data = load_prcc_dataset_scnn_pos(val_files, transform = get_transform, train = False, N = N, nuclues_size = nuclues_size, crop_path = crop_path)
val_loader = torch.utils.data.DataLoader(
val_data, batch_size=args.val_batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
test_data = load_prcc_dataset_scnn_pos(test_files, transform = get_transform, train = False, N = N, nuclues_size = nuclues_size, crop_path = crop_path)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=args.val_batch_size, shuffle=False,
num_workers=args.num_workers, pin_memory=True)
return train_loader, val_loader, test_loader
def val(model, val_loader, criterion, epoch, args, log_writer=False):
global best_val_acc
model.eval()
val_loss = vit.Metric('val_loss')
val_accuracy = vit.Metric('val_accuracy')
if epoch == -1:
epoch = args.epochs - 1
with tqdm(
total=len(val_loader), desc='Validate Epoch #{}'.format(epoch + 1)
) as t:
with torch.no_grad():
for data, cls,pos, target in val_loader:
if args.cuda:
data, cls,pos, target = data.cuda(), cls.cuda(),pos.cuda(), target.cuda()
output = model(data, cls,pos)
val_loss.update(criterion(output, target))
val_accuracy.update(accuracy(output, target))
t.update(1)
print(
"\nloss: {}, accuracy: {:.2f}, best acc: {:.2f}\n".format(
val_loss.avg.item(),
100.0 * val_accuracy.avg.item(),
100.0 * max(best_val_acc, val_accuracy.avg),
)
)
if val_accuracy.avg > best_val_acc and log_writer:
save_model(model, None, -1, args, None)
if log_writer:
log_writer.add_scalar('val/loss', val_loss.avg, epoch)
log_writer.add_scalar('val/accuracy', val_accuracy.avg, epoch)
best_val_acc = max(best_val_acc, val_accuracy.avg)
log_writer.add_scalar('val/best_acc', best_val_acc, epoch)
def test(model, test_loader, criterion, epoch, args, log_writer=False):
global best_test_acc
model.eval()
val_loss = vit.Metric('test_loss')
val_accuracy = vit.Metric('test_accuracy')
if epoch == -1:
epoch = args.epochs - 1
with tqdm(
total=len(test_loader), desc='Test Epoch #{}'.format(epoch + 1)
) as t:
with torch.no_grad():
for data, cls,pos, target in test_loader:
if args.cuda:
data, cls,pos, target = data.cuda(), cls.cuda(),pos.cuda(), target.cuda()
output = model(data, cls,pos)
val_loss.update(criterion(output, target))
val_accuracy.update(accuracy(output, target))
t.update(1)
print(
"\nloss: {}, accuracy: {:.2f}, best acc: {:.2f}\n".format(
val_loss.avg.item(),
100.0 * val_accuracy.avg.item(),
100.0 * max(best_test_acc, val_accuracy.avg),
)
)
# if val_accuracy.avg > best_test_acc and log_writer:
# save_model(model, None, -1, args, None)
if log_writer:
log_writer.add_scalar('test/loss', val_loss.avg, epoch)
log_writer.add_scalar('test/accuracy', val_accuracy.avg, epoch)
best_test_acc = max(best_test_acc, val_accuracy.avg)
log_writer.add_scalar('test/best_acc', best_test_acc, epoch)
def train(model, train_loader, optimizer, criterion, epoch, log_writer, args):
train_loss = vit.Metric('train_loss')
train_accuracy = vit.Metric('train_accuracy')
model.train()
N = len(train_loader)
start_time = time.time()
for batch_idx, (data, cls, pos, target) in enumerate(train_loader):
lr_cur = adjust_learning_rate(
args, optimizer, epoch, batch_idx, N, type=args.lr_scheduler
)
if args.cuda:
data, cls, pos, target = data.cuda(), cls.cuda(),pos.cuda(), target.cuda()
optimizer.zero_grad()
output = model(data, cls, pos)
loss = criterion(output, target)
loss.backward()
optimizer.step()
train_loss.update(loss)
train_accuracy.update(accuracy(output, target))
if (batch_idx + 1) % 20 == 0:
memory = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
used_time = time.time() - start_time
eta = used_time / (batch_idx + 1) * (N - batch_idx)
eta = str(datetime.timedelta(seconds=int(eta)))
training_state = ' '.join(
[
'Epoch: {}',
'[{} / {}]',
'eta: {}',
'lr: {:.9f}',
'max_mem: {:.0f}',
'loss: {:.3f}',
'accuracy: {:.3f}',
]
)
training_state = training_state.format(
epoch + 1,
batch_idx + 1,
N,
eta,
lr_cur,
memory,
train_loss.avg.item(),
100.0 * train_accuracy.avg.item(),
)
print(training_state)
if log_writer:
log_writer.add_scalar('train/loss', train_loss.avg, epoch)
log_writer.add_scalar('train/accuracy', train_accuracy.avg, epoch)
def test_net(args):
print("Init...")
_, _, val_loader, _ = vit.build_dataloader(args)
model = vit.build_model(args)
load_model(model, args)
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
model.cuda()
if args.label_smoothing:
criterion = cross_entropy_with_label_smoothing
else:
criterion = nn.CrossEntropyLoss()
print("Start testing...")
val(model, val_loader, criterion, args.test_model, args)
def train_net(args):
print("Init...")
log_writer = tensorboardX.SummaryWriter(args.log_dir)
#train_loader, _, val_loader, _ = vit.build_dataloader(args)
print('Number of Nuclei for each Patch={}'.format(args.num_nuclei))
print('Training Set: %s. \n Valid Set: %s' % (args.train_dirs, args.val_dirs))
train_loader, val_loader, test_loader= build_dataset_scnn(args.train_dirs, args.val_dirs, args.test_dirs, args.num_nuclei,args.nuclues_size,args.crop_path)
model = vit.build_model(args)
print('Parameters:', sum([np.prod(p.size()) for p in model.parameters()]))
model = torch.nn.DataParallel(model)
optimizer = vit.build_optimizer(args, model)
best = 0.0
epoch = 0
if args.resume:
epoch = resume_model(model, optimizer, args)
args.cuda = not args.no_cuda and torch.cuda.is_available()
cudnn.benchmark = True
if args.label_smoothing:
criterion = cross_entropy_with_label_smoothing
else:
criterion = nn.CrossEntropyLoss()
if args.cuda:
model.cuda()
print("Start training...")
while epoch < args.epochs:
train(
model, train_loader, optimizer, criterion, epoch, log_writer, args
)
if (epoch + 1) % args.test_epochs == 0:
val(model, val_loader, criterion, epoch, args, log_writer)
test(model, test_loader, criterion, epoch, args, log_writer)
if (epoch + 1) % args.save_epochs == 0 :
save_model(model, optimizer, epoch, args, 0)
epoch += 1
save_model(model, optimizer, epoch - 1, args, 0)
if __name__ == "__main__":
args = parse_args()
print(args)
train_net(args)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
backend/tst_daniel_ls_dev_23746/settings.py
|
"""
Django settings for tst_daniel_ls_dev_23746 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
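# The secret named by SETTINGS_NAME is expected to contain .env-style lines,
# e.g. (illustrative values only, not real configuration):
#   SECRET_KEY=changeme
#   DATABASE_URL=postgres://user:pass@host:5432/dbname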
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tst_daniel_ls_dev_23746.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tst_daniel_ls_dev_23746.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
|
[] |
[] |
[
"SETTINGS_NAME"
] |
[]
|
["SETTINGS_NAME"]
|
python
| 1 | 0 | |
research/cv/IRN/src/data/__init__.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Helpers for creating SRDataset.
"""
import os
from mindspore.communication.management import init, get_rank, get_group_size
import mindspore.dataset as ds
from .dataset import SRDataset
class Sampler():
"""Sampler distributes samples to workers evenly."""
def __init__(self, num_data, rank_id, rank_size):
self._num_data = num_data
self._rank_id = rank_id
self._rank_size = rank_size
self._samples = int(num_data / rank_size)
self._total_samples = self._samples * rank_size
def __iter__(self):
begin = self._rank_id * self._samples
end = begin + self._samples
indices = range(begin, end)
return iter(indices)
def __len__(self):
return self._samples
def _get_rank_info():
"""Get rank size and rank id."""
rank_size = int(os.environ.get("RANK_SIZE", 1))
if rank_size > 1:
rank_size = get_group_size()
rank_id = get_rank()
else:
rank_size = 1
rank_id = 0
return rank_size, rank_id
def create_dataset(dataset_path, scale, do_train=True, repeat_num=1,
batch_size=8, target="Ascend", distribute=False):
"""
Create an SRDataset for training or testing.
Args:
dataset_path (string): Path to the dataset.
scale (int): downscaling ratio.
do_train (bool): Whether dataset is used for training or testing.
repeat_num (int): Repeat times of the dataset.
batch_size (int): Batch size of the dataset.
target (str): Device target.
distribute (bool): For distributed training or not.
Returns:
dataset
"""
paths = []
for p, _, fs in sorted(os.walk(dataset_path)):
for f in sorted(fs):
if f.endswith(".png"):
paths.append(os.path.join(p, f))
assert paths, "no png images found"
sr_ds = SRDataset(paths, scale=scale, training=do_train)
if target == "Ascend":
rank_size, rank_id = _get_rank_info()
else:
if distribute:
init()
rank_id = get_rank()
rank_size = get_group_size()
else:
rank_size = 1
num_shards = None if rank_size == 1 else rank_size
shard_id = None if rank_size == 1 else rank_id
if do_train:
dataset = ds.GeneratorDataset(
sr_ds, ["downscaled", "original"],
num_parallel_workers=1, shuffle=True,
sampler=Sampler(len(sr_ds), rank_id, rank_size),
num_shards=num_shards, shard_id=shard_id,
)
else:
dataset = ds.GeneratorDataset(
sr_ds, ["downscaled", "original"],
num_parallel_workers=1, shuffle=False
)
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.repeat(repeat_num)
return dataset
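# Example usage (illustrative sketch; the dataset path below is an assumption):
#   ds_train = create_dataset("/path/to/DIV2K/train", scale=4, do_train=True, batch_size=8)
#   for batch in ds_train.create_dict_iterator():
#       downscaled, original = batch["downscaled"], batch["original"]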
|
[] |
[] |
[
"RANK_SIZE"
] |
[]
|
["RANK_SIZE"]
|
python
| 1 | 0 | |
fairseq/data/iterators.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import numpy as np
import torch
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
start (int): starting iteration count. Note that this doesn't
actually advance the iterator.
total (int): override the iterator length returned by
``__len__``. This can be used to truncate *iterator*.
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, start=None, total=None):
self.iterable = iterable
self.itr = iter(self)
if start is None:
self.n = getattr(iterable, 'n', 0)
else:
self.n = start
if total is None:
self.total = self.n + len(iterable)
else:
self.total = total
def __len__(self):
return self.total
def __iter__(self):
for x in self.iterable:
if self.n >= self.total:
raise RuntimeError(
'Mismatch between actual and expected iterable length. '
'Please report this to the fairseq developers.'
)
self.n += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.n < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
# Only take after what we have already consumed (i.e. after restarting
# from checkpoint mid epoch, we have to subtract self.n which is the
# starting point)
#
# This to maintain the invariant self.total = self.n + len(iterable),
# before calling __next__ or __iter__
propagated_take = max(n - self.n, 0)
if hasattr(self.iterable, "take"):
self.iterable.take(propagated_take)
else:
self.iterable = itertools.islice(self.iterable, propagated_take)
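# Illustrative example (not part of the original module): CountingIterator wraps
# any sized iterable and tracks how many elements have been consumed.
#   itr = CountingIterator(range(10))
#   next(itr); next(itr)
#   assert itr.n == 2 and itr.has_next()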
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
@property
def next_epoch_idx(self):
raise NotImplementedError
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
raise NotImplementedError
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
raise NotImplementedError
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
raise NotImplementedError
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
raise NotImplementedError
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
raise NotImplementedError
class StreamingEpochBatchIterator(EpochBatchIterating):
def __init__(
self, dataset, epoch=1, num_shards=1, shard_id=0,
):
assert isinstance(dataset, torch.utils.data.IterableDataset)
self.dataset = dataset
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self._current_epoch_iterator = None
self.num_shards = num_shards
self.shard_id = shard_id
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._current_epoch_iterator is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
self._current_epoch_iterator = CountingIterator(
iterable=ShardedIterator(
iterable=self.dataset,
num_shards=self.num_shards,
shard_id=self.shard_id,
),
)
return self._current_epoch_iterator
def end_of_epoch(self) -> bool:
return not self._current_epoch_iterator.has_next()
@property
def iterations_in_epoch(self) -> int:
if self._current_epoch_iterator is not None:
return self._current_epoch_iterator.n
return 0
def state_dict(self):
return {
'epoch': self.epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
class EpochBatchIterator(EpochBatchIterating):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
indices, or a callable to create such an iterator (~torch.utils.data.Sampler).
A callable batch_sampler will be called for each epoch to enable per epoch dynamic
batch iterators defined by this callable batch_sampler.
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
queue. Helps speeding up dataloading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
"""
def __init__(
self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0,
num_workers=0, epoch=1, buffer_size=0, timeout=0,
):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.batch_sampler = batch_sampler
self._frozen_batches = tuple(batch_sampler) if not callable(batch_sampler) else None
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.shuffle = True
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)
@property
def frozen_batches(self):
if self._frozen_batches is None:
self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
return self._frozen_batches
def __len__(self):
return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))
@property
def n(self):
return self.iterations_in_epoch
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
if callable(self.batch_sampler):
# reset _frozen_batches to refresh the next epoch
self._frozen_batches = None
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus,
)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
'version': 2,
'epoch': epoch,
'iterations_in_epoch': iter_in_epoch,
'shuffle': self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
version = state_dict.get('version', 1)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get('shuffle', True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
if version == 1:
# legacy behavior: we finished the epoch, increment epoch counter
self.epoch += 1
else:
raise RuntimeError(
'Cannot resume training due to dataloader mismatch, please '
'report this to the fairseq developers. You can relaunch '
'training with `--reset-dataloader` and it should work.'
)
else:
self._next_epoch_itr = None
def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
return batches
if self._supports_prefetch:
batches = self.frozen_batches
if shuffle and not fix_batches_to_gpus:
batches = shuffle_batches(list(batches), self.seed + epoch)
batches = list(ShardedIterator(
batches, self.num_shards, self.shard_id, fill_value=[]
))
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
else:
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
else:
batches = self.frozen_batches
batches = list(ShardedIterator(
batches, self.num_shards, self.shard_id, fill_value=[]
))
if offset > 0 and offset >= len(batches):
return None
if self.num_workers > 0:
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
itr = torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=batches[offset:],
num_workers=self.num_workers,
timeout=self.timeout,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0:
itr = BufferedIterator(self.buffer_size, itr)
# Wrap with CountingIterator
itr = CountingIterator(itr, start=offset)
return itr
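# Illustrative usage sketch (the dataset, collater, and batch list below are
# assumptions, not defined in this module):
#   epoch_itr = EpochBatchIterator(dataset, dataset.collater, batch_sampler=batches)
#   for _ in range(num_epochs):
#       for sample in epoch_itr.next_epoch_itr(shuffle=True):
#           ...  # train on sample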
class GroupedIterator(CountingIterator):
"""Wrapper around an iterable that returns groups (chunks) of items.
Args:
iterable (iterable): iterable to wrap
chunk_size (int): size of each chunk
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, chunk_size):
itr = _chunk_iterator(iterable, chunk_size)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(chunk_size))),
total=int(math.ceil(len(iterable) / float(chunk_size))),
)
self.chunk_size = chunk_size
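# Illustrative example: grouping a counting iterator into chunks of 2.
#   grouped = GroupedIterator(CountingIterator(range(5)), chunk_size=2)
#   list(grouped)  # -> [[0, 1], [2, 3], [4]]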
def _chunk_iterator(itr, chunk_size):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if len(chunk) > 0:
yield chunk
class ShardedIterator(CountingIterator):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
shard_id (int): which shard to iterator over
fill_value (Any, optional): padding value when the iterable doesn't
evenly divide *num_shards* (default: None).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError('shard_id must be between 0 and num_shards')
sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
itr = map(
operator.itemgetter(1),
itertools.zip_longest(
range(sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
),
)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),
total=sharded_len,
)
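# Illustrative example: shard 1 of 2 over range(5); the shorter shard is padded
# with fill_value so both shards have the same length.
#   list(ShardedIterator(range(5), num_shards=2, shard_id=1, fill_value=-1))
#   # -> [1, 3, -1]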
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
try:
for item in self._source:
self._queue.put(item)
# Stop if we reached the maximum length
self.count += 1
if self._max_len is not None and self.count >= self._max_len:
break
# Signal the consumer we are done.
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
class BufferedIterator(object):
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self._consumer = None
self.start_time = time.time()
self.warning_time = None
self.total = len(iterable)
def _create_consumer(self):
self._consumer = BackgroundConsumer(
self._queue,
self._iterable,
self.total,
)
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return self.total
def take(self, n):
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
if hasattr(self._iterable, "take"):
self._iterable.take(n)
else:
self._iterable = itertools.islice(self._iterable, n)
def __next__(self):
# Create consumer if not created yet
if self._consumer is None:
self._create_consumer()
# Notify the user if there is a data loading bottleneck
if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):
if time.time() - self.start_time > 5 * 60:
if self.warning_time is None or time.time() - self.warning_time > 15 * 60:
logger.debug(
"Data loading buffer is empty or nearly empty. This may "
"indicate a data loading bottleneck, and increasing the "
"number of workers (--num-workers) may help."
)
self.warning_time = time.time()
# Get next example
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if item is _sentinel:
raise StopIteration()
return item
|
[] |
[] |
[
"PYTHONWARNINGS"
] |
[]
|
["PYTHONWARNINGS"]
|
python
| 1 | 0 | |
server/fs_storage_test.go
|
//
// Copyright 2019 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
//
package storageservice
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"sort"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/schedviz/analysis/sched"
"github.com/google/schedviz/server/models"
"github.com/google/schedviz/tracedata/trace"
)
var colRequest = &models.CreateCollectionRequest{
Creator: "bob",
Owners: []string{"joe"},
Tags: []string{"test"},
Description: "test",
CreationTime: 1,
}
var (
ctx = context.Background()
)
// TODO(tracked) Consider using schedtestcommon.TestTrace1(t) here
// as a lighter-weight alternative.
func getTestTarFile(t *testing.T, name string) io.Reader {
t.Helper()
// Bazel stores test location in these environment variables
runFiles := path.Join(os.Getenv("TEST_SRCDIR"), os.Getenv("TEST_WORKSPACE"))
file, err := os.Open(path.Join(runFiles, "server", "testdata", name))
if err != nil {
t.Fatalf("error fetching test tar: %s", err)
}
return file
}
var fh = func(t *testing.T) io.Reader {
t.Helper()
return getTestTarFile(t, "test.tar.gz")
}
func createCollectionDir() (string, error) {
tmpDir, err := ioutil.TempDir("", "collections")
if err != nil {
return "", fmt.Errorf("failed to create temp directory: %s", err)
}
return tmpDir, err
}
func cleanup(t *testing.T, tmpDir string) {
t.Helper()
if err := os.RemoveAll(tmpDir); err != nil {
t.Fatal(err)
}
}
func createFSStorage(t *testing.T, path string, count int) StorageService {
ss, err := CreateFSStorage(path, count)
if err != nil {
t.Fatalf("Failed to create storage service: %s", err)
}
return ss
}
func TestFsStorage_UploadFile(t *testing.T) {
tmpDir, err := createCollectionDir()
if err != nil {
t.Fatal(err)
}
defer cleanup(t, tmpDir)
fsStorage := createFSStorage(t, tmpDir, 1)
tests := []struct {
file io.Reader
wantNumEvents int
wantStart trace.Timestamp
wantEnd trace.Timestamp
wantSystemTopology models.SystemTopology
}{
{
file: getTestTarFile(t, "test.tar.gz"),
wantNumEvents: 28922,
wantStart: 0,
wantEnd: 2009150555,
wantSystemTopology: models.SystemTopology{
LogicalCores: []models.LogicalCore{{
SocketID: 0,
DieID: 0,
ThreadID: 0,
NumaNodeID: 0,
CPUID: 0,
CoreID: 0,
}},
},
},
{
file: getTestTarFile(t, "test_no_metadata.tar.gz"),
wantNumEvents: 28922,
wantStart: 0,
wantEnd: 2009150555,
wantSystemTopology: models.SystemTopology{
LogicalCores: []models.LogicalCore{{
SocketID: 0,
DieID: 0,
ThreadID: 0,
NumaNodeID: 0,
CPUID: 0,
CoreID: 0,
}},
},
},
{
file: getTestTarFile(t, "ebpf_trace.tar.gz"),
wantNumEvents: 991,
wantStart: 0,
wantEnd: 12321353,
wantSystemTopology: models.SystemTopology{
LogicalCores: []models.LogicalCore{
{
SocketID: 0,
DieID: 0,
ThreadID: 2,
NumaNodeID: 0,
CPUID: 0,
CoreID: 0,
},
{
SocketID: 0,
DieID: 0,
ThreadID: 2,
NumaNodeID: 0,
CPUID: 1,
CoreID: 1,
},
{
SocketID: 0,
DieID: 0,
ThreadID: 1,
NumaNodeID: 0,
CPUID: 2,
CoreID: 0,
},
{
SocketID: 0,
DieID: 0,
ThreadID: 1,
NumaNodeID: 0,
CPUID: 3,
CoreID: 1,
},
},
},
},
}
for _, test := range tests {
collectionName, err := fsStorage.UploadFile(ctx, colRequest, test.file)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
cachedValue, err := fsStorage.GetCollection(ctx, collectionName)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetCollection: %s", err)
}
rawEvents, err := cachedValue.Collection.GetRawEvents()
if err != nil {
t.Fatalf("unexpected error thrown while checking number of raw events: %s", err)
}
if len(rawEvents) != test.wantNumEvents {
t.Errorf("wrong number of events in event set. got: %d, want: %d", len(rawEvents), test.wantNumEvents)
}
gotStart, gotEnd := cachedValue.Collection.Interval()
if gotStart != test.wantStart {
t.Errorf("wrong start time of collection. got: %d, want: %d", gotStart, test.wantStart)
}
if gotEnd != test.wantEnd {
t.Errorf("wrong end time of collection. got: %d, want: %d", gotEnd, test.wantEnd)
}
sort.Slice(cachedValue.SystemTopology.LogicalCores, func(i, j int) bool {
lc := cachedValue.SystemTopology.LogicalCores
return lc[i].CPUID < lc[j].CPUID
})
if diff := cmp.Diff(cachedValue.SystemTopology, test.wantSystemTopology); diff != "" {
t.Errorf("wrong system topology returned; Diff -want +got %v", diff)
}
}
}
func TestFsStorage_DeleteCollection(t *testing.T) {
collectionName := "coll_to_delete"
tmpDir, err := createCollectionDir()
if err != nil {
t.Fatal(err)
}
defer cleanup(t, tmpDir)
tmpFile := path.Join(tmpDir, collectionName+".binproto")
if err := ioutil.WriteFile(tmpFile, []byte{}, 0644); err != nil {
t.Fatalf("failed to create temp file: %s", err)
}
if _, err := os.Stat(tmpFile); os.IsNotExist(err) {
t.Fatalf("temp file was not created: %s", err)
}
fsStorage := createFSStorage(t, tmpDir, 1)
if err := fsStorage.DeleteCollection(ctx, "", collectionName); err != nil {
t.Fatalf("unexpected error thrown by FsStorage::DeleteCollection: %s", err)
}
if _, err := os.Stat(tmpFile); !os.IsNotExist(err) {
t.Fatalf("temp file was not deleted: %s", err)
}
}
func TestFsStorage_GetCollection(t *testing.T) {
tmpDir, err := createCollectionDir()
if err != nil {
t.Fatal(err)
}
defer cleanup(t, tmpDir)
fsStorage, ok := createFSStorage(t, tmpDir, 1).(*FsStorage)
if !ok {
t.Fatalf("CreateFSStorage returned wrong type")
}
origAddToCache := addToCache
cacheAdds := 0
addToCache = func(sb *storageBase, path string, collection *CachedCollection) {
cacheAdds++
origAddToCache(sb, path, collection)
}
defer func() { addToCache = origAddToCache }()
// Adds an entry to the cache.
firstCollectionName, err := fsStorage.UploadFile(ctx, colRequest, fh(t))
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
_, err = fsStorage.GetCollection(ctx, firstCollectionName)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetCollection: %s", err)
}
if cacheAdds != 1 {
t.Errorf("Expected 1 cache adds after first GetCollection, got %d", cacheAdds)
}
// Adds an entry to the cache, evicting the old one.
secondCollectionName, err := fsStorage.UploadFile(ctx, colRequest, fh(t))
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
// Should hit cache
_, err = fsStorage.GetCollection(ctx, secondCollectionName)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetCollection: %s", err)
}
if cacheAdds != 2 {
t.Errorf("Expected 2 cache adds after second GetCollection, got %d", cacheAdds)
}
// Adds an entry to the cache, evicting the old one.
_, err = fsStorage.GetCollection(ctx, firstCollectionName)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetCollection: %s", err)
}
if cacheAdds != 3 {
t.Errorf("Expected 3 cache adds after third GetCollection, got %d", cacheAdds)
}
// Should hit cache
_, err = fsStorage.GetCollection(ctx, firstCollectionName)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetCollection: %s", err)
}
if cacheAdds != 3 {
t.Errorf("Expected 3 cache adds after fourth GetCollection, got %d", cacheAdds)
}
}
func TestFsStorage_GetCollectionMetadata(t *testing.T) {
tmpDir, err := createCollectionDir()
if err != nil {
t.Fatal(err)
}
defer cleanup(t, tmpDir)
fsStorage := createFSStorage(t, tmpDir, 1)
collectionName, err := fsStorage.UploadFile(ctx, colRequest, fh(t))
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
got, err := fsStorage.GetCollectionMetadata(ctx, collectionName)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetCollectionMetadata: %s", err)
}
want := models.Metadata{
CollectionUniqueName: collectionName,
Creator: "bob",
Owners: []string{"joe"},
Tags: []string{"test"},
Description: "test",
CreationTime: 1,
FtraceEvents: []string{
"sched_migrate_task",
"sched_switch",
"sched_wakeup",
"sched_wakeup_new",
},
}
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("TestFsStorage_GetCollectionMetadata: Diff -want +got:\n%s", diff)
}
}
func TestFsStorage_EditCollection(t *testing.T) {
tmpDir, err := createCollectionDir()
if err != nil {
t.Fatal(err)
}
defer cleanup(t, tmpDir)
fsStorage := createFSStorage(t, tmpDir, 1)
collectionName, err := fsStorage.UploadFile(ctx, colRequest, fh(t))
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
req := &models.EditCollectionRequest{
CollectionName: collectionName,
Description: "abc",
AddOwners: []string{"john"},
AddTags: []string{"edited"},
RemoveTags: []string{"test"},
}
if err := fsStorage.EditCollection(ctx, "", req); err != nil {
t.Fatalf("unexpected error thrown by FsStorage::EditCollection: %s", err)
}
want := models.Metadata{
CollectionUniqueName: collectionName,
Creator: "bob",
Owners: []string{"joe", "john"},
Tags: []string{"edited"},
Description: "abc",
CreationTime: 1,
FtraceEvents: []string{
"sched_migrate_task",
"sched_switch",
"sched_wakeup",
"sched_wakeup_new",
},
}
got, err := fsStorage.GetCollectionMetadata(ctx, collectionName)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetCollectionMetadata: %s", err)
}
sort.Strings(got.Owners)
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("TestFsStorage_EditCollection: Diff -want +got:\n%s", diff)
}
}
func TestFsStorage_ListCollectionMetadata(t *testing.T) {
tmpDir, err := createCollectionDir()
if err != nil {
t.Fatal(err)
}
defer cleanup(t, tmpDir)
fsStorage := createFSStorage(t, tmpDir, 1)
firstCollectionName, err := fsStorage.UploadFile(ctx, colRequest, fh(t))
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
secondCollectionName, err := fsStorage.UploadFile(ctx, colRequest, fh(t))
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
files, err := ioutil.ReadDir(tmpDir)
if err != nil {
t.Fatalf("eror reading temp directory: %s", err)
}
if len(files) != 2 {
t.Errorf("wrong number of files written. want %d, got %d", 2, len(files))
}
gotFiles := make(map[string]struct{})
for _, file := range files {
gotFiles[file.Name()] = struct{}{}
}
wantFiles := map[string]struct{}{
firstCollectionName + ".binproto": {},
secondCollectionName + ".binproto": {},
}
if diff := cmp.Diff(wantFiles, gotFiles); diff != "" {
t.Fatalf("TestFsStorage_ListCollectionMetadata: Diff -want +got:\n%s", diff)
}
}
func TestFsStorage_GetCollectionParameters(t *testing.T) {
tmpDir, err := createCollectionDir()
if err != nil {
t.Fatal(err)
}
defer cleanup(t, tmpDir)
fsStorage := createFSStorage(t, tmpDir, 1)
collectionName, err := fsStorage.UploadFile(ctx, colRequest, fh(t))
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
got, err := fsStorage.GetCollectionParameters(ctx, collectionName)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetCollectionParameters: %s", err)
}
want := models.CollectionParametersResponse{
CollectionName: collectionName,
CPUs: []int64{0},
StartTimestampNs: 0,
EndTimestampNs: 2009150555,
FtraceEvents: []string{"sched_migrate_task", "sched_switch", "sched_wakeup", "sched_wakeup_new"},
}
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("TestFsStorage_GetCollectionParameters: Diff -want +got:\n%s", diff)
}
}
func TestFsStorage_GetFtraceEvents(t *testing.T) {
tmpDir, err := createCollectionDir()
if err != nil {
t.Fatal(err)
}
defer cleanup(t, tmpDir)
fsStorage := createFSStorage(t, tmpDir, 1)
collectionName, err := fsStorage.UploadFile(ctx, colRequest, fh(t))
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::UploadFile: %s", err)
}
req := &models.FtraceEventsRequest{
CollectionName: collectionName,
Cpus: []int64{0},
EventTypes: []string{"sched_switch"},
StartTimestamp: 0,
EndTimestamp: 22000,
}
got, err := fsStorage.GetFtraceEvents(ctx, req)
if err != nil {
t.Fatalf("unexpected error thrown by FsStorage::GetFtraceEvents: %s", err)
}
want := models.FtraceEventsResponse{
CollectionName: collectionName,
EventsByCPU: map[sched.CPUID][]*trace.Event{
0: {{
Index: 2,
Name: "sched_switch",
CPU: 0,
Timestamp: 21845,
Clipped: false,
TextProperties: map[string]string{
"prev_comm": "trace.sh",
"next_comm": "kauditd",
},
NumberProperties: map[string]int64{
"common_type": 0,
"common_flags": 1,
"common_pid": 17254,
"common_preempt_count": 0,
"prev_pid": 17254,
"prev_prio": 120,
"prev_state": 4096,
"next_pid": 430,
"next_prio": 120,
},
}},
},
}
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("TestFsStorage_GetFtraceEvents: Diff -want +got:\n%s", diff)
}
}
func TestConvertIntRangeToList(t *testing.T) {
tests := []struct {
in string
out []int64
err string
}{
{
in: "0-4,7,9,11-12",
out: []int64{0, 1, 2, 3, 4, 7, 9, 11, 12},
},
{
in: "0,1,2,3,4,5,6,7,8,9",
out: []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
},
{
in: "9,8,7,6,5,4,3,2,1,0",
out: []int64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9},
},
{
in: "abc,123",
err: "strconv.ParseInt: parsing \"abc\": invalid syntax",
},
{
in: "1-2-3,4",
err: "malformed range string. Ranges must be of the form int-int, or just a int. Got: 1-2-3",
},
{
in: "4-3,123",
err: "malformed range string. End of range must be after start. Got 4-3",
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("TestConvertIntRangeToList Case: %d", i), func(t *testing.T) {
got, err := convertIntRangeToList(test.in)
if test.err != "" && err == nil {
t.Errorf("Expected %q error, but no error was thrown", test.err)
} else if err != nil && err.Error() != test.err {
t.Errorf("Expected %q error, but got %q error instead", test.err, err)
} else if diff := cmp.Diff(got, test.out); diff != "" {
t.Errorf("convertIntRangeToList(%q): Diff -want +got: \n%s", test.in, diff)
}
})
}
}
|
[
"\"TEST_SRCDIR\"",
"\"TEST_WORKSPACE\""
] |
[] |
[
"TEST_WORKSPACE",
"TEST_SRCDIR"
] |
[]
|
["TEST_WORKSPACE", "TEST_SRCDIR"]
|
go
| 2 | 0 | |
sdk/servicebus/azure-servicebus/samples/sync_samples/mgmt_topic.py
|
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
Example to show managing topic entities under a ServiceBus Namespace, including
- Create a topic
- Get topic properties and runtime information
- Update a topic
- Delete a topic
- List topics under the given ServiceBus Namespace
"""
# pylint: disable=C0111
import os
import uuid
from azure.servicebus.management import ServiceBusAdministrationClient
CONNECTION_STR = os.environ['SERVICE_BUS_CONNECTION_STR']
TOPIC_NAME = "sb_mgmt_topic" + str(uuid.uuid4())
def create_topic(servicebus_mgmt_client):
print("-- Create Topic")
servicebus_mgmt_client.create_topic(TOPIC_NAME)
print("Topic {} is created.".format(TOPIC_NAME))
print("")
def delete_topic(servicebus_mgmt_client):
print("-- Delete Topic")
servicebus_mgmt_client.delete_topic(TOPIC_NAME)
print("Topic {} is deleted.".format(TOPIC_NAME))
print("")
def list_topics(servicebus_mgmt_client):
print("-- List Topics")
for topic_properties in servicebus_mgmt_client.list_topics():
print("Topic Name:", topic_properties.name)
print("")
def get_and_update_topic(servicebus_mgmt_client):
print("-- Get and Update Topic")
topic_properties = servicebus_mgmt_client.get_topic(TOPIC_NAME)
print("Topic Name:", topic_properties.name)
print("Please refer to TopicDescription for complete available settings.")
print("")
topic_properties.max_delivery_count = 5
servicebus_mgmt_client.update_topic(topic_properties)
def get_topic_runtime_properties(servicebus_mgmt_client):
print("-- Get Topic Runtime Properties")
get_topic_runtime_properties = servicebus_mgmt_client.get_topic_runtime_properties(TOPIC_NAME)
print("Topic Name:", get_topic_runtime_properties.name)
print("Please refer to TopicRuntimeProperties from complete available runtime properties.")
print("")
with ServiceBusAdministrationClient.from_connection_string(CONNECTION_STR) as servicebus_mgmt_client:
create_topic(servicebus_mgmt_client)
list_topics(servicebus_mgmt_client)
get_and_update_topic(servicebus_mgmt_client)
get_topic_runtime_properties(servicebus_mgmt_client)
delete_topic(servicebus_mgmt_client)
|
[] |
[] |
[
"SERVICE_BUS_CONNECTION_STR"
] |
[]
|
["SERVICE_BUS_CONNECTION_STR"]
|
python
| 1 | 0 | |
test_integration/geopm_test_integration.py
|
#!/usr/bin/env python
#
# Copyright (c) 2015, 2016, 2017, 2018, 2019, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import sys
import unittest
import subprocess
import time
import pandas
import collections
import socket
import shlex
import json
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test_integration import util
from test_integration import geopm_test_launcher
import geopmpy.io
import geopmpy.launcher
def create_frequency_map_policy(min_freq, max_freq, frequency_map, use_env=False):
"""Create a frequency map to be consumed by the frequency map agent.
Arguments:
min_freq: Floor frequency for the agent
max_freq: Ceiling frequency for the agent
frequency_map: Dictionary mapping region names to frequencies
use_env: If true, apply the map to an environment variable, and return
the policy needed when the environment variable is in use.
Otherwise, clear the environment variable and return the policy
needed when the variable is not in use.
"""
policy = {'frequency_min': min_freq, 'frequency_max': max_freq}
known_hashes = {
'dgemm': 0x00000000a74bbf35,
'all2all': 0x000000003ddc81bf,
'stream': 0x00000000d691da00,
'sleep': 0x00000000536c798f,
'MPI_Barrier': 0x000000007b561f45,
'model-init': 0x00000000644f9787,
'unmarked-region': 0x00000000725e8066 }
if use_env:
os.environ['GEOPM_FREQUENCY_MAP'] = json.dumps(frequency_map)
else:
if 'GEOPM_FREQUENCY_MAP' in os.environ:
os.environ.pop('GEOPM_FREQUENCY_MAP')
for i, (region_name, frequency) in enumerate(frequency_map.items()):
region_hash = known_hashes[region_name]
policy['HASH_{}'.format(i)] = int(region_hash)
policy['FREQ_{}'.format(i)] = frequency
return policy
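# Illustrative usage sketch (not part of the original test suite): with
# use_env=False the helper above turns a region-name -> frequency dict into
# the flat HASH_i/FREQ_i policy the frequency map agent consumes. The
# frequency values below are hypothetical placeholders; the hashes come from
# known_hashes.
#
#   example_policy = create_frequency_map_policy(
#       min_freq=1.0e9, max_freq=2.0e9,
#       frequency_map={'dgemm': 2.0e9, 'stream': 1.5e9})
#   # example_policy == {'frequency_min': 1.0e9, 'frequency_max': 2.0e9,
#   #                    'HASH_0': 0x00000000a74bbf35, 'FREQ_0': 2.0e9,
#   #                    'HASH_1': 0x00000000d691da00, 'FREQ_1': 1.5e9}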
class TestIntegration(unittest.TestCase):
def setUp(self):
self.longMessage = True
self._agent = 'power_governor'
self._options = {'power_budget': 150}
self._tmp_files = []
self._output = None
self._power_limit = geopm_test_launcher.geopmread("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0")
self._frequency = geopm_test_launcher.geopmread("MSR::PERF_CTL:FREQ board 0")
self._original_freq_map_env = os.environ.get('GEOPM_FREQUENCY_MAP')
def tearDown(self):
geopm_test_launcher.geopmwrite("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT board 0 " + str(self._power_limit))
geopm_test_launcher.geopmwrite("MSR::PERF_CTL:FREQ board 0 " + str(self._frequency))
if sys.exc_info() == (None, None, None) and os.getenv('GEOPM_KEEP_FILES') is None:
if self._output is not None:
self._output.remove_files()
for ff in self._tmp_files:
try:
os.remove(ff)
except OSError:
pass
if self._original_freq_map_env is None:
if 'GEOPM_FREQUENCY_MAP' in os.environ:
os.environ.pop('GEOPM_FREQUENCY_MAP')
else:
os.environ['GEOPM_FREQUENCY_MAP'] = self._original_freq_map_env
def assertNear(self, a, b, epsilon=0.05, msg=''):
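# The check below compares the fractional difference |a - b| / a (or / 1 when
# a is 0) against epsilon; e.g. with the default epsilon of 0.05,
# assertNear(100.0, 104.0) passes while assertNear(100.0, 106.0) fails.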
denom = a if a != 0 else 1
if abs((a - b) / denom) >= epsilon:
self.fail('The fractional difference between {a} and {b} is greater than {epsilon}. {msg}'.format(a=a, b=b, epsilon=epsilon, msg=msg))
def create_progress_df(self, df):
# Build a df with only the first region entry and the exit.
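# For every sample where REGION_PROGRESS reaches 1 (a region exit), two rows
# are kept: the first sample after the previous exit (the entry) and the exit
# sample itself, so each completed pass contributes one entry/exit pair.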
df = df.reset_index(drop=True)
last_index = 0
filtered_df = pandas.DataFrame()
row_list = []
progress_1s = df['REGION_PROGRESS'].loc[df['REGION_PROGRESS'] == 1]
for index, _ in progress_1s.iteritems():
row = df.loc[last_index:index].head(1)
row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]
row = df.loc[last_index:index].tail(1)
row_list += [row[['TIME', 'REGION_PROGRESS', 'REGION_RUNTIME']]]
last_index = index + 1 # Set the next starting index to be one past where we are
filtered_df = pandas.concat(row_list)
return filtered_df
def test_report_and_trace_generation(self):
name = 'test_report_and_trace_generation'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn)
self.assertNotEqual(0, len(report))
trace = self._output.get_trace_data(node_name=nn)
self.assertNotEqual(0, len(trace))
def test_no_report_and_trace_generation(self):
name = 'test_no_report_and_trace_generation'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
@unittest.skipUnless('mr-fusion' in socket.gethostname(), "This test only enabled on known working systems.")
def test_report_and_trace_generation_pthread(self):
name = 'test_report_and_trace_generation_pthread'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.set_pmpi_ctl('pthread')
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn)
self.assertNotEqual(0, len(report))
trace = self._output.get_trace_data(node_name=nn)
self.assertNotEqual(0, len(trace))
@unittest.skipUnless(geopm_test_launcher.detect_launcher() != "aprun",
'ALPS does not support multi-application launch on the same nodes.')
@util.skip_unless_batch()
def test_report_and_trace_generation_application(self):
name = 'test_report_and_trace_generation_application'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.set_pmpi_ctl('application')
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn)
self.assertNotEqual(0, len(report))
trace = self._output.get_trace_data(node_name=nn)
self.assertNotEqual(0, len(trace))
@unittest.skipUnless(geopm_test_launcher.detect_launcher() == "srun" and os.getenv('SLURM_NODELIST') is None,
'Requires non-sbatch SLURM session for alloc\'d and idle nodes.')
def test_report_generation_all_nodes(self):
name = 'test_report_generation_all_nodes'
report_path = name + '.report'
num_node = 1
num_rank = 1
delay = 1.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
time.sleep(5) # Wait a moment to finish cleaning-up from a previous test
idle_nodes = launcher.get_idle_nodes()
idle_nodes_copy = list(idle_nodes)
alloc_nodes = launcher.get_alloc_nodes()
launcher.write_log(name, 'Idle nodes : {nodes}'.format(nodes=idle_nodes))
launcher.write_log(name, 'Alloc\'d nodes : {nodes}'.format(nodes=alloc_nodes))
node_names = []
for nn in idle_nodes_copy:
launcher.set_node_list(nn.split()) # Hack to convert string to list
try:
launcher.run(name)
node_names += nn.split()
except subprocess.CalledProcessError as e:
if e.returncode == 1 and nn not in launcher.get_idle_nodes():
launcher.write_log(name, '{node} has disappeared from the idle list!'.format(node=nn))
idle_nodes.remove(nn)
else:
launcher.write_log(name, 'Return code = {code}'.format(code=e.returncode))
raise e
ao = geopmpy.io.AppOutput(report_path, do_cache=False)
sleep_data = ao.get_report_data(node_name=nn, region='sleep')
app_data = ao.get_app_total_data(node_name=nn)
self.assertNotEqual(0, len(sleep_data))
self.assertNear(delay, sleep_data['runtime'].item())
self.assertGreater(app_data['runtime'].item(), sleep_data['runtime'].item())
self.assertEqual(1, sleep_data['count'].item())
self.assertEqual(len(node_names), len(idle_nodes))
def test_runtime(self):
name = 'test_runtime'
report_path = name + '.report'
num_node = 1
num_rank = 5
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
report = self._output.get_report_data(node_name=nn, region='sleep')
app_total = self._output.get_app_total_data(node_name=nn)
self.assertNear(delay, report['runtime'].item())
self.assertGreater(app_total['runtime'].item(), report['runtime'].item())
def test_runtime_epoch(self):
name = 'test_runtime_epoch'
report_path = name + '.report'
num_node = 1
num_rank = 5
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', delay)
app_conf.append_region('spin', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
spin_data = self._output.get_report_data(node_name=nn, region='spin')
sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
total_runtime = sleep_data['runtime'].item() + spin_data['runtime'].item()
self.assertNear(total_runtime, epoch_data['runtime'].item())
def test_epoch_data_valid(self):
name = 'test_epoch_data_valid'
report_path = name + '.report'
num_node = 1
num_rank = 1
big_o = 1.0
loop_count = 10
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('spin-unmarked', big_o)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
report = geopmpy.io.RawReport(report_path)
node_names = report.host_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
regions = report.region_names(nn)
self.assertTrue('model-init' not in regions)
totals = report.raw_totals(nn)
unmarked = report.raw_region(nn, 'unmarked-region')
epoch = report.raw_epoch(nn)
# Epoch has valid data
self.assertGreater(epoch['runtime (sec)'], 0)
self.assertGreater(epoch['sync-runtime (sec)'], 0)
self.assertGreater(epoch['package-energy (joules)'], 0)
self.assertGreater(epoch['dram-energy (joules)'], 0)
self.assertGreater(epoch['power (watts)'], 0)
self.assertGreater(epoch['frequency (%)'], 0)
self.assertGreater(epoch['frequency (Hz)'], 0)
self.assertEqual(epoch['count'], loop_count)
# Runtime
self.assertTrue(totals['runtime (sec)'] > unmarked['runtime (sec)'] >= epoch['runtime (sec)'],
'''The total runtime is NOT > the unmarked runtime or the unmarked runtime is NOT
>= the Epoch runtime.''')
# Package Energy (joules)
self.assertTrue(totals['package-energy (joules)'] >
unmarked['package-energy (joules)'] >=
epoch['package-energy (joules)'],
'''The total package energy (joules) is NOT > the unmarked package energy (joules)
or the unmarked package energy (joules) is NOT >= the Epoch package
energy (joules).''')
# DRAM Energy
self.assertTrue(totals['dram-energy (joules)'] >
unmarked['dram-energy (joules)'] >=
epoch['dram-energy (joules)'],
'''The total dram energy is NOT > the unmarked dram energy or the unmarked
dram energy is NOT >= the Epoch dram energy.''')
# Sync-runtime
self.assertTrue(unmarked['sync-runtime (sec)'] >= epoch['sync-runtime (sec)'],
'''The sync-runtime for the unmarked region is NOT >= the Epoch sync-runtime.''')
def test_runtime_nested(self):
name = 'test_runtime_nested'
report_path = name + '.report'
num_node = 1
num_rank = 1
delay = 1.0
loop_count = 2
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('nested-progress', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
spin_data = self._output.get_report_data(node_name=nn, region='spin')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
app_totals = self._output.get_app_total_data(node_name=nn)
# The spin sections of this region sleep for 'delay' seconds twice per loop.
self.assertNear(2 * loop_count * delay, spin_data['runtime'].item())
self.assertNear(spin_data['runtime'].item(), epoch_data['runtime'].item(), epsilon=0.01)
self.assertGreater(app_totals['network-time'].item(), 0)
self.assertGreater(0.1, app_totals['network-time'].item())
self.assertEqual(loop_count, spin_data['count'].item())
def test_trace_runtimes(self):
name = 'test_trace_runtimes'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, region_barrier=True)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
trace = self._output.get_trace_data(node_name=nn)
app_totals = self._output.get_app_total_data(node_name=nn)
self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item(), msg='Application runtime failure, node_name={}.'.format(nn))
# Calculate runtime totals for each region in each trace, compare to report
tt = trace.reset_index(level='index') # move 'index' field from multiindex to columns
tt = tt.set_index(['REGION_HASH'], append=True) # add region_hash column to multiindex
tt_reg = tt.groupby(level=['REGION_HASH'])
for region_name in regions:
region_data = self._output.get_report_data(node_name=nn, region=region_name)
if (region_name not in ['unmarked-region', 'model-init', 'epoch'] and
not region_name.startswith('MPI_') and
region_data['sync_runtime'].item() != 0):
region_hash = region_data['id'].item()
trace_data = tt_reg.get_group(region_hash)
start_idx = trace_data.iloc[0]['index']
end_idx = trace_data.iloc[-1]['index'] + 1 # use time from sample after exiting region
start_time = tt.loc[tt['index'] == start_idx]['TIME'].item()
end_time = tt.loc[tt['index'] == end_idx]['TIME'].item()
trace_elapsed_time = end_time - start_time
msg = 'for region {rn} on node {nn}'.format(rn=region_name, nn=nn)
self.assertNear(trace_elapsed_time, region_data['sync_runtime'].item(), msg=msg)
#epoch
region_data = self._output.get_report_data(node_name=nn, region='epoch')
trace_elapsed_time = trace.iloc[-1]['TIME'] - trace['TIME'].loc[trace['EPOCH_COUNT'] == 0].iloc[0]
msg = 'for epoch on node {nn}'.format(nn=nn)
self.assertNear(trace_elapsed_time, region_data['runtime'].item(), msg=msg)
@util.skip_unless_config_enable('bloat')
def test_runtime_regulator(self):
name = 'test_runtime_regulator'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
loop_count = 20
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
sleep_big_o = 1.0
spin_big_o = 0.5
expected_region_runtime = {'spin': spin_big_o, 'sleep': sleep_big_o}
app_conf.append_region('sleep', sleep_big_o)
app_conf.append_region('spin', spin_big_o)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
app_totals = self._output.get_app_total_data(node_name=nn)
trace = self._output.get_trace_data(node_name=nn)
self.assertNear(trace.iloc[-1]['TIME'], app_totals['runtime'].item())
tt = trace.set_index(['REGION_HASH'], append=True)
tt = tt.groupby(level=['REGION_HASH'])
for region_name in regions:
region_data = self._output.get_report_data(node_name=nn, region=region_name)
if region_name not in ['unmarked-region', 'model-init', 'epoch'] and not region_name.startswith('MPI_') and region_data['runtime'].item() != 0:
trace_data = tt.get_group(region_data['id'].item())
filtered_df = self.create_progress_df(trace_data)
first_time = False
epsilon = 0.001 if region_name != 'sleep' else 0.05
for index, df in filtered_df.iterrows():
if df['REGION_PROGRESS'] == 1:
self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)
first_time = True
if first_time is True and df['REGION_PROGRESS'] == 0:
self.assertNear(df['REGION_RUNTIME'], expected_region_runtime[region_name], epsilon=epsilon)
@util.skip_unless_run_long_tests()
@util.skip_unless_config_enable('bloat')
def test_region_runtimes(self):
name = 'test_region_runtimes'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
loop_count = 500
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm', 8.0)
app_conf.set_loop_count(loop_count)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
# Calculate region times from traces
region_times = collections.defaultdict(lambda: collections.defaultdict(dict))
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn).set_index(['REGION_HASH'], append=True).groupby(level=['REGION_HASH'])
for region_hash, data in tt:
filtered_df = self.create_progress_df(data)
filtered_df = filtered_df.diff()
# Since I'm not separating out the progress 0's from 1's, when I do the diff I only care about the
# case where 1 - 0 = 1 for the progress column.
filtered_df = filtered_df.loc[filtered_df['REGION_PROGRESS'] == 1]
if len(filtered_df) > 1:
launcher.write_log(name, 'Region elapsed time stats from {} - {} :\n{}'\
.format(nn, region_hash, filtered_df['TIME'].describe()))
region_times[nn][region_hash] = filtered_df
launcher.write_log(name, '{}'.format('-' * 80))
# Loop through the reports to see if the region runtimes line up with what was calculated from the trace files above.
regions = self._output.get_region_names()
write_regions = True
for nn in node_names:
for region_name in regions:
rr = self._output.get_report_data(node_name=nn, region=region_name)
if (region_name != 'epoch' and
rr['id'].item() != 0 and
rr['count'].item() > 1):
if write_regions:
launcher.write_log(name, 'Region {} is {}.'.format(rr['id'].item(), region_name))
runtime = rr['sync_runtime'].item()
self.assertNear(runtime,
region_times[nn][rr['id'].item()]['TIME'].sum())
write_regions = False
# Test to ensure every region detected in the trace is captured in the report.
for nn in node_names:
report_ids = []
for region_name in regions:
rr = self._output.get_report_data(node_name=nn, region=region_name)
report_ids.append(rr['id'].item())
for region_hash in region_times[nn].keys():
self.assertTrue(region_hash in report_ids, msg='Report from {} missing region_hash {}'.format(nn, region_hash))
def test_progress(self):
name = 'test_progress'
report_path = name + '.report'
num_node = 1
num_rank = 4
delay = 3.0
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep-progress', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
app_total = self._output.get_app_total_data(node_name=nn)
self.assertNear(delay, sleep_data['runtime'].item())
self.assertGreater(app_total['runtime'].item(), sleep_data['runtime'].item())
self.assertEqual(1, sleep_data['count'].item())
def test_count(self):
name = 'test_count'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
delay = 0.01
loop_count = 100
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('spin', delay)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
trace_data = self._output.get_trace_data(node_name=nn)
spin_data = self._output.get_report_data(node_name=nn, region='spin')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
self.assertNear(delay * loop_count, spin_data['runtime'].item())
self.assertEqual(loop_count, spin_data['count'].item())
self.assertEqual(loop_count, epoch_data['count'].item())
self.assertEqual(loop_count, trace_data['EPOCH_COUNT'][-1])
@util.skip_unless_run_long_tests()
def test_scaling(self):
"""
This test will start at ${num_node} nodes and ranks. It will then call check_run() to
ensure that commands can be executed successfully on all of the allocated compute nodes.
Afterwards it will run the specified app config on each node and verify the reports. When
complete it will double num_node and run the steps again.
WARNING: This test can take a long time to run depending on the number of starting nodes and
the size of the allocation.
"""
name = 'test_scaling'
report_path = name + '.report'
num_node = 2
loop_count = 100
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
app_conf.set_loop_count(loop_count)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, time_limit=900)
check_successful = True
while check_successful:
launcher.set_num_node(num_node)
launcher.set_num_rank(num_node)
try:
launcher.check_run(name)
except subprocess.CalledProcessError as e:
# If we exceed the available nodes in the allocation, ALPS/SLURM gives a rc of 1
# All other rc's are real errors
if e.returncode != 1:
raise e
check_successful = False
if check_successful:
launcher.write_log(name, 'About to run on {} nodes.'.format(num_node))
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')
all2all_data = self._output.get_report_data(node_name=nn, region='all2all')
self.assertEqual(loop_count, dgemm_data['count'].item())
self.assertEqual(loop_count, all2all_data['count'].item())
self.assertGreater(dgemm_data['runtime'].item(), 0.0)
self.assertGreater(all2all_data['runtime'].item(), 0.0)
num_node *= 2
self._output.remove_files()
@util.skip_unless_run_long_tests()
def test_power_consumption(self):
name = 'test_power_consumption'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
loop_count = 500
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm', 8.0)
app_conf.set_loop_count(loop_count)
fam, mod = geopm_test_launcher.get_platform()
if fam == 6 and mod == 87:
# budget for KNL
self._options['power_budget'] = 130
else:
self._options['power_budget'] = 200
gov_agent_conf_path = name + '_gov_agent.config'
self._tmp_files.append(gov_agent_conf_path)
gov_agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
launcher = geopm_test_launcher.TestLauncher(app_conf, gov_agent_conf, report_path,
trace_path, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.write_log(name, 'Power cap = {}W'.format(self._options['power_budget']))
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
all_power_data = {}
# Total power consumed will be Socket(s) + DRAM
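# Each per-sample power estimate below is an energy delta divided by the
# elapsed time between trace samples; package and DRAM energy columns are
# summed separately and then combined into COMBINED_POWER.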
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]
epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data
power_data = epoch_dropped_data.filter(regex='ENERGY')
power_data['TIME'] = epoch_dropped_data['TIME']
power_data = power_data.diff().dropna()
power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, inplace=True)
power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's
pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]
dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]
power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']
pandas.set_option('display.width', 100)
launcher.write_log(name, 'Power stats from {} :\n{}'.format(nn, power_data.describe()))
all_power_data[nn] = power_data
for node_name, power_data in all_power_data.items():
# Allow for overages of 2% at the 75th percentile.
self.assertGreater(self._options['power_budget'] * 1.02, power_data['SOCKET_POWER'].quantile(.75))
# TODO Checks on the maximum power computed during the run?
# TODO Checks to see how much power was left on the table?
@util.skip_unless_run_long_tests()
@util.skip_unless_batch()
def test_power_balancer(self):
name = 'test_power_balancer'
num_node = 4
num_rank = 16
loop_count = 500
# Require that the balancer moves the maximum dgemm runtime at
# least 1/4 the distance to the mean dgemm runtime under the
# governor.
margin_factor = 0.25
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('dgemm-imbalance', 8.0)
app_conf.append_region('all2all', 0.05)
app_conf.set_loop_count(loop_count)
# Update app config with imbalance
alloc_nodes = geopm_test_launcher.TestLauncher.get_alloc_nodes()
for nn in range(len(alloc_nodes) // 2):
app_conf.append_imbalance(alloc_nodes[nn], 0.5)
fam, mod = geopm_test_launcher.get_platform()
if fam == 6 and mod == 87:
# budget for KNL
power_budget = 130
else:
power_budget = 200
self._options = {'power_budget': power_budget}
gov_agent_conf_path = name + '_gov_agent.config'
bal_agent_conf_path = name + '_bal_agent.config'
self._tmp_files.append(gov_agent_conf_path)
self._tmp_files.append(bal_agent_conf_path)
agent_list = ['power_governor', 'power_balancer']
path_dict = {'power_governor': gov_agent_conf_path, 'power_balancer': bal_agent_conf_path}
agent_runtime = dict()
for agent in agent_list:
agent_conf = geopmpy.io.AgentConf(path_dict[agent], agent, self._options)
run_name = '{}_{}'.format(name, agent)
report_path = '{}.report'.format(run_name)
trace_path = '{}.trace'.format(run_name)
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, time_limit=2700)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.write_log(run_name, 'Power cap = {}W'.format(power_budget))
launcher.run(run_name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
power_limits = []
# Total power consumed will be Socket(s) + DRAM
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
first_epoch_index = tt.loc[tt['EPOCH_COUNT'] == 0][:1].index[0]
epoch_dropped_data = tt[first_epoch_index:] # Drop all startup data
power_data = epoch_dropped_data.filter(regex='ENERGY')
power_data['TIME'] = epoch_dropped_data['TIME']
power_data = power_data.diff().dropna()
power_data.rename(columns={'TIME': 'ELAPSED_TIME'}, inplace=True)
power_data = power_data.loc[(power_data != 0).all(axis=1)] # Will drop any row that is all 0's
pkg_energy_cols = [s for s in power_data.keys() if 'ENERGY_PACKAGE' in s]
dram_energy_cols = [s for s in power_data.keys() if 'ENERGY_DRAM' in s]
power_data['SOCKET_POWER'] = power_data[pkg_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['DRAM_POWER'] = power_data[dram_energy_cols].sum(axis=1) / power_data['ELAPSED_TIME']
power_data['COMBINED_POWER'] = power_data['SOCKET_POWER'] + power_data['DRAM_POWER']
pandas.set_option('display.width', 100)
launcher.write_log(name, 'Power stats from {} {} :\n{}'.format(agent, nn, power_data.describe()))
# Get final power limit set on the node
if agent == 'power_balancer':
power_limits.append(epoch_dropped_data['POWER_LIMIT'][-1])
if agent == 'power_balancer':
avg_power_limit = sum(power_limits) / len(power_limits)
self.assertTrue(avg_power_limit <= power_budget)
min_runtime = float('nan')
max_runtime = float('nan')
node_names = self._output.get_node_names()
runtime_list = []
for node_name in node_names:
epoch_data = self._output.get_report_data(node_name=node_name, region='dgemm')
runtime_list.append(epoch_data['runtime'].item())
if agent == 'power_governor':
mean_runtime = sum(runtime_list) / len(runtime_list)
max_runtime = max(runtime_list)
margin = margin_factor * (max_runtime - mean_runtime)
agent_runtime[agent] = max(runtime_list)
self.assertGreater(agent_runtime['power_governor'] - margin,
agent_runtime['power_balancer'],
"governor runtime: {}, balancer runtime: {}, margin: {}".format(
agent_runtime['power_governor'], agent_runtime['power_balancer'], margin))
def test_progress_exit(self):
"""
Check that we always see progress exit before the next entry.
Make sure that progress only decreases when a new region is entered.
"""
name = 'test_progress_exit'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 16
loop_count = 100
big_o = 0.1
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('dgemm-progress', big_o)
app_conf.append_region('spin-progress', big_o)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path, region_barrier=True)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
tt = tt.set_index(['REGION_HASH'], append=True)
tt = tt.groupby(level=['REGION_HASH'])
for region_hash, data in tt:
tmp = data['REGION_PROGRESS'].diff()
#@todo legacy branch?
# Look for changes in progress that are more negative
# than can be expected due to extrapolation error.
if region_hash == 8300189175:
negative_progress = tmp.loc[(tmp > -1) & (tmp < -0.1)]
launcher.write_log(name, '{}'.format(negative_progress))
self.assertEqual(0, len(negative_progress))
@util.skip_unless_run_long_tests()
@util.skip_unless_optimized()
def test_sample_rate(self):
"""
Check that sample rate is regular and fast.
"""
name = 'test_sample_rate'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 16
loop_count = 10
big_o = 10.0
region = 'dgemm-progress'
max_mean = 0.01 # 10 millisecond max sample period
max_nstd = 0.1 # 10% normalized standard deviation (std / mean)
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region(region, big_o)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(num_node, len(node_names))
for nn in node_names:
tt = self._output.get_trace_data(node_name=nn)
delta_t = tt['TIME'].diff()
delta_t = delta_t.loc[delta_t != 0]
self.assertGreater(max_mean, delta_t.mean())
# WARNING : The following line may mask issues in the sampling rate. To do a fine grained analysis, comment
# out the next line and do NOT run on the BSP. This will require modifications to the launcher or manual testing.
size_orig = len(delta_t)
delta_t = delta_t[(delta_t - delta_t.mean()) < 3*delta_t.std()] # Only keep samples within 3 stds of the mean
self.assertGreater(0.06, 1 - (float(len(delta_t)) / size_orig))
self.assertGreater(max_nstd, delta_t.std() / delta_t.mean())
def test_network_times(self):
name = 'test_network_times'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('sleep', 1.0)
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
all2all_data = self._output.get_report_data(node_name=nn, region='all2all')
sleep_data = self._output.get_report_data(node_name=nn, region='sleep')
dgemm_data = self._output.get_report_data(node_name=nn, region='dgemm')
barrier_data = self._output.get_report_data(node_name=nn, region='MPI_Barrier')
unmarked_data = self._output.get_report_data(node_name=nn, region='unmarked-region')
epoch_data = self._output.get_report_data(node_name=nn, region='epoch')
app_total = self._output.get_app_total_data(node_name=nn)
self.assertEqual(0, unmarked_data['count'].item())
# Since MPI time is counted if any rank on a node is in
# an MPI call, but region time is counted only when all
# ranks on a node are in a region, we must use the
# unmarked-region time as our error term when comparing
# MPI time and all2all time.
mpi_epsilon = max(unmarked_data['runtime'].item() / all2all_data['network_time'].item(), 0.05)
self.assertNear(all2all_data['network_time'].item(), all2all_data['runtime'].item(), mpi_epsilon)
self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),
epoch_data['network_time'].item())
# TODO: inconsistent; can we just use _ everywhere?
self.assertNear(all2all_data['network_time'].item() + barrier_data['network_time'].item(),
app_total['network-time'].item())
self.assertEqual(0, unmarked_data['network_time'].item())
self.assertEqual(0, sleep_data['network_time'].item())
self.assertEqual(0, dgemm_data['network_time'].item())
def test_ignore_runtime(self):
name = 'test_ignore_runtime'
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('ignore', 1.0)
app_conf.append_region('dgemm', 1.0)
app_conf.append_region('all2all', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
for nn in node_names:
ignore_data = self._output.get_report_data(node_name=nn, region='ignore')
app_data = self._output.get_app_total_data(node_name=nn)
self.assertNear(ignore_data['runtime'].item(),
app_data['ignore-runtime'].item(), 0.00005)
@util.skip_unless_config_enable('ompt')
def test_unmarked_ompt(self):
name = 'test_unmarked_ompt'
report_path = name + '.report'
num_node = 4
num_rank = 16
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.append_region('stream-unmarked', 1.0)
app_conf.append_region('dgemm-unmarked', 1.0)
app_conf.append_region('all2all-unmarked', 1.0)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path)
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
stream_id = None
region_names = self._output.get_region_names()
stream_name = [key for key in region_names if key.lower().find('stream') != -1][0]
for nn in node_names:
stream_data = self._output.get_report_data(node_name=nn, region=stream_name)
found = False
for name in region_names:
if stream_name in name: # account for numbers at end of OMPT region names
found = True
self.assertTrue(found)
self.assertEqual(1, stream_data['count'].item())
if stream_id:
self.assertEqual(stream_id, stream_data['id'].item())
else:
stream_id = stream_data['id'].item()
ompt_regions = [key for key in region_names if key.startswith('[OMPT]')]
self.assertLessEqual(2, len(ompt_regions))
self.assertTrue(('MPI_Alltoall' in region_names))
gemm_region = [key for key in region_names if key.lower().find('gemm') != -1]
self.assertLessEqual(1, len(gemm_region))
def _test_agent_frequency_map(self, name, use_env=False):
min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
max_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MAX board 0")
sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
self._agent = "frequency_map"
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
loop_count = 5
dgemm_bigo = 15.0
stream_bigo = 1.0
dgemm_bigo_jlse = 35.647
dgemm_bigo_quartz = 29.12
stream_bigo_jlse = 1.6225
stream_bigo_quartz = 1.7941
hostname = socket.gethostname()
if hostname.endswith('.alcf.anl.gov'):
dgemm_bigo = dgemm_bigo_jlse
stream_bigo = stream_bigo_jlse
elif hostname.startswith('mcfly'):
dgemm_bigo = 42.0
stream_bigo = 1.75
elif hostname.startswith('quartz'):
dgemm_bigo = dgemm_bigo_quartz
stream_bigo = stream_bigo_quartz
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('dgemm', dgemm_bigo)
app_conf.append_region('stream', stream_bigo)
app_conf.append_region('all2all', 1.0)
app_conf.write()
freq_map = {}
freq_map['dgemm'] = sticker_freq
freq_map['stream'] = sticker_freq - 2 * freq_step
freq_map['all2all'] = min_freq
self._options = create_frequency_map_policy(min_freq, max_freq, freq_map, use_env)
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, region_barrier=True, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
for region_name in regions:
region_data = self._output.get_report_data(node_name=nn, region=region_name)
if (region_name in ['dgemm', 'stream', 'all2all']):
#todo verify trace frequencies
#todo verify agent report augment frequencies
msg = region_name + " frequency should be near assigned map frequency"
self.assertNear(region_data['frequency'].item(), freq_map[region_name] / sticker_freq * 100, msg=msg)
def test_agent_frequency_map_env(self):
"""
Test of the FrequencyMapAgent, setting a map through GEOPM_FREQUENCY_MAP.
"""
self._test_agent_frequency_map('test_agent_frequency_map_env', use_env=True)
def test_agent_frequency_map_policy(self):
"""
Test of the FrequencyMapAgent, setting a map through the policy.
"""
self._test_agent_frequency_map('test_agent_frequency_map_policy', use_env=False)
def test_agent_energy_efficient_single_region(self):
"""
Test of the EnergyEfficientAgent against single region loop.
"""
name = 'test_energy_efficient_single_region'
min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
self._agent = "energy_efficient"
report_path = name + '.report'
trace_path = name + '.trace'
num_node = 1
num_rank = 4
loop_count = 100
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('spin', 0.1)
self._options = {'frequency_min': min_freq,
'frequency_max': sticker_freq}
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path, trace_path)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name)
self._output = geopmpy.io.AppOutput(report_path, trace_path + '*')
node_names = self._output.get_node_names()
self.assertEqual(len(node_names), num_node)
regions = self._output.get_region_names()
for nn in node_names:
for region_name in regions:
report = geopmpy.io.RawReport(report_path)
if (region_name in ['spin']):
region = report.raw_region(nn, region_name)
msg = region_name + " frequency should be minimum frequency as specified by policy"
self.assertEqual(region['requested-online-frequency'], min_freq, msg=msg) # freq should reduce
@util.skip_unless_run_long_tests()
@util.skip_unless_cpufreq()
@util.skip_unless_batch()
def test_agent_energy_efficient(self):
"""
Test of the EnergyEfficientAgent.
"""
name = 'test_energy_efficient_sticker'
min_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MIN board 0")
max_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_MAX board 0")
sticker_freq = geopm_test_launcher.geopmread("CPUINFO::FREQ_STICKER board 0")
freq_step = geopm_test_launcher.geopmread("CPUINFO::FREQ_STEP board 0")
self._agent = "energy_efficient"
num_node = 1
num_rank = 4
loop_count = 200
dgemm_bigo = 15.0
stream_bigo = 1.0
dgemm_bigo_jlse = 35.647
dgemm_bigo_quartz = 29.12
stream_bigo_jlse = 1.6225
stream_bigo_quartz = 1.7941
hostname = socket.gethostname()
if hostname.endswith('.alcf.anl.gov'):
dgemm_bigo = dgemm_bigo_jlse
stream_bigo = stream_bigo_jlse
elif hostname.startswith('mcfly'):
dgemm_bigo = 42.0
stream_bigo = 1.75
elif hostname.startswith('quartz'):
dgemm_bigo = dgemm_bigo_quartz
stream_bigo = stream_bigo_quartz
run = ['_sticker', '_nan_nan']
for rr in run:
report_path = name + rr + '.report'
trace_path = name + rr + '.trace'
app_conf = geopmpy.io.BenchConf(name + '_app.config')
self._tmp_files.append(app_conf.get_path())
app_conf.set_loop_count(loop_count)
app_conf.append_region('dgemm', dgemm_bigo)
app_conf.append_region('stream', stream_bigo)
app_conf.write()
if rr == '_sticker':
self._options = {'frequency_min': sticker_freq,
'frequency_max': sticker_freq}
freq = sticker_freq
else:
self._options = {'frequency_min': min_freq,
'frequency_max': sticker_freq}
agent_conf = geopmpy.io.AgentConf(name + '_agent.config', self._agent, self._options)
self._tmp_files.append(agent_conf.get_path())
launcher = geopm_test_launcher.TestLauncher(app_conf, agent_conf, report_path,
trace_path, region_barrier=True, time_limit=900)
launcher.set_num_node(num_node)
launcher.set_num_rank(num_rank)
launcher.run(name + rr)
# compare the app_total runtime and energy and assert within bounds
report_path = name + run[0] + '.report'
trace_path = name + run[0] + '.trace'
sticker_out = geopmpy.io.AppOutput(report_path, trace_path + '*')
report_path = name + run[1] + '.report'
trace_path = name + run[1] + '.trace'
nan_out = geopmpy.io.AppOutput(report_path, trace_path + '*')
for nn in nan_out.get_node_names():
sticker_app_total = sticker_out.get_app_total_data(node_name=nn)
nan_app_total = nan_out.get_app_total_data(node_name=nn)
runtime_savings_epoch = (sticker_app_total['runtime'].item() - nan_app_total['runtime'].item()) / sticker_app_total['runtime'].item()
energy_savings_epoch = (sticker_app_total['energy-package'].item() - nan_app_total['energy-package'].item()) / sticker_app_total['energy-package'].item()
self.assertLess(-0.1, runtime_savings_epoch) # want -10% or better
self.assertLess(0.0, energy_savings_epoch)
class TestIntegrationGeopmio(unittest.TestCase):
''' Tests of geopmread and geopmwrite.'''
def setUp(self):
self.skip_warning_string = 'Incompatible CPU'
def check_output(self, args, expected):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for exp in expected:
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line:
line = proc.stdout.readline()
self.assertIn(exp.encode(), line)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_output_range(self, args, min_exp, max_exp):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() in line:
continue
if line.startswith(b'0x'):
value = int(line, 16)  # hexadecimal output
else:
value = float(line)
self.assertLessEqual(min_exp, value, msg="Value read for {} smaller than {}: {}.".format(args, min_exp, value))
self.assertGreaterEqual(max_exp, value, msg="Value read for {} larger than {}: {}.".format(args, max_exp, value))
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_no_error(self, args):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def test_geopmread_command_line(self):
'''
Check that geopmread commandline arguments work.
'''
self.exec_name = "geopmread"
# no args
self.check_no_error([])
# domain flag
self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',
'board_memory', 'package_memory',
'board_nic', 'package_nic',
'board_accelerator', 'package_accelerator'])
self.check_output(['--domain', 'TIME'], ['cpu'])
# read signal
self.check_no_error(['TIME', 'board', '0'])
# info
self.check_no_error(['--info'])
self.check_output(['--info', 'TIME'], ['Time in seconds'])
# errors
read_err = 'domain type and domain index are required'
self.check_output(['TIME'], [read_err])
self.check_output(['TIME', 'board'], [read_err])
self.check_output(['TIME', 'board', 'bad'], ['invalid domain index'])
self.check_output(['FREQUENCY', 'package', '111'], ['cannot read signal'])
self.check_output(['ENERGY_PACKAGE', 'cpu', '0'], ['cannot read signal'])
self.check_output(['INVALID', 'board', '0'], ['cannot read signal'])
self.check_output(['--domain', 'INVALID'], ['unable to determine signal type'])
self.check_output(['--domain', '--info'], ['info about domain not implemented'])
@util.skip_unless_batch()
def test_geopmread_all_signal_agg(self):
'''
Check that all reported signals can be read for board, aggregating if necessary.
'''
self.exec_name = "geopmread"
all_signals = []
try:
proc = subprocess.Popen([self.exec_name],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
all_signals.append(line.strip())
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
for sig in all_signals:
self.check_no_error([sig.decode(), 'board', '0'])
@util.skip_unless_batch()
def test_geopmread_signal_value(self):
'''
Check that some specific signals give a sane value.
'''
self.exec_name = "geopmread"
signal_range = {
"POWER_PACKAGE": (20, 400),
"FREQUENCY": (1.0e8, 5.0e9),
"TIME": (0, 10), # time in sec to start geopmread
"TEMPERATURE_CORE": (0, 100)
}
for signal, val_range in signal_range.items():
try:
self.check_no_error([signal, "board", "0"])
except Exception:
pass  # skip missing signals
else:
self.check_output_range([signal, "board", "0"], *val_range)
def test_geopmread_custom_msr(self):
'''
Check that MSRIOGroup picks up additional MSRs in path.
'''
self.exec_name = "geopmread"
path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.realpath(__file__))),
'examples/custom_msr/')
custom_env = os.environ.copy()
custom_env['GEOPM_PLUGIN_PATH'] = path
all_signals = []
try:
proc = subprocess.Popen([self.exec_name], env=custom_env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
all_signals.append(line.strip())
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
self.assertIn(b'MSR::CORE_PERF_LIMIT_REASONS#', all_signals)
def test_geopmwrite_command_line(self):
'''
Check that geopmwrite commandline arguments work.
'''
self.exec_name = "geopmwrite"
# no args
self.check_no_error([])
# domain flag
self.check_output(['--domain'], ['board', 'package', 'core', 'cpu',
'board_memory', 'package_memory',
'board_nic', 'package_nic',
'board_accelerator', 'package_accelerator'])
self.check_no_error(['--domain', 'FREQUENCY'])
# info
self.check_no_error(['--info'])
self.check_output(['--info', 'FREQUENCY'], ['processor frequency'])
# errors
write_err = 'domain type, domain index, and value are required'
self.check_output(['FREQUENCY'], [write_err])
self.check_output(['FREQUENCY', 'board'], [write_err])
self.check_output(['FREQUENCY', 'board', '0'], [write_err])
self.check_output(['FREQUENCY', 'board', 'bad', '0'], ['invalid domain index'])
self.check_output(['FREQUENCY', 'board', '0', 'bad'], ['invalid write value'])
self.check_output(['FREQUENCY', 'package', '111', '0'], ['cannot write control'])
self.check_output(['FREQUENCY', 'board_nic', '0', '0'], ['cannot write control'])
self.check_output(['INVALID', 'board', '0', '0'], ['cannot write control'])
self.check_output(['--domain', 'INVALID'], ['unable to determine control type'])
self.check_output(['--domain', '--info'], ['info about domain not implemented'])
@util.skip_unless_batch()
def test_geopmwrite_set_freq(self):
'''
Check that geopmwrite can be used to set frequency.
'''
def read_stdout_line(stdout):
line = stdout.readline()
while self.skip_warning_string.encode() in line:
line = stdout.readline()
return line.strip()
def read_current_freq(domain, signal='FREQUENCY'):
read_proc = subprocess.Popen(['geopmread', signal, domain, '0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
freq = read_stdout_line(read_proc.stdout)
freq = float(freq)
return freq
def read_min_max_freq():
read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MIN', 'board', '0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
min_freq = read_stdout_line(read_proc.stdout)
min_freq = float(int(float(min_freq)/1e8)*1e8) # convert to multiple of 1e8
read_proc = subprocess.Popen(['geopmread', 'CPUINFO::FREQ_MAX', 'board', '0'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
max_freq = read_stdout_line(read_proc.stdout)
max_freq = float(int(float(max_freq)/1e8)*1e8)
return min_freq, max_freq
self.exec_name = "geopmwrite"
read_proc = subprocess.Popen(['geopmread', '--domain', 'FREQUENCY'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
read_domain = read_stdout_line(read_proc.stdout).decode()
write_proc = subprocess.Popen([self.exec_name, '--domain', 'FREQUENCY'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
write_domain = read_stdout_line(write_proc.stdout).decode()
min_freq, max_freq = read_min_max_freq()
old_freq = read_current_freq(write_domain, 'MSR::PERF_CTL:FREQ')
self.assertLess(old_freq, max_freq * 2)
self.assertGreater(old_freq, min_freq - 1e8)
# set to min and check
self.check_no_error(['FREQUENCY', write_domain, '0', str(min_freq)])
result = read_current_freq(read_domain)
self.assertEqual(min_freq, result)
# set to max and check
self.check_no_error(['FREQUENCY', write_domain, '0', str(max_freq)])
result = read_current_freq(read_domain)
self.assertEqual(max_freq, result)
self.check_no_error(['FREQUENCY', write_domain, '0', str(old_freq)])
class TestIntegrationGeopmagent(unittest.TestCase):
''' Tests of geopmagent.'''
def setUp(self):
self.exec_name = 'geopmagent'
self.skip_warning_string = 'Incompatible CPU frequency driver/governor'
def check_output(self, args, expected):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for exp in expected:
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
self.assertIn(exp.encode(), line)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def check_json_output(self, args, expected):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
line = proc.stdout.readline()
while self.skip_warning_string.encode() in line or line == b'\n':
line = proc.stdout.readline()
try:
out_json = json.loads(line.decode())
except ValueError:
self.fail('Could not convert json string: {}\n'.format(line))
self.assertEqual(expected, out_json)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
def check_no_error(self, args):
try:
proc = subprocess.Popen([self.exec_name] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in proc.stdout:
if self.skip_warning_string.encode() not in line:
self.assertNotIn(b'Error', line)
except subprocess.CalledProcessError as ex:
sys.stderr.write('{}\n'.format(ex.output))
def test_geopmagent_command_line(self):
'''
Check that geopmagent commandline arguments work.
'''
# no args
agent_names = ['monitor', 'power_balancer', 'power_governor',
'energy_efficient', 'frequency_map']
self.check_output([], agent_names)
# help message
self.check_output(['--help'], ['Usage'])
# version
self.check_no_error(['--version'])
# agent policy and sample names
for agent in agent_names:
self.check_output(['--agent', agent],
['Policy', 'Sample'])
# policy file
self.check_json_output(['--agent', 'monitor', '--policy', 'None'],
{})
self.check_json_output(['--agent', 'power_governor', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# default value policy
self.check_json_output(['--agent', 'power_governor', '--policy', 'NAN'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
self.check_json_output(['--agent', 'power_governor', '--policy', 'nan'],
{'POWER_PACKAGE_LIMIT_TOTAL': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,nan'],
{'FREQ_MIN': 'NAN', 'FREQ_MAX': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', '1.2e9,nan'],
{'FREQ_MIN': 1.2e9, 'FREQ_MAX': 'NAN'})
self.check_json_output(['--agent', 'energy_efficient', '--policy', 'nan,1.3e9'],
{'FREQ_MIN': 'NAN', 'FREQ_MAX': 1.3e9})
# unspecified policy values are accepted
self.check_json_output(['--agent', 'power_balancer', '--policy', '150'],
{'POWER_PACKAGE_LIMIT_TOTAL': 150})
# errors
self.check_output(['--agent', 'power_governor', '--policy', 'None'],
['not a valid floating-point number', 'Invalid argument'])
self.check_output(['--agent', 'monitor', '--policy', '300'],
['agent takes no parameters', 'Invalid argument'])
self.check_output(['--agent', 'energy_efficient', '--policy', '2.0e9,5.0e9,4.5e9,6.7,4.2'],
['Number of policies', 'Invalid argument'])
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"SLURM_NODELIST",
"GEOPM_KEEP_FILES",
"GEOPM_FREQUENCY_MAP"
] |
[]
|
["SLURM_NODELIST", "GEOPM_KEEP_FILES", "GEOPM_FREQUENCY_MAP"]
|
python
| 3 | 0 | |
samples/snippets/create_data_labeling_job_active_learning_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from google.cloud import aiplatform
import pytest
import create_data_labeling_job_active_learning_sample
import helpers
API_ENDPOINT = os.getenv("DATA_LABELING_API_ENDPOINT")
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
DATASET_ID = "1905673553261363200"
INPUTS_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/datalabelingjob/inputs/image_classification_1.0.0.yaml"
DISPLAY_NAME = f"temp_create_data_labeling_job_active_learning_test_{uuid.uuid4()}"
INSTRUCTIONS_GCS_URI = (
"gs://ucaip-sample-resources/images/datalabeling_instructions.pdf"
)
ANNOTATION_SPEC = "rose"
@pytest.fixture
def shared_state():
state = {}
yield state
@pytest.fixture
def job_client():
client_options = {"api_endpoint": API_ENDPOINT}
job_client = aiplatform.gapic.JobServiceClient(client_options=client_options)
yield job_client
@pytest.fixture(scope="function", autouse=True)
def teardown(capsys, shared_state, job_client):
yield
job_client.cancel_data_labeling_job(name=shared_state["data_labeling_job_name"])
# Verify the data labeling job is cancelled, or time out after 400 seconds
helpers.wait_for_job_state(
get_job_method=job_client.get_data_labeling_job,
name=shared_state["data_labeling_job_name"],
timeout=400,
freq=10,
)
# Delete the data labeling job
response = job_client.delete_data_labeling_job(
name=shared_state["data_labeling_job_name"]
)
print("Delete LRO:", response.operation.name)
delete_data_labeling_job_response = response.result(timeout=300)
print("delete_data_labeling_job_response", delete_data_labeling_job_response)
out, _ = capsys.readouterr()
assert "delete_data_labeling_job_response" in out
# Creating a data labeling job for images
def test_create_data_labeling_job_active_learning_sample(capsys, shared_state):
create_data_labeling_job_active_learning_sample.create_data_labeling_job_active_learning_sample(
project=PROJECT_ID,
display_name=DISPLAY_NAME,
dataset=f"projects/{PROJECT_ID}/locations/{LOCATION}/datasets/{DATASET_ID}",
instruction_uri=INSTRUCTIONS_GCS_URI,
inputs_schema_uri=INPUTS_SCHEMA_URI,
annotation_spec=ANNOTATION_SPEC,
api_endpoint=API_ENDPOINT,
)
out, _ = capsys.readouterr()
# Save the resource name of the newly created data labeling job
shared_state["data_labeling_job_name"] = helpers.get_name(out)
|
[] |
[] |
[
"DATA_LABELING_API_ENDPOINT",
"BUILD_SPECIFIC_GCLOUD_PROJECT"
] |
[]
|
["DATA_LABELING_API_ENDPOINT", "BUILD_SPECIFIC_GCLOUD_PROJECT"]
|
python
| 2 | 0 | |
singularityimages/settings/development.py
|
from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*',]
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DATABASE_NAME', 'singularityimages'),
'USER': os.getenv('DATABASE_USER', 'singularityimages'),
'PASSWORD': os.getenv('DATABASE_PASSWORD', 'password'),
'HOST': os.getenv('DATABASE_HOST', '127.0.0.1'),
'PORT': os.getenv('DATABASE_PORT', '5432'),
}
}
SINGULARITY_IMAGE_DIR = '/work/projects/singularity/TACC/biocontainers'
|
[] |
[] |
[
"DATABASE_PASSWORD",
"DATABASE_HOST",
"DATABASE_NAME",
"DATABASE_PORT",
"DATABASE_USER"
] |
[]
|
["DATABASE_PASSWORD", "DATABASE_HOST", "DATABASE_NAME", "DATABASE_PORT", "DATABASE_USER"]
|
python
| 5 | 0 | |
cd4ml/webapp/model_cache.py
|
import datetime
import logging
import os
from functools import lru_cache
from pathlib import Path
import mlflow
import requests
from cd4ml.model_utils import load_deployed_model_from_local_file
from cd4ml.problems import list_available_scenarios
class ModelCache:
def __init__(self, cache_location=Path("mlflow_cache")):
self.logger = logging.getLogger(__name__)
self.known_problems = list_available_scenarios()
self.columns_of_interest = {
'run_id': 'run_id',
'tags.BuildNumber': 'build_number',
'tags.mlflow.runName': 'run_number',
'end_time': 'time',
'params.MLPipelineParamsName': 'ml_pipeline_params_name',
'params.FeatureSetName': 'feature_set_name',
'params.AlgorithmName': 'algorithm_name',
'params.AlgorithmParamsName': 'algorithm_params_name',
'tags.DidPassAcceptanceTest': 'passed_acceptance_test'
}
self.base_model_directory = cache_location
mlflow.set_tracking_uri(os.environ["MLFLOW_TRACKING_URL"])
def _get_id_for_latest_model(self, all_models_for_scenario):
possible_deployable_models = [row for row in all_models_for_scenario
if self.is_latest_deployable_model(row)]
if len(possible_deployable_models) == 0:
return None
last_deployment_model = sorted(possible_deployable_models,
key=lambda row: datetime.datetime.strptime(row['time'], "%c"),
reverse=True)
return last_deployment_model[0]['run_id']
def get_loaded_model_for_scenario_and_run_id(self, scenario, run_id):
if run_id == "latest":
all_models_for_scenario = self.list_available_models_from_ml_flow().get(scenario)
if all_models_for_scenario is None:
return None
latest_item = [item for item in all_models_for_scenario if item['is_latest']]
if len(latest_item) == 0:
return None
return self.get_loaded_model_for_scenario_and_run_id(scenario, latest_item[0]['run_id'])
model_path = Path(self.base_model_directory, scenario, run_id, "full_model.pkl")
if not model_path.exists():
self.download_and_save_from_ml_flow(model_path, run_id)
return self.read_model(model_path)
@lru_cache(maxsize=64)
def read_model(self, model_path):
return load_deployed_model_from_local_file(model_path)
def list_available_models_from_ml_flow(self):
returning_dictionary = dict()
for scenario in self.known_problems:
experiment = mlflow.get_experiment_by_name(scenario)
if experiment is None:
continue
runs = mlflow.search_runs(experiment_ids=experiment.experiment_id)
dataframe_with_columns_of_interest = runs[list(self.columns_of_interest.keys())]
dataframe_with_columns_renamed = dataframe_with_columns_of_interest.rename(columns=self.columns_of_interest)
dataframe_with_columns_renamed['time'] = dataframe_with_columns_renamed['time'].dt.strftime("%c")
list_of_dictionaries = dataframe_with_columns_renamed.to_dict(orient="rows")
id_of_latest = self._get_id_for_latest_model(list_of_dictionaries)
for d in list_of_dictionaries:
d['is_latest'] = d['run_id'] == id_of_latest
returning_dictionary[scenario] = list_of_dictionaries
return returning_dictionary
@staticmethod
def download_and_save_from_ml_flow(path, run_id):
path.parent.mkdir(parents=True, exist_ok=True)
results = requests.get("{}/get-artifact?path=full_model.pkl&run_uuid={}"
.format(mlflow.get_tracking_uri(), run_id))
with open(path, "wb") as f:
f.write(results.content)
@staticmethod
def is_latest_deployable_model(row):
return row['ml_pipeline_params_name'] == 'default' and \
row['feature_set_name'] == 'default' and \
row['algorithm_name'] == 'default' and \
row['algorithm_params_name'] == 'default' and \
row['passed_acceptance_test'] == 'yes'
|
[] |
[] |
[
"MLFLOW_TRACKING_URL"
] |
[]
|
["MLFLOW_TRACKING_URL"]
|
python
| 1 | 0 | |
venv/Lib/site-packages/werkzeug/serving.py
|
# -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Source code won't reload itself when
changed, and each time you kill the server using ``^C`` you get a
`KeyboardInterrupt` error. While the latter is easy to solve, the first
one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it an `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `click`
(http://click.pocoo.org) instead of a simple start file.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import io
import os
import signal
import socket
import sys
import werkzeug
from ._compat import PY2
from ._compat import reraise
from ._compat import WIN
from ._compat import wsgi_encoding_dance
from ._internal import _log
from .exceptions import InternalServerError
from .urls import uri_to_iri
from .urls import url_parse
from .urls import url_unquote
try:
import socketserver
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
except ImportError:
import SocketServer as socketserver
from BaseHTTPServer import HTTPServer
from BaseHTTPServer import BaseHTTPRequestHandler
try:
import ssl
except ImportError:
class _SslDummy(object):
def __getattr__(self, name):
raise RuntimeError("SSL support unavailable")
ssl = _SslDummy()
try:
import termcolor
except ImportError:
termcolor = None
def _get_openssl_crypto_module():
try:
from OpenSSL import crypto
except ImportError:
raise TypeError("Using ad-hoc certificates requires the pyOpenSSL library.")
else:
return crypto
ThreadingMixIn = socketserver.ThreadingMixIn
can_fork = hasattr(os, "fork")
if can_fork:
ForkingMixIn = socketserver.ForkingMixIn
else:
class ForkingMixIn(object):
pass
try:
af_unix = socket.AF_UNIX
except AttributeError:
af_unix = None
LISTEN_QUEUE = 128
can_open_by_fd = not WIN and hasattr(socket, "fromfd")
# On Python 3, ConnectionError represents the same errnos as
# socket.error from Python 2, while socket.error is an alias for the
# more generic OSError.
if PY2:
_ConnectionError = socket.error
else:
_ConnectionError = ConnectionError
class DechunkedInput(io.RawIOBase):
"""An input stream that handles Transfer-Encoding 'chunked'"""
def __init__(self, rfile):
self._rfile = rfile
self._done = False
self._len = 0
def readable(self):
return True
def read_chunk_len(self):
try:
line = self._rfile.readline().decode("latin1")
_len = int(line.strip(), 16)
except ValueError:
raise IOError("Invalid chunk header")
if _len < 0:
raise IOError("Negative chunk length not allowed")
return _len
def readinto(self, buf):
read = 0
while not self._done and read < len(buf):
if self._len == 0:
# This is the first chunk or we fully consumed the previous
# one. Read the next length of the next chunk
self._len = self.read_chunk_len()
if self._len == 0:
# Found the final chunk of size 0. The stream is now exhausted,
# but there is still a final newline that should be consumed
self._done = True
if self._len > 0:
# There is data (left) in this chunk, so append it to the
# buffer. If this operation fully consumes the chunk, this will
# reset self._len to 0.
n = min(len(buf), self._len)
buf[read : read + n] = self._rfile.read(n)
self._len -= n
read += n
if self._len == 0:
# Skip the terminating newline of a chunk that has been fully
# consumed. This also applies to the 0-sized final chunk
terminator = self._rfile.readline()
if terminator not in (b"\n", b"\r\n", b"\r"):
raise IOError("Missing chunk terminating newline")
return read
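# A minimal decoding sketch (added illustration, not part of the original module):
# feeding a chunked HTTP body through DechunkedInput yields the de-chunked payload.
# The sample bytes below are made up for demonstration only.
#
#     _demo = DechunkedInput(io.BytesIO(b"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n"))
#     assert _demo.read() == b"Wikipedia"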
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return "Werkzeug/" + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = "http" if self.server.ssl_context is None else "https"
if not self.client_address:
self.client_address = "<local>"
if isinstance(self.client_address, str):
self.client_address = (self.client_address, 0)
else:
pass
path_info = url_unquote(request_url.path)
environ = {
"wsgi.version": (1, 0),
"wsgi.url_scheme": url_scheme,
"wsgi.input": self.rfile,
"wsgi.errors": sys.stderr,
"wsgi.multithread": self.server.multithread,
"wsgi.multiprocess": self.server.multiprocess,
"wsgi.run_once": False,
"werkzeug.server.shutdown": shutdown_server,
"SERVER_SOFTWARE": self.server_version,
"REQUEST_METHOD": self.command,
"SCRIPT_NAME": "",
"PATH_INFO": wsgi_encoding_dance(path_info),
"QUERY_STRING": wsgi_encoding_dance(request_url.query),
# Non-standard, added by mod_wsgi, uWSGI
"REQUEST_URI": wsgi_encoding_dance(self.path),
# Non-standard, added by gunicorn
"RAW_URI": wsgi_encoding_dance(self.path),
"REMOTE_ADDR": self.address_string(),
"REMOTE_PORT": self.port_integer(),
"SERVER_NAME": self.server.server_address[0],
"SERVER_PORT": str(self.server.server_address[1]),
"SERVER_PROTOCOL": self.request_version,
}
for key, value in self.get_header_items():
key = key.upper().replace("-", "_")
value = value.replace("\r\n", "")
if key not in ("CONTENT_TYPE", "CONTENT_LENGTH"):
key = "HTTP_" + key
if key in environ:
value = "{},{}".format(environ[key], value)
environ[key] = value
if environ.get("HTTP_TRANSFER_ENCODING", "").strip().lower() == "chunked":
environ["wsgi.input_terminated"] = True
environ["wsgi.input"] = DechunkedInput(environ["wsgi.input"])
if request_url.scheme and request_url.netloc:
environ["HTTP_HOST"] = request_url.netloc
return environ
def run_wsgi(self):
if self.headers.get("Expect", "").lower().strip() == "100-continue":
self.wfile.write(b"HTTP/1.1 100 Continue\r\n\r\n")
self.environ = environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, "write() before start_response"
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
code = int(code)
self.send_response(code, msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if not (
"content-length" in header_keys
or environ["REQUEST_METHOD"] == "HEAD"
or code < 200
or code in (204, 304)
):
self.close_connection = True
self.send_header("Connection", "close")
if "server" not in header_keys:
self.send_header("Server", self.version_string())
if "date" not in header_keys:
self.send_header("Date", self.date_time_string())
self.end_headers()
assert isinstance(data, bytes), "applications must write bytes"
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError("Headers already set")
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b"")
finally:
if hasattr(application_iter, "close"):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (_ConnectionError, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from .debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log("error", "Error on request:\n%s", traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (_ConnectionError, socket.timeout) as e:
self.connection_dropped(e)
except Exception as e:
if self.server.ssl_context is None or not is_ssl_error(e):
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, "SIGKILL", signal.SIGTERM)
# reloader active
if is_running_from_reloader():
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ""
if self.request_version != "HTTP/0.9":
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode("ascii"))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
if getattr(self, "environ", None):
return self.environ["REMOTE_ADDR"]
elif not self.client_address:
return "<local>"
elif isinstance(self.client_address, str):
return self.client_address
else:
return self.client_address[0]
def port_integer(self):
return self.client_address[1]
def log_request(self, code="-", size="-"):
try:
path = uri_to_iri(self.path)
msg = "%s %s %s" % (self.command, path, self.request_version)
except AttributeError:
# path isn't set if the requestline was bad
msg = self.requestline
code = str(code)
if termcolor:
color = termcolor.colored
if code[0] == "1": # 1xx - Informational
msg = color(msg, attrs=["bold"])
elif code[0] == "2": # 2xx - Success
msg = color(msg, color="white")
elif code == "304": # 304 - Resource Not Modified
msg = color(msg, color="cyan")
elif code[0] == "3": # 3xx - Redirection
msg = color(msg, color="green")
elif code == "404": # 404 - Resource Not Found
msg = color(msg, color="yellow")
elif code[0] == "4": # 4xx - Client Error
msg = color(msg, color="red", attrs=["bold"])
else: # 5xx, or any other response
msg = color(msg, color="magenta", attrs=["bold"])
self.log("info", '"%s" %s %s', msg, code, size)
def log_error(self, *args):
self.log("error", *args)
def log_message(self, format, *args):
self.log("info", format, *args)
def log(self, type, message, *args):
_log(
type,
"%s - - [%s] %s\n"
% (self.address_string(), self.log_date_time_string(), message % args),
)
def get_header_items(self):
"""
Get an iterable list of key/value pairs representing headers.
This function provides Python 2/3 compatibility as related to the
parsing of request headers. Python 2.7 is not compliant with
RFC 3875 Section 4.1.18 which requires multiple values for headers
to be provided or RFC 2616 which allows for folding of multi-line
headers. This function will return a matching list regardless
of Python version. It can be removed once Python 2.7 support
is dropped.
:return: List of tuples containing header key/value pairs
"""
if PY2:
# For Python 2, process the headers manually according to
# W3C RFC 2616 Section 4.2.
items = []
for header in self.headers.headers:
# Remove "\r\n" from the header and split on ":" to get
# the field name and value.
try:
key, value = header[0:-2].split(":", 1)
except ValueError:
# If header could not be split with : but starts with white
# space and it follows an existing header, it's a folded
# header.
if header[0] in ("\t", " ") and items:
# Pop off the last header
key, value = items.pop()
# Append the current header to the value of the last
# header which will be placed back on the end of the
# list
value = value + header
# Otherwise it's just a bad header and should error
else:
# Re-raise the value error
raise
# Add the key and the value once stripped of leading
# white space. The specification allows for stripping
# trailing white space but the Python 3 code does not
# strip trailing white space. Therefore, trailing space
# will be left as is to match the Python 3 behavior.
items.append((key, value.lstrip()))
else:
items = self.headers.items()
return items
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
crypto = _get_openssl_crypto_module()
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = "*"
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxsize))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = "Dummy Certificate" # noqa: E741
issuer = cert.get_issuer()
issuer.CN = subject.CN
issuer.O = subject.O # noqa: E741
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 2048)
cert.set_pubkey(pkey)
cert.sign(pkey, "sha256")
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = "*.%s/CN=%s" % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + ".crt"
pkey_file = base_path + ".key"
with open(cert_file, "wb") as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, "wb") as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
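# A hedged usage sketch (the path and host below are illustrative, not part of the
# original module): generating a reusable development certificate once and pointing
# run_simple at it avoids regenerating an 'adhoc' certificate on every server start.
#
#     cert_file, pkey_file = make_ssl_devcert('/tmp/devcert', host='localhost')
#     run_simple('localhost', 8443, app, ssl_context=(cert_file, pkey_file))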
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
crypto = _get_openssl_crypto_module()
import tempfile
import atexit
cert, pkey = generate_adhoc_ssl_pair()
cert_handle, cert_file = tempfile.mkstemp()
pkey_handle, pkey_file = tempfile.mkstemp()
atexit.register(os.remove, pkey_file)
atexit.register(os.remove, cert_file)
os.write(cert_handle, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
os.write(pkey_handle, crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
os.close(cert_handle)
os.close(pkey_handle)
ctx = load_ssl_context(cert_file, pkey_file)
return ctx
def load_ssl_context(cert_file, pkey_file=None, protocol=None):
"""Loads SSL context from cert/private key files and optional protocol.
Many parameters are directly taken from the API of
:py:class:`ssl.SSLContext`.
:param cert_file: Path of the certificate to use.
:param pkey_file: Path of the private key to use. If not given, the key
will be obtained from the certificate file.
:param protocol: One of the ``PROTOCOL_*`` constants in the stdlib ``ssl``
module. Defaults to ``PROTOCOL_SSLv23``.
"""
if protocol is None:
protocol = ssl.PROTOCOL_SSLv23
ctx = _SSLContext(protocol)
ctx.load_cert_chain(cert_file, pkey_file)
return ctx
class _SSLContext(object):
"""A dummy class with a small subset of Python3's ``ssl.SSLContext``, only
intended to be used with and by Werkzeug."""
def __init__(self, protocol):
self._protocol = protocol
self._certfile = None
self._keyfile = None
self._password = None
def load_cert_chain(self, certfile, keyfile=None, password=None):
self._certfile = certfile
self._keyfile = keyfile or certfile
self._password = password
def wrap_socket(self, sock, **kwargs):
return ssl.wrap_socket(
sock,
keyfile=self._keyfile,
certfile=self._certfile,
ssl_version=self._protocol,
**kwargs
)
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
exc_types = (ssl.SSLError,)
try:
from OpenSSL.SSL import Error
exc_types += (Error,)
except ImportError:
pass
if error is None:
error = sys.exc_info()[1]
return isinstance(error, exc_types)
def select_address_family(host, port):
"""Return ``AF_INET4``, ``AF_INET6``, or ``AF_UNIX`` depending on
the host and port."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
# try:
# info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
# socket.SOCK_STREAM, 0,
# socket.AI_PASSIVE)
# if info:
# return info[0][0]
# except socket.gaierror:
# pass
if host.startswith("unix://"):
return socket.AF_UNIX
elif ":" in host and hasattr(socket, "AF_INET6"):
return socket.AF_INET6
return socket.AF_INET
def get_sockaddr(host, port, family):
"""Return a fully qualified socket address that can be passed to
:func:`socket.bind`."""
if family == af_unix:
return host.split("://", 1)[1]
try:
res = socket.getaddrinfo(
host, port, family, socket.SOCK_STREAM, socket.IPPROTO_TCP
)
except socket.gaierror:
return host, port
return res[0][4]
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = LISTEN_QUEUE
def __init__(
self,
host,
port,
app,
handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_address_family(host, port)
if fd is not None:
real_sock = socket.fromfd(fd, self.address_family, socket.SOCK_STREAM)
port = 0
server_address = get_sockaddr(host, int(port), self.address_family)
# remove socket file if it already exists
if self.address_family == af_unix and os.path.exists(server_address):
os.unlink(server_address)
HTTPServer.__init__(self, server_address, handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
self.host = host
self.port = self.socket.getsockname()[1]
# Patch in the original socket.
if fd is not None:
self.socket.close()
self.socket = real_sock
self.server_address = self.socket.getsockname()
if ssl_context is not None:
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == "adhoc":
ssl_context = generate_adhoc_ssl_context()
# If we are on Python 2 the return value from socket.fromfd
# is an internal socket object but what we need for ssl wrap
# is the wrapper around it :(
sock = self.socket
if PY2 and not isinstance(sock, socket.socket):
sock = socket.socket(sock.family, sock.type, sock.proto, sock)
self.socket = ssl_context.wrap_socket(sock, server_side=True)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
finally:
self.server_close()
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
# Python 2 still causes a socket.error after the earlier
# handling, so silence it here.
if isinstance(sys.exc_info()[1], _ConnectionError):
return
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
daemon_threads = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(
self,
host,
port,
app,
processes=40,
handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
if not can_fork:
raise ValueError("Your platform does not support forking.")
BaseWSGIServer.__init__(
self, host, port, app, handler, passthrough_errors, ssl_context, fd
)
self.max_children = processes
def make_server(
host=None,
port=None,
app=None,
threaded=False,
processes=1,
request_handler=None,
passthrough_errors=False,
ssl_context=None,
fd=None,
):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and multi process server.")
elif threaded:
return ThreadedWSGIServer(
host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
)
elif processes > 1:
return ForkingWSGIServer(
host,
port,
app,
processes,
request_handler,
passthrough_errors,
ssl_context,
fd=fd,
)
else:
return BaseWSGIServer(
host, port, app, request_handler, passthrough_errors, ssl_context, fd=fd
)
def is_running_from_reloader():
"""Checks if the application is running from within the Werkzeug
reloader subprocess.
.. versionadded:: 0.10
"""
return os.environ.get("WERKZEUG_RUN_MAIN") == "true"
def run_simple(
hostname,
port,
application,
use_reloader=False,
use_debugger=False,
use_evalex=True,
extra_files=None,
reloader_interval=1,
reloader_type="auto",
threaded=False,
processes=1,
request_handler=None,
static_files=None,
passthrough_errors=False,
ssl_context=None,
):
"""Start a WSGI application. Optional features include a reloader,
multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
.. versionadded:: 0.10
Improved the reloader and added support for changing the backend
through the `reloader_type` parameter. See :ref:`reloader`
for more information.
.. versionchanged:: 0.15
Bind to a Unix socket by passing a path that starts with
``unix://`` as the ``hostname``.
:param hostname: The host to bind to, for example ``'localhost'``.
If the value is a path that starts with ``unix://`` it will bind
to a Unix socket instead of a TCP socket.
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param reloader_type: the type of reloader to use. The default is
auto detection. Valid values are ``'stat'`` and
``'watchdog'``. See :ref:`reloader` for more
information.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a list or dict of paths for static files. This works
exactly like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an
:class:`ssl.SSLContext`, a tuple in the form
``(cert_file, pkey_file)``, the string ``'adhoc'`` if
the server should automatically create one, or ``None``
to disable SSL (which is the default).
"""
if not isinstance(port, int):
raise TypeError("port must be an integer")
if use_debugger:
from .debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from .middleware.shared_data import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def log_startup(sock):
display_hostname = hostname if hostname not in ("", "*") else "localhost"
quit_msg = "(Press CTRL+C to quit)"
if sock.family == af_unix:
_log("info", " * Running on %s %s", display_hostname, quit_msg)
else:
if ":" in display_hostname:
display_hostname = "[%s]" % display_hostname
port = sock.getsockname()[1]
_log(
"info",
" * Running on %s://%s:%d/ %s",
"http" if ssl_context is None else "https",
display_hostname,
port,
quit_msg,
)
def inner():
try:
fd = int(os.environ["WERKZEUG_SERVER_FD"])
except (LookupError, ValueError):
fd = None
srv = make_server(
hostname,
port,
application,
threaded,
processes,
request_handler,
passthrough_errors,
ssl_context,
fd=fd,
)
if fd is None:
log_startup(srv.socket)
srv.serve_forever()
if use_reloader:
# If we're not running already in the subprocess that is the
# reloader we want to open up a socket early to make sure the
# port is actually available.
if not is_running_from_reloader():
if port == 0 and not can_open_by_fd:
raise ValueError(
"Cannot bind to a random port with enabled "
"reloader if the Python interpreter does "
"not support socket opening by fd."
)
# Create and destroy a socket so that any exceptions are
# raised before we spawn a separate Python interpreter and
# lose this ability.
address_family = select_address_family(hostname, port)
server_address = get_sockaddr(hostname, port, address_family)
s = socket.socket(address_family, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(server_address)
if hasattr(s, "set_inheritable"):
s.set_inheritable(True)
# If we can open the socket by file descriptor, then we can just
# reuse this one and our socket will survive the restarts.
if can_open_by_fd:
os.environ["WERKZEUG_SERVER_FD"] = str(s.fileno())
s.listen(LISTEN_QUEUE)
log_startup(s)
else:
s.close()
if address_family == af_unix:
_log("info", "Unlinking %s" % server_address)
os.unlink(server_address)
# Do not use relative imports, otherwise "python -m werkzeug.serving"
# breaks.
from ._reloader import run_with_reloader
run_with_reloader(inner, extra_files, reloader_interval, reloader_type)
else:
inner()
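# A hedged sketch extending the module-level example (the app object and paths are
# assumptions for illustration): running threaded, serving a static directory, and
# enabling an ad-hoc TLS certificate.
#
#     run_simple("0.0.0.0", 8443, app,
#                threaded=True,
#                static_files={"/static": "./static"},
#                ssl_context="adhoc")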
def run_with_reloader(*args, **kwargs):
# People keep using undocumented APIs. Do not use this function
# please, we do not guarantee that it continues working.
from ._reloader import run_with_reloader
return run_with_reloader(*args, **kwargs)
def main():
"""A simple command-line interface for :py:func:`run_simple`."""
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from .utils import import_string
parser = optparse.OptionParser(usage="Usage: %prog [options] app_module:app_object")
parser.add_option(
"-b",
"--bind",
dest="address",
help="The hostname:port the app should listen on.",
)
parser.add_option(
"-d",
"--debug",
dest="use_debugger",
action="store_true",
default=False,
help="Use Werkzeug's debugger.",
)
parser.add_option(
"-r",
"--reload",
dest="use_reloader",
action="store_true",
default=False,
help="Reload Python process if modules change.",
)
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(":")
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
sys.stdout.write("No application supplied, or too much. See --help\n")
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or "127.0.0.1"),
port=int(port or 5000),
application=app,
use_reloader=options.use_reloader,
use_debugger=options.use_debugger,
)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"WERKZEUG_SERVER_FD",
"WERKZEUG_RUN_MAIN"
] |
[]
|
["WERKZEUG_SERVER_FD", "WERKZEUG_RUN_MAIN"]
|
python
| 2 | 0 | |
main.go
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"io"
"log"
"net/http"
"os"
"github.com/PuerkitoBio/goquery"
"github.com/gorilla/mux"
)
const (
STATIC_DIR = "/public/"
REL_STATIC_DIR = "./public/"
DEFAULT_PORT = "8080"
)
type publisher struct {
PublisherId string `json:"publisherId"`
PublisherName string
DisplayName string
Flags string
}
type file struct {
AssetType string
Source string
}
type version struct {
Version string
Flags string
LastUpdated string
Files []file
AssetURI string `json:"assetUri"`
FallbackAssetURI string `json:"fallbackAssetUri"`
}
type statistic struct {
StatisticName string
Value float32
}
type target struct {
Target string
TargetVersion string
}
type vss struct {
Publisher publisher
ExtensionID string `json:"extensionId"`
ExtensionName string
DisplayName string
Flags string
LastUpdated string
PublishedDate string
ReleaseDate string
ShortDescription string
Versions []version
Categories []string
Tags []string
Statistics []statistic
InstallationTargets []target
DeploymentType int
}
type item struct {
Publisher string
Extension string
Version string
Link string
DownloadLink string
APIDownloadLink string
Details vss
}
var marketplaceURL = "https://marketplace.visualstudio.com/items?itemName={{.Publisher}}.{{.Extension}}"
var marketplaceDownloadURL = "https://{{.Publisher}}.gallery.vsassets.io/_apis/public" +
"/gallery/publisher/{{.Publisher}}/extension/{{.Extension}}/{{.Version}}" +
"/assetbyname/Microsoft.VisualStudio.Services.VSIXPackage"
func (i item) templateLink(templateString string) string {
tmpl, err := template.New("item").Parse(templateString)
if err != nil {
panic(err)
}
var doc bytes.Buffer
err = tmpl.Execute(&doc, i)
if err != nil {
panic(err)
}
return doc.String()
}
func (i item) GetLink() string {
return i.templateLink(marketplaceURL)
}
func (i item) GetDownloadLink() string {
return i.templateLink(marketplaceDownloadURL)
}
func (i item) GetDetails() vss {
doc, err := goquery.NewDocument(i.GetLink())
if err != nil {
panic(err)
}
content := doc.Find(".vss-extension").First().Contents().Text()
var details vss
json.Unmarshal([]byte(content), &details)
return details
}
func getItem(r *http.Request) item {
vars := mux.Vars(r)
i := item{
Publisher: vars["publisher"],
Extension: vars["extension"],
Version: vars["version"],
}
i.Details = i.GetDetails()
i.Version = i.Details.Versions[0].Version
i.DownloadLink = i.GetDownloadLink()
i.Link = i.GetLink()
scheme := getScheme(r)
i.APIDownloadLink = scheme + "://" + r.Host + "/" + i.Publisher + "/" + i.Extension + "/" + i.Version + ".VSIX"
fmt.Println("Extension:", i.Publisher, i.Extension, i.Version)
fmt.Println("Link:", i.GetLink())
return i
}
func printMarketExtension(w http.ResponseWriter, r *http.Request) {
item := getItem(r)
js, err := json.Marshal(item)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
func downloadMarketExtension(w http.ResponseWriter, r *http.Request) {
item := getItem(r)
url := item.GetDownloadLink()
w.Header().Set("Content-Disposition", "attachment; filename="+item.Extension+"-"+item.Version+".VSIX")
w.Header().Set("Content-Type", "application/zip")
w.WriteHeader(http.StatusOK)
response, err := http.Get(url)
if err != nil {
fmt.Fprintf(w, "Error while downloading %s - %v", url, err)
return
}
defer response.Body.Close()
n, err := io.Copy(w, response.Body)
if err != nil {
fmt.Println("Error while downloading", url, "-", err)
return
}
fmt.Println(item, n, "bytes downloaded.")
}
func getScheme(r *http.Request) string {
scheme := r.URL.Scheme
if !r.URL.IsAbs() {
scheme = "http"
}
return scheme
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = DEFAULT_PORT
}
fs := http.FileServer(http.Dir(REL_STATIC_DIR))
r := mux.NewRouter()
r.HandleFunc("/{publisher}/{extension}", printMarketExtension).Methods("GET")
r.HandleFunc("/{publisher}/{extension}/{version:[0-9.]+}.VSIX", downloadMarketExtension).Methods("GET")
r.
PathPrefix(STATIC_DIR).
Handler(http.StripPrefix(STATIC_DIR, fs))
r.PathPrefix("").Handler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "" {
// your default page
r.URL.Path = REL_STATIC_DIR + "index.html"
}
fs.ServeHTTP(w, r)
})).Methods("GET")
log.Fatal(http.ListenAndServe(":"+port, r))
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
lib/promscrape/discovery/openstack/auth.go
|
package openstack
import (
"encoding/json"
"fmt"
"net/url"
"os"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)
// authResponse represents identity api response
//
// See https://docs.openstack.org/api-ref/identity/v3/#authentication-and-token-management
type authResponse struct {
Token struct {
ExpiresAt time.Time `json:"expires_at,omitempty"`
Catalog []catalogItem `json:"catalog,omitempty"`
}
}
type catalogItem struct {
Name string `json:"name"`
Type string `json:"type"`
Endpoints []endpoint `json:"endpoints"`
}
// openstack api endpoint
//
// See https://docs.openstack.org/api-ref/identity/v3/#list-endpoints
type endpoint struct {
RegionID string `json:"region_id"`
RegionName string `json:"region_name"`
URL string `json:"url"`
Name string `json:"name"`
Type string `json:"type"`
Interface string `json:"interface"`
}
// getComputeEndpointURL extracts compute endpoint url with given filters from keystone catalog
func getComputeEndpointURL(catalog []catalogItem, availability, region string) (*url.URL, error) {
for _, eps := range catalog {
if eps.Type != "compute" {
continue
}
for _, ep := range eps.Endpoints {
if ep.Interface == availability && (len(region) == 0 || region == ep.RegionID || region == ep.RegionName) {
return url.Parse(ep.URL)
}
}
}
return nil, fmt.Errorf("cannot find compute url for the given availability: %q, region: %q", availability, region)
}
// buildAuthRequestBody builds request for authentication
func buildAuthRequestBody(sdc *SDConfig) ([]byte, error) {
if sdc.Password == nil && len(sdc.ApplicationCredentialID) == 0 && len(sdc.ApplicationCredentialName) == 0 {
return nil, fmt.Errorf("password and application credentials are missing")
}
type domainReq struct {
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
}
type userReq struct {
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
Password *string `json:"password,omitempty"`
Passcode *string `json:"passcode,omitempty"`
Domain *domainReq `json:"domain,omitempty"`
}
type passwordReq struct {
User userReq `json:"user"`
}
type tokenReq struct {
ID string `json:"id"`
}
type applicationCredentialReq struct {
ID *string `json:"id,omitempty"`
Name *string `json:"name,omitempty"`
User *userReq `json:"user,omitempty"`
Secret *string `json:"secret,omitempty"`
}
type identityReq struct {
Methods []string `json:"methods"`
Password *passwordReq `json:"password,omitempty"`
Token *tokenReq `json:"token,omitempty"`
ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"`
}
type authReq struct {
Identity identityReq `json:"identity"`
Scope map[string]interface{} `json:"scope,omitempty"`
}
type request struct {
Auth authReq `json:"auth"`
}
// Populate the request structure based on the provided arguments. Create and return an error
// if insufficient or incompatible information is present.
var req request
if sdc.Password == nil {
// There are three kinds of possible application_credential requests
// 1. application_credential id + secret
// 2. application_credential name + secret + user_id
// 3. application_credential name + secret + username + domain_id / domain_name
if len(sdc.ApplicationCredentialID) > 0 {
if sdc.ApplicationCredentialSecret == nil {
return nil, fmt.Errorf("ApplicationCredentialSecret is empty")
}
req.Auth.Identity.Methods = []string{"application_credential"}
secret := sdc.ApplicationCredentialSecret.String()
req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{
ID: &sdc.ApplicationCredentialID,
Secret: &secret,
}
return json.Marshal(req)
}
if sdc.ApplicationCredentialSecret == nil {
return nil, fmt.Errorf("missing application_credential_secret when application_credential_name is set")
}
var userRequest *userReq
if len(sdc.UserID) > 0 {
// UserID could be used without the domain information
userRequest = &userReq{
ID: &sdc.UserID,
}
}
if userRequest == nil && len(sdc.Username) == 0 {
return nil, fmt.Errorf("username and userid is empty")
}
if userRequest == nil && len(sdc.DomainID) > 0 {
userRequest = &userReq{
Name: &sdc.Username,
Domain: &domainReq{ID: &sdc.DomainID},
}
}
if userRequest == nil && len(sdc.DomainName) > 0 {
userRequest = &userReq{
Name: &sdc.Username,
Domain: &domainReq{Name: &sdc.DomainName},
}
}
if userRequest == nil {
return nil, fmt.Errorf("domain_id and domain_name cannot be empty for application_credential_name auth")
}
req.Auth.Identity.Methods = []string{"application_credential"}
secret := sdc.ApplicationCredentialSecret.String()
req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{
Name: &sdc.ApplicationCredentialName,
User: userRequest,
Secret: &secret,
}
return json.Marshal(req)
}
// Password authentication.
req.Auth.Identity.Methods = append(req.Auth.Identity.Methods, "password")
if len(sdc.Username) == 0 && len(sdc.UserID) == 0 {
return nil, fmt.Errorf("username and userid is empty for username/password auth")
}
if len(sdc.Username) > 0 {
if len(sdc.UserID) > 0 {
return nil, fmt.Errorf("both username and userid is present")
}
if len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 {
return nil, fmt.Errorf(" domain_id or domain_name is missing for username/password auth: %s", sdc.Username)
}
if len(sdc.DomainID) > 0 {
if sdc.DomainName != "" {
return nil, fmt.Errorf("both domain_id and domain_name is present")
}
// Configure the request for Username and Password authentication with a DomainID.
if sdc.Password != nil {
password := sdc.Password.String()
req.Auth.Identity.Password = &passwordReq{
User: userReq{
Name: &sdc.Username,
Password: &password,
Domain: &domainReq{ID: &sdc.DomainID},
},
}
}
}
if len(sdc.DomainName) > 0 {
// Configure the request for Username and Password authentication with a DomainName.
if sdc.Password != nil {
password := sdc.Password.String()
req.Auth.Identity.Password = &passwordReq{
User: userReq{
Name: &sdc.Username,
Password: &password,
Domain: &domainReq{Name: &sdc.DomainName},
},
}
}
}
}
if len(sdc.UserID) > 0 {
if len(sdc.DomainID) > 0 {
return nil, fmt.Errorf("both user_id and domain_id is present")
}
if len(sdc.DomainName) > 0 {
return nil, fmt.Errorf("both user_id and domain_name is present")
}
// Configure the request for UserID and Password authentication.
if sdc.Password != nil {
password := sdc.Password.String()
req.Auth.Identity.Password = &passwordReq{
User: userReq{
ID: &sdc.UserID,
Password: &password,
},
}
}
}
// build scope for password auth
scope, err := buildScope(sdc)
if err != nil {
return nil, err
}
if len(scope) > 0 {
req.Auth.Scope = scope
}
return json.Marshal(req)
}
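// Illustrative only (the id and secret below are made up): for an SDConfig with
// only application_credential_id and application_credential_secret set,
// buildAuthRequestBody marshals a body of this shape:
//
//	{"auth":{"identity":{"methods":["application_credential"],
//	 "application_credential":{"id":"abc123","secret":"s3cr3t"}}}}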
// buildScope adds scope information into auth request
//
// See https://docs.openstack.org/api-ref/identity/v3/#password-authentication-with-unscoped-authorization
func buildScope(sdc *SDConfig) (map[string]interface{}, error) {
if len(sdc.ProjectName) == 0 && len(sdc.ProjectID) == 0 && len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 {
return nil, nil
}
if len(sdc.ProjectName) > 0 {
// ProjectName provided: either DomainID or DomainName must also be supplied.
// ProjectID may not be supplied.
if len(sdc.DomainID) == 0 && len(sdc.DomainName) == 0 {
return nil, fmt.Errorf("domain_id or domain_name must present")
}
if len(sdc.DomainID) > 0 {
return map[string]interface{}{
"project": map[string]interface{}{
"name": &sdc.ProjectName,
"domain": map[string]interface{}{"id": &sdc.DomainID},
},
}, nil
}
if len(sdc.DomainName) > 0 {
return map[string]interface{}{
"project": map[string]interface{}{
"name": &sdc.ProjectName,
"domain": map[string]interface{}{"name": &sdc.DomainName},
},
}, nil
}
} else if len(sdc.ProjectID) > 0 {
return map[string]interface{}{
"project": map[string]interface{}{
"id": &sdc.ProjectID,
},
}, nil
} else if len(sdc.DomainID) > 0 {
if len(sdc.DomainName) > 0 {
return nil, fmt.Errorf("both domain_id and domain_name present")
}
return map[string]interface{}{
"domain": map[string]interface{}{
"id": &sdc.DomainID,
},
}, nil
} else if len(sdc.DomainName) > 0 {
return map[string]interface{}{
"domain": map[string]interface{}{
"name": &sdc.DomainName,
},
}, nil
}
return nil, nil
}
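// Illustrative only (the names are made up): with ProjectName "demo" and DomainID
// "default" set, buildScope returns a map that marshals to
//
//	{"project":{"name":"demo","domain":{"id":"default"}}}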
// readCredentialsFromEnv obtains serviceDiscoveryConfig from env variables for openstack
func readCredentialsFromEnv() SDConfig {
authURL := os.Getenv("OS_AUTH_URL")
username := os.Getenv("OS_USERNAME")
userID := os.Getenv("OS_USERID")
password := os.Getenv("OS_PASSWORD")
tenantID := os.Getenv("OS_TENANT_ID")
tenantName := os.Getenv("OS_TENANT_NAME")
domainID := os.Getenv("OS_DOMAIN_ID")
domainName := os.Getenv("OS_DOMAIN_NAME")
applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID")
applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME")
applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET")
// If OS_PROJECT_ID is set, overwrite tenantID with the value.
if v := os.Getenv("OS_PROJECT_ID"); v != "" {
tenantID = v
}
// If OS_PROJECT_NAME is set, overwrite tenantName with the value.
if v := os.Getenv("OS_PROJECT_NAME"); v != "" {
tenantName = v
}
return SDConfig{
IdentityEndpoint: authURL,
Username: username,
UserID: userID,
Password: promauth.NewSecret(password),
ProjectName: tenantName,
ProjectID: tenantID,
DomainName: domainName,
DomainID: domainID,
ApplicationCredentialName: applicationCredentialName,
ApplicationCredentialID: applicationCredentialID,
ApplicationCredentialSecret: promauth.NewSecret(applicationCredentialSecret),
}
}
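
As a minimal standalone sketch (nothing beyond the standard library is assumed; the values are made up), the OS_PROJECT_ID-over-OS_TENANT_ID precedence applied above looks like this:

// precedence_sketch.go - standalone illustration of the env var precedence above.
package main

import (
	"fmt"
	"os"
)

func main() {
	os.Setenv("OS_TENANT_ID", "tenant-123")   // legacy variable
	os.Setenv("OS_PROJECT_ID", "project-456") // preferred variable

	tenantID := os.Getenv("OS_TENANT_ID")
	// OS_PROJECT_ID, when set, overwrites the tenant ID.
	if v := os.Getenv("OS_PROJECT_ID"); v != "" {
		tenantID = v
	}
	fmt.Println(tenantID) // project-456
}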
|
[
"\"OS_AUTH_URL\"",
"\"OS_USERNAME\"",
"\"OS_USERID\"",
"\"OS_PASSWORD\"",
"\"OS_TENANT_ID\"",
"\"OS_TENANT_NAME\"",
"\"OS_DOMAIN_ID\"",
"\"OS_DOMAIN_NAME\"",
"\"OS_APPLICATION_CREDENTIAL_ID\"",
"\"OS_APPLICATION_CREDENTIAL_NAME\"",
"\"OS_APPLICATION_CREDENTIAL_SECRET\"",
"\"OS_PROJECT_ID\"",
"\"OS_PROJECT_NAME\""
] |
[] |
[
"OS_PROJECT_NAME",
"OS_PROJECT_ID",
"OS_AUTH_URL",
"OS_PASSWORD",
"OS_TENANT_ID",
"OS_USERNAME",
"OS_TENANT_NAME",
"OS_DOMAIN_ID",
"OS_APPLICATION_CREDENTIAL_NAME",
"OS_DOMAIN_NAME",
"OS_USERID",
"OS_APPLICATION_CREDENTIAL_SECRET",
"OS_APPLICATION_CREDENTIAL_ID"
] |
[]
|
["OS_PROJECT_NAME", "OS_PROJECT_ID", "OS_AUTH_URL", "OS_PASSWORD", "OS_TENANT_ID", "OS_USERNAME", "OS_TENANT_NAME", "OS_DOMAIN_ID", "OS_APPLICATION_CREDENTIAL_NAME", "OS_DOMAIN_NAME", "OS_USERID", "OS_APPLICATION_CREDENTIAL_SECRET", "OS_APPLICATION_CREDENTIAL_ID"]
|
go
| 13 | 0 | |
ssh/client_auth_test.go
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssh
import (
"bytes"
"crypto/rand"
"errors"
"fmt"
"io"
"log"
"net"
"os"
"runtime"
"strings"
"testing"
)
type keyboardInteractive map[string]string
func (cr keyboardInteractive) Challenge(user string, instruction string, questions []string, echos []bool) ([]string, error) {
var answers []string
for _, q := range questions {
answers = append(answers, cr[q])
}
return answers, nil
}
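
The map-backed Challenge above answers each question by simple lookup. A hedged sketch of how a caller might exercise it (the helper name is hypothetical and not part of the test suite; it is assumed to compile alongside the code above):

// sketchKeyboardInteractiveLookup is an illustrative helper, not used by the tests.
func sketchKeyboardInteractiveLookup() {
	answers := keyboardInteractive(map[string]string{
		"question1": "answer1",
		"question2": "answer2",
	})
	got, _ := answers.Challenge("user", "instruction", []string{"question1", "question2"}, []bool{true, true})
	fmt.Println(got) // [answer1 answer2]
}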
// reused internally by tests
var clientPassword = "tiger"
// tryAuth runs a handshake with a given client config against an SSH server
// configured with serverConfig, and returns the client-side error.
func tryAuth(t *testing.T, config *ClientConfig) error {
err, _ := tryAuthBothSides(t, config, nil)
return err
}
// tryAuthWithGSSAPIWithMICConfig runs a handshake with a given client config and
// GSSAPIWithMICConfig against an SSH server configured with serverConfig, and returns the client-side error.
func tryAuthWithGSSAPIWithMICConfig(t *testing.T, clientConfig *ClientConfig, gssAPIWithMICConfig *GSSAPIWithMICConfig) error {
err, _ := tryAuthBothSides(t, clientConfig, gssAPIWithMICConfig)
return err
}
// tryAuthBothSides runs the handshake and returns the resulting errors from both sides of the connection.
func tryAuthBothSides(t *testing.T, config *ClientConfig, gssAPIWithMICConfig *GSSAPIWithMICConfig) (clientError error, serverAuthErrors []error) {
c1, c2, err := netPipe()
if err != nil {
t.Fatalf("netPipe: %v", err)
}
defer c1.Close()
defer c2.Close()
certChecker := CertChecker{
IsUserAuthority: func(k PublicKey) bool {
return bytes.Equal(k.Marshal(), testPublicKeys["ecdsa"].Marshal())
},
UserKeyFallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
if conn.User() == "testuser" && bytes.Equal(key.Marshal(), testPublicKeys["rsa"].Marshal()) {
return nil, nil
}
return nil, fmt.Errorf("pubkey for %q not acceptable", conn.User())
},
IsRevoked: func(c *Certificate) bool {
return c.Serial == 666
},
}
serverConfig := &ServerConfig{
PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) {
if conn.User() == "testuser" && string(pass) == clientPassword {
return nil, nil
}
return nil, errors.New("password auth failed")
},
PublicKeyCallback: certChecker.Authenticate,
KeyboardInteractiveCallback: func(conn ConnMetadata, challenge KeyboardInteractiveChallenge) (*Permissions, error) {
ans, err := challenge("user",
"instruction",
[]string{"question1", "question2"},
[]bool{true, true})
if err != nil {
return nil, err
}
ok := conn.User() == "testuser" && ans[0] == "answer1" && ans[1] == "answer2"
if ok {
challenge("user", "motd", nil, nil)
return nil, nil
}
return nil, errors.New("keyboard-interactive failed")
},
GSSAPIWithMICConfig: gssAPIWithMICConfig,
}
serverConfig.AddHostKey(testSigners["rsa"])
serverConfig.AuthLogCallback = func(conn ConnMetadata, method string, err error) {
serverAuthErrors = append(serverAuthErrors, err)
}
go newServer(c1, serverConfig)
_, _, _, err = NewClientConn(c2, "", config)
return err, serverAuthErrors
}
func TestClientAuthPublicKey(t *testing.T) {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(testSigners["rsa"]),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("unable to dial remote side: %s", err)
}
}
func TestAuthMethodPassword(t *testing.T) {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
Password(clientPassword),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("unable to dial remote side: %s", err)
}
}
func TestAuthMethodFallback(t *testing.T) {
var passwordCalled bool
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(testSigners["rsa"]),
PasswordCallback(
func() (string, error) {
passwordCalled = true
return "WRONG", nil
}),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("unable to dial remote side: %s", err)
}
if passwordCalled {
t.Errorf("password auth tried before public-key auth.")
}
}
func TestAuthMethodWrongPassword(t *testing.T) {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
Password("wrong"),
PublicKeys(testSigners["rsa"]),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("unable to dial remote side: %s", err)
}
}
func TestAuthMethodKeyboardInteractive(t *testing.T) {
answers := keyboardInteractive(map[string]string{
"question1": "answer1",
"question2": "answer2",
})
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
KeyboardInteractive(answers.Challenge),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("unable to dial remote side: %s", err)
}
}
func TestAuthMethodWrongKeyboardInteractive(t *testing.T) {
answers := keyboardInteractive(map[string]string{
"question1": "answer1",
"question2": "WRONG",
})
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
KeyboardInteractive(answers.Challenge),
},
}
if err := tryAuth(t, config); err == nil {
t.Fatalf("wrong answers should not have authenticated with KeyboardInteractive")
}
}
// the mock server will only authenticate ssh-rsa keys
func TestAuthMethodInvalidPublicKey(t *testing.T) {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(testSigners["dsa"]),
},
}
if err := tryAuth(t, config); err == nil {
t.Fatalf("dsa private key should not have authenticated with rsa public key")
}
}
// the client should authenticate with the second key
func TestAuthMethodRSAandDSA(t *testing.T) {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(testSigners["dsa"], testSigners["rsa"]),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("client could not authenticate with rsa key: %v", err)
}
}
type invalidAlgSigner struct {
Signer
}
func (s *invalidAlgSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
sig, err := s.Signer.Sign(rand, data)
if sig != nil {
sig.Format = "invalid"
}
return sig, err
}
func TestMethodInvalidAlgorithm(t *testing.T) {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(&invalidAlgSigner{testSigners["rsa"]}),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
err, serverErrors := tryAuthBothSides(t, config, nil)
if err == nil {
t.Fatalf("login succeeded")
}
found := false
want := "algorithm \"invalid\""
var errStrings []string
for _, err := range serverErrors {
found = found || (err != nil && strings.Contains(err.Error(), want))
errStrings = append(errStrings, err.Error())
}
if !found {
t.Errorf("server got error %q, want substring %q", errStrings, want)
}
}
func TestClientHMAC(t *testing.T) {
for _, mac := range supportedMACs {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(testSigners["rsa"]),
},
Config: Config{
MACs: []string{mac},
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("client could not authenticate with mac algo %s: %v", mac, err)
}
}
}
// issue 4285.
func TestClientUnsupportedCipher(t *testing.T) {
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(),
},
Config: Config{
Ciphers: []string{"aes128-cbc"}, // not currently supported
},
}
if err := tryAuth(t, config); err == nil {
t.Errorf("expected no ciphers in common")
}
}
func TestClientUnsupportedKex(t *testing.T) {
if os.Getenv("GO_BUILDER_NAME") != "" {
t.Skip("skipping known-flaky test on the Go build dashboard; see golang.org/issue/15198")
}
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(),
},
Config: Config{
KeyExchanges: []string{"non-existent-kex"},
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err == nil || !strings.Contains(err.Error(), "common algorithm") {
t.Errorf("got %v, expected 'common algorithm'", err)
}
}
func TestClientLoginCert(t *testing.T) {
cert := &Certificate{
Key: testPublicKeys["rsa"],
ValidBefore: CertTimeInfinity,
CertType: UserCert,
}
cert.SignCert(rand.Reader, testSigners["ecdsa"])
certSigner, err := NewCertSigner(cert, testSigners["rsa"])
if err != nil {
t.Fatalf("NewCertSigner: %v", err)
}
clientConfig := &ClientConfig{
User: "user",
HostKeyCallback: InsecureIgnoreHostKey(),
}
clientConfig.Auth = append(clientConfig.Auth, PublicKeys(certSigner))
// should succeed
if err := tryAuth(t, clientConfig); err != nil {
t.Errorf("cert login failed: %v", err)
}
// corrupted signature
cert.Signature.Blob[0]++
if err := tryAuth(t, clientConfig); err == nil {
t.Errorf("cert login passed with corrupted sig")
}
// revoked
cert.Serial = 666
cert.SignCert(rand.Reader, testSigners["ecdsa"])
if err := tryAuth(t, clientConfig); err == nil {
t.Errorf("revoked cert login succeeded")
}
cert.Serial = 1
// sign with wrong key
cert.SignCert(rand.Reader, testSigners["dsa"])
if err := tryAuth(t, clientConfig); err == nil {
t.Errorf("cert login passed with non-authoritative key")
}
// host cert
cert.CertType = HostCert
cert.SignCert(rand.Reader, testSigners["ecdsa"])
if err := tryAuth(t, clientConfig); err == nil {
t.Errorf("cert login passed with wrong type")
}
cert.CertType = UserCert
// principal specified
cert.ValidPrincipals = []string{"user"}
cert.SignCert(rand.Reader, testSigners["ecdsa"])
if err := tryAuth(t, clientConfig); err != nil {
t.Errorf("cert login failed: %v", err)
}
// wrong principal specified
cert.ValidPrincipals = []string{"fred"}
cert.SignCert(rand.Reader, testSigners["ecdsa"])
if err := tryAuth(t, clientConfig); err == nil {
t.Errorf("cert login passed with wrong principal")
}
cert.ValidPrincipals = nil
// added critical option
cert.CriticalOptions = map[string]string{"root-access": "yes"}
cert.SignCert(rand.Reader, testSigners["ecdsa"])
if err := tryAuth(t, clientConfig); err == nil {
t.Errorf("cert login passed with unrecognized critical option")
}
// allowed source address
cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42/24,::42/120"}
cert.SignCert(rand.Reader, testSigners["ecdsa"])
if err := tryAuth(t, clientConfig); err != nil {
t.Errorf("cert login with source-address failed: %v", err)
}
// disallowed source address
cert.CriticalOptions = map[string]string{"source-address": "127.0.0.42,::42"}
cert.SignCert(rand.Reader, testSigners["ecdsa"])
if err := tryAuth(t, clientConfig); err == nil {
t.Errorf("cert login with source-address succeeded")
}
}
func testPermissionsPassing(withPermissions bool, t *testing.T) {
serverConfig := &ServerConfig{
PublicKeyCallback: func(conn ConnMetadata, key PublicKey) (*Permissions, error) {
if conn.User() == "nopermissions" {
return nil, nil
}
return &Permissions{}, nil
},
}
serverConfig.AddHostKey(testSigners["rsa"])
clientConfig := &ClientConfig{
Auth: []AuthMethod{
PublicKeys(testSigners["rsa"]),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if withPermissions {
clientConfig.User = "permissions"
} else {
clientConfig.User = "nopermissions"
}
c1, c2, err := netPipe()
if err != nil {
t.Fatalf("netPipe: %v", err)
}
defer c1.Close()
defer c2.Close()
go NewClientConn(c2, "", clientConfig)
serverConn, err := newServer(c1, serverConfig)
if err != nil {
t.Fatal(err)
}
if p := serverConn.Permissions; (p != nil) != withPermissions {
t.Fatalf("withPermissions is %t, but Permissions object is %#v", withPermissions, p)
}
}
func TestPermissionsPassing(t *testing.T) {
testPermissionsPassing(true, t)
}
func TestNoPermissionsPassing(t *testing.T) {
testPermissionsPassing(false, t)
}
func TestRetryableAuth(t *testing.T) {
n := 0
passwords := []string{"WRONG1", "WRONG2"}
config := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
RetryableAuthMethod(PasswordCallback(func() (string, error) {
p := passwords[n]
n++
return p, nil
}), 2),
PublicKeys(testSigners["rsa"]),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, config); err != nil {
t.Fatalf("unable to dial remote side: %s", err)
}
if n != 2 {
t.Fatalf("Did not try all passwords")
}
}
func ExampleRetryableAuthMethod() {
user := "testuser"
NumberOfPrompts := 3
// Normally this would be a callback that prompts the user to answer the
// provided questions
Cb := func(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
return []string{"answer1", "answer2"}, nil
}
config := &ClientConfig{
HostKeyCallback: InsecureIgnoreHostKey(),
User: user,
Auth: []AuthMethod{
RetryableAuthMethod(KeyboardInteractiveChallenge(Cb), NumberOfPrompts),
},
}
host := "mysshserver"
netConn, err := net.Dial("tcp", host)
if err != nil {
log.Fatal(err)
}
sshConn, _, _, err := NewClientConn(netConn, host, config)
if err != nil {
log.Fatal(err)
}
_ = sshConn
}
// Test if username is received on server side when NoClientAuth is used
func TestClientAuthNone(t *testing.T) {
user := "testuser"
serverConfig := &ServerConfig{
NoClientAuth: true,
}
serverConfig.AddHostKey(testSigners["rsa"])
clientConfig := &ClientConfig{
User: user,
HostKeyCallback: InsecureIgnoreHostKey(),
}
c1, c2, err := netPipe()
if err != nil {
t.Fatalf("netPipe: %v", err)
}
defer c1.Close()
defer c2.Close()
go NewClientConn(c2, "", clientConfig)
serverConn, err := newServer(c1, serverConfig)
if err != nil {
t.Fatalf("newServer: %v", err)
}
if serverConn.User() != user {
t.Fatalf("server: got %q, want %q", serverConn.User(), user)
}
}
// Test if authentication attempts are limited on server when MaxAuthTries is set
func TestClientAuthMaxAuthTries(t *testing.T) {
user := "testuser"
serverConfig := &ServerConfig{
MaxAuthTries: 2,
PasswordCallback: func(conn ConnMetadata, pass []byte) (*Permissions, error) {
if conn.User() == "testuser" && string(pass) == "right" {
return nil, nil
}
return nil, errors.New("password auth failed")
},
}
serverConfig.AddHostKey(testSigners["rsa"])
expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{
Reason: 2,
Message: "too many authentication failures",
})
for tries := 2; tries < 4; tries++ {
n := tries
clientConfig := &ClientConfig{
User: user,
Auth: []AuthMethod{
RetryableAuthMethod(PasswordCallback(func() (string, error) {
n--
if n == 0 {
return "right", nil
}
return "wrong", nil
}), tries),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
c1, c2, err := netPipe()
if err != nil {
t.Fatalf("netPipe: %v", err)
}
defer c1.Close()
defer c2.Close()
go newServer(c1, serverConfig)
_, _, _, err = NewClientConn(c2, "", clientConfig)
if tries > 2 {
if err == nil {
t.Fatalf("client: got no error, want %s", expectedErr)
} else if err.Error() != expectedErr.Error() {
t.Fatalf("client: got %s, want %s", err, expectedErr)
}
} else {
if err != nil {
t.Fatalf("client: got %s, want no error", err)
}
}
}
}
// Test if authentication attempts are correctly limited on server
// when more public keys are provided than MaxAuthTries
func TestClientAuthMaxAuthTriesPublicKey(t *testing.T) {
signers := []Signer{}
for i := 0; i < 6; i++ {
signers = append(signers, testSigners["dsa"])
}
validConfig := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(append([]Signer{testSigners["rsa"]}, signers...)...),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, validConfig); err != nil {
t.Fatalf("unable to dial remote side: %s", err)
}
expectedErr := fmt.Errorf("ssh: handshake failed: %v", &disconnectMsg{
Reason: 2,
Message: "too many authentication failures",
})
invalidConfig := &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
PublicKeys(append(signers, testSigners["rsa"])...),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
if err := tryAuth(t, invalidConfig); err == nil {
t.Fatalf("client: got no error, want %s", expectedErr)
} else if err.Error() != expectedErr.Error() {
// On Windows we can see a WSAECONNABORTED error
// if the client writes another authentication request
// before the client goroutine reads the disconnection
// message. See issue 50805.
if runtime.GOOS == "windows" && strings.Contains(err.Error(), "wsarecv: An established connection was aborted") {
// OK.
} else {
t.Fatalf("client: got %s, want %s", err, expectedErr)
}
}
}
// Test whether authentication errors are being properly logged if all
// authentication methods have been exhausted
func TestClientAuthErrorList(t *testing.T) {
publicKeyErr := errors.New("This is an error from PublicKeyCallback")
clientConfig := &ClientConfig{
Auth: []AuthMethod{
PublicKeys(testSigners["rsa"]),
},
HostKeyCallback: InsecureIgnoreHostKey(),
}
serverConfig := &ServerConfig{
PublicKeyCallback: func(_ ConnMetadata, _ PublicKey) (*Permissions, error) {
return nil, publicKeyErr
},
}
serverConfig.AddHostKey(testSigners["rsa"])
c1, c2, err := netPipe()
if err != nil {
t.Fatalf("netPipe: %v", err)
}
defer c1.Close()
defer c2.Close()
go NewClientConn(c2, "", clientConfig)
_, err = newServer(c1, serverConfig)
if err == nil {
t.Fatal("newServer: got nil, expected errors")
}
authErrs, ok := err.(*ServerAuthError)
if !ok {
t.Fatalf("errors: got %T, want *ssh.ServerAuthError", err)
}
for i, e := range authErrs.Errors {
switch i {
case 0:
if e != ErrNoAuth {
t.Fatalf("errors: got error %v, want ErrNoAuth", e)
}
case 1:
if e != publicKeyErr {
t.Fatalf("errors: got %v, want %v", e, publicKeyErr)
}
default:
t.Fatalf("errors: got %v, expected 2 errors", authErrs.Errors)
}
}
}
func TestAuthMethodGSSAPIWithMIC(t *testing.T) {
type testcase struct {
config *ClientConfig
gssConfig *GSSAPIWithMICConfig
clientWantErr string
serverWantErr string
}
testcases := []*testcase{
{
config: &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
GSSAPIWithMICAuthMethod(
&FakeClient{
exchanges: []*exchange{
{
outToken: "client-valid-token-1",
},
{
expectedToken: "server-valid-token-1",
},
},
mic: []byte("valid-mic"),
maxRound: 2,
}, "testtarget",
),
},
HostKeyCallback: InsecureIgnoreHostKey(),
},
gssConfig: &GSSAPIWithMICConfig{
AllowLogin: func(conn ConnMetadata, srcName string) (*Permissions, error) {
if srcName != conn.User()+"@DOMAIN" {
return nil, fmt.Errorf("srcName is %s, conn user is %s", srcName, conn.User())
}
return nil, nil
},
Server: &FakeServer{
exchanges: []*exchange{
{
outToken: "server-valid-token-1",
expectedToken: "client-valid-token-1",
},
},
maxRound: 1,
expectedMIC: []byte("valid-mic"),
srcName: "testuser@DOMAIN",
},
},
},
{
config: &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
GSSAPIWithMICAuthMethod(
&FakeClient{
exchanges: []*exchange{
{
outToken: "client-valid-token-1",
},
{
expectedToken: "server-valid-token-1",
},
},
mic: []byte("valid-mic"),
maxRound: 2,
}, "testtarget",
),
},
HostKeyCallback: InsecureIgnoreHostKey(),
},
gssConfig: &GSSAPIWithMICConfig{
AllowLogin: func(conn ConnMetadata, srcName string) (*Permissions, error) {
return nil, fmt.Errorf("user is not allowed to login")
},
Server: &FakeServer{
exchanges: []*exchange{
{
outToken: "server-valid-token-1",
expectedToken: "client-valid-token-1",
},
},
maxRound: 1,
expectedMIC: []byte("valid-mic"),
srcName: "testuser@DOMAIN",
},
},
serverWantErr: "user is not allowed to login",
clientWantErr: "ssh: handshake failed: ssh: unable to authenticate",
},
{
config: &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
GSSAPIWithMICAuthMethod(
&FakeClient{
exchanges: []*exchange{
{
outToken: "client-valid-token-1",
},
{
expectedToken: "server-valid-token-1",
},
},
mic: []byte("valid-mic"),
maxRound: 2,
}, "testtarget",
),
},
HostKeyCallback: InsecureIgnoreHostKey(),
},
gssConfig: &GSSAPIWithMICConfig{
AllowLogin: func(conn ConnMetadata, srcName string) (*Permissions, error) {
if srcName != conn.User() {
return nil, fmt.Errorf("srcName is %s, conn user is %s", srcName, conn.User())
}
return nil, nil
},
Server: &FakeServer{
exchanges: []*exchange{
{
outToken: "server-invalid-token-1",
expectedToken: "client-valid-token-1",
},
},
maxRound: 1,
expectedMIC: []byte("valid-mic"),
srcName: "testuser@DOMAIN",
},
},
clientWantErr: "ssh: handshake failed: got \"server-invalid-token-1\", want token \"server-valid-token-1\"",
},
{
config: &ClientConfig{
User: "testuser",
Auth: []AuthMethod{
GSSAPIWithMICAuthMethod(
&FakeClient{
exchanges: []*exchange{
{
outToken: "client-valid-token-1",
},
{
expectedToken: "server-valid-token-1",
},
},
mic: []byte("invalid-mic"),
maxRound: 2,
}, "testtarget",
),
},
HostKeyCallback: InsecureIgnoreHostKey(),
},
gssConfig: &GSSAPIWithMICConfig{
AllowLogin: func(conn ConnMetadata, srcName string) (*Permissions, error) {
if srcName != conn.User() {
return nil, fmt.Errorf("srcName is %s, conn user is %s", srcName, conn.User())
}
return nil, nil
},
Server: &FakeServer{
exchanges: []*exchange{
{
outToken: "server-valid-token-1",
expectedToken: "client-valid-token-1",
},
},
maxRound: 1,
expectedMIC: []byte("valid-mic"),
srcName: "testuser@DOMAIN",
},
},
serverWantErr: "got MICToken \"invalid-mic\", want \"valid-mic\"",
clientWantErr: "ssh: handshake failed: ssh: unable to authenticate",
},
}
for i, c := range testcases {
clientErr, serverErrs := tryAuthBothSides(t, c.config, c.gssConfig)
if (c.clientWantErr == "") != (clientErr == nil) {
t.Fatalf("client got %v, want %s, case %d", clientErr, c.clientWantErr, i)
}
if (c.serverWantErr == "") != (len(serverErrs) == 2 && serverErrs[1] == nil || len(serverErrs) == 1) {
t.Fatalf("server got err %v, want %s", serverErrs, c.serverWantErr)
}
if c.clientWantErr != "" {
if clientErr != nil && !strings.Contains(clientErr.Error(), c.clientWantErr) {
t.Fatalf("client got %v, want %s, case %d", clientErr, c.clientWantErr, i)
}
}
found := false
var errStrings []string
if c.serverWantErr != "" {
for _, err := range serverErrs {
found = found || (err != nil && strings.Contains(err.Error(), c.serverWantErr))
errStrings = append(errStrings, err.Error())
}
if !found {
t.Errorf("server got error %q, want substring %q, case %d", errStrings, c.serverWantErr, i)
}
}
}
}
|
[
"\"GO_BUILDER_NAME\""
] |
[] |
[
"GO_BUILDER_NAME"
] |
[]
|
["GO_BUILDER_NAME"]
|
go
| 1 | 0 | |
Data Structures and Algorithms/Graphs/02. Topological Sort.py
|
from collections import defaultdict
class Graph:
def __init__(self, number_of_vertices):
self.graph = defaultdict(list)
self.number_of_vertices = number_of_vertices
def addEdge(self, vertex, edge):
self.graph[vertex].append(edge)
    def topologicalSortUtil(self, vertex, visited, stack):
        # Depth-first search: a vertex is appended to the stack only after
        # every vertex reachable from it has been appended.
        visited.append(vertex)
        for i in self.graph[vertex]:
            if i not in visited:
                self.topologicalSortUtil(i, visited, stack)
        stack.append(vertex)
    def topologicalSort(self):
        visited = []
        stack = []
        for k in list(self.graph):
            if k not in visited:
                self.topologicalSortUtil(k, visited, stack)
        # Vertices finish in reverse topological order, so reverse the stack
        # before printing to obtain a valid topological ordering.
        print(stack[::-1])
customGraph = Graph(8)
customGraph.addEdge("A", "C")
customGraph.addEdge("C", "E")
customGraph.addEdge("E", "H")
customGraph.addEdge("E", "F")
customGraph.addEdge("F", "G")
customGraph.addEdge("B", "D")
customGraph.addEdge("B", "C")
customGraph.addEdge("D", "F")
customGraph.topologicalSort()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
apache/django.wsgi
|
import os
import sys
activate_this = 'C:/virtualenvs/forum/Scripts/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
os.environ['DJANGO_SETTINGS_MODULE'] = 'forum.settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'grietapp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
jvmgo/options/options.go
|
package options
import (
"os"
"path/filepath"
"strings"
)
var (
VerboseClass bool
ThreadStackSize uint
AbsJavaHome string // /path/to/jre
AbsJreLib string // /path/to/jre/lib
)
func InitOptions(verboseClass bool, xss int, useJavaHome bool) {
VerboseClass = verboseClass
ThreadStackSize = uint(xss)
initJavaHome(useJavaHome)
}
func initJavaHome(useOsEnv bool) {
jh := "./jre"
if useOsEnv {
jh = os.Getenv("JAVA_HOME")
if jh == "" {
panic("$JAVA_HOME not set!")
}
}
if absJh, err := filepath.Abs(jh); err == nil {
if strings.Contains(absJh, "jre") {
AbsJavaHome = absJh
AbsJreLib = filepath.Join(absJh, "lib")
} else {
AbsJavaHome = filepath.Join(absJh, "jre")
AbsJreLib = filepath.Join(absJh, "jre", "lib")
}
} else {
panic(err)
}
}
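
A standalone sketch (not part of the jvmgo package; the path is made up) of the same JAVA_HOME-to-jre derivation performed by initJavaHome:

// jre_path_sketch.go - standalone illustration of the path derivation above.
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func deriveJre(javaHome string) (absJavaHome, absJreLib string) {
	abs, err := filepath.Abs(javaHome)
	if err != nil {
		panic(err)
	}
	// If the path already points inside a jre, use it directly; otherwise append "jre".
	if strings.Contains(abs, "jre") {
		return abs, filepath.Join(abs, "lib")
	}
	return filepath.Join(abs, "jre"), filepath.Join(abs, "jre", "lib")
}

func main() {
	home, lib := deriveJre("/usr/lib/jvm/java-8-openjdk")
	fmt.Println(home) // /usr/lib/jvm/java-8-openjdk/jre (on a Unix-like system)
	fmt.Println(lib)  // /usr/lib/jvm/java-8-openjdk/jre/lib
}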
|
[
"\"JAVA_HOME\""
] |
[] |
[
"JAVA_HOME"
] |
[]
|
["JAVA_HOME"]
|
go
| 1 | 0 | |
cmd/alertmanager/main.go
|
// Copyright 2015 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
promlogflag "github.com/prometheus/common/promlog/flag"
"github.com/prometheus/common/route"
"github.com/prometheus/common/version"
"gopkg.in/alecthomas/kingpin.v2"
"github.com/prometheus/alertmanager/api"
"github.com/prometheus/alertmanager/cluster"
"github.com/prometheus/alertmanager/config"
"github.com/prometheus/alertmanager/dispatch"
"github.com/prometheus/alertmanager/inhibit"
"github.com/prometheus/alertmanager/nflog"
"github.com/prometheus/alertmanager/notify"
"github.com/prometheus/alertmanager/notify/email"
"github.com/prometheus/alertmanager/notify/hipchat"
"github.com/prometheus/alertmanager/notify/opsgenie"
"github.com/prometheus/alertmanager/notify/pagerduty"
"github.com/prometheus/alertmanager/notify/pushover"
"github.com/prometheus/alertmanager/notify/slack"
"github.com/prometheus/alertmanager/notify/victorops"
"github.com/prometheus/alertmanager/notify/webhook"
"github.com/prometheus/alertmanager/notify/wechat"
"github.com/prometheus/alertmanager/provider/mem"
"github.com/prometheus/alertmanager/silence"
"github.com/prometheus/alertmanager/template"
"github.com/prometheus/alertmanager/types"
"github.com/prometheus/alertmanager/ui"
)
var (
requestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "alertmanager_http_request_duration_seconds",
Help: "Histogram of latencies for HTTP requests.",
Buckets: []float64{.05, 0.1, .25, .5, .75, 1, 2, 5, 20, 60},
},
[]string{"handler", "method"},
)
responseSize = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "alertmanager_http_response_size_bytes",
Help: "Histogram of response size for HTTP requests.",
Buckets: prometheus.ExponentialBuckets(100, 10, 7),
},
[]string{"handler", "method"},
)
clusterEnabled = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "alertmanager_cluster_enabled",
Help: "Indicates whether the clustering is enabled or not.",
},
)
promlogConfig = promlog.Config{}
)
func init() {
prometheus.MustRegister(requestDuration)
prometheus.MustRegister(responseSize)
prometheus.MustRegister(clusterEnabled)
prometheus.MustRegister(version.NewCollector("alertmanager"))
}
func instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
handlerLabel := prometheus.Labels{"handler": handlerName}
return promhttp.InstrumentHandlerDuration(
requestDuration.MustCurryWith(handlerLabel),
promhttp.InstrumentHandlerResponseSize(
responseSize.MustCurryWith(handlerLabel),
handler,
),
)
}
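
As a usage sketch (assumed to compile alongside the code above; the helper name, path, and handler label are illustrative), a handler can be wrapped so its latency and response size are recorded under a label:

// exampleInstrumentedMux is an illustrative helper, not part of the original file.
func exampleInstrumentedMux() *http.ServeMux {
	mux := http.NewServeMux()
	// Requests to this route are observed under the "healthy" handler label.
	mux.HandleFunc("/-/healthy", instrumentHandler("healthy", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	return mux
}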
const defaultClusterAddr = "0.0.0.0:9094"
// buildReceiverIntegrations builds a list of integration notifiers off of a
// receiver config.
func buildReceiverIntegrations(nc *config.Receiver, tmpl *template.Template, logger log.Logger) ([]notify.Integration, error) {
var (
errs types.MultiError
integrations []notify.Integration
add = func(name string, i int, rs notify.ResolvedSender, f func(l log.Logger) (notify.Notifier, error)) {
n, err := f(log.With(logger, "integration", name))
if err != nil {
errs.Add(err)
return
}
integrations = append(integrations, notify.NewIntegration(n, rs, name, i))
}
)
for i, c := range nc.WebhookConfigs {
add("webhook", i, c, func(l log.Logger) (notify.Notifier, error) { return webhook.New(c, tmpl, l) })
}
for i, c := range nc.EmailConfigs {
add("email", i, c, func(l log.Logger) (notify.Notifier, error) { return email.New(c, tmpl, l), nil })
}
for i, c := range nc.PagerdutyConfigs {
add("pagerduty", i, c, func(l log.Logger) (notify.Notifier, error) { return pagerduty.New(c, tmpl, l) })
}
for i, c := range nc.OpsGenieConfigs {
add("opsgenie", i, c, func(l log.Logger) (notify.Notifier, error) { return opsgenie.New(c, tmpl, l) })
}
for i, c := range nc.WechatConfigs {
add("wechat", i, c, func(l log.Logger) (notify.Notifier, error) { return wechat.New(c, tmpl, l) })
}
for i, c := range nc.SlackConfigs {
add("slack", i, c, func(l log.Logger) (notify.Notifier, error) { return slack.New(c, tmpl, l) })
}
for i, c := range nc.HipchatConfigs {
add("hipchat", i, c, func(l log.Logger) (notify.Notifier, error) { return hipchat.New(c, tmpl, l) })
}
for i, c := range nc.VictorOpsConfigs {
add("victorops", i, c, func(l log.Logger) (notify.Notifier, error) { return victorops.New(c, tmpl, l) })
}
for i, c := range nc.PushoverConfigs {
add("pushover", i, c, func(l log.Logger) (notify.Notifier, error) { return pushover.New(c, tmpl, l) })
}
if errs.Len() > 0 {
return nil, &errs
}
return integrations, nil
}
func main() {
os.Exit(run())
}
func run() int {
if os.Getenv("DEBUG") != "" {
runtime.SetBlockProfileRate(20)
runtime.SetMutexProfileFraction(20)
}
var (
configFile = kingpin.Flag("config.file", "Alertmanager configuration file name.").Default("alertmanager.yml").String()
dataDir = kingpin.Flag("storage.path", "Base path for data storage.").Default("data/").String()
retention = kingpin.Flag("data.retention", "How long to keep data for.").Default("120h").Duration()
alertGCInterval = kingpin.Flag("alerts.gc-interval", "Interval between alert GC.").Default("30m").Duration()
externalURL = kingpin.Flag("web.external-url", "The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy). Used for generating relative and absolute links back to Alertmanager itself. If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager. If omitted, relevant URL components will be derived automatically.").String()
routePrefix = kingpin.Flag("web.route-prefix", "Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url.").String()
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for the web interface and API.").Default(":9093").String()
		getConcurrency = kingpin.Flag("web.get-concurrency", "Maximum number of GET requests processed concurrently. If negative or zero, the limit is GOMAXPROCS or 8, whichever is larger.").Default("0").Int()
httpTimeout = kingpin.Flag("web.timeout", "Timeout for HTTP requests. If negative or zero, no timeout is set.").Default("0").Duration()
clusterBindAddr = kingpin.Flag("cluster.listen-address", "Listen address for cluster. Set to empty string to disable HA mode.").
Default(defaultClusterAddr).String()
clusterAdvertiseAddr = kingpin.Flag("cluster.advertise-address", "Explicit address to advertise in cluster.").String()
peers = kingpin.Flag("cluster.peer", "Initial peers (may be repeated).").Strings()
peerTimeout = kingpin.Flag("cluster.peer-timeout", "Time to wait between peers to send notifications.").Default("15s").Duration()
gossipInterval = kingpin.Flag("cluster.gossip-interval", "Interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated across the cluster more quickly at the expense of increased bandwidth.").Default(cluster.DefaultGossipInterval.String()).Duration()
pushPullInterval = kingpin.Flag("cluster.pushpull-interval", "Interval for gossip state syncs. Setting this interval lower (more frequent) will increase convergence speeds across larger clusters at the expense of increased bandwidth usage.").Default(cluster.DefaultPushPullInterval.String()).Duration()
tcpTimeout = kingpin.Flag("cluster.tcp-timeout", "Timeout for establishing a stream connection with a remote node for a full state sync, and for stream read and write operations.").Default(cluster.DefaultTcpTimeout.String()).Duration()
probeTimeout = kingpin.Flag("cluster.probe-timeout", "Timeout to wait for an ack from a probed node before assuming it is unhealthy. This should be set to 99-percentile of RTT (round-trip time) on your network.").Default(cluster.DefaultProbeTimeout.String()).Duration()
probeInterval = kingpin.Flag("cluster.probe-interval", "Interval between random node probes. Setting this lower (more frequent) will cause the cluster to detect failed nodes more quickly at the expense of increased bandwidth usage.").Default(cluster.DefaultProbeInterval.String()).Duration()
settleTimeout = kingpin.Flag("cluster.settle-timeout", "Maximum time to wait for cluster connections to settle before evaluating notifications.").Default(cluster.DefaultPushPullInterval.String()).Duration()
reconnectInterval = kingpin.Flag("cluster.reconnect-interval", "Interval between attempting to reconnect to lost peers.").Default(cluster.DefaultReconnectInterval.String()).Duration()
peerReconnectTimeout = kingpin.Flag("cluster.reconnect-timeout", "Length of time to attempt to reconnect to a lost peer.").Default(cluster.DefaultReconnectTimeout.String()).Duration()
)
promlogflag.AddFlags(kingpin.CommandLine, &promlogConfig)
kingpin.Version(version.Print("alertmanager"))
kingpin.CommandLine.GetFlag("help").Short('h')
kingpin.Parse()
logger := promlog.New(&promlogConfig)
level.Info(logger).Log("msg", "Starting Alertmanager", "version", version.Info())
level.Info(logger).Log("build_context", version.BuildContext())
err := os.MkdirAll(*dataDir, 0777)
if err != nil {
level.Error(logger).Log("msg", "Unable to create data directory", "err", err)
return 1
}
var peer *cluster.Peer
if *clusterBindAddr != "" {
peer, err = cluster.Create(
log.With(logger, "component", "cluster"),
prometheus.DefaultRegisterer,
*clusterBindAddr,
*clusterAdvertiseAddr,
*peers,
true,
*pushPullInterval,
*gossipInterval,
*tcpTimeout,
*probeTimeout,
*probeInterval,
)
if err != nil {
level.Error(logger).Log("msg", "unable to initialize gossip mesh", "err", err)
return 1
}
clusterEnabled.Set(1)
}
stopc := make(chan struct{})
var wg sync.WaitGroup
wg.Add(1)
waitFunc := func() time.Duration { return 0 }
if peer != nil {
waitFunc = clusterWait(peer, *peerTimeout)
}
notificationLogOpts := []nflog.Option{
nflog.WithRetention(*retention),
nflog.WithClusterWait(waitFunc),
nflog.WithSnapshot(filepath.Join(*dataDir, "nflog")),
nflog.WithMaintenance(15*time.Minute, stopc, wg.Done),
nflog.WithMetrics(prometheus.DefaultRegisterer),
nflog.WithLogger(log.With(logger, "component", "nflog")),
}
notificationLog, err := nflog.New(notificationLogOpts...)
if err != nil {
level.Error(logger).Log("err", err)
return 1
}
if peer != nil {
c := peer.AddState("nfl", notificationLog, prometheus.DefaultRegisterer)
notificationLog.SetBroadcast(c.Broadcast)
}
marker := types.NewMarker(prometheus.DefaultRegisterer)
silenceOpts := silence.Options{
SnapshotFile: filepath.Join(*dataDir, "silences"),
Retention: *retention,
Logger: log.With(logger, "component", "silences"),
Metrics: prometheus.DefaultRegisterer,
}
silences, err := silence.New(silenceOpts)
if err != nil {
level.Error(logger).Log("err", err)
return 1
}
if peer != nil {
c := peer.AddState("sil", silences, prometheus.DefaultRegisterer)
silences.SetBroadcast(c.Broadcast)
}
// Start providers before router potentially sends updates.
wg.Add(1)
go func() {
silences.Maintenance(15*time.Minute, filepath.Join(*dataDir, "silences"), stopc)
wg.Done()
}()
defer func() {
close(stopc)
wg.Wait()
}()
// Peer state listeners have been registered, now we can join and get the initial state.
if peer != nil {
err = peer.Join(
*reconnectInterval,
*peerReconnectTimeout,
)
if err != nil {
level.Warn(logger).Log("msg", "unable to join gossip mesh", "err", err)
}
ctx, cancel := context.WithTimeout(context.Background(), *settleTimeout)
defer func() {
cancel()
if err := peer.Leave(10 * time.Second); err != nil {
level.Warn(logger).Log("msg", "unable to leave gossip mesh", "err", err)
}
}()
go peer.Settle(ctx, *gossipInterval*10)
}
alerts, err := mem.NewAlerts(context.Background(), marker, *alertGCInterval, logger)
if err != nil {
level.Error(logger).Log("err", err)
return 1
}
defer alerts.Close()
var disp *dispatch.Dispatcher
defer disp.Stop()
groupFn := func(routeFilter func(*dispatch.Route) bool, alertFilter func(*types.Alert, time.Time) bool) (dispatch.AlertGroups, map[model.Fingerprint][]string) {
return disp.Groups(routeFilter, alertFilter)
}
api, err := api.New(api.Options{
Alerts: alerts,
Silences: silences,
StatusFunc: marker.Status,
Peer: peer,
Timeout: *httpTimeout,
Concurrency: *getConcurrency,
Logger: log.With(logger, "component", "api"),
Registry: prometheus.DefaultRegisterer,
GroupFunc: groupFn,
})
if err != nil {
level.Error(logger).Log("err", errors.Wrap(err, "failed to create API"))
return 1
}
amURL, err := extURL(logger, os.Hostname, *listenAddress, *externalURL)
if err != nil {
level.Error(logger).Log("msg", "failed to determine external URL", "err", err)
return 1
}
level.Debug(logger).Log("externalURL", amURL.String())
timeoutFunc := func(d time.Duration) time.Duration {
if d < notify.MinTimeout {
d = notify.MinTimeout
}
return d + waitFunc()
}
var (
inhibitor *inhibit.Inhibitor
tmpl *template.Template
)
configCoordinator := config.NewCoordinator(
*configFile,
prometheus.DefaultRegisterer,
log.With(logger, "component", "configuration"),
)
configCoordinator.Subscribe(func(conf *config.Config) error {
tmpl, err = template.FromGlobs(conf.Templates...)
if err != nil {
return errors.Wrap(err, "failed to parse templates")
}
tmpl.ExternalURL = amURL
// Build the map of receiver to integrations.
receivers := make(map[string][]notify.Integration, len(conf.Receivers))
for _, rcv := range conf.Receivers {
integrations, err := buildReceiverIntegrations(rcv, tmpl, logger)
if err != nil {
return err
}
// rcv.Name is guaranteed to be unique across all receivers.
receivers[rcv.Name] = integrations
}
inhibitor.Stop()
disp.Stop()
inhibitor = inhibit.NewInhibitor(alerts, conf.InhibitRules, marker, logger)
silencer := silence.NewSilencer(silences, marker, logger)
pipeline := notify.BuildPipeline(
receivers,
waitFunc,
inhibitor,
silencer,
notificationLog,
peer,
)
api.Update(conf, func(labels model.LabelSet) {
inhibitor.Mutes(labels)
silencer.Mutes(labels)
})
disp = dispatch.NewDispatcher(alerts, dispatch.NewRoute(conf.Route, nil), pipeline, marker, timeoutFunc, logger)
go disp.Run()
go inhibitor.Run()
return nil
})
if err := configCoordinator.Reload(); err != nil {
return 1
}
// Make routePrefix default to externalURL path if empty string.
if *routePrefix == "" {
*routePrefix = amURL.Path
}
*routePrefix = "/" + strings.Trim(*routePrefix, "/")
level.Debug(logger).Log("routePrefix", *routePrefix)
router := route.New().WithInstrumentation(instrumentHandler)
if *routePrefix != "/" {
router = router.WithPrefix(*routePrefix)
}
webReload := make(chan chan error)
ui.Register(router, webReload, logger)
mux := api.Register(router, *routePrefix)
srv := http.Server{Addr: *listenAddress, Handler: mux}
srvc := make(chan struct{})
go func() {
level.Info(logger).Log("msg", "Listening", "address", *listenAddress)
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
level.Error(logger).Log("msg", "Listen error", "err", err)
close(srvc)
}
defer func() {
if err := srv.Close(); err != nil {
level.Error(logger).Log("msg", "Error on closing the server", "err", err)
}
}()
}()
var (
hup = make(chan os.Signal, 1)
hupReady = make(chan bool)
term = make(chan os.Signal, 1)
)
signal.Notify(hup, syscall.SIGHUP)
signal.Notify(term, os.Interrupt, syscall.SIGTERM)
go func() {
<-hupReady
for {
select {
case <-hup:
// ignore error, already logged in `reload()`
_ = configCoordinator.Reload()
case errc := <-webReload:
errc <- configCoordinator.Reload()
}
}
}()
// Wait for reload or termination signals.
close(hupReady) // Unblock SIGHUP handler.
for {
select {
case <-term:
level.Info(logger).Log("msg", "Received SIGTERM, exiting gracefully...")
return 0
case <-srvc:
return 1
}
}
}
// clusterWait returns a function that inspects the current peer state and returns
// a duration of one base timeout for each peer with a higher ID than ourselves.
func clusterWait(p *cluster.Peer, timeout time.Duration) func() time.Duration {
return func() time.Duration {
return time.Duration(p.Position()) * timeout
}
}
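
A standalone sketch (the values are made up) of the arithmetic behind clusterWait: the wait grows by one peer timeout per position in the cluster:

// cluster_wait_sketch.go - standalone illustration of the wait computation above.
package main

import (
	"fmt"
	"time"
)

func main() {
	position := 2               // what p.Position() might report for this node
	timeout := 15 * time.Second // default --cluster.peer-timeout
	wait := time.Duration(position) * timeout
	fmt.Println(wait) // 30s
}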
func extURL(logger log.Logger, hostnamef func() (string, error), listen, external string) (*url.URL, error) {
if external == "" {
hostname, err := hostnamef()
if err != nil {
return nil, err
}
_, port, err := net.SplitHostPort(listen)
if err != nil {
return nil, err
}
if port == "" {
level.Warn(logger).Log("msg", "no port found for listen address", "address", listen)
}
external = fmt.Sprintf("http://%s:%s/", hostname, port)
}
u, err := url.Parse(external)
if err != nil {
return nil, err
}
if u.Scheme != "http" && u.Scheme != "https" {
return nil, errors.Errorf("%q: invalid %q scheme, only 'http' and 'https' are supported", u.String(), u.Scheme)
}
ppref := strings.TrimRight(u.Path, "/")
if ppref != "" && !strings.HasPrefix(ppref, "/") {
ppref = "/" + ppref
}
u.Path = ppref
return u, nil
}
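
A hedged usage sketch (assumed to compile alongside the code above; the helper name and hostname are illustrative): with an empty --web.external-url, extURL falls back to the local hostname plus the listen port and strips the trailing slash from the path:

// exampleExtURL is an illustrative helper, not part of the original file.
func exampleExtURL(logger log.Logger) {
	u, err := extURL(logger, func() (string, error) { return "am0.example.org", nil }, ":9093", "")
	if err != nil {
		return
	}
	fmt.Println(u.String()) // http://am0.example.org:9093
}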
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"github.com/BlacksunLabs/drgero/event"
"github.com/BlacksunLabs/drgero/mq"
)
// Message is just a placeholder for the slack post body
type Message struct {
Text string `json:"text"`
}
var whURL string
var m = new(mq.Client)
func post(message string) error {
payload := Message{Text: message}
jsonPayload, err := json.Marshal(payload)
if err != nil {
log.Printf("unable to marshal payload: %v", err)
return err
}
_, err = http.Post(whURL, "application/json", bytes.NewBuffer(jsonPayload))
if err != nil {
log.Printf("failed to post to slack")
return err
}
return nil
}
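
For reference, a standalone sketch (the message text is made up) of the payload post sends to the Slack webhook:

// payload_sketch.go - standalone illustration of the webhook body built by post().
package main

import (
	"encoding/json"
	"fmt"
)

type Message struct {
	Text string `json:"text"`
}

func main() {
	body, _ := json.Marshal(Message{Text: "new feed item"})
	fmt.Println(string(body)) // {"text":"new feed item"}
}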
func main() {
whURL = os.Getenv("SLACK_WEBHOOK")
err := m.Connect("amqp://guest:guest@localhost:5672")
if err != nil {
fmt.Printf("unable to connect to RabbitMQ : %v", err)
}
queueName, err := m.NewTempQueue()
if err != nil {
fmt.Printf("could not create temporary queue : %v", err)
}
err = m.BindQueueToExchange(queueName, "events")
if err != nil {
fmt.Printf("%v", err)
return
}
ch, err := m.GetChannel()
if err != nil {
fmt.Printf("%v", err)
return
}
events, err := ch.Consume(
queueName,
"",
true,
false,
false,
false,
nil,
)
if err != nil {
fmt.Printf("failed to register consumer to %s : %v", queueName, err)
return
}
forever := make(chan bool)
go func() {
for e := range events {
var event = new(event.Event)
var err = json.Unmarshal(e.Body, event)
if err != nil {
fmt.Printf("failed to unmarshal event: %v", err)
<-forever
}
// Feed Parsing/posting here
			if event.UserAgent != "feedmonitor" {
				// Skip events from other producers rather than stopping the consumer loop.
				continue
			}
log.Printf("%v", event.Message)
feedMap := make(map[string]interface{})
err = json.Unmarshal([]byte(event.Message), &feedMap)
if err != nil {
log.Printf("failed to unmarshal feed event: %v", err)
}
data := fmt.Sprintf("%s %s", feedMap["title"], feedMap["link"])
post(data)
}
}()
fmt.Println("[i] Waiting for events. To exit press CTRL+C")
<-forever
}
|
[
"\"SLACK_WEBHOOK\""
] |
[] |
[
"SLACK_WEBHOOK"
] |
[]
|
["SLACK_WEBHOOK"]
|
go
| 1 | 0 | |
airnow/historical.py
|
# -*- coding: utf-8 -*-
import json
import os
from airnow.api import get_airnow_data
def get_historical_zip(
zip_code: str,
date: str,
distance: int = 25,
api_key: str = os.environ["AIRNOW_API_KEY"],
) -> dict:
"""
Get historical air quality conditions for a given date by ZIP code
:param str zip_code: A US ZIP code
:param str date: Date from which to get the forecast (default: today)
:param int distance: If no reporting area exists for given ZIP code, search for nearby stations within this distance (default: 25; unit: miles)
:param str api_key: AirNow API token
:return: A dictionary containing the air quality conditions
"""
params = {
"zipCode": zip_code,
"date": f"{date}T00-0000",
"distance": distance,
"format": "application/json",
"API_KEY": api_key,
}
cond = get_airnow_data(endpoint="/aq/observation/zipCode/historical/", **params,)
return json.loads(cond)
def get_historical_latlon(
latitude: float,
longitude: float,
date: str,
distance: int = 25,
api_key: str = os.environ["AIRNOW_API_KEY"],
) -> dict:
"""
Get historical air quality conditions for a given date by latitude and longitude
:param float latitude: Latitude
:param float longitude: Longitude
:param str date: Date from which to get the forecast (default: today)
:param int distance: If no reporting area exists for given location, search for nearby stations within this distance (default: 25; unit: miles)
:param str api_key: AirNow API token
:return: A dictionary containing the air quality conditions
"""
params = {
"latitude": latitude,
"longitude": longitude,
"date": f"{date}T00-0000",
"distance": distance,
"format": "application/json",
"API_KEY": api_key,
}
cond = get_airnow_data(endpoint="/aq/observation/latLong/historical/", **params,)
return json.loads(cond)
|
[] |
[] |
[
"AIRNOW_API_KEY"
] |
[]
|
["AIRNOW_API_KEY"]
|
python
| 1 | 0 | |
backend/getreports.py
|
""" BOVI(n)E getreports endpoint. """
import json
import os
import boto3
REPORTS_BUCKET = os.environ['REPORTS_BUCKET']
STAGE = os.environ['STAGE']
def get_reports(report=None):
""" Get compliance reports """
s3_client = boto3.client('s3')
print "Report: %s" % report
if not report:
# print REPORTS_BUCKET
report_data = []
s3_objects = s3_client.list_objects(Bucket=REPORTS_BUCKET)['Contents']
for obj in s3_objects:
if 'compliance-audit-report' in obj['Key']:
report_data.append(dict(Report=obj['Key'].split(
'/')[0], LastModified=str(obj['LastModified'])))
else:
# print REPORTS_BUCKET
s3_key = report + '/compliance-audit-report.json'
report_data = s3_client.get_object(Bucket=REPORTS_BUCKET, Key=s3_key)[
'Body'].read()
return json.loads(report_data)
return report_data
def lambda_handler(*kwargs):
""" Lambda handler """
    print(kwargs[0])
report = None
params = kwargs[0].get('queryStringParameters')
if params:
report = params.get('report')
        print(report)
results = get_reports(report)
body = results
response = {
"statusCode": 200,
"body": json.dumps(body)
}
return response
|
[] |
[] |
[
"STAGE",
"REPORTS_BUCKET"
] |
[]
|
["STAGE", "REPORTS_BUCKET"]
|
python
| 2 | 0 | |
go/keybase/main.go
|
// Copyright 2015 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.
package main
import (
"errors"
"fmt"
"io/ioutil"
"os"
"os/signal"
"runtime"
"runtime/debug"
"runtime/pprof"
"syscall"
"time"
"github.com/keybase/client/go/client"
"github.com/keybase/client/go/externals"
"github.com/keybase/client/go/install"
"github.com/keybase/client/go/libcmdline"
"github.com/keybase/client/go/libkb"
"github.com/keybase/client/go/logger"
keybase1 "github.com/keybase/client/go/protocol/keybase1"
"github.com/keybase/client/go/service"
"github.com/keybase/client/go/uidmap"
"github.com/keybase/go-framed-msgpack-rpc/rpc"
"golang.org/x/net/context"
)
var cmd libcmdline.Command
var errParseArgs = errors.New("failed to parse command line arguments")
func handleQuickVersion() bool {
if len(os.Args) == 3 && os.Args[1] == "version" && os.Args[2] == "-S" {
fmt.Printf("%s\n", libkb.VersionString())
return true
}
return false
}
func keybaseExit(exitCode int) {
logger.Shutdown()
logger.RestoreConsoleMode()
os.Exit(exitCode)
}
func main() {
// Preserve non-critical errors that happen very early during
// startup, where logging is not set up yet, to be printed later
// when logging is functioning.
var startupErrors []error
if err := libkb.SaferDLLLoading(); err != nil {
// Don't abort here. This should not happen on any known
// version of Windows, but new MS platforms may create
// regressions.
startupErrors = append(startupErrors,
fmt.Errorf("SaferDLLLoading error: %v", err.Error()))
}
// handle a Quick version query
if handleQuickVersion() {
return
}
g := externals.NewGlobalContextInit()
go HandleSignals(g)
err := mainInner(g, startupErrors)
if g.Env.GetDebug() {
// hack to wait a little bit to receive all the log messages from the
// service before shutting down in debug mode.
time.Sleep(100 * time.Millisecond)
}
mctx := libkb.NewMetaContextTODO(g)
e2 := g.Shutdown(mctx)
if err == nil {
err = e2
}
if err != nil {
// if errParseArgs, the error was already output (along with usage)
if err != errParseArgs {
g.Log.Errorf("%s", stripFieldsFromAppStatusError(err).Error())
}
if g.ExitCode == keybase1.ExitCode_OK {
g.ExitCode = keybase1.ExitCode_NOTOK
}
}
if g.ExitCode != keybase1.ExitCode_OK {
keybaseExit(int(g.ExitCode))
}
}
func tryToDisableProcessTracing(log logger.Logger, e *libkb.Env) {
if e.GetRunMode() != libkb.ProductionRunMode || e.AllowPTrace() {
return
}
if !e.GetFeatureFlags().Admin(e.GetUID()) {
// Admin only for now
return
}
// We do our best but if it's not possible on some systems or
// configurations, it's not a fatal error. Also see documentation
// in ptrace_*.go files.
if err := libkb.DisableProcessTracing(); err != nil {
log.Debug("Unable to disable process tracing: %v", err.Error())
} else {
log.Debug("DisableProcessTracing call succeeded")
}
}
func logStartupIssues(errors []error, log logger.Logger) {
for _, err := range errors {
log.Warning(err.Error())
}
}
func warnNonProd(log logger.Logger, e *libkb.Env) {
mode := e.GetRunMode()
if mode != libkb.ProductionRunMode {
log.Warning("Running in %s mode", mode)
}
}
func checkSystemUser(log logger.Logger) {
if isAdminUser, match, _ := libkb.IsSystemAdminUser(); isAdminUser {
log.Errorf("Oops, you are trying to run as an admin user (%s). This isn't supported.", match)
keybaseExit(int(keybase1.ExitCode_NOTOK))
}
}
func osPreconfigure(g *libkb.GlobalContext) {
switch libkb.RuntimeGroup() {
case keybase1.RuntimeGroup_LINUXLIKE:
// On Linux, we used to put the mountdir in a different location, and
// then we changed it, and also added a default mountdir config var so
// we'll know if the user has changed it.
// Update the mountdir to the new location, but only if they're still
// using the old mountpoint *and* they haven't changed it since we
// added a default. This functionality was originally in the
// run_keybase script.
configReader := g.Env.GetConfig()
if configReader == nil {
// some commands don't configure config.
return
}
userMountdir := configReader.GetMountDir()
userMountdirDefault := configReader.GetMountDirDefault()
oldMountdirDefault := g.Env.GetOldMountDirDefault()
mountdirDefault := g.Env.GetMountDirDefault()
// User has not set a mountdir yet; e.g., on initial install.
nonexistentMountdir := userMountdir == ""
// User does not have a mountdirdefault; e.g., if last used Keybase
// before the change mentioned above.
nonexistentMountdirDefault := userMountdirDefault == ""
usingOldMountdirByDefault := userMountdir == oldMountdirDefault && (userMountdirDefault == oldMountdirDefault || nonexistentMountdirDefault)
shouldResetMountdir := nonexistentMountdir || usingOldMountdirByDefault
if nonexistentMountdirDefault || shouldResetMountdir {
configWriter := g.Env.GetConfigWriter()
if configWriter == nil {
// some commands don't configure config.
return
}
// Set the user's mountdirdefault to the current one if it's
// currently empty.
_ = configWriter.SetStringAtPath("mountdirdefault", mountdirDefault)
if shouldResetMountdir {
_ = configWriter.SetStringAtPath("mountdir", mountdirDefault)
}
}
default:
}
}
func mainInner(g *libkb.GlobalContext, startupErrors []error) error {
cl := libcmdline.NewCommandLine(true, client.GetExtraFlags())
cl.AddCommands(client.GetCommands(cl, g))
cl.AddCommands(service.GetCommands(cl, g))
cl.AddHelpTopics(client.GetHelpTopics())
var err error
cmd, err = cl.Parse(os.Args)
if err != nil {
g.Log.Errorf("Error parsing command line arguments: %s\n\n", err)
if _, isHelp := cmd.(*libcmdline.CmdSpecificHelp); isHelp {
// Parse returned the help command for this command, so run it:
_ = cmd.Run()
}
return errParseArgs
}
if cmd == nil {
return nil
}
if !cmd.GetUsage().AllowRoot && !g.Env.GetAllowRoot() {
checkSystemUser(g.Log)
}
if cl.IsService() {
startProfile(g)
}
if !cl.IsService() {
if logger.SaveConsoleMode() == nil {
defer logger.RestoreConsoleMode()
}
client.InitUI(g)
}
if err = g.ConfigureCommand(cl, cmd); err != nil {
return err
}
g.StartupMessage()
warnNonProd(g.Log, g.Env)
logStartupIssues(startupErrors, g.Log)
tryToDisableProcessTracing(g.Log, g.Env)
// Don't configure mountdir on a nofork command like nix configure redirector.
if cl.GetForkCmd() != libcmdline.NoFork {
osPreconfigure(g)
}
if err := configOtherLibraries(g); err != nil {
return err
}
if err = configureProcesses(g, cl, &cmd); err != nil {
return err
}
err = cmd.Run()
if !cl.IsService() && !cl.SkipOutOfDateCheck() {
// Errors that come up in printing this warning are logged but ignored.
client.PrintOutOfDateWarnings(g)
}
// Warn the user if there is an account reset in progress
if !cl.IsService() && !cl.SkipAccountResetCheck() {
// Errors that come up in printing this warning are logged but ignored.
client.PrintAccountResetWarning(g)
}
return err
}
func configOtherLibraries(g *libkb.GlobalContext) error {
// Set our UID -> Username mapping service
g.SetUIDMapper(uidmap.NewUIDMap(g.Env.GetUIDMapFullNameCacheSize()))
return nil
}
// AutoFork? Standalone? ClientServer? Brew service? This function deals with the
// various run configurations that we can run in.
func configureProcesses(g *libkb.GlobalContext, cl *libcmdline.CommandLine, cmd *libcmdline.Command) (err error) {
g.Log.Debug("+ configureProcesses")
defer func() {
g.Log.Debug("- configureProcesses -> %v", err)
}()
// On Linux, the service configures its own autostart file. Otherwise, no
// need to configure if we're a service.
if cl.IsService() {
g.Log.Debug("| in configureProcesses, is service")
if runtime.GOOS == "linux" {
g.Log.Debug("| calling AutoInstall for Linux")
_, err := install.AutoInstall(g, "", false, 10*time.Second, g.Log)
if err != nil {
return err
}
}
return nil
}
// Start the server on the other end, possibly.
// There are two cases in which we do this: (1) we want
// a local loopback server in standalone mode; (2) we
// need to "autofork" it. Do at most one of these
// operations.
if g.Env.GetStandalone() {
if cl.IsNoStandalone() {
err = client.CantRunInStandaloneError{}
return err
}
svc := service.NewService(g, false /* isDaemon */)
err = svc.SetupCriticalSubServices()
if err != nil {
return err
}
err = svc.StartLoopbackServer()
if err != nil {
if pflerr, ok := err.(libkb.PIDFileLockError); ok {
err = fmt.Errorf("Can't run in standalone mode with a service running (see %q)",
pflerr.Filename)
}
return err
}
// StandaloneChatConnector is an interface with only one
// method: StartStandaloneChat. This way we can pass Service
// object while not exposing anything but that one function.
g.StandaloneChatConnector = svc
g.Standalone = true
return err
}
// After this point, we need to provide a remote logging story if necessary
// If this command specifically asks not to be forked, then we are done in this
// function. This sort of thing is true for the `ctl` commands and also the `version`
// command.
fc := cl.GetForkCmd()
if fc == libcmdline.NoFork {
return configureLogging(g, cl)
}
var newProc bool
if libkb.IsBrewBuild {
// If we're running in Brew mode, we might need to install ourselves as a persistent
// service for future invocations of the command.
newProc, err = install.AutoInstall(g, "", false, 10*time.Second, g.Log)
if err != nil {
return err
}
} else if fc == libcmdline.ForceFork || g.Env.GetAutoFork() {
// If this command warrants an autofork, do it now.
newProc, err = client.AutoForkServer(g, cl)
if err != nil {
return err
}
}
// Restart the service if we see that it's out of date. It's important to do this
// before we make any RPCs to the service --- for instance, before the logging
// calls below. See the v1.0.8 update fiasco for more details. Also, only need
// to do this if we didn't just start a new process.
if !newProc {
if err = client.FixVersionClash(g, cl); err != nil {
return err
}
}
// Ignore error
if err = client.WarnOutdatedKBFS(g, cl); err != nil {
g.Log.Debug("| Could not do kbfs versioncheck: %s", err)
}
g.Log.Debug("| After forks; newProc=%v", newProc)
if err = configureLogging(g, cl); err != nil {
return err
}
// This sends the client's PATH to the service so the service can update
// its PATH if necessary. This is called after FixVersionClash(), which
// happens above in configureProcesses().
if err = configurePath(g, cl); err != nil {
// Further note -- don't die here. It could be we're calling this method
// against an earlier version of the service that doesn't support it.
// It's not critical that it succeed, so continue on.
g.Log.Debug("Configure path failed: %v", err)
}
return nil
}
func configureLogging(g *libkb.GlobalContext, cl *libcmdline.CommandLine) error {
g.Log.Debug("+ configureLogging")
defer func() {
g.Log.Debug("- configureLogging")
}()
// Whether or not we autoforked, we're now running in client-server
// mode (as opposed to standalone). Register a global LogUI so that
// calls to G.Log() in the daemon can be copied to us. This is
// something of a hack on the daemon side.
if !g.Env.GetDoLogForward() || cl.GetLogForward() == libcmdline.LogForwardNone {
g.Log.Debug("Disabling log forwarding")
return nil
}
protocols := []rpc.Protocol{client.NewLogUIProtocol(g)}
if err := client.RegisterProtocolsWithContext(protocols, g); err != nil {
return err
}
logLevel := keybase1.LogLevel_INFO
if g.Env.GetDebug() {
logLevel = keybase1.LogLevel_DEBUG
}
logClient, err := client.GetLogClient(g)
if err != nil {
return err
}
arg := keybase1.RegisterLoggerArg{
Name: "CLI client",
Level: logLevel,
}
if err := logClient.RegisterLogger(context.TODO(), arg); err != nil {
g.Log.Warning("Failed to register as a logger: %s", err)
}
return nil
}
// configurePath sends the client's PATH to the service.
func configurePath(g *libkb.GlobalContext, cl *libcmdline.CommandLine) error {
if cl.IsService() {
// this only runs on the client
return nil
}
return client.SendPath(g)
}
func HandleSignals(g *libkb.GlobalContext) {
c := make(chan os.Signal, 1)
// Note: os.Kill can't be trapped.
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
mctx := libkb.NewMetaContextTODO(g)
for {
s := <-c
if s != nil {
mctx.Debug("trapped signal %v", s)
// if the current command has a Stop function, then call it.
// It will do its own stopping of the process and calling
// shutdown
if stop, ok := cmd.(client.Stopper); ok {
mctx.Debug("Stopping command cleanly via stopper")
stop.Stop(keybase1.ExitCode_OK)
return
}
// if the current command has a Cancel function, then call it:
if canc, ok := cmd.(client.Canceler); ok {
mctx.Debug("canceling running command")
if err := canc.Cancel(); err != nil {
mctx.Warning("error canceling command: %s", err)
}
}
mctx.Debug("calling shutdown")
_ = g.Shutdown(mctx)
mctx.Error("interrupted")
keybaseExit(3)
}
}
}
// stripFieldsFromAppStatusError is an error prettifier. By default, AppStatusErrors print optional
// fields that were problematic. But they make for pretty ugly error messages spit back to the user.
// So strip that out, but still leave in an error-code integer, since those are quite helpful.
func stripFieldsFromAppStatusError(e error) error {
if e == nil {
return e
}
if ase, ok := e.(libkb.AppStatusError); ok {
return fmt.Errorf("%s (code %d)", ase.Desc, ase.Code)
}
return e
}
func startProfile(g *libkb.GlobalContext) {
if os.Getenv("KEYBASE_PERIODIC_MEMPROFILE") == "" {
return
}
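// KEYBASE_PERIODIC_MEMPROFILE holds a Go duration string (e.g. "30s" or
// "5m"); it is parsed by time.ParseDuration below and used as the interval
// between memory-profile dumps.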
interval, err := time.ParseDuration(os.Getenv("KEYBASE_PERIODIC_MEMPROFILE"))
if err != nil {
g.Log.Debug("error parsing KEYBASE_PERIODIC_MEMPROFILE interval duration: %s", err)
return
}
go func() {
g.Log.Debug("periodic memory profile enabled, will dump memory profiles every %s", interval)
for {
time.Sleep(interval)
g.Log.Debug("dumping periodic memory profile")
f, err := ioutil.TempFile("", "keybase_memprofile")
if err != nil {
g.Log.Debug("could not create memory profile: ", err)
continue
}
debug.FreeOSMemory()
runtime.GC() // get up-to-date statistics
if err := pprof.WriteHeapProfile(f); err != nil {
g.Log.Debug("could not write memory profile: ", err)
continue
}
f.Close()
g.Log.Debug("wrote periodic memory profile to %s", f.Name())
var mems runtime.MemStats
runtime.ReadMemStats(&mems)
g.Log.Debug("runtime mem alloc: %v", mems.Alloc)
g.Log.Debug("runtime total alloc: %v", mems.TotalAlloc)
g.Log.Debug("runtime heap alloc: %v", mems.HeapAlloc)
g.Log.Debug("runtime heap sys: %v", mems.HeapSys)
}
}()
}
|
[
"\"KEYBASE_PERIODIC_MEMPROFILE\"",
"\"KEYBASE_PERIODIC_MEMPROFILE\""
] |
[] |
[
"KEYBASE_PERIODIC_MEMPROFILE"
] |
[]
|
["KEYBASE_PERIODIC_MEMPROFILE"]
|
go
| 1 | 0 | |
tests/features/steps/execution_monitoring/test_execution_monitoring.py
|
import behave
import json
import time
import os
@behave.when(u'I push and deploy package with param "{on_reset}" in "{package_directory_path}"')
def step_impl(context, package_directory_path, on_reset):
package_directory_path = os.path.join(os.environ['DATALOOP_TEST_ASSETS'], package_directory_path)
package_json_path = os.path.join(package_directory_path, 'package.json')
with open(package_json_path, 'r') as f:
package_json = json.load(f)
if on_reset == 'None':
package_json['services'][0].pop('onReset', None)
else:
package_json['services'][0]['onReset'] = on_reset
with open(package_json_path, 'w') as f:
json.dump(package_json, f)
services, context.package = context.project.packages.deploy_from_file(project=context.project,
json_filepath=package_json_path)
context.service = services[0]
context.to_delete_packages_ids.append(context.package.id)
context.to_delete_services_ids.append(context.service.id)
@behave.when(u'I execute')
def step_impl(context):
context.execution = context.service.execute(project_id=context.project.id)
@behave.when(u'I terminate execution')
def step_impl(context):
context.execution.terminate()
@behave.then(u'Execution was terminated')
def step_impl(context):
num_tries = 15
interval = 10
terminated = False
for i in range(num_tries):
time.sleep(interval)
execution = context.service.executions.get(execution_id=context.execution.id)
terminated = execution.to_terminate
terminated = terminated and execution.latest_status['status'] == 'failed'
terminated = terminated and 'InterruptedError' in execution.latest_status['message']
if terminated:
break
assert terminated
@behave.then(u'Execution "{on_reset}" on timeout')
def step_impl(context, on_reset):
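# Wait until the service's execution timeout plus its drain window (plus a
# small buffer) has elapsed before polling for the rerun/failed status below.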
time.sleep(context.service.execution_timeout + context.service.drain_time + 5)
num_tries = 15
interval = 3
reset = False
for _ in range(num_tries):
execution = context.service.executions.get(execution_id=context.execution.id)
for stat in execution.status:
if on_reset == 'rerun' \
and stat['status'] == 'rerun' \
and 'Rerun due to runner timeout' in stat['message'] \
and execution.attempts > 1:
reset = True
elif on_reset == 'failed' \
and stat['status'] == 'failed' \
and 'Failed due to runner timeout' in stat['message']:
reset = True
if reset:
break
time.sleep(interval)
assert reset
|
[] |
[] |
[
"DATALOOP_TEST_ASSETS"
] |
[]
|
["DATALOOP_TEST_ASSETS"]
|
python
| 1 | 0 | |
src/main/java/utils/Constants.java
|
package utils;
public class Constants {
public static final String USERNAME = "tellamicrobot";
public static final String DESC = "A free URL shortener";
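// The tokens below are read from the environment at class-initialization
// time; System.getenv returns null when a variable is not set, so callers
// should check for missing values before use.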
public static final String BOT_TOKEN = System.getenv("botellamicrokey");
public static final String RAPIDAPI_TOKEN = System.getenv("rapidapikey");
public static final String RAPIDAPI_HOST = System.getenv("rapidapihost");
public static final int CREATOR_ID = 562759740;
}
|
[
"\"botellamicrokey\"",
"\"rapidapikey\"",
"\"rapidapihost\""
] |
[] |
[
"rapidapihost",
"botellamicrokey",
"rapidapikey"
] |
[]
|
["rapidapihost", "botellamicrokey", "rapidapikey"]
|
java
| 3 | 0 | |
daemon/graphdriver/driver.go
|
package graphdriver
import (
"errors"
"fmt"
"os"
"path"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
)
type FsMagic uint32
const (
FsMagicAufs = FsMagic(0x61756673)
FsMagicBtrfs = FsMagic(0x9123683E)
FsMagicCramfs = FsMagic(0x28cd3d45)
FsMagicExtfs = FsMagic(0x0000EF53)
FsMagicF2fs = FsMagic(0xF2F52010)
FsMagicJffs2Fs = FsMagic(0x000072b6)
FsMagicJfs = FsMagic(0x3153464a)
FsMagicNfsFs = FsMagic(0x00006969)
FsMagicRamFs = FsMagic(0x858458f6)
FsMagicReiserFs = FsMagic(0x52654973)
FsMagicSmbFs = FsMagic(0x0000517B)
FsMagicSquashFs = FsMagic(0x73717368)
FsMagicTmpFs = FsMagic(0x01021994)
FsMagicUnsupported = FsMagic(0x00000000)
FsMagicXfs = FsMagic(0x58465342)
FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
DefaultDriver string
// All registered drivers
drivers map[string]InitFunc
// Slice of drivers that should be tried in order of priority
priority = []string{
"aufs",
"btrfs",
"devicemapper",
"overlay",
"vfs",
}
ErrNotSupported = errors.New("driver not supported")
ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)")
ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver")
FsNames = map[FsMagic]string{
FsMagicAufs: "aufs",
FsMagicBtrfs: "btrfs",
FsMagicCramfs: "cramfs",
FsMagicExtfs: "extfs",
FsMagicF2fs: "f2fs",
FsMagicJffs2Fs: "jffs2",
FsMagicJfs: "jfs",
FsMagicNfsFs: "nfs",
FsMagicRamFs: "ramfs",
FsMagicReiserFs: "reiserfs",
FsMagicSmbFs: "smb",
FsMagicSquashFs: "squashfs",
FsMagicTmpFs: "tmpfs",
FsMagicUnsupported: "unsupported",
FsMagicXfs: "xfs",
FsMagicZfs: "zfs",
}
)
type InitFunc func(root string, options []string) (Driver, error)
// ProtoDriver defines the basic capabilities of a driver.
// This interface exists solely to be a minimum set of methods
// for client code that chooses not to implement the entire Driver
// interface and use the NaiveDiffDriver wrapper constructor.
//
// Use of ProtoDriver directly by client code is not recommended.
type ProtoDriver interface {
// String returns a string representation of this driver.
String() string
// Create creates a new, empty, filesystem layer with the
// specified id and parent. Parent may be "".
Create(id, parent string) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
// to by this id. You can optionally specify a mountLabel or "".
// Returns the absolute path to the mounted layered filesystem.
Get(id, mountLabel string) (dir string, err error)
// Put releases the system resources for the specified id,
// e.g., unmounting the layered filesystem.
Put(id string) error
// Exists returns whether a filesystem layer with the specified
// ID exists on this driver.
Exists(id string) bool
// Status returns a set of key-value pairs which give low
// level diagnostic status about this driver.
Status() [][2]string
// Cleanup performs necessary tasks to release resources
// held by the driver, e.g., unmounting all layered filesystems
// known to this driver.
Cleanup() error
}
// Driver is the interface for layered/snapshot file system drivers.
type Driver interface {
ProtoDriver
// Diff produces an archive of the changes between the specified
// layer and its parent layer which may be "".
Diff(id, parent string) (archive.Archive, error)
// Changes produces a list of changes between the specified layer
// and its parent layer. If parent is "", then all changes will be ADD changes.
Changes(id, parent string) ([]archive.Change, error)
// ApplyDiff extracts the changeset from the given diff into the
// layer with the specified id and parent, returning the size of the
// new layer in bytes.
ApplyDiff(id, parent string, diff archive.ArchiveReader) (size int64, err error)
// DiffSize calculates the changes between the specified id
// and its parent and returns the size in bytes of the changes
// relative to its base filesystem directory.
DiffSize(id, parent string) (size int64, err error)
}
func init() {
drivers = make(map[string]InitFunc)
}
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
return fmt.Errorf("Name already registered %s", name)
}
drivers[name] = initFunc
return nil
}
func GetDriver(name, home string, options []string) (Driver, error) {
if initFunc, exists := drivers[name]; exists {
return initFunc(path.Join(home, name), options)
}
return nil, ErrNotSupported
}
func New(root string, options []string) (driver Driver, err error) {
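// Driver selection order: an explicit DOCKER_DRIVER environment variable (or
// the DefaultDriver override) wins, then the hard-coded priority list, then
// any other registered driver that initializes successfully.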
for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} {
if name != "" {
return GetDriver(name, root, options)
}
}
// Check for priority drivers first
for _, name := range priority {
driver, err = GetDriver(name, root, options)
if err != nil {
if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
continue
}
return nil, err
}
checkPriorDriver(name, root)
return driver, nil
}
// Check all registered drivers if no priority driver is found
for name, initFunc := range drivers {
if driver, err = initFunc(root, options); err != nil {
if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS {
continue
}
return nil, err
}
checkPriorDriver(name, root)
return driver, nil
}
return nil, fmt.Errorf("No supported storage backend found")
}
func checkPriorDriver(name, root string) {
priorDrivers := []string{}
for prior := range drivers {
if prior != name && prior != "vfs" {
if _, err := os.Stat(path.Join(root, prior)); err == nil {
priorDrivers = append(priorDrivers, prior)
}
}
}
if len(priorDrivers) > 0 {
logrus.Warnf("Graphdriver %s selected. Your graphdriver directory %s already contains data managed by other graphdrivers: %s", name, root, strings.Join(priorDrivers, ","))
}
}
|
[
"\"DOCKER_DRIVER\""
] |
[] |
[
"DOCKER_DRIVER"
] |
[]
|
["DOCKER_DRIVER"]
|
go
| 1 | 0 | |
example/http-stackdriver/client/client.go
|
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"fmt"
"io/ioutil"
"log"
"os"
"net/http"
"time"
"google.golang.org/grpc/codes"
"go.opentelemetry.io/otel/api/distributedcontext"
"go.opentelemetry.io/otel/api/global"
"go.opentelemetry.io/otel/api/key"
"go.opentelemetry.io/otel/api/trace"
"go.opentelemetry.io/otel/exporter/trace/stackdriver"
"go.opentelemetry.io/otel/plugin/httptrace"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
)
func initTracer() {
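// PROJECT_ID selects the Google Cloud project that the Stackdriver exporter
// created below writes spans to.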
projectID := os.Getenv("PROJECT_ID")
// Create Stackdriver exporter to be able to retrieve
// the collected spans.
exporter, err := stackdriver.NewExporter(
stackdriver.WithProjectID(projectID),
)
if err != nil {
log.Fatal(err)
}
// For the demonstration, use sdktrace.AlwaysSample sampler to sample all traces.
// In a production application, use sdktrace.ProbabilitySampler with a desired probability.
tp, err := sdktrace.NewProvider(sdktrace.WithConfig(sdktrace.Config{DefaultSampler: sdktrace.AlwaysSample()}),
sdktrace.WithSyncer(exporter))
if err != nil {
log.Fatal(err)
}
global.SetTraceProvider(tp)
}
func main() {
initTracer()
tr := global.TraceProvider().Tracer("stackdriver/example/client")
client := http.DefaultClient
ctx := distributedcontext.NewContext(context.Background(),
key.String("username", "donuts"),
)
var body []byte
err := tr.WithSpan(ctx, "say hello",
func(ctx context.Context) error {
req, _ := http.NewRequest("GET", "http://localhost:7777/hello", nil)
ctx, req = httptrace.W3C(ctx, req)
httptrace.Inject(ctx, req)
fmt.Printf("Sending request...\n")
res, err := client.Do(req)
if err != nil {
panic(err)
}
body, err = ioutil.ReadAll(res.Body)
_ = res.Body.Close()
trace.SpanFromContext(ctx).SetStatus(codes.OK)
return err
})
if err != nil {
panic(err)
}
fmt.Printf("Response Received: %s\n\n\n", body)
fmt.Printf("Waiting for few seconds to export spans ...\n\n")
time.Sleep(10 * time.Second)
fmt.Println("Check traces on Stackdriver Trace")
}
|
[
"\"PROJECT_ID\""
] |
[] |
[
"PROJECT_ID"
] |
[]
|
["PROJECT_ID"]
|
go
| 1 | 0 | |
resources/scripts/migrate_users.py
|
'''
Script to migrate users to the ESGF database. Rules:
1) non-local users (i.e. users that logged onto CoG with an external openid) are ignored
2) if a user is local but has no local openid, look for ESGF users with that same email:
a) if found, try to associate that openid to the CoG user, if possible
b) if for any reason the openid cannot be associated, generate a new local openid and push it to the ESGF database
3) if a user is local and already has a local openid:
- if that openid does not exist in ESGF database, push it
NOTE: passwords cannot be migrated from CoG to ESGF; they will need to be reset by the users.
@author: cinquini
'''
import os
import sys
import cog
path = os.path.dirname(cog.__file__)
sys.path.append( path )
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django_openid_auth.models import UserOpenID
from cog.models import UserProfile
from cog.plugins.esgf.security import ESGFDatabaseManager
esgfDatabaseManager = ESGFDatabaseManager()
if settings.ESGF_CONFIG:
# loop over CoG users
for user in User.objects.all():
# make sure user profile exists
try:
userp = user.profile
except ObjectDoesNotExist:
userp = UserProfile.objects.create(user=user)
# select local users:
if userp.type==1:
# 1) CoG users with no local openid
if len(userp.localOpenids())==0:
# look for user email in ESGF database
esgfUsers = esgfDatabaseManager.getUsersByEmail(user.email)
# migrate CoG user --> ESGF user
if len(esgfUsers)==0:
esgfDatabaseManager.insertUser(userp)
# associate local ESGF openid(s) to CoG user; if none is found, create a new local openid
else:
found = False
for esgfUser in esgfUsers:
# only assign if openid is local, and contains the CoG username
if settings.ESGF_HOSTNAME in esgfUser.openid and user.username in esgfUser.openid:
if not UserOpenID.objects.filter(claimed_id=esgfUser.openid).exists():
openid = UserOpenID.objects.create(user=user, claimed_id=esgfUser.openid, display_id=esgfUser.openid)
print('Assigned ESGF openid=%s to CoG user=%s' % (openid.claimed_id, user))
found = True
if not found:
esgfDatabaseManager.insertUser(userp)
# 2) CoG user with local openid(s)
else:
for openid in userp.localOpenids():
# make sure openid exists in ESGF database, if not migrate user
if esgfDatabaseManager.getUserByOpenid(openid) is None:
esgfDatabaseManager.insertUser(userp)
else:
#print "Ignoring non-local user: %s" % user
pass
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
openmmtools/tests/test_alchemy.py
|
#!/usr/bin/python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Tests for alchemical factory in `alchemy.py`.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
from __future__ import print_function
import os
import sys
import zlib
import pickle
import itertools
from functools import partial
import nose
import scipy
from nose.plugins.attrib import attr
from openmmtools import testsystems, forces
from openmmtools.constants import kB
from openmmtools.alchemy import *
logger = logging.getLogger(__name__)
# =============================================================================
# CONSTANTS
# =============================================================================
temperature = 300.0 * unit.kelvin # reference temperature
# MAX_DELTA = 0.01 * kB * temperature # maximum allowable deviation
MAX_DELTA = 1.0 * kB * temperature # maximum allowable deviation
GLOBAL_ENERGY_UNIT = unit.kilojoules_per_mole # controls printed units
GLOBAL_ALCHEMY_PLATFORM = None # This is used in every energy calculation.
# GLOBAL_ALCHEMY_PLATFORM = openmm.Platform.getPlatformByName('OpenCL') # DEBUG: Use OpenCL over CPU platform for testing since OpenCL is deterministic, while CPU is not
# =============================================================================
# TESTING UTILITIES
# =============================================================================
def create_context(system, integrator, platform=None):
"""Create a Context.
If platform is None, GLOBAL_ALCHEMY_PLATFORM is used.
"""
if platform is None:
platform = GLOBAL_ALCHEMY_PLATFORM
if platform is not None:
context = openmm.Context(system, integrator, platform)
else:
context = openmm.Context(system, integrator)
return context
def compute_energy(system, positions, platform=None, force_group=-1):
"""Compute energy of the system in the given positions.
Parameters
----------
platform : simtk.openmm.Platform or None, optional
If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
force_group : int flag or set of int, optional
Passed to the groups argument of Context.getState().
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = create_context(system, integrator, platform)
context.setPositions(positions)
state = context.getState(getEnergy=True, groups=force_group)
potential = state.getPotentialEnergy()
del context, integrator, state
return potential
def minimize(system, positions, platform=None, tolerance=1.0*unit.kilocalories_per_mole/unit.angstroms, maxIterations=50):
"""Minimize the energy of the given system.
Parameters
----------
platform : simtk.openmm.Platform or None, optional
If None, the global GLOBAL_ALCHEMY_PLATFORM will be used.
tolerance : simtk.unit.Quantity with units compatible with energy/distance, optional, default = 1*kilocalories_per_mole/angstroms
Minimization tolerance
maxIterations : int, optional, default=50
Maximum number of iterations for minimization
Returns
-------
minimized_positions : simtk.openmm.Quantity with shape [nparticle,3] with units compatible with distance
The energy-minimized positions.
"""
timestep = 1.0 * unit.femtoseconds
integrator = openmm.VerletIntegrator(timestep)
context = create_context(system, integrator, platform)
context.setPositions(positions)
openmm.LocalEnergyMinimizer.minimize(context, tolerance, maxIterations)
minimized_positions = context.getState(getPositions=True).getPositions(asNumpy=True)
del context, integrator
return minimized_positions
def compute_force_energy(system, positions, force_name):
"""Compute the energy of the force with the given name."""
system = copy.deepcopy(system) # Copy to avoid modifications
force_name_index = 1
found_force = False
# Separate force group of force_name from all others.
for force in system.getForces():
if force.__class__.__name__ == force_name:
force.setForceGroup(force_name_index)
found_force = True
else:
force.setForceGroup(0)
if not found_force:
return None
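# compute_energy forwards force_group to Context.getState(groups=...), which
# treats an int as a bitmask, so 2**force_name_index selects only the group
# just assigned to the requested force.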
force_energy = compute_energy(system, positions, force_group=2**force_name_index)
del system
return force_energy
def assert_almost_equal(energy1, energy2, err_msg):
delta = energy1 - energy2
err_msg += ' interactions do not match! Reference {}, alchemical {},' \
' difference {}'.format(energy1, energy2, delta)
assert abs(delta) < MAX_DELTA, err_msg
def turn_off_nonbonded(system, sterics=False, electrostatics=False,
exceptions=False, only_atoms=frozenset()):
"""Turn off sterics and/or electrostatics interactions.
This affects only NonbondedForce and non-alchemical CustomNonbondedForces.
If `exceptions` is True, only the exceptions are turned off.
Systems that have gone through replace_reaction_field are also supported.
The `system` must have only nonbonded forces.
If `only_atoms` is specified, only those atoms will be turned off.
"""
if len(only_atoms) == 0: # if empty, turn off all particles
only_atoms = set(range(system.getNumParticles()))
epsilon_coeff = 0.0 if sterics else 1.0
charge_coeff = 0.0 if electrostatics else 1.0
if exceptions: # Turn off exceptions
force_idx, nonbonded_force = forces.find_forces(system, openmm.NonbondedForce, only_one=True)
# Exceptions.
for exception_index in range(nonbonded_force.getNumExceptions()):
iatom, jatom, charge, sigma, epsilon = nonbonded_force.getExceptionParameters(exception_index)
if iatom in only_atoms or jatom in only_atoms:
nonbonded_force.setExceptionParameters(exception_index, iatom, jatom,
charge_coeff*charge, sigma, epsilon_coeff*epsilon)
# Offset exceptions.
for offset_index in range(nonbonded_force.getNumExceptionParameterOffsets()):
(parameter, exception_index, chargeprod_scale,
sigma_scale, epsilon_scale) = nonbonded_force.getExceptionParameterOffset(offset_index)
iatom, jatom, _, _, _ = nonbonded_force.getExceptionParameters(exception_index)
if iatom in only_atoms or jatom in only_atoms:
nonbonded_force.setExceptionParameterOffset(offset_index, parameter, exception_index,
charge_coeff*chargeprod_scale, sigma_scale,
epsilon_coeff*epsilon_scale)
else:
# Turn off particle interactions
for force in system.getForces():
# Handle only a Nonbonded and a CustomNonbonded (for RF).
if not (isinstance(force, openmm.CustomNonbondedForce) and 'lambda' not in force.getEnergyFunction() or
isinstance(force, openmm.NonbondedForce)):
continue
# Particle interactions.
for particle_index in range(force.getNumParticles()):
if particle_index in only_atoms:
# Convert tuple parameters to list to allow changes.
parameters = list(force.getParticleParameters(particle_index))
parameters[0] *= charge_coeff # charge
try: # CustomNonbondedForce
force.setParticleParameters(particle_index, parameters)
except TypeError: # NonbondedForce
parameters[2] *= epsilon_coeff # epsilon
force.setParticleParameters(particle_index, *parameters)
# Offset particle interactions.
if isinstance(force, openmm.NonbondedForce):
for offset_index in range(force.getNumParticleParameterOffsets()):
(parameter, particle_index, charge_scale,
sigma_scale, epsilon_scale) = force.getParticleParameterOffset(offset_index)
if particle_index in only_atoms:
force.setParticleParameterOffset(offset_index, parameter, particle_index,
charge_coeff*charge_scale, sigma_scale,
epsilon_coeff*epsilon_scale)
def dissect_nonbonded_energy(reference_system, positions, alchemical_atoms, other_alchemical_atoms):
"""Dissect the nonbonded energy contributions of the reference system
by atom group and sterics/electrostatics.
This works also for systems objects whose CutoffPeriodic force
has been replaced by a CustomNonbondedForce to set c_rf = 0.
Parameters
----------
reference_system : simtk.openmm.System
The reference system with the NonbondedForce to dissect.
positions : simtk.openmm.unit.Quantity of dimension [nparticles,3] with units compatible with Angstroms
The positions to test.
alchemical_atoms : set of int
The indices of the alchemical atoms.
other_alchemical_atoms : set of int
The indices of the alchemical atoms in other alchemical regions
Returns
-------
tuple of simtk.openmm.unit.Quantity with units compatible with kJ/mol
All contributions to the potential energy of NonbondedForce in the order:
nn_particle_sterics: particle sterics interactions between nonalchemical atoms
aa_particle_sterics: particle sterics interactions between alchemical atoms
na_particle_sterics: particle sterics interactions between nonalchemical-alchemical atoms
nn_particle_electro: (direct space) particle electrostatics interactions between nonalchemical atoms
aa_particle_electro: (direct space) particle electrostatics interactions between alchemical atoms
na_particle_electro: (direct space) particle electrostatics interactions between nonalchemical-alchemical atoms
nn_exception_sterics: particle sterics 1,4 exceptions between nonalchemical atoms
aa_exception_sterics: particle sterics 1,4 exceptions between alchemical atoms
na_exception_sterics: particle sterics 1,4 exceptions between nonalchemical-alchemical atoms
nn_exception_electro: particle electrostatics 1,4 exceptions between nonalchemical atoms
aa_exception_electro: particle electrostatics 1,4 exceptions between alchemical atoms
na_exception_electro: particle electrostatics 1,4 exceptions between nonalchemical-alchemical atoms
nn_reciprocal_energy: electrostatics of reciprocal space between nonalchemical atoms
aa_reciprocal_energy: electrostatics of reciprocal space between alchemical atoms
na_reciprocal_energy: electrostatics of reciprocal space between nonalchemical-alchemical atoms
"""
all_alchemical_atoms = set(alchemical_atoms).union(other_alchemical_atoms)
nonalchemical_atoms = set(range(reference_system.getNumParticles())).difference(all_alchemical_atoms)
# Remove all forces but NonbondedForce and eventually the
# CustomNonbondedForce used to model reaction field.
reference_system = copy.deepcopy(reference_system) # don't modify original system
forces_to_remove = list()
for force_index, force in enumerate(reference_system.getForces()):
force.setForceGroup(0)
if isinstance(force, openmm.NonbondedForce):
force.setReciprocalSpaceForceGroup(30) # separate PME reciprocal from direct space
# We keep only CustomNonbondedForces that are not alchemically modified.
elif not (isinstance(force, openmm.CustomNonbondedForce) and
'lambda' not in force.getEnergyFunction()):
forces_to_remove.append(force_index)
for force_index in reversed(forces_to_remove):
reference_system.removeForce(force_index)
assert len(reference_system.getForces()) <= 2
# Compute particle interactions between different groups of atoms
# ----------------------------------------------------------------
# Turn off other alchemical regions
if len(other_alchemical_atoms) > 0:
turn_off_nonbonded(reference_system, sterics=True, electrostatics=True, only_atoms=other_alchemical_atoms)
turn_off_nonbonded(reference_system, sterics=True, electrostatics=True, exceptions=True, only_atoms=other_alchemical_atoms)
system = copy.deepcopy(reference_system)
# Compute total energy from nonbonded interactions
tot_energy = compute_energy(system, positions)
tot_reciprocal_energy = compute_energy(system, positions, force_group={30})
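# The dissection below works by difference: turning off one group's terms and
# subtracting the energy with those terms fully off isolates that group's
# contribution, and the mixed nonalchemical/alchemical (na_*) term is whatever
# remains of the total after the nn_* and aa_* pieces are removed.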
# Compute contributions from particle sterics
turn_off_nonbonded(system, sterics=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_particle_sterics = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical sterics
turn_off_nonbonded(system, sterics=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_particle_sterics = compute_energy(system, positions)
turn_off_nonbonded(system, sterics=True)
tot_energy_no_particle_sterics = compute_energy(system, positions)
tot_particle_sterics = tot_energy - tot_energy_no_particle_sterics
nn_particle_sterics = tot_energy_no_alchem_particle_sterics - tot_energy_no_particle_sterics
aa_particle_sterics = tot_energy_no_nonalchem_particle_sterics - tot_energy_no_particle_sterics
na_particle_sterics = tot_particle_sterics - nn_particle_sterics - aa_particle_sterics
# Compute contributions from particle electrostatics
system = copy.deepcopy(reference_system) # Restore sterics
turn_off_nonbonded(system, electrostatics=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_particle_electro = compute_energy(system, positions)
nn_reciprocal_energy = compute_energy(system, positions, force_group={30})
system = copy.deepcopy(reference_system) # Restore alchemical electrostatics
turn_off_nonbonded(system, electrostatics=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_particle_electro = compute_energy(system, positions)
aa_reciprocal_energy = compute_energy(system, positions, force_group={30})
turn_off_nonbonded(system, electrostatics=True)
tot_energy_no_particle_electro = compute_energy(system, positions)
na_reciprocal_energy = tot_reciprocal_energy - nn_reciprocal_energy - aa_reciprocal_energy
tot_particle_electro = tot_energy - tot_energy_no_particle_electro
nn_particle_electro = tot_energy_no_alchem_particle_electro - tot_energy_no_particle_electro
aa_particle_electro = tot_energy_no_nonalchem_particle_electro - tot_energy_no_particle_electro
na_particle_electro = tot_particle_electro - nn_particle_electro - aa_particle_electro
nn_particle_electro -= nn_reciprocal_energy
aa_particle_electro -= aa_reciprocal_energy
na_particle_electro -= na_reciprocal_energy
# Compute exceptions between different groups of atoms
# -----------------------------------------------------
# Compute contributions from exceptions sterics
system = copy.deepcopy(reference_system) # Restore particle interactions
turn_off_nonbonded(system, sterics=True, exceptions=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_exception_sterics = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical sterics
turn_off_nonbonded(system, sterics=True, exceptions=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_exception_sterics = compute_energy(system, positions)
turn_off_nonbonded(system, sterics=True, exceptions=True)
tot_energy_no_exception_sterics = compute_energy(system, positions)
tot_exception_sterics = tot_energy - tot_energy_no_exception_sterics
nn_exception_sterics = tot_energy_no_alchem_exception_sterics - tot_energy_no_exception_sterics
aa_exception_sterics = tot_energy_no_nonalchem_exception_sterics - tot_energy_no_exception_sterics
na_exception_sterics = tot_exception_sterics - nn_exception_sterics - aa_exception_sterics
# Compute contributions from exceptions electrostatics
system = copy.deepcopy(reference_system) # Restore exceptions sterics
turn_off_nonbonded(system, electrostatics=True, exceptions=True, only_atoms=alchemical_atoms)
tot_energy_no_alchem_exception_electro = compute_energy(system, positions)
system = copy.deepcopy(reference_system) # Restore alchemical electrostatics
turn_off_nonbonded(system, electrostatics=True, exceptions=True, only_atoms=nonalchemical_atoms)
tot_energy_no_nonalchem_exception_electro = compute_energy(system, positions)
turn_off_nonbonded(system, electrostatics=True, exceptions=True)
tot_energy_no_exception_electro = compute_energy(system, positions)
tot_exception_electro = tot_energy - tot_energy_no_exception_electro
nn_exception_electro = tot_energy_no_alchem_exception_electro - tot_energy_no_exception_electro
aa_exception_electro = tot_energy_no_nonalchem_exception_electro - tot_energy_no_exception_electro
na_exception_electro = tot_exception_electro - nn_exception_electro - aa_exception_electro
assert tot_particle_sterics == nn_particle_sterics + aa_particle_sterics + na_particle_sterics
assert_almost_equal(tot_particle_electro, nn_particle_electro + aa_particle_electro +
na_particle_electro + nn_reciprocal_energy + aa_reciprocal_energy + na_reciprocal_energy,
'Inconsistency during dissection of nonbonded contributions:')
assert tot_exception_sterics == nn_exception_sterics + aa_exception_sterics + na_exception_sterics
assert tot_exception_electro == nn_exception_electro + aa_exception_electro + na_exception_electro
assert_almost_equal(tot_energy, tot_particle_sterics + tot_particle_electro +
tot_exception_sterics + tot_exception_electro,
'Inconsistency during dissection of nonbonded contributions:')
return nn_particle_sterics, aa_particle_sterics, na_particle_sterics,\
nn_particle_electro, aa_particle_electro, na_particle_electro,\
nn_exception_sterics, aa_exception_sterics, na_exception_sterics,\
nn_exception_electro, aa_exception_electro, na_exception_electro,\
nn_reciprocal_energy, aa_reciprocal_energy, na_reciprocal_energy
def compute_direct_space_correction(nonbonded_force, alchemical_atoms, positions):
"""
Compute the correction added by OpenMM to the direct space to account for
exception in reciprocal space energy.
Parameters
----------
nonbonded_force : simtk.openmm.NonbondedForce
The nonbonded force to compute the direct space correction.
alchemical_atoms : set
Set of alchemical particles in the force.
positions : numpy.array
Position of the particles.
Returns
-------
aa_correction : simtk.openmm.unit.Quantity with units compatible with kJ/mol
The correction to the direct space caused by exceptions between alchemical atoms.
na_correction : simtk.openmm.unit.Quantity with units compatible with kJ/mol
The correction to the direct space caused by exceptions between nonalchemical-alchemical atoms.
"""
energy_unit = unit.kilojoule_per_mole
aa_correction = 0.0
na_correction = 0.0
# Convert quantity positions into floats.
if isinstance(positions, unit.Quantity):
positions = positions.value_in_unit_system(unit.md_unit_system)
# If there is no reciprocal space, the correction is 0.0
if nonbonded_force.getNonbondedMethod() not in [openmm.NonbondedForce.Ewald, openmm.NonbondedForce.PME]:
return aa_correction * energy_unit, na_correction * energy_unit
# Get alpha ewald parameter
alpha_ewald, _, _, _ = nonbonded_force.getPMEParameters()
if alpha_ewald / alpha_ewald.unit == 0.0:
cutoff_distance = nonbonded_force.getCutoffDistance()
tolerance = nonbonded_force.getEwaldErrorTolerance()
alpha_ewald = (1.0 / cutoff_distance) * np.sqrt(-np.log(2.0*tolerance))
alpha_ewald = alpha_ewald.value_in_unit_system(unit.md_unit_system)
assert alpha_ewald != 0.0
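# Each exception pair contributes the Ewald exclusion term that OpenMM cancels
# in direct space:
#   E_ij = ONE_4PI_EPS0 * q_i * q_j * erf(alpha * r_ij) / r_ij,
# which is linearized to ONE_4PI_EPS0 * q_i * q_j * 2 * alpha / sqrt(pi)
# when alpha * r_ij is very small (see the branch below).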
for exception_id in range(nonbonded_force.getNumExceptions()):
# Get particles parameters in md unit system
iatom, jatom, _, _, _ = nonbonded_force.getExceptionParameters(exception_id)
icharge, _, _ = nonbonded_force.getParticleParameters(iatom)
jcharge, _, _ = nonbonded_force.getParticleParameters(jatom)
icharge = icharge.value_in_unit_system(unit.md_unit_system)
jcharge = jcharge.value_in_unit_system(unit.md_unit_system)
# Compute the correction and take care of numerical instabilities
r = np.linalg.norm(positions[iatom] - positions[jatom]) # distance between atoms
alpha_r = alpha_ewald * r
if alpha_r > 1e-6:
correction = ONE_4PI_EPS0 * icharge * jcharge * scipy.special.erf(alpha_r) / r
else: # for small alpha_r we linearize erf()
correction = ONE_4PI_EPS0 * alpha_ewald * icharge * jcharge * 2.0 / np.sqrt(np.pi)
# Assign correction to correct group
if iatom in alchemical_atoms and jatom in alchemical_atoms:
aa_correction += correction
elif iatom in alchemical_atoms or jatom in alchemical_atoms:
na_correction += correction
return aa_correction * energy_unit, na_correction * energy_unit
def is_alchemical_pme_treatment_exact(alchemical_system):
"""Return True if the given alchemical system models PME exactly."""
# If exact PME is here, the NonbondedForce defines a
# lambda_electrostatics variable.
_, nonbonded_force = forces.find_forces(alchemical_system, openmm.NonbondedForce,
only_one=True)
for parameter_idx in range(nonbonded_force.getNumGlobalParameters()):
parameter_name = nonbonded_force.getGlobalParameterName(parameter_idx)
# With multiple alchemical regions, lambda_electrostatics might have a suffix.
if parameter_name.startswith('lambda_electrostatics'):
return True
return False
# =============================================================================
# SUBROUTINES FOR TESTING
# =============================================================================
def compare_system_energies(reference_system, alchemical_system, alchemical_regions, positions):
"""Check that the energies of reference and alchemical systems are close.
This takes care of ignoring the reciprocal space when the nonbonded
method is an Ewald method.
"""
if not isinstance(alchemical_regions, list):
alchemical_regions = [alchemical_regions]
# By default we compare the energy of all groups.
force_group = -1
# Check nonbonded method. Comparing with PME is more complicated
# because the alchemical system with direct-space treatment of PME
# does not take into account the reciprocal space.
force_idx, nonbonded_force = forces.find_forces(reference_system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
is_direct_space_pme = (nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald] and
not is_alchemical_pme_treatment_exact(alchemical_system))
if is_direct_space_pme:
# Separate the reciprocal space force in a different group.
reference_system = copy.deepcopy(reference_system)
alchemical_system = copy.deepcopy(alchemical_system)
for system in [reference_system, alchemical_system]:
for force in system.getForces():
force.setForceGroup(0)
if isinstance(force, openmm.NonbondedForce):
force.setReciprocalSpaceForceGroup(31)
# We compare only the direct space energy
force_group = {0}
# Compute the reciprocal space correction added to the direct space
# energy due to the exceptions of the alchemical atoms.
aa_correction = 0.0 * unit.kilojoule_per_mole
na_correction = 0.0 * unit.kilojoule_per_mole
for region in alchemical_regions:
alchemical_atoms = region.alchemical_atoms
aa, na = compute_direct_space_correction(nonbonded_force, alchemical_atoms, positions)
aa_correction += aa
na_correction += na
# Compute potential of the direct space.
potentials = [compute_energy(system, positions, force_group=force_group)
for system in [reference_system, alchemical_system]]
# Add the direct space correction.
if is_direct_space_pme:
potentials.append(aa_correction + na_correction)
else:
potentials.append(0.0 * GLOBAL_ENERGY_UNIT)
# Check that error is small.
delta = potentials[1] - potentials[2] - potentials[0]
if abs(delta) > MAX_DELTA:
print("========")
for description, potential in zip(['reference', 'alchemical', 'PME correction'], potentials):
print("{}: {} ".format(description, potential))
print("delta : {}".format(delta))
err_msg = "Maximum allowable deviation exceeded (was {:.8f} kcal/mol; allowed {:.8f} kcal/mol)."
raise Exception(err_msg.format(delta / unit.kilocalories_per_mole, MAX_DELTA / unit.kilocalories_per_mole))
def check_multi_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):
"""wrapper around check_interacting_energy_components for multiple regions
Parameters
----------
reference_system : simtk.openmm.System
The reference system.
alchemical_system : simtk.openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity
The positions to test (units of length).
Notes
-----
Interactions between alchemical regions are not tested here.
Alchemical regions are assumed to be non-interacting.
"""
all_alchemical_atoms = set()
for region in alchemical_regions:
for atom in region.alchemical_atoms:
all_alchemical_atoms.add(atom)
for region in alchemical_regions:
check_interacting_energy_components(
reference_system, alchemical_system, region, positions,
all_alchemical_atoms, multi_regions=True)
def check_interacting_energy_components(reference_system, alchemical_system, alchemical_regions, positions,
all_alchemical_atoms=None, multi_regions=False):
"""Compare full and alchemically-modified system energies by energy component.
Parameters
----------
reference_system : simtk.openmm.System
The reference system.
alchemical_system : simtk.openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity
The positions to test (units of length).
multi_regions : boolean
Indicates whether multiple regions are being tested
"""
energy_unit = unit.kilojoule_per_mole
reference_system = copy.deepcopy(reference_system)
alchemical_system = copy.deepcopy(alchemical_system)
is_exact_pme = is_alchemical_pme_treatment_exact(alchemical_system)
# Find nonbonded method
_, nonbonded_force = forces.find_forces(reference_system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Get energy components of reference system's nonbonded force
if multi_regions:
other_alchemical_atoms = all_alchemical_atoms.difference(alchemical_regions.alchemical_atoms)
print("Dissecting reference system's nonbonded force for region {}".format(alchemical_regions.name))
else:
other_alchemical_atoms = set()
print("Dissecting reference system's nonbonded force")
energy_components = dissect_nonbonded_energy(reference_system, positions,
alchemical_regions.alchemical_atoms, other_alchemical_atoms)
nn_particle_sterics, aa_particle_sterics, na_particle_sterics,\
nn_particle_electro, aa_particle_electro, na_particle_electro,\
nn_exception_sterics, aa_exception_sterics, na_exception_sterics,\
nn_exception_electro, aa_exception_electro, na_exception_electro,\
nn_reciprocal_energy, aa_reciprocal_energy, na_reciprocal_energy = energy_components
# Dissect unmodified nonbonded force in alchemical system
if multi_regions:
print("Dissecting alchemical system's unmodified nonbonded force for region {}".format(alchemical_regions.name))
else:
print("Dissecting alchemical system's unmodified nonbonded force")
energy_components = dissect_nonbonded_energy(alchemical_system, positions,
alchemical_regions.alchemical_atoms, other_alchemical_atoms)
unmod_nn_particle_sterics, unmod_aa_particle_sterics, unmod_na_particle_sterics,\
unmod_nn_particle_electro, unmod_aa_particle_electro, unmod_na_particle_electro,\
unmod_nn_exception_sterics, unmod_aa_exception_sterics, unmod_na_exception_sterics,\
unmod_nn_exception_electro, unmod_aa_exception_electro, unmod_na_exception_electro,\
unmod_nn_reciprocal_energy, unmod_aa_reciprocal_energy, unmod_na_reciprocal_energy = energy_components
# Get alchemically-modified energy components
if multi_regions:
print("Computing alchemical system components energies for region {}".format(alchemical_regions.name))
else:
print("Computing alchemical system components energies")
alchemical_state = AlchemicalState.from_system(alchemical_system, parameters_name_suffix=alchemical_regions.name)
alchemical_state.set_alchemical_parameters(1.0)
energy_components = AbsoluteAlchemicalFactory.get_energy_components(alchemical_system, alchemical_state,
positions, platform=GLOBAL_ALCHEMY_PLATFORM)
if multi_regions:
region_label = ' for region {}'.format(alchemical_regions.name)
else:
region_label = ''
# Sterics particle and exception interactions are always modeled with a custom force.
na_custom_particle_sterics = energy_components['alchemically modified NonbondedForce for non-alchemical/alchemical sterics' + region_label]
aa_custom_particle_sterics = energy_components['alchemically modified NonbondedForce for alchemical/alchemical sterics' + region_label]
na_custom_exception_sterics = energy_components['alchemically modified BondForce for non-alchemical/alchemical sterics exceptions' + region_label]
aa_custom_exception_sterics = energy_components['alchemically modified BondForce for alchemical/alchemical sterics exceptions' + region_label]
# With exact treatment of PME, we use the NonbondedForce offset for electrostatics.
try:
na_custom_particle_electro = energy_components['alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics' + region_label]
aa_custom_particle_electro = energy_components['alchemically modified NonbondedForce for alchemical/alchemical electrostatics' + region_label]
na_custom_exception_electro = energy_components['alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions' + region_label]
aa_custom_exception_electro = energy_components['alchemically modified BondForce for alchemical/alchemical electrostatics exceptions' + region_label]
except KeyError:
assert is_exact_pme
# Test that all NonbondedForce contributions match
# -------------------------------------------------
# All contributions from alchemical atoms in unmodified nonbonded force are turned off
err_msg = 'Non-zero contribution from unmodified NonbondedForce alchemical atoms: '
assert_almost_equal(unmod_aa_particle_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_particle_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_exception_sterics, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_exception_sterics, 0.0 * energy_unit, err_msg)
if not is_exact_pme:
# With exact PME treatment these are tested below.
assert_almost_equal(unmod_aa_particle_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_particle_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_reciprocal_energy, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_reciprocal_energy, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_aa_exception_electro, 0.0 * energy_unit, err_msg)
assert_almost_equal(unmod_na_exception_electro, 0.0 * energy_unit, err_msg)
# Check sterics interactions match
assert_almost_equal(nn_particle_sterics, unmod_nn_particle_sterics,
'Non-alchemical/non-alchemical atoms particle sterics' + region_label)
assert_almost_equal(nn_exception_sterics, unmod_nn_exception_sterics,
'Non-alchemical/non-alchemical atoms exceptions sterics' + region_label)
assert_almost_equal(aa_particle_sterics, aa_custom_particle_sterics,
'Alchemical/alchemical atoms particle sterics' + region_label)
assert_almost_equal(aa_exception_sterics, aa_custom_exception_sterics,
'Alchemical/alchemical atoms exceptions sterics' + region_label)
assert_almost_equal(na_particle_sterics, na_custom_particle_sterics,
'Non-alchemical/alchemical atoms particle sterics' + region_label)
assert_almost_equal(na_exception_sterics, na_custom_exception_sterics,
'Non-alchemical/alchemical atoms exceptions sterics' + region_label)
# Check electrostatics interactions
assert_almost_equal(nn_particle_electro, unmod_nn_particle_electro,
'Non-alchemical/non-alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(nn_exception_electro, unmod_nn_exception_electro,
'Non-alchemical/non-alchemical atoms exceptions electrostatics' + region_label)
# With exact treatment of PME, the electrostatics of alchemical-alchemical
# atoms is modeled with NonbondedForce offsets.
if is_exact_pme:
# Reciprocal space.
assert_almost_equal(aa_reciprocal_energy, unmod_aa_reciprocal_energy,
'Alchemical/alchemical atoms reciprocal space energy' + region_label)
assert_almost_equal(na_reciprocal_energy, unmod_na_reciprocal_energy,
'Non-alchemical/alchemical atoms reciprocal space energy' + region_label)
# Direct space.
assert_almost_equal(aa_particle_electro, unmod_aa_particle_electro,
'Alchemical/alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(na_particle_electro, unmod_na_particle_electro,
'Non-alchemical/alchemical atoms particle electrostatics' + region_label)
# Exceptions.
assert_almost_equal(aa_exception_electro, unmod_aa_exception_electro,
'Alchemical/alchemical atoms exceptions electrostatics' + region_label)
assert_almost_equal(na_exception_electro, unmod_na_exception_electro,
'Non-alchemical/alchemical atoms exceptions electrostatics' + region_label)
# With direct space PME, the custom forces model only the
# direct space of alchemical-alchemical interactions.
else:
# Get direct space correction due to reciprocal space exceptions
aa_correction, na_correction = compute_direct_space_correction(nonbonded_force,
alchemical_regions.alchemical_atoms,
positions)
aa_particle_electro += aa_correction
na_particle_electro += na_correction
# Check direct space energy
assert_almost_equal(aa_particle_electro, aa_custom_particle_electro,
'Alchemical/alchemical atoms particle electrostatics' + region_label)
assert_almost_equal(na_particle_electro, na_custom_particle_electro,
'Non-alchemical/alchemical atoms particle electrostatics' + region_label)
# Check exceptions.
assert_almost_equal(aa_exception_electro, aa_custom_exception_electro,
'Alchemical/alchemical atoms exceptions electrostatics' + region_label)
assert_almost_equal(na_exception_electro, na_custom_exception_electro,
'Non-alchemical/alchemical atoms exceptions electrostatics' + region_label)
# With Ewald methods, the NonbondedForce should always hold the
# reciprocal space energy of nonalchemical-nonalchemical atoms.
if nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]:
# Reciprocal space.
assert_almost_equal(nn_reciprocal_energy, unmod_nn_reciprocal_energy,
'Non-alchemical/non-alchemical atoms reciprocal space energy')
else:
# Reciprocal space energy should be null in this case
assert nn_reciprocal_energy == unmod_nn_reciprocal_energy == 0.0 * energy_unit
assert aa_reciprocal_energy == unmod_aa_reciprocal_energy == 0.0 * energy_unit
assert na_reciprocal_energy == unmod_na_reciprocal_energy == 0.0 * energy_unit
# Check forces other than nonbonded
# ----------------------------------
for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce',
'GBSAOBCForce', 'CustomGBForce']:
alchemical_forces_energies = [energy for label, energy in energy_components.items() if force_name in label]
reference_force_energy = compute_force_energy(reference_system, positions, force_name)
# There should be no force in the alchemical system if force_name is missing from the reference
if reference_force_energy is None:
assert len(alchemical_forces_energies) == 0, str(alchemical_forces_energies)
continue
# Check that the energies match
tot_alchemical_forces_energies = 0.0 * energy_unit
for energy in alchemical_forces_energies:
tot_alchemical_forces_energies += energy
assert_almost_equal(reference_force_energy, tot_alchemical_forces_energies,
'{} energy '.format(force_name))
def check_multi_noninteracting_energy_components(reference_system, alchemical_system, alchemical_regions, positions):
"""wrapper around check_noninteracting_energy_components for multiple regions
Parameters
----------
reference_system : simtk.openmm.System
The reference system (not alchemically modified).
alchemical_system : simtk.openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity
The positions to test (units of length).
"""
for region in alchemical_regions:
check_noninteracting_energy_components(reference_system, alchemical_system, region, positions, True)
def check_noninteracting_energy_components(reference_system, alchemical_system, alchemical_regions, positions, multi_regions=False):
"""Check non-interacting energy components are zero when appropriate.
Parameters
----------
reference_system : simtk.openmm.System
The reference system (not alchemically modified).
alchemical_system : simtk.openmm.System
The alchemically modified system to test.
alchemical_regions : AlchemicalRegion.
The alchemically modified region.
positions : n_particlesx3 array-like of simtk.openmm.unit.Quantity
The positions to test (units of length).
    multi_regions : boolean
        Indicates if multiple regions are being tested.
"""
alchemical_system = copy.deepcopy(alchemical_system)
is_exact_pme = is_alchemical_pme_treatment_exact(alchemical_system)
# Set state to non-interacting.
alchemical_state = AlchemicalState.from_system(alchemical_system, parameters_name_suffix=alchemical_regions.name)
alchemical_state.set_alchemical_parameters(0.0)
energy_components = AbsoluteAlchemicalFactory.get_energy_components(alchemical_system, alchemical_state,
positions, platform=GLOBAL_ALCHEMY_PLATFORM)
def assert_zero_energy(label):
# Handle multiple alchemical regions.
if multi_regions:
label = label + ' for region ' + alchemical_regions.name
# Testing energy component of each region.
print('testing {}'.format(label))
value = energy_components[label]
assert abs(value / GLOBAL_ENERGY_UNIT) == 0.0, ("'{}' should have zero energy in annihilated alchemical"
" state, but energy is {}").format(label, str(value))
# Check that non-alchemical/alchemical particle interactions and 1,4 exceptions have been annihilated
assert_zero_energy('alchemically modified BondForce for non-alchemical/alchemical sterics exceptions')
assert_zero_energy('alchemically modified NonbondedForce for non-alchemical/alchemical sterics')
if is_exact_pme:
assert 'alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics' not in energy_components
assert 'alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions' not in energy_components
else:
assert_zero_energy('alchemically modified NonbondedForce for non-alchemical/alchemical electrostatics')
assert_zero_energy('alchemically modified BondForce for non-alchemical/alchemical electrostatics exceptions')
# Check that alchemical/alchemical particle interactions and 1,4 exceptions have been annihilated
if alchemical_regions.annihilate_sterics:
assert_zero_energy('alchemically modified NonbondedForce for alchemical/alchemical sterics')
assert_zero_energy('alchemically modified BondForce for alchemical/alchemical sterics exceptions')
if alchemical_regions.annihilate_electrostatics:
if is_exact_pme:
assert 'alchemically modified NonbondedForce for alchemical/alchemical electrostatics' not in energy_components
assert 'alchemically modified BondForce for alchemical/alchemical electrostatics exceptions' not in energy_components
else:
assert_zero_energy('alchemically modified NonbondedForce for alchemical/alchemical electrostatics')
assert_zero_energy('alchemically modified BondForce for alchemical/alchemical electrostatics exceptions')
# Check valence terms
for force_name in ['HarmonicBondForce', 'HarmonicAngleForce', 'PeriodicTorsionForce']:
force_label = 'alchemically modified ' + force_name
if force_label in energy_components:
assert_zero_energy(force_label)
# Check implicit solvent force.
for force_name in ['CustomGBForce', 'GBSAOBCForce']:
label = 'alchemically modified ' + force_name
# Check if the system has an implicit solvent force.
try:
alchemical_energy = energy_components[label]
except KeyError: # No implicit solvent.
continue
# If all alchemical particles are modified, the alchemical energy should be zero.
if len(alchemical_regions.alchemical_atoms) == reference_system.getNumParticles():
assert_zero_energy(label)
continue
# Otherwise compare the alchemical energy with a
# reference system with only non-alchemical particles.
# Find implicit solvent force in reference system.
for reference_force in reference_system.getForces():
if reference_force.__class__.__name__ == force_name:
break
system = openmm.System()
force = reference_force.__class__()
# For custom GB forces, we need to copy all computed values,
# energy terms, parameters, tabulated functions and exclusions.
if isinstance(force, openmm.CustomGBForce):
for index in range(reference_force.getNumPerParticleParameters()):
name = reference_force.getPerParticleParameterName(index)
force.addPerParticleParameter(name)
for index in range(reference_force.getNumComputedValues()):
computed_value = reference_force.getComputedValueParameters(index)
force.addComputedValue(*computed_value)
for index in range(reference_force.getNumEnergyTerms()):
energy_term = reference_force.getEnergyTermParameters(index)
force.addEnergyTerm(*energy_term)
for index in range(reference_force.getNumGlobalParameters()):
name = reference_force.getGlobalParameterName(index)
default_value = reference_force.getGlobalParameterDefaultValue(index)
force.addGlobalParameter(name, default_value)
for function_index in range(reference_force.getNumTabulatedFunctions()):
name = reference_force.getTabulatedFunctionName(function_index)
function = reference_force.getTabulatedFunction(function_index)
function_copy = copy.deepcopy(function)
force.addTabulatedFunction(name, function_copy)
for exclusion_index in range(reference_force.getNumExclusions()):
particles = reference_force.getExclusionParticles(exclusion_index)
force.addExclusion(*particles)
# Create a system with only the non-alchemical particles.
for particle_index in range(reference_system.getNumParticles()):
if particle_index not in alchemical_regions.alchemical_atoms:
# Add particle to System.
mass = reference_system.getParticleMass(particle_index)
system.addParticle(mass)
                # Add particle to Force.
parameters = reference_force.getParticleParameters(particle_index)
try: # GBSAOBCForce
force.addParticle(*parameters)
except NotImplementedError: # CustomGBForce
force.addParticle(parameters)
system.addForce(force)
# Get positions for all non-alchemical particles.
non_alchemical_positions = [pos for i, pos in enumerate(positions)
if i not in alchemical_regions.alchemical_atoms]
# Compute reference force energy.
reference_force_energy = compute_force_energy(system, non_alchemical_positions, force_name)
assert_almost_equal(reference_force_energy, alchemical_energy,
'reference {}, alchemical {}'.format(reference_force_energy, alchemical_energy))
def check_split_force_groups(system, region_names=None):
"""Check that force groups are split correctly."""
if region_names is None:
region_names = []
    # Separate force groups by the lambda parameters that AlchemicalState supports.
for region in region_names:
force_groups_by_lambda = {}
lambdas_by_force_group = {}
for force, lambda_name, _ in AlchemicalState._get_system_controlled_parameters(
system, parameters_name_suffix=region):
force_group = force.getForceGroup()
try:
force_groups_by_lambda[lambda_name].add(force_group)
except KeyError:
force_groups_by_lambda[lambda_name] = {force_group}
try:
lambdas_by_force_group[force_group].add(lambda_name)
except KeyError:
lambdas_by_force_group[force_group] = {lambda_name}
# Check that force group 0 doesn't hold alchemical forces.
assert 0 not in force_groups_by_lambda
# There are as many alchemical force groups as not-None lambda variables.
alchemical_state = AlchemicalState.from_system(system, parameters_name_suffix=region)
valid_lambdas = {lambda_name for lambda_name in alchemical_state._get_controlled_parameters(parameters_name_suffix=region)
if getattr(alchemical_state, lambda_name) is not None}
assert valid_lambdas == set(force_groups_by_lambda.keys())
# Check that force groups and lambda variables are in 1-to-1 correspondence.
assert len(force_groups_by_lambda) == len(lambdas_by_force_group)
for d in [force_groups_by_lambda, lambdas_by_force_group]:
for value in d.values():
assert len(value) == 1
# With exact treatment of PME, the NonbondedForce must
# be in the lambda_electrostatics force group.
if is_alchemical_pme_treatment_exact(system):
force_idx, nonbonded_force = forces.find_forces(system, openmm.NonbondedForce, only_one=True)
assert force_groups_by_lambda['lambda_electrostatics_{}'.format(region)] == {nonbonded_force.getForceGroup()}
# =============================================================================
# BENCHMARKING AND DEBUG FUNCTIONS
# =============================================================================
def benchmark(reference_system, alchemical_regions, positions, nsteps=500,
timestep=1.0*unit.femtoseconds):
"""
Benchmark performance of alchemically modified system relative to original system.
Parameters
----------
reference_system : simtk.openmm.System
The reference System object to compare with.
alchemical_regions : AlchemicalRegion
The region to alchemically modify.
positions : n_particlesx3 array-like of simtk.unit.Quantity
The initial positions (units of distance).
nsteps : int, optional
Number of molecular dynamics steps to use for benchmarking (default is 500).
timestep : simtk.unit.Quantity, optional
Timestep to use for benchmarking (units of time, default is 1.0*unit.femtoseconds).
"""
timer = utils.Timer()
# Create the perturbed system.
factory = AbsoluteAlchemicalFactory()
timer.start('Create alchemical system')
alchemical_system = factory.create_alchemical_system(reference_system, alchemical_regions)
timer.stop('Create alchemical system')
# Create an alchemically-perturbed state corresponding to nearly fully-interacting.
# NOTE: We use a lambda slightly smaller than 1.0 because the AbsoluteAlchemicalFactory
# may not use Custom*Force softcore versions if lambda = 1.0 identically.
alchemical_state = AlchemicalState.from_system(alchemical_system)
alchemical_state.set_alchemical_parameters(1.0 - 1.0e-6)
# Create integrators.
reference_integrator = openmm.VerletIntegrator(timestep)
alchemical_integrator = openmm.VerletIntegrator(timestep)
# Create contexts for sampling.
if GLOBAL_ALCHEMY_PLATFORM:
reference_context = openmm.Context(reference_system, reference_integrator, GLOBAL_ALCHEMY_PLATFORM)
alchemical_context = openmm.Context(alchemical_system, alchemical_integrator, GLOBAL_ALCHEMY_PLATFORM)
else:
reference_context = openmm.Context(reference_system, reference_integrator)
alchemical_context = openmm.Context(alchemical_system, alchemical_integrator)
reference_context.setPositions(positions)
alchemical_context.setPositions(positions)
# Make sure all kernels are compiled.
reference_integrator.step(1)
alchemical_integrator.step(1)
# Run simulations.
print('Running reference system...')
timer.start('Run reference system')
reference_integrator.step(nsteps)
timer.stop('Run reference system')
print('Running alchemical system...')
timer.start('Run alchemical system')
alchemical_integrator.step(nsteps)
timer.stop('Run alchemical system')
print('Done.')
timer.report_timing()
def benchmark_alchemy_from_pdb():
"""CLI entry point for benchmarking alchemical performance from a PDB file.
"""
logging.basicConfig(level=logging.DEBUG)
import mdtraj
import argparse
from simtk.openmm import app
parser = argparse.ArgumentParser(description='Benchmark performance of alchemically-modified system.')
parser.add_argument('-p', '--pdb', metavar='PDBFILE', type=str, action='store', required=True,
help='PDB file to benchmark; only protein forcefields supported for now (no small molecules)')
parser.add_argument('-s', '--selection', metavar='SELECTION', type=str, action='store', default='not water',
help='MDTraj DSL describing alchemical region (default: "not water")')
parser.add_argument('-n', '--nsteps', metavar='STEPS', type=int, action='store', default=1000,
help='Number of benchmarking steps (default: 1000)')
args = parser.parse_args()
# Read the PDB file
print('Loading PDB file...')
pdbfile = app.PDBFile(args.pdb)
print('Loading forcefield...')
forcefield = app.ForceField('amber99sbildn.xml', 'tip3p.xml')
print('Adding missing hydrogens...')
modeller = app.Modeller(pdbfile.topology, pdbfile.positions)
modeller.addHydrogens(forcefield)
print('Creating System...')
reference_system = forcefield.createSystem(modeller.topology, nonbondedMethod=app.PME)
# Minimize
print('Minimizing...')
positions = minimize(reference_system, modeller.positions)
# Select alchemical regions
mdtraj_topology = mdtraj.Topology.from_openmm(modeller.topology)
alchemical_atoms = mdtraj_topology.select(args.selection)
alchemical_region = AlchemicalRegion(alchemical_atoms=alchemical_atoms)
print('There are %d atoms in the alchemical region.' % len(alchemical_atoms))
# Benchmark
print('Benchmarking...')
benchmark(reference_system, alchemical_region, positions, nsteps=args.nsteps, timestep=1.0*unit.femtoseconds)
def overlap_check(reference_system, alchemical_system, positions, nsteps=50, nsamples=200,
cached_trajectory_filename=None, name=""):
"""
Test overlap between reference system and alchemical system by running a short simulation.
Parameters
----------
reference_system : simtk.openmm.System
The reference System object to compare with.
alchemical_system : simtk.openmm.System
Alchemically-modified system.
positions : n_particlesx3 array-like of simtk.unit.Quantity
The initial positions (units of distance).
nsteps : int, optional
Number of molecular dynamics steps between samples (default is 50).
    nsamples : int, optional
        Number of samples to collect (default is 200).
cached_trajectory_filename : str, optional, default=None
If not None, this file will be used to cache intermediate results with pickle.
    name : str, optional, default=""
        Name of the test system being evaluated.
"""
temperature = 300.0 * unit.kelvin
pressure = 1.0 * unit.atmospheres
collision_rate = 5.0 / unit.picoseconds
timestep = 2.0 * unit.femtoseconds
kT = kB * temperature
# Minimize
positions = minimize(reference_system, positions)
# Add a barostat if possible.
reference_system = copy.deepcopy(reference_system)
if reference_system.usesPeriodicBoundaryConditions():
reference_system.addForce(openmm.MonteCarloBarostat(pressure, temperature))
# Create integrators.
reference_integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)
alchemical_integrator = openmm.VerletIntegrator(timestep)
# Create contexts.
reference_context = create_context(reference_system, reference_integrator)
alchemical_context = create_context(alchemical_system, alchemical_integrator)
# Initialize data structure or load if from cache.
# du_n[n] is the potential energy difference of sample n.
if cached_trajectory_filename is not None:
try:
with open(cached_trajectory_filename, 'rb') as f:
data = pickle.load(f)
except FileNotFoundError:
data = dict(du_n=[])
# Create directory if it doesn't exist.
directory = os.path.dirname(cached_trajectory_filename)
if not os.path.exists(directory):
os.makedirs(directory)
else:
positions = data['positions']
reference_context.setPeriodicBoxVectors(*data['box_vectors'])
else:
data = dict(du_n=[])
# Collect simulation data.
iteration = len(data['du_n'])
reference_context.setPositions(positions)
print()
for sample in range(iteration, nsamples):
print('\rSample {}/{}'.format(sample+1, nsamples), end='')
sys.stdout.flush()
# Run dynamics.
reference_integrator.step(nsteps)
# Get reference energies.
reference_state = reference_context.getState(getEnergy=True, getPositions=True)
reference_potential = reference_state.getPotentialEnergy()
if np.isnan(reference_potential/kT):
raise Exception("Reference potential is NaN")
# Get alchemical energies.
alchemical_context.setPeriodicBoxVectors(*reference_state.getPeriodicBoxVectors())
alchemical_context.setPositions(reference_state.getPositions(asNumpy=True))
alchemical_state = alchemical_context.getState(getEnergy=True)
alchemical_potential = alchemical_state.getPotentialEnergy()
if np.isnan(alchemical_potential/kT):
raise Exception("Alchemical potential is NaN")
# Update and cache data.
data['du_n'].append((alchemical_potential - reference_potential) / kT)
if cached_trajectory_filename is not None:
# Save only last iteration positions and vectors.
data['positions'] = reference_state.getPositions()
data['box_vectors'] = reference_state.getPeriodicBoxVectors()
with open(cached_trajectory_filename, 'wb') as f:
pickle.dump(data, f)
    # Detect equilibration and subsample the correlated data.
du_n = np.array(data['du_n'])
from pymbar import timeseries, EXP
t0, g, Neff = timeseries.detectEquilibration(du_n)
indices = timeseries.subsampleCorrelatedData(du_n, g=g)
du_n = du_n[indices]
# Compute statistics.
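    # EXP is pymbar's one-sided exponential (Zwanzig) estimator; DeltaF and dDeltaF are
    # returned in reduced units (kT) because du_n already holds reduced energy differences.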
DeltaF, dDeltaF = EXP(du_n)
# Raise an exception if the error is larger than 3kT.
MAX_DEVIATION = 3.0 # kT
report = ('\nDeltaF = {:12.3f} +- {:12.3f} kT ({:3.2f} samples, g = {:3.1f}); '
'du mean {:.3f} kT stddev {:.3f} kT').format(DeltaF, dDeltaF, Neff, g, du_n.mean(), du_n.std())
print(report)
if dDeltaF > MAX_DEVIATION:
raise Exception(report)
def rstyle(ax):
"""Styles x,y axes to appear like ggplot2
Must be called after all plot and axis manipulation operations have been
carried out (needs to know final tick spacing)
From:
http://nbviewer.ipython.org/github/wrobstory/climatic/blob/master/examples/ggplot_styling_for_matplotlib.ipynb
"""
import pylab
import matplotlib
import matplotlib.pyplot as plt
    # Set the style of the major and minor grid lines, filled blocks
ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)
ax.grid(True, 'minor', color='0.99', linestyle='-', linewidth=0.7)
ax.patch.set_facecolor('0.90')
ax.set_axisbelow(True)
    # Set minor tick spacing to 1/2 of the major ticks
ax.xaxis.set_minor_locator((pylab.MultipleLocator((plt.xticks()[0][1] - plt.xticks()[0][0]) / 2.0)))
ax.yaxis.set_minor_locator((pylab.MultipleLocator((plt.yticks()[0][1] - plt.yticks()[0][0]) / 2.0)))
    # Remove axis border
for child in ax.get_children():
if isinstance(child, matplotlib.spines.Spine):
child.set_alpha(0)
    # Restyle the tick lines
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(5)
line.set_color("gray")
line.set_markeredgewidth(1.4)
    # Remove the minor tick lines
for line in (ax.xaxis.get_ticklines(minor=True) +
ax.yaxis.get_ticklines(minor=True)):
line.set_markersize(0)
    # Only show bottom left ticks, pointing out of the axis
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
def lambda_trace(reference_system, alchemical_regions, positions, nsteps=100):
"""
Compute potential energy as a function of lambda.
"""
# Create a factory to produce alchemical intermediates.
factory = AbsoluteAlchemicalFactory()
alchemical_system = factory.create_alchemical_system(reference_system, alchemical_regions)
alchemical_state = AlchemicalState.from_system(alchemical_system)
# Take equally-sized steps.
delta = 1.0 / nsteps
# Compute unmodified energy.
u_original = compute_energy(reference_system, positions)
# Scan through lambda values.
lambda_i = np.zeros([nsteps+1], np.float64) # lambda values for u_i
# u_i[i] is the potential energy for lambda_i[i]
u_i = unit.Quantity(np.zeros([nsteps+1], np.float64), unit.kilocalories_per_mole)
for i in range(nsteps+1):
lambda_i[i] = 1.0-i*delta
alchemical_state.set_alchemical_parameters(lambda_i[i])
alchemical_state.apply_to_system(alchemical_system)
u_i[i] = compute_energy(alchemical_system, positions)
logger.info("{:12.9f} {:24.8f} kcal/mol".format(lambda_i[i], u_i[i] / GLOBAL_ENERGY_UNIT))
# Write figure as PDF.
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
with PdfPages('lambda-trace.pdf') as pdf:
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
plt.plot(1, u_original / unit.kilocalories_per_mole, 'ro', label='unmodified')
plt.plot(lambda_i, u_i / unit.kilocalories_per_mole, 'k.', label='alchemical')
plt.title('T4 lysozyme L99A + p-xylene : AMBER96 + OBC GBSA')
plt.ylabel('potential (kcal/mol)')
plt.xlabel('lambda')
ax.legend()
rstyle(ax)
pdf.savefig() # saves the current figure into a pdf page
plt.close()
def generate_trace(test_system):
    # lambda_trace expects (reference_system, alchemical_regions, positions); here we
    # assume the ligand atoms define the alchemical region (a sketch, not the original call order).
    alchemical_region = AlchemicalRegion(alchemical_atoms=test_system['ligand_atoms'])
    lambda_trace(test_system['test'].system, alchemical_region, test_system['test'].positions)
# =============================================================================
# TEST ALCHEMICAL FACTORY SUITE
# =============================================================================
def test_resolve_alchemical_region():
"""Test the method AbsoluteAlchemicalFactory._resolve_alchemical_region."""
test_cases = [
(testsystems.AlanineDipeptideVacuum(), range(22), 9, 36, 48),
(testsystems.AlanineDipeptideVacuum(), range(11, 22), 4, 21, 31),
(testsystems.LennardJonesCluster(), range(27), 0, 0, 0)
]
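    # Each tuple above is (test system, alchemical atoms, expected number of alchemical
    # bonds, angles and torsions inferred when alchemical_bonds/angles/torsions=True).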
for i, (test_case, atoms, n_bonds, n_angles, n_torsions) in enumerate(test_cases):
system = test_case.system
        # Default arguments are converted to empty sets.
alchemical_region = AlchemicalRegion(alchemical_atoms=atoms)
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for region in ['bonds', 'angles', 'torsions']:
assert getattr(resolved_region, 'alchemical_' + region) == set()
        # Numpy arrays are converted to frozensets.
alchemical_region = AlchemicalRegion(alchemical_atoms=np.array(atoms),
alchemical_bonds=np.array(range(n_bonds)),
alchemical_angles=np.array(range(n_angles)),
alchemical_torsions=np.array(range(n_torsions)))
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for region in ['atoms', 'bonds', 'angles', 'torsions']:
assert isinstance(getattr(resolved_region, 'alchemical_' + region), frozenset)
# Bonds, angles and torsions are inferred correctly.
alchemical_region = AlchemicalRegion(alchemical_atoms=atoms, alchemical_bonds=True,
alchemical_angles=True, alchemical_torsions=True)
resolved_region = AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
for j, region in enumerate(['bonds', 'angles', 'torsions']):
assert len(getattr(resolved_region, 'alchemical_' + region)) == test_cases[i][j+2]
        # An exception is raised if indices are not part of the system.
alchemical_region = AlchemicalRegion(alchemical_atoms=[10000000])
with nose.tools.assert_raises(ValueError):
AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
# An exception is raised if nothing is defined.
alchemical_region = AlchemicalRegion()
with nose.tools.assert_raises(ValueError):
AbsoluteAlchemicalFactory._resolve_alchemical_region(system, alchemical_region)
class TestAbsoluteAlchemicalFactory(object):
"""Test AbsoluteAlchemicalFactory class."""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
cls.define_systems()
cls.define_regions()
cls.generate_cases()
@classmethod
def define_systems(cls):
"""Create shared test systems in cls.test_systems for the test suite."""
cls.test_systems = dict()
# Basic test systems: Lennard-Jones and water particles only.
        # Also test with dispersion correction and switching turned off ("on" values
        # for these options are tested in the HostGuestExplicit system).
cls.test_systems['LennardJonesCluster'] = testsystems.LennardJonesCluster()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
cls.test_systems['TIP3P WaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['TIP4P-EW WaterBox and NaCl with PME'] = \
testsystems.WaterBox(nonbondedMethod=openmm.app.PME, model='tip4pew', ionic_strength=200*unit.millimolar)
# Vacuum and implicit.
cls.test_systems['AlanineDipeptideVacuum'] = testsystems.AlanineDipeptideVacuum()
cls.test_systems['AlanineDipeptideImplicit'] = testsystems.AlanineDipeptideImplicit()
cls.test_systems['TolueneImplicitOBC2'] = testsystems.TolueneImplicitOBC2()
cls.test_systems['TolueneImplicitGBn'] = testsystems.TolueneImplicitGBn()
# Explicit test system: PME and CutoffPeriodic.
#cls.test_systems['AlanineDipeptideExplicit with CutoffPeriodic'] = \
# testsystems.AlanineDipeptideExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['HostGuestExplicit with PME'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['HostGuestExplicit with CutoffPeriodic'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_regions = dict()
cls.test_regions['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2))
cls.test_regions['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10))
cls.test_regions['Toluene'] = AlchemicalRegion(alchemical_atoms=range(6)) # Only partially modified.
cls.test_regions['AlanineDipeptide'] = AlchemicalRegion(alchemical_atoms=range(22))
cls.test_regions['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(126, 156))
cls.test_regions['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(0,3))
        # Modify ions: make the first Na or Cl atom found alchemical.
for atom in cls.test_systems['TIP4P-EW WaterBox and NaCl with PME'].topology.atoms():
if atom.name in ['Na', 'Cl']:
cls.test_regions['TIP4P-EW WaterBox and NaCl'] = AlchemicalRegion(alchemical_atoms=range(atom.index, atom.index+1))
break
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
direct_space_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='direct-space',
alchemical_rf_treatment='switched')
exact_pme_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
        # 3 test cases so that we test them at least once for each test system and for
# each combination of annihilate_sterics/electrostatics.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region.
for region_name, region in cls.test_regions.items():
if region_name in test_system_name:
break
assert region_name in test_system_name, test_system_name
# Find nonbonded method.
force_idx, nonbonded_force = forces.find_forces(test_system.system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Create all combinations of annihilate_sterics/electrostatics.
for annihilate_sterics, annihilate_electrostatics in itertools.product((True, False), repeat=2):
# Create new region that we can modify.
test_region = region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=annihilate_electrostatics)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
if annihilate_electrostatics:
test_case_name += ', annihilated electrostatics'
                # Annihilate bonds, angles and torsions every three test cases.
if n_test_cases % 3 == 0:
test_region = test_region._replace(alchemical_bonds=True, alchemical_angles=True,
alchemical_torsions=True)
test_case_name += ', annihilated bonds, angles and torsions'
# Add different softcore parameters every five test_cases.
if n_test_cases % 5 == 0:
test_region = test_region._replace(softcore_alpha=1.0, softcore_beta=1.0, softcore_a=1.0, softcore_b=1.0,
softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)
test_case_name += ', modified softcore parameters'
# Pre-generate alchemical system.
alchemical_system = direct_space_factory.create_alchemical_system(test_system.system, test_region)
# Add test case.
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_region)
n_test_cases += 1
                # If we don't use softcore electrostatics and we annihilate charges,
                # we can also test the exact PME treatment. We purposely don't increase
                # n_test_cases so we can keep track of which tests were added above.
if (test_region.softcore_beta == 0.0 and annihilate_electrostatics and
nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]):
alchemical_system = exact_pme_factory.create_alchemical_system(test_system.system, test_region)
test_case_name += ', exact PME'
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_region)
            # If the test system uses reaction field, replace the reference
            # system's reaction field to allow comparisons.
if nonbonded_method == openmm.NonbondedForce.CutoffPeriodic:
forcefactories.replace_reaction_field(test_system.system, return_copy=False,
switch_width=direct_space_factory.switch_width)
def filter_cases(self, condition_func, max_number=None):
"""Return the list of test cases that satisfy condition_func(test_case_name)."""
if max_number is None:
max_number = len(self.test_cases)
test_cases = {}
for test_name, test_case in self.test_cases.items():
if condition_func(test_name):
test_cases[test_name] = test_case
if len(test_cases) >= max_number:
break
return test_cases
def test_split_force_groups(self):
"""Forces having different lambda variables should have a different force group."""
        # Select 1 implicit, 1 exact-PME explicit, and 1 non-exact-PME explicit test case.
test_cases = self.filter_cases(lambda x: 'Implicit' in x, max_number=1)
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' in x, max_number=1))
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' not in x, max_number=1))
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_split_force_groups, alchemical_system)
f.description = "Testing force splitting among groups of {}".format(test_name)
yield f
def test_fully_interacting_energy(self):
"""Compare the energies of reference and fully interacting alchemical system."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system,
alchemical_system, alchemical_region, test_system.positions)
f.description = "Testing fully interacting energy of {}".format(test_name)
yield f
def test_noninteracting_energy_components(self):
"""Check all forces annihilated/decoupled when their lambda variables are zero."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing non-interacting energy of {}".format(test_name)
yield f
@attr('slow')
def test_fully_interacting_energy_components(self):
"""Test interacting state energy by force component."""
# This is a very expensive but very informative test. We can
# run this locally when test_fully_interacting_energies() fails.
test_cases = self.filter_cases(lambda x: 'Explicit' in x)
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_interacting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing energy components of %s..." % test_name
yield f
@attr('slow')
def test_platforms(self):
"""Test interacting and noninteracting energies on all platforms."""
global GLOBAL_ALCHEMY_PLATFORM
old_global_platform = GLOBAL_ALCHEMY_PLATFORM
# Do not repeat tests on the platform already tested.
if old_global_platform is None:
default_platform_name = utils.get_fastest_platform().getName()
else:
default_platform_name = old_global_platform.getName()
platforms = [platform for platform in utils.get_available_platforms()
if platform.getName() != default_platform_name]
# Test interacting and noninteracting energies on all platforms.
for platform in platforms:
GLOBAL_ALCHEMY_PLATFORM = platform
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test fully interacting energy of {} on {}".format(test_name, platform.getName())
yield f
f = partial(check_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test non-interacting energy of {} on {}".format(test_name, platform.getName())
yield f
# Restore global platform
GLOBAL_ALCHEMY_PLATFORM = old_global_platform
@attr('slow')
def test_overlap(self):
"""Tests overlap between reference and alchemical systems."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
#cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',
# test_name + '.pickle')
cached_trajectory_filename = None
f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,
cached_trajectory_filename=cached_trajectory_filename, name=test_name)
f.description = "Testing reference/alchemical overlap for {}".format(test_name)
yield f
class TestMultiRegionAbsoluteAlchemicalFactory(TestAbsoluteAlchemicalFactory):
"""Test AbsoluteAlchemicalFactory class using multiple regions."""
@classmethod
def define_systems(cls):
"""Create shared test systems in cls.test_systems for the test suite."""
cls.test_systems = dict()
# Basic test systems: Lennard-Jones and water particles only.
        # Also test with dispersion correction and switching turned off ("on" values
        # for these options are tested in the HostGuestExplicit system).
cls.test_systems['LennardJonesCluster'] = testsystems.LennardJonesCluster()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
cls.test_systems['TIP3P WaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=False, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['HostGuestExplicit with PME'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['HostGuestExplicit with CutoffPeriodic'] = \
testsystems.HostGuestExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_region_zero = dict()
cls.test_region_one = dict()
cls.test_region_two = dict()
cls.test_region_zero['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2), name='zero')
cls.test_region_one['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(2,4), name='one')
cls.test_region_two['LennardJonesCluster'] = AlchemicalRegion(alchemical_atoms=range(4,6), name='two')
cls.test_region_zero['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10), name='zero')
cls.test_region_one['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10,20), name='one')
cls.test_region_two['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(20,30), name='two')
cls.test_region_zero['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3), name='zero')
cls.test_region_one['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3,6), name='one')
cls.test_region_two['TIP3P WaterBox'] = AlchemicalRegion(alchemical_atoms=range(6,9), name='two')
        # Three regions would push the HostGuestExplicit system beyond 32 force groups.
cls.test_region_zero['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(126, 156), name='zero')
cls.test_region_one['HostGuestExplicit'] = AlchemicalRegion(alchemical_atoms=range(156,160), name='one')
cls.test_region_two['HostGuestExplicit'] = None
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
direct_space_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='direct-space',
alchemical_rf_treatment='switched')
exact_pme_factory = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
# We generate all possible combinations of annihilate_sterics/electrostatics
# for each test system. We also annihilate bonds, angles and torsions every
        # 3 test cases so that we test them at least once for each test system and for
# each combination of annihilate_sterics/electrostatics.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region zero.
for region_name_zero, region_zero in cls.test_region_zero.items():
if region_name_zero in test_system_name:
break
assert region_name_zero in test_system_name, test_system_name
# Find standard alchemical region one.
for region_name_one, region_one in cls.test_region_one.items():
if region_name_one in test_system_name:
break
assert region_name_one in test_system_name, test_system_name
# Find standard alchemical region two.
for region_name_two, region_two in cls.test_region_two.items():
if region_name_two in test_system_name:
break
assert region_name_two in test_system_name, test_system_name
            assert region_name_zero == region_name_one == region_name_two
            # We only use two regions for HostGuestExplicit, otherwise we exceed the 32 available force groups.
if 'HostGuestExplicit' in region_name_one:
test_regions = [region_zero, region_one]
else:
test_regions = [region_zero, region_one, region_two]
# Find nonbonded method.
force_idx, nonbonded_force = forces.find_forces(test_system.system, openmm.NonbondedForce, only_one=True)
nonbonded_method = nonbonded_force.getNonbondedMethod()
# Create all combinations of annihilate_sterics/electrostatics.
for annihilate_sterics, annihilate_electrostatics in itertools.product((True, False), repeat=2):
                # Create new regions that we can modify.
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=annihilate_electrostatics)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
if annihilate_electrostatics:
test_case_name += ', annihilated electrostatics'
                # Annihilate bonds, angles and torsions every three test cases.
if n_test_cases % 3 == 0:
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(alchemical_bonds=True, alchemical_angles=True,
alchemical_torsions=True)
test_case_name += ', annihilated bonds, angles and torsions'
# Add different softcore parameters every five test_cases.
if n_test_cases % 5 == 0:
for i, test_region in enumerate(test_regions):
test_regions[i] = test_region._replace(softcore_alpha=1.0, softcore_beta=1.0, softcore_a=1.0, softcore_b=1.0,
softcore_c=1.0, softcore_d=1.0, softcore_e=1.0, softcore_f=1.0)
test_case_name += ', modified softcore parameters'
#region_interactions = frozenset(itertools.combinations(range(len(test_regions)), 2))
# Pre-generate alchemical system.
alchemical_system = direct_space_factory.create_alchemical_system(test_system.system, alchemical_regions = test_regions)
# Add test case.
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_regions)
n_test_cases += 1
                # If we don't use softcore electrostatics and we annihilate charges,
                # we can also test the exact PME treatment. We purposely don't increase
                # n_test_cases so we can keep track of which tests were added above.
if (test_regions[1].softcore_beta == 0.0 and annihilate_electrostatics and
nonbonded_method in [openmm.NonbondedForce.PME, openmm.NonbondedForce.Ewald]):
alchemical_system = exact_pme_factory.create_alchemical_system(test_system.system, alchemical_regions = test_regions)
test_case_name += ', exact PME'
cls.test_cases[test_case_name] = (test_system, alchemical_system, test_regions)
            # If the test system uses reaction field, replace the reference
            # system's reaction field to allow comparisons.
if nonbonded_method == openmm.NonbondedForce.CutoffPeriodic:
forcefactories.replace_reaction_field(test_system.system, return_copy=False,
switch_width=direct_space_factory.switch_width)
def test_split_force_groups(self):
"""Forces having different lambda variables should have a different force group."""
        # Select 1 implicit, 1 exact-PME explicit, and 1 non-exact-PME explicit test case.
test_cases = self.filter_cases(lambda x: 'Implicit' in x, max_number=1)
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' in x, max_number=1))
test_cases.update(self.filter_cases(lambda x: 'Explicit ' in x and 'exact PME' not in x, max_number=1))
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
region_names = []
for region in alchemical_region:
region_names.append(region.name)
f = partial(check_split_force_groups, alchemical_system, region_names)
f.description = "Testing force splitting among groups of {}".format(test_name)
yield f
def test_noninteracting_energy_components(self):
"""Check all forces annihilated/decoupled when their lambda variables are zero."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(check_multi_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing non-interacting energy of {}".format(test_name)
yield f
@attr('slow')
def test_platforms(self):
"""Test interacting and noninteracting energies on all platforms."""
global GLOBAL_ALCHEMY_PLATFORM
old_global_platform = GLOBAL_ALCHEMY_PLATFORM
# Do not repeat tests on the platform already tested.
if old_global_platform is None:
default_platform_name = utils.get_fastest_platform().getName()
else:
default_platform_name = old_global_platform.getName()
platforms = [platform for platform in utils.get_available_platforms()
if platform.getName() != default_platform_name]
# Test interacting and noninteracting energies on all platforms.
for platform in platforms:
GLOBAL_ALCHEMY_PLATFORM = platform
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
f = partial(compare_system_energies, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test fully interacting energy of {} on {}".format(test_name, platform.getName())
yield f
f = partial(check_multi_noninteracting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Test non-interacting energy of {} on {}".format(test_name, platform.getName())
yield f
# Restore global platform
GLOBAL_ALCHEMY_PLATFORM = old_global_platform
@attr('slow')
def test_fully_interacting_energy_components(self):
"""Test interacting state energy by force component."""
# This is a very expensive but very informative test. We can
# run this locally when test_fully_interacting_energies() fails.
test_cases = self.filter_cases(lambda x: 'Explicit' in x)
for test_name, (test_system, alchemical_system, alchemical_region) in test_cases.items():
f = partial(check_multi_interacting_energy_components, test_system.system, alchemical_system,
alchemical_region, test_system.positions)
f.description = "Testing energy components of %s..." % test_name
yield f
class TestDispersionlessAlchemicalFactory(object):
"""
Only test overlap for dispersionless alchemical factory, since energy agreement
will be poor.
"""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
cls.define_systems()
cls.define_regions()
cls.generate_cases()
@classmethod
def define_systems(cls):
"""Create test systems and shared objects."""
cls.test_systems = dict()
cls.test_systems['LennardJonesFluid with dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=True)
@classmethod
def define_regions(cls):
"""Create shared AlchemicalRegions for test systems in cls.test_regions."""
cls.test_regions = dict()
cls.test_regions['LennardJonesFluid'] = AlchemicalRegion(alchemical_atoms=range(10))
@classmethod
def generate_cases(cls):
"""Generate all test cases in cls.test_cases combinatorially."""
cls.test_cases = dict()
factory = AbsoluteAlchemicalFactory(disable_alchemical_dispersion_correction=True)
        # We generate test cases with both annihilated and decoupled sterics for
        # each test system; electrostatics are always annihilated in these cases.
n_test_cases = 0
for test_system_name, test_system in cls.test_systems.items():
# Find standard alchemical region.
for region_name, region in cls.test_regions.items():
if region_name in test_system_name:
break
assert region_name in test_system_name
# Create all combinations of annihilate_sterics.
            for annihilate_sterics in (True, False):
region = region._replace(annihilate_sterics=annihilate_sterics,
annihilate_electrostatics=True)
# Create test name.
test_case_name = test_system_name[:]
if annihilate_sterics:
test_case_name += ', annihilated sterics'
# Pre-generate alchemical system
alchemical_system = factory.create_alchemical_system(test_system.system, region)
cls.test_cases[test_case_name] = (test_system, alchemical_system, region)
n_test_cases += 1
def test_overlap(self):
"""Tests overlap between reference and alchemical systems."""
for test_name, (test_system, alchemical_system, alchemical_region) in self.test_cases.items():
#cached_trajectory_filename = os.path.join(os.environ['HOME'], '.cache', 'alchemy', 'tests',
# test_name + '.pickle')
cached_trajectory_filename = None
f = partial(overlap_check, test_system.system, alchemical_system, test_system.positions,
cached_trajectory_filename=cached_trajectory_filename, name=test_name)
f.description = "Testing reference/alchemical overlap for no alchemical dispersion {}".format(test_name)
yield f
@attr('slow')
class TestAbsoluteAlchemicalFactorySlow(TestAbsoluteAlchemicalFactory):
"""Test AbsoluteAlchemicalFactory class with a more comprehensive set of systems."""
@classmethod
def define_systems(cls):
"""Create test systems and shared objects."""
cls.test_systems = dict()
cls.test_systems['LennardJonesFluid without dispersion correction'] = \
testsystems.LennardJonesFluid(nparticles=100, dispersion_correction=False)
cls.test_systems['DischargedWaterBox with reaction field, no switch, no dispersion correction'] = \
testsystems.DischargedWaterBox(dispersion_correction=False, switch=False,
nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['WaterBox with reaction field, no switch, dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=True, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['WaterBox with reaction field, switch, no dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=False, switch=True, nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['WaterBox with PME, switch, dispersion correction'] = \
testsystems.WaterBox(dispersion_correction=True, switch=True, nonbondedMethod=openmm.app.PME)
# Big systems.
cls.test_systems['LysozymeImplicit'] = testsystems.LysozymeImplicit()
cls.test_systems['DHFRExplicit with reaction field'] = \
testsystems.DHFRExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['SrcExplicit with PME'] = \
testsystems.SrcExplicit(nonbondedMethod=openmm.app.PME)
cls.test_systems['SrcExplicit with reaction field'] = \
testsystems.SrcExplicit(nonbondedMethod=openmm.app.CutoffPeriodic)
cls.test_systems['SrcImplicit'] = testsystems.SrcImplicit()
@classmethod
def define_regions(cls):
super(TestAbsoluteAlchemicalFactorySlow, cls).define_regions()
cls.test_regions['WaterBox'] = AlchemicalRegion(alchemical_atoms=range(3))
cls.test_regions['LysozymeImplicit'] = AlchemicalRegion(alchemical_atoms=range(2603, 2621))
cls.test_regions['DHFRExplicit'] = AlchemicalRegion(alchemical_atoms=range(0, 2849))
cls.test_regions['Src'] = AlchemicalRegion(alchemical_atoms=range(0, 21))
# =============================================================================
# TEST ALCHEMICAL STATE
# =============================================================================
class TestAlchemicalState(object):
"""Test AlchemicalState compatibility with CompoundThermodynamicState."""
@classmethod
def setup_class(cls):
"""Create test systems and shared objects."""
alanine_vacuum = testsystems.AlanineDipeptideVacuum()
alanine_explicit = testsystems.AlanineDipeptideExplicit()
factory = AbsoluteAlchemicalFactory()
factory_exact_pme = AbsoluteAlchemicalFactory(alchemical_pme_treatment='exact')
cls.alanine_alchemical_atoms = list(range(22))
cls.alanine_test_system = alanine_explicit
# System with only lambda_sterics and lambda_electrostatics.
alchemical_region = AlchemicalRegion(alchemical_atoms=cls.alanine_alchemical_atoms)
alchemical_alanine_system = factory.create_alchemical_system(alanine_vacuum.system, alchemical_region)
cls.alanine_state = states.ThermodynamicState(alchemical_alanine_system,
temperature=300*unit.kelvin)
# System with lambda_sterics and lambda_electrostatics and exact PME treatment.
alchemical_alanine_system_exact_pme = factory_exact_pme.create_alchemical_system(alanine_explicit.system,
alchemical_region)
cls.alanine_state_exact_pme = states.ThermodynamicState(alchemical_alanine_system_exact_pme,
temperature=300*unit.kelvin,
pressure=1.0*unit.atmosphere)
# System with all lambdas.
alchemical_region = AlchemicalRegion(alchemical_atoms=cls.alanine_alchemical_atoms,
alchemical_torsions=True, alchemical_angles=True,
alchemical_bonds=True)
fully_alchemical_alanine_system = factory.create_alchemical_system(alanine_vacuum.system, alchemical_region)
cls.full_alanine_state = states.ThermodynamicState(fully_alchemical_alanine_system,
temperature=300*unit.kelvin)
# Test case: (ThermodynamicState, defined_lambda_parameters)
cls.test_cases = [
(cls.alanine_state, {'lambda_sterics', 'lambda_electrostatics'}),
(cls.alanine_state_exact_pme, {'lambda_sterics', 'lambda_electrostatics'}),
(cls.full_alanine_state, {'lambda_sterics', 'lambda_electrostatics', 'lambda_bonds',
'lambda_angles', 'lambda_torsions'})
]
@staticmethod
def test_constructor():
"""Test AlchemicalState constructor behave as expected."""
# Raise an exception if parameter is not recognized.
with nose.tools.assert_raises(AlchemicalStateError):
AlchemicalState(lambda_electro=1.0)
# Properties are initialized correctly.
test_cases = [{},
{'lambda_sterics': 0.5, 'lambda_angles': 0.5},
{'lambda_electrostatics': 1.0}]
for test_kwargs in test_cases:
alchemical_state = AlchemicalState(**test_kwargs)
for parameter in AlchemicalState._get_controlled_parameters():
if parameter in test_kwargs:
assert getattr(alchemical_state, parameter) == test_kwargs[parameter]
else:
assert getattr(alchemical_state, parameter) is None
def test_from_system_constructor(self):
"""Test AlchemicalState.from_system constructor."""
# A non-alchemical system raises an error.
with nose.tools.assert_raises(AlchemicalStateError):
AlchemicalState.from_system(testsystems.AlanineDipeptideVacuum().system)
# Valid parameters are 1.0 by default in AbsoluteAlchemicalFactory,
# and all the others must be None.
for state, defined_lambdas in self.test_cases:
alchemical_state = AlchemicalState.from_system(state.system)
for parameter in AlchemicalState._get_controlled_parameters():
property_value = getattr(alchemical_state, parameter)
if parameter in defined_lambdas:
assert property_value == 1.0, '{}: {}'.format(parameter, property_value)
else:
assert property_value is None, '{}: {}'.format(parameter, property_value)
@staticmethod
def test_equality_operator():
"""Test equality operator between AlchemicalStates."""
state1 = AlchemicalState(lambda_electrostatics=1.0)
state2 = AlchemicalState(lambda_electrostatics=1.0)
state3 = AlchemicalState(lambda_electrostatics=0.9)
state4 = AlchemicalState(lambda_electrostatics=0.9, lambda_sterics=1.0)
assert state1 == state2
assert state2 != state3
assert state3 != state4
def test_apply_to_system(self):
"""Test method AlchemicalState.apply_to_system()."""
# Do not modify cached test cases.
test_cases = copy.deepcopy(self.test_cases)
# Test precondition: all parameters are 1.0.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
assert alchemical_state == AlchemicalState.from_system(state.system)
# apply_to_system() modifies the state.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 0.5)
alchemical_state = AlchemicalState(**kwargs)
system = state.system
alchemical_state.apply_to_system(system)
system_state = AlchemicalState.from_system(system)
assert system_state == alchemical_state
# Raise an error if an extra parameter is defined in the system.
for state, defined_lambdas in test_cases:
defined_lambdas = set(defined_lambdas) # Copy
defined_lambdas.pop() # Remove one element.
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_system(state.system)
# Raise an error if an extra parameter is defined in the state.
for state, defined_lambdas in test_cases:
if 'lambda_bonds' in defined_lambdas:
continue
defined_lambdas = set(defined_lambdas) # Copy
defined_lambdas.add('lambda_bonds') # Add extra parameter.
kwargs = dict.fromkeys(defined_lambdas, 1.0)
alchemical_state = AlchemicalState(**kwargs)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_system(state.system)
def test_check_system_consistency(self):
"""Test method AlchemicalState.check_system_consistency()."""
# A system is consistent with itself.
alchemical_state = AlchemicalState.from_system(self.alanine_state.system)
alchemical_state.check_system_consistency(self.alanine_state.system)
# Raise error if system has MORE lambda parameters.
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.full_alanine_state.system)
# Raise error if system has LESS lambda parameters.
alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.alanine_state.system)
# Raise error if system has different lambda values.
alchemical_state.lambda_bonds = 0.5
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.check_system_consistency(self.full_alanine_state.system)
def test_apply_to_context(self):
"""Test method AlchemicalState.apply_to_context."""
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
# Raise error if Context has more parameters than AlchemicalState.
alchemical_state = AlchemicalState.from_system(self.alanine_state.system)
context = self.full_alanine_state.create_context(copy.deepcopy(integrator))
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_context(context)
del context
# Raise error if AlchemicalState is applied to a Context with missing parameters.
alchemical_state = AlchemicalState.from_system(self.full_alanine_state.system)
context = self.alanine_state.create_context(copy.deepcopy(integrator))
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.apply_to_context(context)
del context
# Correctly sets Context's parameters.
for state in [self.full_alanine_state, self.alanine_state_exact_pme]:
alchemical_state = AlchemicalState.from_system(state.system)
context = state.create_context(copy.deepcopy(integrator))
alchemical_state.set_alchemical_parameters(0.5)
alchemical_state.apply_to_context(context)
for parameter_name, parameter_value in context.getParameters().items():
if parameter_name in alchemical_state._parameters:
assert parameter_value == 0.5
del context
def test_standardize_system(self):
"""Test method AlchemicalState.standardize_system."""
test_cases = [self.full_alanine_state, self.alanine_state_exact_pme]
for state in test_cases:
# First create a non-standard system.
system = copy.deepcopy(state.system)
alchemical_state = AlchemicalState.from_system(system)
alchemical_state.set_alchemical_parameters(0.5)
alchemical_state.apply_to_system(system)
# Test pre-condition: The state of the System has been changed.
assert AlchemicalState.from_system(system).lambda_electrostatics == 0.5
# Check that _standardize_system() sets all parameters back to 1.0.
alchemical_state._standardize_system(system)
standard_alchemical_state = AlchemicalState.from_system(system)
assert alchemical_state != standard_alchemical_state
for parameter_name, value in alchemical_state._parameters.items():
standard_value = getattr(standard_alchemical_state, parameter_name)
assert (value is None and standard_value is None) or (standard_value == 1.0)
def test_find_force_groups_to_update(self):
"""Test method AlchemicalState._find_force_groups_to_update."""
test_cases = [self.full_alanine_state, self.alanine_state_exact_pme]
for thermodynamic_state in test_cases:
system = copy.deepcopy(thermodynamic_state.system)
alchemical_state = AlchemicalState.from_system(system)
alchemical_state2 = copy.deepcopy(alchemical_state)
# Each lambda should be separated in its own force group.
expected_force_groups = {}
for force, lambda_name, _ in AlchemicalState._get_system_controlled_parameters(
system, parameters_name_suffix=None):
expected_force_groups[lambda_name] = force.getForceGroup()
integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)
context = create_context(system, integrator)
            # No force group needs updating if the two states are identical.
assert alchemical_state._find_force_groups_to_update(context, alchemical_state2, memo={}) == set()
# Change the lambdas one by one and check that the method
# recognizes that the force group energy must be updated.
for lambda_name in AlchemicalState._get_controlled_parameters():
# Check that the system defines the global variable.
if getattr(alchemical_state, lambda_name) is None:
continue
# Change the current state.
setattr(alchemical_state2, lambda_name, 0.0)
force_group = expected_force_groups[lambda_name]
assert alchemical_state._find_force_groups_to_update(context, alchemical_state2, memo={}) == {force_group}
setattr(alchemical_state2, lambda_name, 1.0) # Reset current state.
del context
def test_alchemical_functions(self):
"""Test alchemical variables and functions work correctly."""
system = copy.deepcopy(self.full_alanine_state.system)
alchemical_state = AlchemicalState.from_system(system)
# Add two alchemical variables to the state.
alchemical_state.set_function_variable('lambda', 1.0)
alchemical_state.set_function_variable('lambda2', 0.5)
assert alchemical_state.get_function_variable('lambda') == 1.0
assert alchemical_state.get_function_variable('lambda2') == 0.5
# Cannot call an alchemical variable as a supported parameter.
with nose.tools.assert_raises(AlchemicalStateError):
alchemical_state.set_function_variable('lambda_sterics', 0.5)
# Assign string alchemical functions to parameters.
alchemical_state.lambda_sterics = AlchemicalFunction('lambda')
alchemical_state.lambda_electrostatics = AlchemicalFunction('(lambda + lambda2) / 2.0')
assert alchemical_state.lambda_sterics == 1.0
assert alchemical_state.lambda_electrostatics == 0.75
# Setting alchemical variables updates alchemical parameter as well.
alchemical_state.set_function_variable('lambda2', 0)
assert alchemical_state.lambda_electrostatics == 0.5
# ---------------------------------------------------
# Integration tests with CompoundThermodynamicStates
# ---------------------------------------------------
def test_constructor_compound_state(self):
"""The AlchemicalState is set on construction of the CompoundState."""
test_cases = copy.deepcopy(self.test_cases)
# Test precondition: the original systems are in fully interacting state.
for state, defined_lambdas in test_cases:
system_state = AlchemicalState.from_system(state.system)
kwargs = dict.fromkeys(defined_lambdas, 1.0)
assert system_state == AlchemicalState(**kwargs)
# CompoundThermodynamicState sets the system state in its constructor.
for state, defined_lambdas in test_cases:
kwargs = dict.fromkeys(defined_lambdas, 0.5)
alchemical_state = AlchemicalState(**kwargs)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
system_state = AlchemicalState.from_system(compound_state.system)
assert system_state == alchemical_state
def test_lambda_properties_compound_state(self):
"""Lambda properties setters/getters work in the CompoundState system."""
test_cases = copy.deepcopy(self.test_cases)
for state, defined_lambdas in test_cases:
alchemical_state = AlchemicalState.from_system(state.system)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
# Defined properties can be assigned and read.
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 1.0
setattr(compound_state, parameter_name, 0.5)
assert getattr(compound_state, parameter_name) == 0.5
# System global variables are updated correctly
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(system_alchemical_state, parameter_name) == 0.5
# Same for parameters setters.
compound_state.set_alchemical_parameters(1.0)
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 1.0
assert getattr(system_alchemical_state, parameter_name) == 1.0
# Same for alchemical variables setters.
compound_state.set_function_variable('lambda', 0.25)
for parameter_name in defined_lambdas:
setattr(compound_state, parameter_name, AlchemicalFunction('lambda'))
system_alchemical_state = AlchemicalState.from_system(compound_state.system)
for parameter_name in defined_lambdas:
assert getattr(compound_state, parameter_name) == 0.25
assert getattr(system_alchemical_state, parameter_name) == 0.25
def test_set_system_compound_state(self):
"""Setting inconsistent system in compound state raise errors."""
alanine_state = copy.deepcopy(self.alanine_state)
alchemical_state = AlchemicalState.from_system(alanine_state.system)
compound_state = states.CompoundThermodynamicState(alanine_state, [alchemical_state])
# We create an inconsistent state that has different parameters.
incompatible_state = copy.deepcopy(alchemical_state)
incompatible_state.lambda_electrostatics = 0.5
# Setting an inconsistent alchemical system raises an error.
system = compound_state.system
incompatible_state.apply_to_system(system)
with nose.tools.assert_raises(AlchemicalStateError):
compound_state.system = system
# Same for set_system when called with default arguments.
with nose.tools.assert_raises(AlchemicalStateError):
compound_state.set_system(system)
# This doesn't happen if we fix the state.
compound_state.set_system(system, fix_state=True)
assert AlchemicalState.from_system(compound_state.system) != incompatible_state
def test_method_compatibility_compound_state(self):
"""Compatibility between states is handled correctly in compound state."""
test_cases = [self.alanine_state, self.alanine_state_exact_pme]
# An incompatible state has a different set of defined lambdas.
full_alanine_state = copy.deepcopy(self.full_alanine_state)
alchemical_state_incompatible = AlchemicalState.from_system(full_alanine_state.system)
compound_state_incompatible = states.CompoundThermodynamicState(full_alanine_state,
[alchemical_state_incompatible])
for state in test_cases:
state = copy.deepcopy(state)
alchemical_state = AlchemicalState.from_system(state.system)
compound_state = states.CompoundThermodynamicState(state, [alchemical_state])
# A compatible state has the same defined lambda parameters,
# but their values can be different.
alchemical_state_compatible = copy.deepcopy(alchemical_state)
assert alchemical_state.lambda_electrostatics != 0.5 # Test pre-condition.
alchemical_state_compatible.lambda_electrostatics = 0.5
compound_state_compatible = states.CompoundThermodynamicState(copy.deepcopy(state),
[alchemical_state_compatible])
# Test states compatibility.
assert compound_state.is_state_compatible(compound_state_compatible)
assert not compound_state.is_state_compatible(compound_state_incompatible)
# Test context compatibility.
integrator = openmm.VerletIntegrator(1.0*unit.femtosecond)
context = compound_state_compatible.create_context(copy.deepcopy(integrator))
assert compound_state.is_context_compatible(context)
context = compound_state_incompatible.create_context(copy.deepcopy(integrator))
assert not compound_state.is_context_compatible(context)
@staticmethod
def _check_compatibility(state1, state2, context_state1, is_compatible):
"""Check the compatibility of states and contexts between 2 states."""
# Compatibility should be commutative
assert state1.is_state_compatible(state2) is is_compatible
assert state2.is_state_compatible(state1) is is_compatible
# Test context incompatibility is commutative.
context_state2 = state2.create_context(openmm.VerletIntegrator(1.0*unit.femtosecond))
assert state2.is_context_compatible(context_state1) is is_compatible
assert state1.is_context_compatible(context_state2) is is_compatible
del context_state2
def test_method_reduced_potential_compound_state(self):
"""Test CompoundThermodynamicState.reduced_potential_at_states() method.
Computing the reduced potential for each state individually and with
the class method should give the same result.
"""
# Build a mixed collection of compatible and incompatible thermodynamic states.
thermodynamic_states = [
copy.deepcopy(self.alanine_state),
copy.deepcopy(self.alanine_state_exact_pme)
]
alchemical_states = [
AlchemicalState(lambda_electrostatics=1.0, lambda_sterics=1.0),
AlchemicalState(lambda_electrostatics=0.5, lambda_sterics=1.0),
AlchemicalState(lambda_electrostatics=0.5, lambda_sterics=0.0),
AlchemicalState(lambda_electrostatics=1.0, lambda_sterics=1.0)
]
compound_states = []
for thermo_state in thermodynamic_states:
for alchemical_state in alchemical_states:
compound_states.append(states.CompoundThermodynamicState(
copy.deepcopy(thermo_state), [copy.deepcopy(alchemical_state)]))
# Group thermodynamic states by compatibility.
compatible_groups, _ = states.group_by_compatibility(compound_states)
assert len(compatible_groups) == 2
# Compute the reduced potentials.
expected_energies = []
obtained_energies = []
for compatible_group in compatible_groups:
# Create context.
integrator = openmm.VerletIntegrator(2.0*unit.femtoseconds)
context = compatible_group[0].create_context(integrator)
context.setPositions(self.alanine_test_system.positions[:compatible_group[0].n_particles])
# Compute with single-state method.
for state in compatible_group:
state.apply_to_context(context)
expected_energies.append(state.reduced_potential(context))
# Compute with multi-state method.
compatible_energies = states.ThermodynamicState.reduced_potential_at_states(context, compatible_group)
# The first and the last state must be equal.
assert np.isclose(compatible_energies[0], compatible_energies[-1])
obtained_energies.extend(compatible_energies)
assert np.allclose(np.array(expected_energies), np.array(obtained_energies))
def test_serialization(self):
"""Test AlchemicalState serialization alone and in a compound state."""
alchemical_state = AlchemicalState(lambda_electrostatics=0.5, lambda_angles=None)
alchemical_state.set_function_variable('lambda', 0.0)
alchemical_state.lambda_sterics = AlchemicalFunction('lambda')
# Test serialization/deserialization of AlchemicalState.
serialization = utils.serialize(alchemical_state)
deserialized_state = utils.deserialize(serialization)
original_pickle = pickle.dumps(alchemical_state)
deserialized_pickle = pickle.dumps(deserialized_state)
assert original_pickle == deserialized_pickle
# Test serialization/deserialization of AlchemicalState in CompoundState.
test_cases = [copy.deepcopy(self.alanine_state), copy.deepcopy(self.alanine_state_exact_pme)]
for thermodynamic_state in test_cases:
compound_state = states.CompoundThermodynamicState(thermodynamic_state, [alchemical_state])
# The serialized system is standard.
serialization = utils.serialize(compound_state)
serialized_standard_system = serialization['thermodynamic_state']['standard_system']
# Decompress the serialized_system
serialized_standard_system = zlib.decompress(serialized_standard_system).decode(
states.ThermodynamicState._ENCODING)
assert serialized_standard_system.__hash__() == compound_state._standard_system_hash
# The object is deserialized correctly.
deserialized_state = utils.deserialize(serialization)
assert pickle.dumps(compound_state) == pickle.dumps(deserialized_state)
# =============================================================================
# MAIN FOR MANUAL DEBUGGING
# =============================================================================
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
cli/cmd/run/run.go
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package run
import (
"context"
"fmt"
"io"
"os"
"time"
"github.com/containerd/console"
"github.com/spf13/cobra"
"github.com/docker/compose-cli/api/client"
"github.com/docker/compose-cli/api/containers"
"github.com/docker/compose-cli/api/context/store"
"github.com/docker/compose-cli/api/progress"
"github.com/docker/compose-cli/cli/options/run"
)
// Command runs a container
func Command(contextType string) *cobra.Command {
var opts run.Opts
cmd := &cobra.Command{
Use: "run",
Short: "Run a container",
Args: cobra.MinimumNArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) > 1 {
opts.Command = args[1:]
}
return runRun(cmd.Context(), args[0], contextType, opts)
},
}
cmd.Flags().SetInterspersed(false)
cmd.Flags().StringArrayVarP(&opts.Publish, "publish", "p", []string{}, "Publish a container's port(s). [HOST_PORT:]CONTAINER_PORT")
cmd.Flags().StringVar(&opts.Name, "name", "", "Assign a name to the container")
cmd.Flags().StringArrayVarP(&opts.Labels, "label", "l", []string{}, "Set meta data on a container")
cmd.Flags().StringArrayVarP(&opts.Volumes, "volume", "v", []string{}, "Volume. Ex: storageaccount/my_share[:/absolute/path/to/target][:ro]")
cmd.Flags().BoolVarP(&opts.Detach, "detach", "d", false, "Run container in background and print container ID")
cmd.Flags().Float64Var(&opts.Cpus, "cpus", 1., "Number of CPUs")
cmd.Flags().VarP(&opts.Memory, "memory", "m", "Memory limit")
cmd.Flags().StringArrayVarP(&opts.Environment, "env", "e", []string{}, "Set environment variables")
cmd.Flags().StringArrayVar(&opts.EnvironmentFiles, "env-file", []string{}, "Path to environment files to be translated as environment variables")
cmd.Flags().StringVarP(&opts.RestartPolicyCondition, "restart", "", containers.RestartPolicyRunNo, "Restart policy to apply when a container exits (no|always|on-failure)")
cmd.Flags().BoolVar(&opts.Rm, "rm", false, "Automatically remove the container when it exits")
cmd.Flags().StringVar(&opts.HealthCmd, "health-cmd", "", "Command to run to check health")
cmd.Flags().DurationVar(&opts.HealthInterval, "health-interval", time.Duration(0), "Time between running the check (ms|s|m|h) (default 0s)")
cmd.Flags().IntVar(&opts.HealthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy")
cmd.Flags().DurationVar(&opts.HealthStartPeriod, "health-start-period", time.Duration(0), "Start period for the container to initialize before starting "+
"health-retries countdown (ms|s|m|h) (default 0s)")
cmd.Flags().DurationVar(&opts.HealthTimeout, "health-timeout", time.Duration(0), "Maximum time to allow one check to run (ms|s|m|h) (default 0s)")
if contextType == store.LocalContextType {
cmd.Flags().StringVar(&opts.Platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable")
}
if contextType == store.AciContextType {
cmd.Flags().StringVar(&opts.DomainName, "domainname", "", "Container NIS domain name")
}
switch contextType {
case store.LocalContextType:
default:
_ = cmd.Flags().MarkHidden("rm")
}
return cmd
}
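// Illustrative invocation only (the image and flag values below are hypothetical):
// the flags map to the options registered above, and anything after the image
// name becomes the container command via args[1:], e.g.
//
//	run --name web -p 80:80 -e FOO=bar nginx echo hello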
func runRun(ctx context.Context, image string, contextType string, opts run.Opts) error {
switch contextType {
case store.LocalContextType:
default:
if opts.Rm {
return fmt.Errorf(`flag "--rm" is not yet implemented for %q context type`, contextType)
}
}
c, err := client.New(ctx)
if err != nil {
return err
}
containerConfig, err := opts.ToContainerConfig(image)
if err != nil {
return err
}
result, err := progress.Run(ctx, func(ctx context.Context) (string, error) {
return containerConfig.ID, c.ContainerService().Run(ctx, containerConfig)
})
if err != nil {
return err
}
if !opts.Detach {
var con io.Writer = os.Stdout
req := containers.LogsRequest{
Follow: true,
}
if c, err := console.ConsoleFromFile(os.Stdout); err == nil {
size, err := c.Size()
if err != nil {
return err
}
req.Width = int(size.Width)
con = c
}
req.Writer = con
return c.ContainerService().Logs(ctx, opts.Name, req)
}
fmt.Println(result)
return nil
}
|
[
"\"DOCKER_DEFAULT_PLATFORM\""
] |
[] |
[
"DOCKER_DEFAULT_PLATFORM"
] |
[]
|
["DOCKER_DEFAULT_PLATFORM"]
|
go
| 1 | 0 | |
vendor/github.com/pengsrc/go-shared/utils/home.go
|
package utils
import (
"os"
"runtime"
)
// GetHome returns the home directory.
func GetHome() string {
	if runtime.GOOS == "windows" {
		home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
		if home == "" {
			home = os.Getenv("USERPROFILE")
		}
		// Return the Windows home directory instead of falling through to $HOME.
		return home
	}
	return os.Getenv("HOME")
}
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
docs/source/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import locale
import os
import sys
def fake_locale_set(*args, **kwargs):
try:
locale.setlocale(*args, **kwargs)
except Exception:
pass
orig_set_locale = locale.setlocale
locale.setlocale = fake_locale_set
import urwid
locale.setlocale = orig_set_locale
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
DOCS_SOURCE_DIR = os.path.abspath(os.path.dirname(__file__))
def read_file(*parts):
with open(os.path.join(PROJECT_DIR, *parts), "r") as f:
return f.read()
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'lookatme'
copyright = "2019, James 'd0c-s4vage' Johnson"
author = "James 'd0c-s4vage' Johnson"
# The full version, including alpha/beta/rc tags
release = os.environ.get("READTHEDOCS_VERSION", '{{VERSION}}')
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
master_doc = "index"
#-----------------------------------------------------------------------------
# Generate list of overridable functions within lookatme
#-----------------------------------------------------------------------------
def get_contrib_functions(*file_parts):
render_module = file_parts[-1].replace(".py", "")
lines = read_file(*file_parts).split("\n")
res = []
in_contrib = False
for idx, line in enumerate(lines):
line = line.strip()
if "@contrib_first" == line:
in_contrib = True
continue
if line.startswith("@"):
continue
elif line.startswith("def "):
if in_contrib:
fn_name = line.split()[1].split("(")[0]
res.append(f":any:`{fn_name} <lookatme.render.{render_module}.{fn_name}>`")
in_contrib = False
return res
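# Illustrative sketch (the decorated function name here is hypothetical): if
# lookatme/render/markdown_block.py contained
#
#     @contrib_first
#     def render_heading(token, body, stack, loop):
#         ...
#
# then get_contrib_functions("lookatme", "render", "markdown_block.py") would
# yield ":any:`render_heading <lookatme.render.markdown_block.render_heading>`".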
contrib_fns = []
contrib_fns += get_contrib_functions("lookatme", "render", "markdown_block.py")
contrib_fns += get_contrib_functions("lookatme", "render", "markdown_inline.py")
list_text = []
for fn_ref in contrib_fns:
list_text.append(f" * {fn_ref}")
list_text = "\n".join(list_text)
with open(os.path.join(DOCS_SOURCE_DIR, "contrib_extensions.rst"), "r") as f:
orig_data = f.read()
new_data = orig_data.replace("LOOKATME_OVERRIDES", list_text)
with open(os.path.join(DOCS_SOURCE_DIR, "contrib_extensions_auto.rst"), "w") as f:
f.write(new_data)
def run_apidoc(_):
from sphinx.ext.apidoc import main
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
cur_dir = os.path.abspath(os.path.dirname(__file__))
module = os.path.join(cur_dir, "..", "..", "lookatme")
main(["-e", "-o", os.path.join(cur_dir, "autodoc"), module, "--force"])
def setup(app):
app.connect('builder-inited', run_apidoc)
|
[] |
[] |
[
"READTHEDOCS_VERSION"
] |
[]
|
["READTHEDOCS_VERSION"]
|
python
| 1 | 0 | |
config/config.go
|
package config
import (
"bytes"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/influxdata/telegraf"
"github.com/influxdata/telegraf/internal"
"github.com/influxdata/telegraf/internal/choice"
"github.com/influxdata/telegraf/models"
"github.com/influxdata/telegraf/plugins/aggregators"
"github.com/influxdata/telegraf/plugins/inputs"
"github.com/influxdata/telegraf/plugins/outputs"
"github.com/influxdata/telegraf/plugins/parsers"
"github.com/influxdata/telegraf/plugins/parsers/json_v2"
"github.com/influxdata/telegraf/plugins/processors"
"github.com/influxdata/telegraf/plugins/serializers"
"github.com/influxdata/toml"
"github.com/influxdata/toml/ast"
)
var (
// Default sections
sectionDefaults = []string{"global_tags", "agent", "outputs",
"processors", "aggregators", "inputs"}
// Default input plugins
inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel",
"processes", "disk", "diskio"}
// Default output plugins
outputDefaults = []string{"influxdb"}
// envVarRe is a regex to find environment variables in the config file
envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`)
envVarEscaper = strings.NewReplacer(
`"`, `\"`,
`\`, `\\`,
)
httpLoadConfigRetryInterval = 10 * time.Second
// fetchURLRe is a regex to determine whether the requested file should
// be fetched from a remote or read from the filesystem.
fetchURLRe = regexp.MustCompile(`^\w+://`)
)
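// For illustration: envVarRe matches both the braced form "${STR_VAR}" (first
// capture group) and the bare form "$STR_VAR" (second capture group), and
// parseConfig below substitutes the value returned by os.LookupEnv for the
// whole match.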
// Config specifies the URL/user/password for the database that telegraf
// will be logging to, as well as all the plugins that the user has
// specified
type Config struct {
toml *toml.Config
errs []error // config load errors.
UnusedFields map[string]bool
Tags map[string]string
InputFilters []string
OutputFilters []string
Agent *AgentConfig
Inputs []*models.RunningInput
Outputs []*models.RunningOutput
Aggregators []*models.RunningAggregator
// Processors have a slice wrapper type because they need to be sorted
Processors models.RunningProcessors
AggProcessors models.RunningProcessors
}
// NewConfig creates a new struct to hold the Telegraf config.
// For historical reasons, it holds the actual instances of the running plugins
// once the configuration is parsed.
func NewConfig() *Config {
c := &Config{
UnusedFields: map[string]bool{},
// Agent defaults:
Agent: &AgentConfig{
Interval: Duration(10 * time.Second),
RoundInterval: true,
FlushInterval: Duration(10 * time.Second),
LogTarget: "file",
LogfileRotationMaxArchives: 5,
},
Tags: make(map[string]string),
Inputs: make([]*models.RunningInput, 0),
Outputs: make([]*models.RunningOutput, 0),
Processors: make([]*models.RunningProcessor, 0),
AggProcessors: make([]*models.RunningProcessor, 0),
InputFilters: make([]string, 0),
OutputFilters: make([]string, 0),
}
tomlCfg := &toml.Config{
NormFieldName: toml.DefaultConfig.NormFieldName,
FieldToKey: toml.DefaultConfig.FieldToKey,
MissingField: c.missingTomlField,
}
c.toml = tomlCfg
return c
}
// AgentConfig defines configuration that will be used by the Telegraf agent
type AgentConfig struct {
// Interval at which to gather information
Interval Duration
// RoundInterval rounds collection interval to 'interval'.
// ie, if Interval=10s then always collect on :00, :10, :20, etc.
RoundInterval bool
// By default or when set to "0s", precision will be set to the same
// timestamp order as the collection interval, with the maximum being 1s.
// ie, when interval = "10s", precision will be "1s"
// when interval = "250ms", precision will be "1ms"
// Precision will NOT be used for service inputs. It is up to each individual
// service input to set the timestamp at the appropriate precision.
Precision Duration
// CollectionJitter is used to jitter the collection by a random amount.
// Each plugin will sleep for a random time within jitter before collecting.
// This can be used to avoid many plugins querying things like sysfs at the
// same time, which can have a measurable effect on the system.
CollectionJitter Duration
// FlushInterval is the Interval at which to flush data
FlushInterval Duration
// FlushJitter Jitters the flush interval by a random amount.
// This is primarily to avoid large write spikes for users running a large
// number of telegraf instances.
// ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
FlushJitter Duration
// MetricBatchSize is the maximum number of metrics that are written to an
// output plugin in one call.
MetricBatchSize int
// MetricBufferLimit is the max number of metrics that each output plugin
// will cache. The buffer is cleared when a successful write occurs. When
// full, the oldest metrics will be overwritten. This number should be a
// multiple of MetricBatchSize. Due to the current implementation, this cannot
// be less than 2 times MetricBatchSize.
MetricBufferLimit int
// FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever
// it fills up, regardless of FlushInterval. Setting this option to true
// does _not_ deactivate FlushInterval.
FlushBufferWhenFull bool // deprecated in 0.13; has no effect
// TODO(cam): Remove UTC and parameter, they are no longer
// valid for the agent config. Leaving them here for now for backwards-
// compatibility
UTC bool `toml:"utc"` // deprecated in 1.0.0; has no effect
// Debug is the option for running in debug mode
Debug bool `toml:"debug"`
// Quiet is the option for running in quiet mode
Quiet bool `toml:"quiet"`
// Log target controls the destination for logs and can be one of "file",
// "stderr" or, on Windows, "eventlog". When set to "file", the output file
// is determined by the "logfile" setting.
LogTarget string `toml:"logtarget"`
// Name of the file to be logged to when using the "file" logtarget. If set to
// the empty string then logs are written to stderr.
Logfile string `toml:"logfile"`
// The file will be rotated after the time interval specified. When set
// to 0 no time based rotation is performed.
LogfileRotationInterval Duration `toml:"logfile_rotation_interval"`
// The logfile will be rotated when it becomes larger than the specified
// size. When set to 0 no size based rotation is performed.
LogfileRotationMaxSize Size `toml:"logfile_rotation_max_size"`
// Maximum number of rotated archives to keep, any older logs are deleted.
// If set to -1, no archives are removed.
LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"`
// Pick a timezone to use when logging or type 'local' for local time.
LogWithTimezone string `toml:"log_with_timezone"`
Hostname string
OmitHostname bool
}
// InputNames returns a list of strings of the configured inputs.
func (c *Config) InputNames() []string {
var name []string
for _, input := range c.Inputs {
name = append(name, input.Config.Name)
}
return PluginNameCounts(name)
}
// AggregatorNames returns a list of strings of the configured aggregators.
func (c *Config) AggregatorNames() []string {
var name []string
for _, aggregator := range c.Aggregators {
name = append(name, aggregator.Config.Name)
}
return PluginNameCounts(name)
}
// ProcessorNames returns a list of strings of the configured processors.
func (c *Config) ProcessorNames() []string {
var name []string
for _, processor := range c.Processors {
name = append(name, processor.Config.Name)
}
return PluginNameCounts(name)
}
// OutputNames returns a list of strings of the configured outputs.
func (c *Config) OutputNames() []string {
var name []string
for _, output := range c.Outputs {
name = append(name, output.Config.Name)
}
return PluginNameCounts(name)
}
// PluginNameCounts returns a list of sorted plugin names and their count
func PluginNameCounts(plugins []string) []string {
names := make(map[string]int)
for _, plugin := range plugins {
names[plugin]++
}
var namecount []string
for name, count := range names {
if count == 1 {
namecount = append(namecount, name)
} else {
namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count))
}
}
sort.Strings(namecount)
return namecount
}
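// For example, PluginNameCounts([]string{"cpu", "cpu", "mem"}) returns
// []string{"cpu (2x)", "mem"}.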
// ListTags returns a string of tags specified in the config,
// line-protocol style
func (c *Config) ListTags() string {
var tags []string
for k, v := range c.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(tags)
return strings.Join(tags, " ")
}
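// For example, with c.Tags set to map[string]string{"dc": "us-east-1", "rack": "1a"},
// ListTags returns `dc=us-east-1 rack=1a`.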
var header = `# Telegraf Configuration
#
# Telegraf is entirely plugin driven. All metrics are gathered from the
# declared inputs, and sent to the declared outputs.
#
# Plugins must be declared in here to be active.
# To deactivate a plugin, comment out the name and any variables.
#
# Use 'telegraf -config telegraf.conf -test' to see what metrics a config
# file would generate.
#
# Environment variables can be used anywhere in this config file, simply surround
# them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"),
# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})
`
var globalTagsConfig = `
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
`
var agentConfig = `
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "10s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## Maximum number of unwritten metrics per output. Increasing this value
## allows for longer periods of output downtime without dropping metrics at the
## cost of higher maximum memory usage.
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. Maximum flush_interval will be
## flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Log at debug level.
# debug = false
## Log only error level messages.
# quiet = false
## Log target controls the destination for logs and can be one of "file",
## "stderr" or, on Windows, "eventlog". When set to "file", the output file
## is determined by the "logfile" setting.
# logtarget = "file"
## Name of the file to be logged to when using the "file" logtarget. If set to
## the empty string then logs are written to stderr.
# logfile = ""
## The logfile will be rotated after the time interval specified. When set
## to 0 no time based rotation is performed. Logs are rotated only when
## written to, if there is no log activity rotation may be delayed.
# logfile_rotation_interval = "0d"
## The logfile will be rotated when it becomes larger than the specified
## size. When set to 0 no size based rotation is performed.
# logfile_rotation_max_size = "0MB"
## Maximum number of rotated archives to keep, any older logs are deleted.
## If set to -1, no archives are removed.
# logfile_rotation_max_archives = 5
## Pick a timezone to use when logging or type 'local' for local time.
## Example: America/Chicago
# log_with_timezone = ""
## Override default hostname, if empty use os.Hostname()
hostname = ""
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
`
var outputHeader = `
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
`
var processorHeader = `
###############################################################################
# PROCESSOR PLUGINS #
###############################################################################
`
var aggregatorHeader = `
###############################################################################
# AGGREGATOR PLUGINS #
###############################################################################
`
var inputHeader = `
###############################################################################
# INPUT PLUGINS #
###############################################################################
`
var serviceInputHeader = `
###############################################################################
# SERVICE INPUT PLUGINS #
###############################################################################
`
// PrintSampleConfig prints the sample config
func PrintSampleConfig(
sectionFilters []string,
inputFilters []string,
outputFilters []string,
aggregatorFilters []string,
processorFilters []string,
) {
// print headers
fmt.Printf(header)
if len(sectionFilters) == 0 {
sectionFilters = sectionDefaults
}
printFilteredGlobalSections(sectionFilters)
// print output plugins
if sliceContains("outputs", sectionFilters) {
if len(outputFilters) != 0 {
if len(outputFilters) >= 3 && outputFilters[1] != "none" {
fmt.Printf(outputHeader)
}
printFilteredOutputs(outputFilters, false)
} else {
fmt.Printf(outputHeader)
printFilteredOutputs(outputDefaults, false)
// Print non-default outputs, commented
var pnames []string
for pname := range outputs.Outputs {
if !sliceContains(pname, outputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredOutputs(pnames, true)
}
}
// print processor plugins
if sliceContains("processors", sectionFilters) {
if len(processorFilters) != 0 {
if len(processorFilters) >= 3 && processorFilters[1] != "none" {
fmt.Printf(processorHeader)
}
printFilteredProcessors(processorFilters, false)
} else {
fmt.Printf(processorHeader)
pnames := []string{}
for pname := range processors.Processors {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredProcessors(pnames, true)
}
}
// print aggregator plugins
if sliceContains("aggregators", sectionFilters) {
if len(aggregatorFilters) != 0 {
if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" {
fmt.Printf(aggregatorHeader)
}
printFilteredAggregators(aggregatorFilters, false)
} else {
fmt.Printf(aggregatorHeader)
pnames := []string{}
for pname := range aggregators.Aggregators {
pnames = append(pnames, pname)
}
sort.Strings(pnames)
printFilteredAggregators(pnames, true)
}
}
// print input plugins
if sliceContains("inputs", sectionFilters) {
if len(inputFilters) != 0 {
if len(inputFilters) >= 3 && inputFilters[1] != "none" {
fmt.Printf(inputHeader)
}
printFilteredInputs(inputFilters, false)
} else {
fmt.Printf(inputHeader)
printFilteredInputs(inputDefaults, false)
// Print non-default inputs, commented
var pnames []string
for pname := range inputs.Inputs {
if !sliceContains(pname, inputDefaults) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
printFilteredInputs(pnames, true)
}
}
}
func printFilteredProcessors(processorFilters []string, commented bool) {
// Filter processors
var pnames []string
for pname := range processors.Processors {
if sliceContains(pname, processorFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// Print Outputs
for _, pname := range pnames {
creator := processors.Processors[pname]
output := creator()
printConfig(pname, output, "processors", commented)
}
}
func printFilteredAggregators(aggregatorFilters []string, commented bool) {
// Filter outputs
var anames []string
for aname := range aggregators.Aggregators {
if sliceContains(aname, aggregatorFilters) {
anames = append(anames, aname)
}
}
sort.Strings(anames)
// Print Outputs
for _, aname := range anames {
creator := aggregators.Aggregators[aname]
output := creator()
printConfig(aname, output, "aggregators", commented)
}
}
func printFilteredInputs(inputFilters []string, commented bool) {
// Filter inputs
var pnames []string
for pname := range inputs.Inputs {
if sliceContains(pname, inputFilters) {
pnames = append(pnames, pname)
}
}
sort.Strings(pnames)
// cache service inputs to print them at the end
servInputs := make(map[string]telegraf.ServiceInput)
// for alphabetical looping:
servInputNames := []string{}
// Print Inputs
for _, pname := range pnames {
if pname == "cisco_telemetry_gnmi" {
continue
}
creator := inputs.Inputs[pname]
input := creator()
switch p := input.(type) {
case telegraf.ServiceInput:
servInputs[pname] = p
servInputNames = append(servInputNames, pname)
continue
}
printConfig(pname, input, "inputs", commented)
}
// Print Service Inputs
if len(servInputs) == 0 {
return
}
sort.Strings(servInputNames)
fmt.Printf(serviceInputHeader)
for _, name := range servInputNames {
printConfig(name, servInputs[name], "inputs", commented)
}
}
func printFilteredOutputs(outputFilters []string, commented bool) {
// Filter outputs
var onames []string
for oname := range outputs.Outputs {
if sliceContains(oname, outputFilters) {
onames = append(onames, oname)
}
}
sort.Strings(onames)
// Print Outputs
for _, oname := range onames {
creator := outputs.Outputs[oname]
output := creator()
printConfig(oname, output, "outputs", commented)
}
}
func printFilteredGlobalSections(sectionFilters []string) {
if sliceContains("global_tags", sectionFilters) {
fmt.Printf(globalTagsConfig)
}
if sliceContains("agent", sectionFilters) {
fmt.Printf(agentConfig)
}
}
func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) {
comment := ""
if commented {
comment = "# "
}
fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment,
op, name)
config := p.SampleConfig()
if config == "" {
fmt.Printf("\n%s # no configuration\n\n", comment)
} else {
lines := strings.Split(config, "\n")
for i, line := range lines {
if i == 0 || i == len(lines)-1 {
fmt.Print("\n")
continue
}
fmt.Print(strings.TrimRight(comment+line, " ") + "\n")
}
}
}
func sliceContains(name string, list []string) bool {
for _, b := range list {
if b == name {
return true
}
}
return false
}
// PrintInputConfig prints the config usage of a single input.
func PrintInputConfig(name string) error {
if creator, ok := inputs.Inputs[name]; ok {
printConfig(name, creator(), "inputs", false)
} else {
return fmt.Errorf("Input %s not found", name)
}
return nil
}
// PrintOutputConfig prints the config usage of a single output.
func PrintOutputConfig(name string) error {
if creator, ok := outputs.Outputs[name]; ok {
printConfig(name, creator(), "outputs", false)
} else {
return fmt.Errorf("Output %s not found", name)
}
return nil
}
// LoadDirectory loads all toml config files found in the specified path, recursively.
func (c *Config) LoadDirectory(path string) error {
walkfn := func(thispath string, info os.FileInfo, _ error) error {
if info == nil {
log.Printf("W! Telegraf is not permitted to read %s", thispath)
return nil
}
if info.IsDir() {
if strings.HasPrefix(info.Name(), "..") {
// skip Kubernetes mounts, preventing loading the same config twice
return filepath.SkipDir
}
return nil
}
name := info.Name()
if len(name) < 6 || name[len(name)-5:] != ".conf" {
return nil
}
err := c.LoadConfig(thispath)
if err != nil {
return err
}
return nil
}
return filepath.Walk(path, walkfn)
}
// Try to find a default config file at these locations (in order):
// 1. $TELEGRAF_CONFIG_PATH
// 2. $HOME/.telegraf/telegraf.conf
// 3. /etc/telegraf/telegraf.conf
//
func getDefaultConfigPath() (string, error) {
envfile := os.Getenv("TELEGRAF_CONFIG_PATH")
homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf")
etcfile := "/etc/telegraf/telegraf.conf"
if runtime.GOOS == "windows" {
programFiles := os.Getenv("ProgramFiles")
if programFiles == "" { // Should never happen
programFiles = `C:\Program Files`
}
etcfile = programFiles + `\Telegraf\telegraf.conf`
}
for _, path := range []string{envfile, homefile, etcfile} {
if isURL(path) {
log.Printf("I! Using config url: %s", path)
return path, nil
}
if _, err := os.Stat(path); err == nil {
log.Printf("I! Using config file: %s", path)
return path, nil
}
}
// if we got here, we didn't find a file in a default location
return "", fmt.Errorf("No config file specified, and could not find one"+
" in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile)
}
// isURL checks if string is valid url
func isURL(str string) bool {
u, err := url.Parse(str)
return err == nil && u.Scheme != "" && u.Host != ""
}
// LoadConfig loads the given config file and applies it to c
func (c *Config) LoadConfig(path string) error {
var err error
if path == "" {
if path, err = getDefaultConfigPath(); err != nil {
return err
}
}
data, err := loadConfig(path)
if err != nil {
return fmt.Errorf("Error loading config file %s: %w", path, err)
}
if err = c.LoadConfigData(data); err != nil {
return fmt.Errorf("Error loading config file %s: %w", path, err)
}
return nil
}
// LoadConfigData loads TOML-formatted config data
func (c *Config) LoadConfigData(data []byte) error {
tbl, err := parseConfig(data)
if err != nil {
return fmt.Errorf("Error parsing data: %s", err)
}
// Parse tags tables first:
for _, tableName := range []string{"tags", "global_tags"} {
if val, ok := tbl.Fields[tableName]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("invalid configuration, bad table name %q", tableName)
}
if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil {
return fmt.Errorf("error parsing table name %q: %s", tableName, err)
}
}
}
// Parse agent table:
if val, ok := tbl.Fields["agent"]; ok {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("invalid configuration, error parsing agent table")
}
if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil {
return fmt.Errorf("error parsing [agent]: %w", err)
}
}
if !c.Agent.OmitHostname {
if c.Agent.Hostname == "" {
hostname, err := os.Hostname()
if err != nil {
return err
}
c.Agent.Hostname = hostname
}
c.Tags["host"] = c.Agent.Hostname
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields))
}
// Parse all the rest of the plugins:
for name, val := range tbl.Fields {
subTable, ok := val.(*ast.Table)
if !ok {
return fmt.Errorf("invalid configuration, error parsing field %q as table", name)
}
switch name {
case "agent", "global_tags", "tags":
case "outputs":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [outputs.influxdb] support
case *ast.Table:
if err = c.addOutput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("error parsing %s, %w", pluginName, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addOutput(pluginName, t); err != nil {
return fmt.Errorf("error parsing %s array, %w", pluginName, err)
}
}
default:
return fmt.Errorf("unsupported config format: %s",
pluginName)
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
}
}
case "inputs", "plugins":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
// legacy [inputs.cpu] support
case *ast.Table:
if err = c.addInput(pluginName, pluginSubTable); err != nil {
return fmt.Errorf("error parsing %s, %w", pluginName, err)
}
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addInput(pluginName, t); err != nil {
return fmt.Errorf("error parsing %s, %w", pluginName, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
}
}
case "processors":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addProcessor(pluginName, t); err != nil {
return fmt.Errorf("error parsing %s, %w", pluginName, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
}
}
case "aggregators":
for pluginName, pluginVal := range subTable.Fields {
switch pluginSubTable := pluginVal.(type) {
case []*ast.Table:
for _, t := range pluginSubTable {
if err = c.addAggregator(pluginName, t); err != nil {
return fmt.Errorf("Error parsing %s, %s", pluginName, err)
}
}
default:
return fmt.Errorf("Unsupported config format: %s",
pluginName)
}
if len(c.UnusedFields) > 0 {
return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields))
}
}
// Assume it's an input plugin for legacy config file support if no other
// identifiers are present
default:
if err = c.addInput(name, subTable); err != nil {
return fmt.Errorf("Error parsing %s, %s", name, err)
}
}
}
if len(c.Processors) > 1 {
sort.Sort(c.Processors)
}
return nil
}
// trimBOM trims the Byte-Order-Marks from the beginning of the file.
// This is for Windows compatibility only.
// see https://github.com/influxdata/telegraf/issues/1378
func trimBOM(f []byte) []byte {
return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf"))
}
// escapeEnv escapes a value for inserting into a TOML string.
func escapeEnv(value string) string {
return envVarEscaper.Replace(value)
}
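// For example, escapeEnv(`C:\data "prod"`) returns `C:\\data \"prod\"`, which
// keeps the substituted value a valid quoted TOML string.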
func loadConfig(config string) ([]byte, error) {
if fetchURLRe.MatchString(config) {
u, err := url.Parse(config)
if err != nil {
return nil, err
}
switch u.Scheme {
case "https", "http":
return fetchConfig(u)
default:
return nil, fmt.Errorf("scheme %q not supported", u.Scheme)
}
}
// If it isn't a https scheme, try it as a file
return os.ReadFile(config)
}
func fetchConfig(u *url.URL) ([]byte, error) {
req, err := http.NewRequest("GET", u.String(), nil)
if err != nil {
return nil, err
}
if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists {
req.Header.Add("Authorization", "Token "+v)
}
req.Header.Add("Accept", "application/toml")
req.Header.Set("User-Agent", internal.ProductToken())
retries := 3
for i := 0; i <= retries; i++ {
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("Retry %d of %d failed connecting to HTTP config server %s", i, retries, err)
}
		if resp.StatusCode != http.StatusOK {
			// Close the body before retrying or returning an error to avoid leaking the connection.
			resp.Body.Close()
			if i < retries {
				log.Printf("Error getting HTTP config. Retry %d of %d in %s. Status=%d", i, retries, httpLoadConfigRetryInterval, resp.StatusCode)
				time.Sleep(httpLoadConfigRetryInterval)
				continue
			}
			return nil, fmt.Errorf("Retry %d of %d failed to retrieve remote config: %s", i, retries, resp.Status)
		}
defer resp.Body.Close()
return io.ReadAll(resp.Body)
}
return nil, nil
}
// parseConfig parses the given TOML configuration contents and returns the
// AST produced by the TOML parser. Before parsing, it finds environment
// variables in the contents and replaces them with their values.
func parseConfig(contents []byte) (*ast.Table, error) {
contents = trimBOM(contents)
parameters := envVarRe.FindAllSubmatch(contents, -1)
for _, parameter := range parameters {
if len(parameter) != 3 {
continue
}
var envVar []byte
if parameter[1] != nil {
envVar = parameter[1]
} else if parameter[2] != nil {
envVar = parameter[2]
} else {
continue
}
envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$"))
if ok {
envVal = escapeEnv(envVal)
contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1)
}
}
return toml.Parse(contents)
}
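// Sketch of the substitution performed above (the variable values shown are
// hypothetical):
//
//	token = "${INFLUX_TOKEN}"  ->  token = "s3cr3t"
//	urls = ["$INFLUXDB_URL"]   ->  urls = ["http://localhost:8086"]
//
// Variables that are not set in the environment are left untouched because
// os.LookupEnv reports ok == false for them.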
func (c *Config) addAggregator(name string, table *ast.Table) error {
creator, ok := aggregators.Aggregators[name]
if !ok {
return fmt.Errorf("Undefined but requested aggregator: %s", name)
}
aggregator := creator()
conf, err := c.buildAggregator(name, table)
if err != nil {
return err
}
if err := c.toml.UnmarshalTable(table, aggregator); err != nil {
return err
}
c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf))
return nil
}
func (c *Config) addProcessor(name string, table *ast.Table) error {
creator, ok := processors.Processors[name]
if !ok {
return fmt.Errorf("Undefined but requested processor: %s", name)
}
processorConfig, err := c.buildProcessor(name, table)
if err != nil {
return err
}
rf, err := c.newRunningProcessor(creator, processorConfig, table)
if err != nil {
return err
}
c.Processors = append(c.Processors, rf)
// save a copy for the aggregator
rf, err = c.newRunningProcessor(creator, processorConfig, table)
if err != nil {
return err
}
c.AggProcessors = append(c.AggProcessors, rf)
return nil
}
func (c *Config) newRunningProcessor(
creator processors.StreamingCreator,
processorConfig *models.ProcessorConfig,
table *ast.Table,
) (*models.RunningProcessor, error) {
processor := creator()
if p, ok := processor.(unwrappable); ok {
if err := c.toml.UnmarshalTable(table, p.Unwrap()); err != nil {
return nil, err
}
} else {
if err := c.toml.UnmarshalTable(table, processor); err != nil {
return nil, err
}
}
rf := models.NewRunningProcessor(processor, processorConfig)
return rf, nil
}
func (c *Config) addOutput(name string, table *ast.Table) error {
if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) {
return nil
}
creator, ok := outputs.Outputs[name]
if !ok {
return fmt.Errorf("Undefined but requested output: %s", name)
}
output := creator()
// If the output has a SetSerializer function, then this means it can write
// arbitrary types of output, so build the serializer and set it.
switch t := output.(type) {
case serializers.SerializerOutput:
serializer, err := c.buildSerializer(table)
if err != nil {
return err
}
t.SetSerializer(serializer)
}
outputConfig, err := c.buildOutput(name, table)
if err != nil {
return err
}
if err := c.toml.UnmarshalTable(table, output); err != nil {
return err
}
ro := models.NewRunningOutput(output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit)
c.Outputs = append(c.Outputs, ro)
return nil
}
func (c *Config) addInput(name string, table *ast.Table) error {
if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) {
return nil
}
// Legacy support renaming io input to diskio
if name == "io" {
name = "diskio"
}
creator, ok := inputs.Inputs[name]
if !ok {
return fmt.Errorf("Undefined but requested input: %s", name)
}
input := creator()
// If the input has a SetParser function, then this means it can accept
// arbitrary types of input, so build the parser and set it.
if t, ok := input.(parsers.ParserInput); ok {
parser, err := c.buildParser(name, table)
if err != nil {
return err
}
t.SetParser(parser)
}
if t, ok := input.(parsers.ParserFuncInput); ok {
config, err := c.getParserConfig(name, table)
if err != nil {
return err
}
t.SetParserFunc(func() (parsers.Parser, error) {
return parsers.NewParser(config)
})
}
pluginConfig, err := c.buildInput(name, table)
if err != nil {
return err
}
if err := c.toml.UnmarshalTable(table, input); err != nil {
return err
}
rp := models.NewRunningInput(input, pluginConfig)
rp.SetDefaultTags(c.Tags)
c.Inputs = append(c.Inputs, rp)
return nil
}
// buildAggregator parses Aggregator specific items from the ast.Table,
// builds the filter and returns a
// models.AggregatorConfig to be inserted into models.RunningAggregator
func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) {
conf := &models.AggregatorConfig{
Name: name,
Delay: time.Millisecond * 100,
Period: time.Second * 30,
Grace: time.Second * 0,
}
c.getFieldDuration(tbl, "period", &conf.Period)
c.getFieldDuration(tbl, "delay", &conf.Delay)
c.getFieldDuration(tbl, "grace", &conf.Grace)
c.getFieldBool(tbl, "drop_original", &conf.DropOriginal)
c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix)
c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix)
c.getFieldString(tbl, "name_override", &conf.NameOverride)
c.getFieldString(tbl, "alias", &conf.Alias)
conf.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil {
return nil, fmt.Errorf("could not parse tags for input %s", name)
}
}
}
if c.hasErrs() {
return nil, c.firstErr()
}
var err error
conf.Filter, err = c.buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildProcessor parses Processor specific items from the ast.Table,
// builds the filter and returns a
// models.ProcessorConfig to be inserted into models.RunningProcessor
func (c *Config) buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) {
conf := &models.ProcessorConfig{Name: name}
c.getFieldInt64(tbl, "order", &conf.Order)
c.getFieldString(tbl, "alias", &conf.Alias)
if c.hasErrs() {
return nil, c.firstErr()
}
var err error
conf.Filter, err = c.buildFilter(tbl)
if err != nil {
return conf, err
}
return conf, nil
}
// buildFilter builds a Filter
// (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to
// be inserted into the models.OutputConfig/models.InputConfig
// to be used for glob filtering on tags and measurements
func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) {
f := models.Filter{}
c.getFieldStringSlice(tbl, "namepass", &f.NamePass)
c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop)
c.getFieldStringSlice(tbl, "pass", &f.FieldPass)
c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass)
c.getFieldStringSlice(tbl, "drop", &f.FieldDrop)
c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop)
c.getFieldTagFilter(tbl, "tagpass", &f.TagPass)
c.getFieldTagFilter(tbl, "tagdrop", &f.TagDrop)
c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude)
c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude)
if c.hasErrs() {
return f, c.firstErr()
}
if err := f.Compile(); err != nil {
return f, err
}
return f, nil
}
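// A minimal illustrative TOML fragment exercising these filter keys (the
// plugin name and values are hypothetical):
//
//	[[inputs.cpu]]
//	  fieldpass = ["usage_*"]
//	  tagexclude = ["host"]
//	  [inputs.cpu.tagpass]
//	    cpu = ["cpu0", "cpu1"]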
// buildInput parses input specific items from the ast.Table,
// builds the filter and returns a
// models.InputConfig to be inserted into models.RunningInput
func (c *Config) buildInput(name string, tbl *ast.Table) (*models.InputConfig, error) {
cp := &models.InputConfig{Name: name}
c.getFieldDuration(tbl, "interval", &cp.Interval)
c.getFieldDuration(tbl, "precision", &cp.Precision)
c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter)
c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix)
c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix)
c.getFieldString(tbl, "name_override", &cp.NameOverride)
c.getFieldString(tbl, "alias", &cp.Alias)
cp.Tags = make(map[string]string)
if node, ok := tbl.Fields["tags"]; ok {
if subtbl, ok := node.(*ast.Table); ok {
if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil {
return nil, fmt.Errorf("could not parse tags for input %s", name)
}
}
}
if c.hasErrs() {
return nil, c.firstErr()
}
var err error
cp.Filter, err = c.buildFilter(tbl)
if err != nil {
return cp, err
}
return cp, nil
}
// buildParser grabs the necessary entries from the ast.Table for creating
// a parsers.Parser object, and creates it, which can then be added onto
// an Input object.
func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error) {
config, err := c.getParserConfig(name, tbl)
if err != nil {
return nil, err
}
parser, err := parsers.NewParser(config)
if err != nil {
return nil, err
}
logger := models.NewLogger("parsers", config.DataFormat, name)
models.SetLoggerOnPlugin(parser, logger)
if initializer, ok := parser.(telegraf.Initializer); ok {
if err := initializer.Init(); err != nil {
return nil, err
}
}
return parser, nil
}
func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) {
pc := &parsers.Config{
JSONStrict: true,
}
c.getFieldString(tbl, "data_format", &pc.DataFormat)
// Legacy support, exec plugin originally parsed JSON by default.
if name == "exec" && pc.DataFormat == "" {
pc.DataFormat = "json"
} else if pc.DataFormat == "" {
pc.DataFormat = "influx"
}
c.getFieldString(tbl, "separator", &pc.Separator)
c.getFieldStringSlice(tbl, "templates", &pc.Templates)
c.getFieldStringSlice(tbl, "tag_keys", &pc.TagKeys)
c.getFieldStringSlice(tbl, "json_string_fields", &pc.JSONStringFields)
c.getFieldString(tbl, "json_name_key", &pc.JSONNameKey)
c.getFieldString(tbl, "json_query", &pc.JSONQuery)
c.getFieldString(tbl, "json_time_key", &pc.JSONTimeKey)
c.getFieldString(tbl, "json_time_format", &pc.JSONTimeFormat)
c.getFieldString(tbl, "json_timezone", &pc.JSONTimezone)
c.getFieldBool(tbl, "json_strict", &pc.JSONStrict)
c.getFieldString(tbl, "data_type", &pc.DataType)
c.getFieldString(tbl, "collectd_auth_file", &pc.CollectdAuthFile)
c.getFieldString(tbl, "collectd_security_level", &pc.CollectdSecurityLevel)
c.getFieldString(tbl, "collectd_parse_multivalue", &pc.CollectdSplit)
c.getFieldStringSlice(tbl, "collectd_typesdb", &pc.CollectdTypesDB)
c.getFieldString(tbl, "dropwizard_metric_registry_path", &pc.DropwizardMetricRegistryPath)
c.getFieldString(tbl, "dropwizard_time_path", &pc.DropwizardTimePath)
c.getFieldString(tbl, "dropwizard_time_format", &pc.DropwizardTimeFormat)
c.getFieldString(tbl, "dropwizard_tags_path", &pc.DropwizardTagsPath)
c.getFieldStringMap(tbl, "dropwizard_tag_paths", &pc.DropwizardTagPathsMap)
//for grok data_format
c.getFieldStringSlice(tbl, "grok_named_patterns", &pc.GrokNamedPatterns)
c.getFieldStringSlice(tbl, "grok_patterns", &pc.GrokPatterns)
c.getFieldString(tbl, "grok_custom_patterns", &pc.GrokCustomPatterns)
c.getFieldStringSlice(tbl, "grok_custom_pattern_files", &pc.GrokCustomPatternFiles)
c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone)
c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp)
//for csv parser
c.getFieldStringSlice(tbl, "csv_column_names", &pc.CSVColumnNames)
c.getFieldStringSlice(tbl, "csv_column_types", &pc.CSVColumnTypes)
c.getFieldStringSlice(tbl, "csv_tag_columns", &pc.CSVTagColumns)
c.getFieldString(tbl, "csv_timezone", &pc.CSVTimezone)
c.getFieldString(tbl, "csv_delimiter", &pc.CSVDelimiter)
c.getFieldString(tbl, "csv_comment", &pc.CSVComment)
c.getFieldString(tbl, "csv_measurement_column", &pc.CSVMeasurementColumn)
c.getFieldString(tbl, "csv_timestamp_column", &pc.CSVTimestampColumn)
c.getFieldString(tbl, "csv_timestamp_format", &pc.CSVTimestampFormat)
c.getFieldInt(tbl, "csv_header_row_count", &pc.CSVHeaderRowCount)
c.getFieldInt(tbl, "csv_skip_rows", &pc.CSVSkipRows)
c.getFieldInt(tbl, "csv_skip_columns", &pc.CSVSkipColumns)
c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace)
c.getFieldStringSlice(tbl, "csv_skip_values", &pc.CSVSkipValues)
//for hep parser
c.getFieldStringSlice(tbl, "hep_header", &pc.HEPHeader)
c.getFieldString(tbl, "hep_measurement_name", &pc.HepMeasurementName)
c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys)
c.getFieldString(tbl, "value_field_name", &pc.ValueFieldName)
//for XPath parser family
if choice.Contains(pc.DataFormat, []string{"xml", "xpath_json", "xpath_msgpack", "xpath_protobuf"}) {
c.getFieldString(tbl, "xpath_protobuf_file", &pc.XPathProtobufFile)
c.getFieldString(tbl, "xpath_protobuf_type", &pc.XPathProtobufType)
c.getFieldBool(tbl, "xpath_print_document", &pc.XPathPrintDocument)
// Determine the actual xpath configuration tables
node, xpathOK := tbl.Fields["xpath"]
if !xpathOK {
// Add this for backward compatibility
node, xpathOK = tbl.Fields[pc.DataFormat]
}
if xpathOK {
if subtbls, ok := node.([]*ast.Table); ok {
pc.XPathConfig = make([]parsers.XPathConfig, len(subtbls))
for i, subtbl := range subtbls {
subcfg := pc.XPathConfig[i]
c.getFieldString(subtbl, "metric_name", &subcfg.MetricQuery)
c.getFieldString(subtbl, "metric_selection", &subcfg.Selection)
c.getFieldString(subtbl, "timestamp", &subcfg.Timestamp)
c.getFieldString(subtbl, "timestamp_format", &subcfg.TimestampFmt)
c.getFieldStringMap(subtbl, "tags", &subcfg.Tags)
c.getFieldStringMap(subtbl, "fields", &subcfg.Fields)
c.getFieldStringMap(subtbl, "fields_int", &subcfg.FieldsInt)
c.getFieldString(subtbl, "field_selection", &subcfg.FieldSelection)
c.getFieldBool(subtbl, "field_name_expansion", &subcfg.FieldNameExpand)
c.getFieldString(subtbl, "field_name", &subcfg.FieldNameQuery)
c.getFieldString(subtbl, "field_value", &subcfg.FieldValueQuery)
pc.XPathConfig[i] = subcfg
}
}
}
}
//for JSONPath parser
if node, ok := tbl.Fields["json_v2"]; ok {
if metricConfigs, ok := node.([]*ast.Table); ok {
pc.JSONV2Config = make([]parsers.JSONV2Config, len(metricConfigs))
for i, metricConfig := range metricConfigs {
mc := pc.JSONV2Config[i]
c.getFieldString(metricConfig, "measurement_name", &mc.MeasurementName)
if mc.MeasurementName == "" {
mc.MeasurementName = name
}
c.getFieldString(metricConfig, "measurement_name_path", &mc.MeasurementNamePath)
c.getFieldString(metricConfig, "timestamp_path", &mc.TimestampPath)
c.getFieldString(metricConfig, "timestamp_format", &mc.TimestampFormat)
c.getFieldString(metricConfig, "timestamp_timezone", &mc.TimestampTimezone)
mc.Fields = getFieldSubtable(c, metricConfig)
mc.Tags = getTagSubtable(c, metricConfig)
if objectconfigs, ok := metricConfig.Fields["object"]; ok {
if objectconfigs, ok := objectconfigs.([]*ast.Table); ok {
for _, objectConfig := range objectconfigs {
var o json_v2.JSONObject
c.getFieldString(objectConfig, "path", &o.Path)
c.getFieldString(objectConfig, "timestamp_key", &o.TimestampKey)
c.getFieldString(objectConfig, "timestamp_format", &o.TimestampFormat)
c.getFieldString(objectConfig, "timestamp_timezone", &o.TimestampTimezone)
c.getFieldBool(objectConfig, "disable_prepend_keys", &o.DisablePrependKeys)
c.getFieldStringSlice(objectConfig, "included_keys", &o.IncludedKeys)
c.getFieldStringSlice(objectConfig, "excluded_keys", &o.ExcludedKeys)
c.getFieldStringSlice(objectConfig, "tags", &o.Tags)
c.getFieldStringMap(objectConfig, "renames", &o.Renames)
c.getFieldStringMap(objectConfig, "fields", &o.Fields)
o.FieldPaths = getFieldSubtable(c, objectConfig)
o.TagPaths = getTagSubtable(c, objectConfig)
mc.JSONObjects = append(mc.JSONObjects, o)
}
}
}
pc.JSONV2Config[i] = mc
}
}
}
pc.MetricName = name
if c.hasErrs() {
return nil, c.firstErr()
}
return pc, nil
}
func getFieldSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet {
var fields []json_v2.DataSet
if fieldConfigs, ok := metricConfig.Fields["field"]; ok {
if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok {
for _, fieldconfig := range fieldConfigs {
var f json_v2.DataSet
c.getFieldString(fieldconfig, "path", &f.Path)
c.getFieldString(fieldconfig, "rename", &f.Rename)
c.getFieldString(fieldconfig, "type", &f.Type)
fields = append(fields, f)
}
}
}
return fields
}
func getTagSubtable(c *Config, metricConfig *ast.Table) []json_v2.DataSet {
var tags []json_v2.DataSet
if fieldConfigs, ok := metricConfig.Fields["tag"]; ok {
if fieldConfigs, ok := fieldConfigs.([]*ast.Table); ok {
for _, fieldconfig := range fieldConfigs {
var t json_v2.DataSet
c.getFieldString(fieldconfig, "path", &t.Path)
c.getFieldString(fieldconfig, "rename", &t.Rename)
t.Type = "string"
tags = append(tags, t)
}
}
}
return tags
}
// buildSerializer grabs the necessary entries from the ast.Table for creating
// a serializers.Serializer object, and creates it, which can then be added onto
// an Output object.
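// Illustrative (hypothetical) serializer configuration in TOML, using keys read below:
//
//   [[outputs.file]]
//     data_format = "json"
//     json_timestamp_units = "1ms"
//     json_timestamp_format = "2006-01-02T15:04:05Z07:00"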
func (c *Config) buildSerializer(tbl *ast.Table) (serializers.Serializer, error) {
sc := &serializers.Config{TimestampUnits: 1 * time.Second}
c.getFieldString(tbl, "data_format", &sc.DataFormat)
if sc.DataFormat == "" {
sc.DataFormat = "influx"
}
c.getFieldString(tbl, "prefix", &sc.Prefix)
c.getFieldString(tbl, "template", &sc.Template)
c.getFieldStringSlice(tbl, "templates", &sc.Templates)
c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format)
c.getFieldString(tbl, "carbon2_sanitize_replace_char", &sc.Carbon2SanitizeReplaceChar)
c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes)
c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields)
c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport)
c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport)
c.getFieldString(tbl, "graphite_tag_sanitize_mode", &sc.GraphiteTagSanitizeMode)
c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator)
c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits)
c.getFieldString(tbl, "json_timestamp_format", &sc.TimestampFormat)
c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting)
c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric)
c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride)
c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict)
c.getFieldBool(tbl, "prometheus_export_timestamp", &sc.PrometheusExportTimestamp)
c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics)
c.getFieldBool(tbl, "prometheus_string_as_label", &sc.PrometheusStringAsLabel)
if c.hasErrs() {
return nil, c.firstErr()
}
return serializers.NewSerializer(sc)
}
// buildOutput parses output specific items from the ast.Table,
// builds the filter and returns a
// models.OutputConfig to be inserted into models.RunningOutput
// Note: error exists in the return for future calls that might require error
func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) {
filter, err := c.buildFilter(tbl)
if err != nil {
return nil, err
}
oc := &models.OutputConfig{
Name: name,
Filter: filter,
}
// TODO: support FieldPass/FieldDrop on outputs
c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval)
c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter)
c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit)
c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize)
c.getFieldString(tbl, "alias", &oc.Alias)
c.getFieldString(tbl, "name_override", &oc.NameOverride)
c.getFieldString(tbl, "name_suffix", &oc.NameSuffix)
c.getFieldString(tbl, "name_prefix", &oc.NamePrefix)
if c.hasErrs() {
return nil, c.firstErr()
}
return oc, nil
}
func (c *Config) missingTomlField(_ reflect.Type, key string) error {
switch key {
case "alias", "carbon2_format", "carbon2_sanitize_replace_char", "collectd_auth_file",
"collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter",
"csv_column_names", "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count",
"csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns",
"csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", "csv_skip_values",
"data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path",
"dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path",
"fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys",
"grace", "graphite_separator", "graphite_tag_sanitize_mode", "graphite_tag_support",
"grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns",
"grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields",
"influx_uint_support", "interval", "json_name_key", "json_query", "json_strict",
"json_string_fields", "json_time_format", "json_time_key", "json_timestamp_format", "json_timestamp_units", "json_timezone", "json_v2",
"lvm", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix",
"name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision",
"prefix", "prometheus_export_timestamp", "prometheus_ignore_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label",
"separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys",
"tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates",
"value_field_name", "wavefront_source_override", "wavefront_use_strict",
"xml", "xpath", "xpath_json", "xpath_msgpack", "xpath_protobuf", "xpath_print_document",
"xpath_protobuf_file", "xpath_protobuf_type", "hep_header", "hep_measurement_name":
// ignore fields that are common to all plugins.
default:
c.UnusedFields[key] = true
}
return nil
}
func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
*target = str.Value
}
}
}
}
func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
d, err := time.ParseDuration(str.Value)
if err != nil {
c.addError(tbl, fmt.Errorf("error parsing duration: %w", err))
return
}
targetVal := reflect.ValueOf(target).Elem()
targetVal.Set(reflect.ValueOf(d))
}
}
}
}
func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) {
var err error
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
switch t := kv.Value.(type) {
case *ast.Boolean:
*target, err = t.Boolean()
if err != nil {
c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
return
}
case *ast.String:
*target, err = strconv.ParseBool(t.Value)
if err != nil {
c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value))
return
}
default:
c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value.Source()))
return
}
}
}
}
func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if iAst, ok := kv.Value.(*ast.Integer); ok {
i, err := iAst.Int()
if err != nil {
c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
return
}
*target = int(i)
}
}
}
}
func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if iAst, ok := kv.Value.(*ast.Integer); ok {
i, err := iAst.Int()
if err != nil {
c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value))
return
}
*target = i
}
}
}
}
func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) {
if node, ok := tbl.Fields[fieldName]; ok {
if kv, ok := node.(*ast.KeyValue); ok {
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
*target = append(*target, str.Value)
}
}
} else {
c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format", fieldName))
return
}
}
}
}
func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) {
if node, ok := tbl.Fields[fieldName]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
tagfilter := models.TagFilter{Name: name}
if ary, ok := kv.Value.(*ast.Array); ok {
for _, elem := range ary.Value {
if str, ok := elem.(*ast.String); ok {
tagfilter.Filter = append(tagfilter.Filter, str.Value)
}
}
} else {
c.addError(tbl, fmt.Errorf("found unexpected format while parsing %q, expecting string array/slice format on each entry", fieldName))
return
}
*target = append(*target, tagfilter)
}
}
}
}
}
func (c *Config) getFieldStringMap(tbl *ast.Table, fieldName string, target *map[string]string) {
*target = map[string]string{}
if node, ok := tbl.Fields[fieldName]; ok {
if subtbl, ok := node.(*ast.Table); ok {
for name, val := range subtbl.Fields {
if kv, ok := val.(*ast.KeyValue); ok {
if str, ok := kv.Value.(*ast.String); ok {
(*target)[name] = str.Value
}
}
}
}
}
}
func keys(m map[string]bool) []string {
result := []string{}
for k := range m {
result = append(result, k)
}
return result
}
func (c *Config) hasErrs() bool {
return len(c.errs) > 0
}
func (c *Config) firstErr() error {
if len(c.errs) == 0 {
return nil
}
return c.errs[0]
}
func (c *Config) addError(tbl *ast.Table, err error) {
c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err))
}
// unwrappable lets you retrieve the original telegraf.Processor from the
// StreamingProcessor. This is necessary because the toml Unmarshaller won't
// look inside composed types.
type unwrappable interface {
Unwrap() telegraf.Processor
}
|
[
"\"TELEGRAF_CONFIG_PATH\"",
"\"ProgramFiles\""
] |
[] |
[
"TELEGRAF_CONFIG_PATH",
"ProgramFiles"
] |
[]
|
["TELEGRAF_CONFIG_PATH", "ProgramFiles"]
|
go
| 2 | 0 | |
.travis/manage_daily_builds.py
|
#!/usr/bin/env python3
'''
Created on May 16, 2019
'''
import os
import re
from github3 import GitHub
from pprint import pformat
GITHUB_API = 'https://api.github.com/repos'
GITHUB_RELEASES = 'releases'
AUTH_TOKEN = os.environ.get('GH_TOKEN')
REPOSITORY_OWNER = 'loonwerks'
REPOSITORY_REPO = 'AGREE'
PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\d+\.\d+\.\d+(-(\d{12}))?-.*')
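# Illustrative (hypothetical) asset name matched by PRODUCT_ASSET_PATTERN:
#   com.rockwellcollins.atc.agree.repository-2.9.1-202105161234-win32.zip
# group(1) captures the optional "-<12-digit build timestamp>" suffix
# ("-202105161234" here), which is compared below to find the latest build.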
def manage_daily_builds():
# obtain git handle
gh = GitHub(GITHUB_API, token=AUTH_TOKEN)
repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO)
# get list of releases
releases = repository.releases()
# extract keys and sort by build date
release_keys = {x.id : x.created_at for x in releases if x.name and "Nightly development build" in x.name}
sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1])
print('%s' % (pformat(sorted_keys)))
# filter to obtain the keys to delete
delete_keys = [v[0] for v in sorted_keys[2:]]
print('Deleting releases: %s' % (pformat(delete_keys)))
# iterate, deleting the releases and corresponding tags
for rel in releases:
print('examining rel %d from %s...' % (rel.id, str(rel.created_at)))
if rel.id in delete_keys and rel.tag_name is not None:
print(' deleting release id %d and tag %s.' % (rel.id, rel.tag_name))
rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name))
rel.delete()
if rel_tag_ref is not None:
print(' deleting tag %s' % (rel_tag_ref.ref))
rel_tag_ref.delete()
else:
# Look for stale files in the release
assets = rel.assets()
print('In release %s found assets:' % (rel.name))
for asset in assets:
match = PRODUCT_ASSET_PATTERN.search(asset.name)
print(' asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)])
latest_build_time = build_times[-1] if build_times else None
print('Latest build time is %s' % (latest_build_time))
for asset in assets:
match = PRODUCT_ASSET_PATTERN.search(asset.name)
# print(' asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
if match is not None:
asset_build_time = match.group(1)
if asset_build_time != latest_build_time:
print('deleting stale asset %s' % (asset.name))
asset.delete()
if __name__ == '__main__':
manage_daily_builds()
|
[] |
[] |
[
"GH_TOKEN"
] |
[]
|
["GH_TOKEN"]
|
python
| 1 | 0 | |
pkg/testutils/modelharness.go
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testutils
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"testing"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/v1alpha2"
"k8s.io/kops/pkg/diff"
"k8s.io/kops/pkg/kopscodecs"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/util/pkg/text"
)
type Model struct {
Cluster *kops.Cluster
InstanceGroups []*kops.InstanceGroup
}
// LoadModel loads a cluster and instancegroups from a cluster.yaml file found in basedir
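// An illustrative (hypothetical) cluster.yaml holds one Cluster object plus zero or
// more InstanceGroup objects, separated by "---" section markers, e.g.:
//
//   apiVersion: kops.k8s.io/v1alpha2
//   kind: Cluster
//   metadata:
//     name: example.cluster.k8s.local
//   spec: ...
//   ---
//   apiVersion: kops.k8s.io/v1alpha2
//   kind: InstanceGroup
//   metadata:
//     name: nodes
//   spec: ...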
func LoadModel(basedir string) (*Model, error) {
clusterYamlPath := path.Join(basedir, "cluster.yaml")
clusterYaml, err := ioutil.ReadFile(clusterYamlPath)
if err != nil {
return nil, fmt.Errorf("error reading file %q: %v", clusterYamlPath, err)
}
spec := &Model{}
sections := text.SplitContentToSections(clusterYaml)
for _, section := range sections {
defaults := &schema.GroupVersionKind{
Group: v1alpha2.SchemeGroupVersion.Group,
Version: v1alpha2.SchemeGroupVersion.Version,
}
o, gvk, err := kopscodecs.Decode(section, defaults)
if err != nil {
return nil, fmt.Errorf("error parsing file %v", err)
}
switch v := o.(type) {
case *kops.Cluster:
if spec.Cluster != nil {
return nil, fmt.Errorf("found multiple clusters")
}
spec.Cluster = v
case *kops.InstanceGroup:
spec.InstanceGroups = append(spec.InstanceGroups, v)
default:
return nil, fmt.Errorf("Unhandled kind %q", gvk)
}
}
return spec, nil
}
func ValidateTasks(t *testing.T, basedir string, context *fi.ModelBuilderContext) {
var keys []string
for key := range context.Tasks {
keys = append(keys, key)
}
sort.Strings(keys)
var yamls []string
for _, key := range keys {
task := context.Tasks[key]
yaml, err := kops.ToRawYaml(task)
if err != nil {
t.Fatalf("error serializing task: %v", err)
}
yamls = append(yamls, strings.TrimSpace(string(yaml)))
}
actualTasksYaml := strings.Join(yamls, "\n---\n")
actualTasksYaml = strings.TrimSpace(actualTasksYaml)
tasksYamlPath := path.Join(basedir, "tasks.yaml")
AssertMatchesFile(t, actualTasksYaml, tasksYamlPath)
}
// AssertMatchesFile matches the actual value against the content of the expected file.
// If HACK_UPDATE_EXPECTED_IN_PLACE is set, it will write the actual value to the expected file,
// which is very handy when updating our tests.
func AssertMatchesFile(t *testing.T, actual string, p string) {
actual = strings.TrimSpace(actual)
expectedBytes, err := ioutil.ReadFile(p)
if err != nil {
t.Fatalf("error reading file %q: %v", p, err)
}
expected := strings.TrimSpace(string(expectedBytes))
//on windows, with git set to autocrlf, the reference files on disk have windows line endings
expected = strings.Replace(expected, "\r\n", "\n", -1)
if actual == expected {
return
}
if os.Getenv("HACK_UPDATE_EXPECTED_IN_PLACE") != "" {
t.Logf("HACK_UPDATE_EXPECTED_IN_PLACE: writing expected output %s", p)
// Keep git happy with a trailing newline
actual += "\n"
if err := ioutil.WriteFile(p, []byte(actual), 0644); err != nil {
t.Errorf("error writing expected output %s: %v", p, err)
}
// Keep going so we write all files in a test
t.Errorf("output did not match expected for %q", p)
return
}
diffString := diff.FormatDiff(expected, actual)
t.Logf("diff:\n%s\n", diffString)
abs, err := filepath.Abs(p)
if err != nil {
t.Errorf("unable to get absolute path for %q: %v", p, err)
} else {
p = abs
}
t.Logf("to update golden output automatically, run hack/update-expected.sh")
t.Fatalf("output did not match expected for %q", p)
}
|
[
"\"HACK_UPDATE_EXPECTED_IN_PLACE\""
] |
[] |
[
"HACK_UPDATE_EXPECTED_IN_PLACE"
] |
[]
|
["HACK_UPDATE_EXPECTED_IN_PLACE"]
|
go
| 1 | 0 | |
pkg/nytimes/nytimes.go
|
package nytimes
import (
"encoding/json"
"github.com/vitsensei/infogrid/pkg/extractor"
"github.com/vitsensei/infogrid/pkg/models"
"golang.org/x/net/html"
"io/ioutil"
"net/http"
"os"
"strings"
"sync"
)
var (
apiKey = os.Getenv("NYTIMES_KEY")
partialTopStoryURL = "https://api.nytimes.com/svc/topstories/v2/home.json?api-key="
wg sync.WaitGroup
)
// The json of the response from NYTimes API
type TopStories struct {
Articles []models.Article `json:"results"`
}
// The API for other packages to interact with
type API struct {
url string
allowedSections []string
TopStories TopStories `json:"body"`
}
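// NewAPI returns an API preconfigured with the default allowed news sections.
// Illustrative usage sketch (error handling elided):
//
//   api := NewAPI()
//   if err := api.GenerateArticles(); err != nil { /* handle error */ }
//   articles := api.GetArticles()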
func NewAPI() *API {
return &API{
allowedSections: []string{"business", "politics", "technology", "us", "world"},
}
}
// Used in controller/article to filter out the "non-news" sections
func (a *API) FilterBySections() {
var filteredArticles []models.Article
for _, article := range a.TopStories.Articles {
for _, allowedSection := range a.allowedSections {
if article.Section == allowedSection {
filteredArticles = append(filteredArticles, article)
break
}
}
}
a.TopStories.Articles = filteredArticles
}
func (a *API) generateURL() {
a.url = partialTopStoryURL + apiKey
}
// Used in ExtractText to detect ArticleBody node
func isArticleBody(n html.Node) bool {
for _, a := range n.Attr {
if a.Key == "name" && a.Val == "articleBody" {
return true
}
}
return false
}
// Given a URL, the text will be extracted (if it exists)
func ExtractText(url string) (string, error) {
var paragraph string
bodyString, err := extractor.ExtractTextFromURL(url)
if err != nil {
return "", err
}
doc, err := html.Parse(strings.NewReader(bodyString))
if err != nil {
return "", err
}
var articleBodyNode *html.Node
// All the actual writing is in Article Body node. Find this node
// and extract text from it to avoid extracting rubbish
var findArticleBodyNode func(*html.Node)
findArticleBodyNode = func(n *html.Node) {
if isArticleBody(*n) {
articleBodyNode = n
return
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
findArticleBodyNode(c)
}
}
findArticleBodyNode(doc)
// Given the article body node, extract the text
var f func(*html.Node)
f = func(n *html.Node) {
if n.Type == html.TextNode && n.Parent.Data == "p" {
paragraph = paragraph + n.Data + "\n"
}
for c := n.FirstChild; c != nil; c = c.NextSibling {
f(c)
}
}
// NYTimes loves interactive articles (and they are amazing!). Unfortunately, it is not
// the usual text format and therefore cannot be extracted
// (for example: https://www.nytimes.com/interactive/2020/obituaries/people-died-coronavirus-obituaries.html#lloyd-porter).
// Most likely they don't have an article node, and we will skip those interactive ones.
if articleBodyNode != nil {
f(articleBodyNode)
}
return paragraph, nil
}
func GenerateArticleText(article *models.Article) {
defer wg.Done()
text, _ := ExtractText(article.URL)
if text != "" {
article.Text = text
tags, err := extractor.ExtractTags(text, 3)
if err == nil {
article.Tags = tags
}
}
}
// Construct the Article list (TopStories struct).
// Each Article in the list will only contain the URL, Section, and Title
// after this call. These are the value returned from NYTimes API.
func (a *API) GenerateArticles() error {
if a.url == "" {
a.generateURL()
}
resp, err := http.Get(a.url)
if err != nil {
return err
}
defer func() {
err = resp.Body.Close()
}()
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
err = json.Unmarshal(bytes, &a.TopStories)
if err != nil {
return err
}
a.FilterBySections()
// Extract text from URL
for i := range a.TopStories.Articles {
wg.Add(1)
go GenerateArticleText(&a.TopStories.Articles[i])
}
wg.Wait()
// Filter out the node that is interactive ~= article.text == ""
var articleWithText []models.Article
for i := range a.TopStories.Articles {
if a.TopStories.Articles[i].Text != "" {
articleWithText = append(articleWithText, a.TopStories.Articles[i])
}
}
a.TopStories.Articles = articleWithText
return err
}
// A getter-style function that exposes the articles array
// through the interface
func (a *API) GetArticles() []models.Article {
return a.TopStories.Articles
}
|
[
"\"NYTIMES_KEY\""
] |
[] |
[
"NYTIMES_KEY"
] |
[]
|
["NYTIMES_KEY"]
|
go
| 1 | 0 | |
logger.py
|
import logging
import os
import logstash
LOGLEVEL = os.getenv('LOGLEVEL', 'DEBUG')
# Environment flags arrive as strings; parse them so values like "false" or "0" disable the flag.
LOG_HUMAN_READABLE = os.getenv('LOG_HUMAN_READABLE', '').lower() in ('1', 'true', 'yes')
LOG_FILE = os.getenv('LOG_FILE', '').lower() in ('1', 'true', 'yes')
LOG_FILE_NAME = str(os.getenv("LOG_FILE_NAME", "app.log"))
class AppLogger:
def __init__(self, logger_name):
global LOG_FILE_NAME
self.logger = logging.getLogger(logger_name)
self.logger.setLevel(LOGLEVEL)
logger_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if not LOG_HUMAN_READABLE:
logger_formatter = logstash.LogstashFormatterVersion1()
LOG_FILE_NAME = "app.json"
console_logger = logging.StreamHandler()
console_logger.setFormatter(logger_formatter)
self.logger.addHandler(console_logger)
if LOG_FILE:
if not os.path.exists("../logs"):
os.mkdir("../logs")
file_logger = logging.FileHandler(f'../logs/{LOG_FILE_NAME}')
file_logger.setFormatter(logger_formatter)
self.logger.addHandler(file_logger)
def info(self, msg):
self.logger.info(msg)
def debug(self, msg):
self.logger.debug(msg)
def error(self, msg, ex=None):
if ex is not None:
self.logger.exception(msg, exc_info=ex)
else:
self.logger.error(msg)
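# Illustrative usage sketch:
#   log = AppLogger(__name__)
#   log.info("service started")
#   log.error("request failed", ex=exc)  # includes the traceback when ex is given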
|
[] |
[] |
[
"LOG_FILE",
"LOG_HUMAN_READABLE",
"LOGLEVEL",
"LOG_FILE_NAME"
] |
[]
|
["LOG_FILE", "LOG_HUMAN_READABLE", "LOGLEVEL", "LOG_FILE_NAME"]
|
python
| 4 | 0 | |
base_directory_test.go
|
// (c) 2014 John R. Lenton. See LICENSE.
package xdg
import (
. "launchpad.net/gocheck"
"os"
"path/filepath"
"strings"
"testing"
)
func TestXDGd(t *testing.T) { TestingT(t) }
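// xdgdSuite drives an XDGDir built from a "home" environment variable and default
// paired with a "dirs" environment variable and default, mirroring pairs such as
// XDG_DATA_HOME / XDG_DATA_DIRS (illustrative; the tests use fake names below).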
type xdgdSuite struct {
home string
env1 string
val1 string
env2 string
val2 string
dir *XDGDir
}
var _ = Suite(&xdgdSuite{})
func (s *xdgdSuite) SetUpTest(c *C) {
s.home = os.Getenv("HOME")
s.env1 = "go_xdg_one"
s.env2 = "go_xdg_two"
s.val1 = "something"
s.val2 = "one:two:three"
s.dir = &XDGDir{s.env1, s.val1, s.env2, s.val2}
}
func (s *xdgdSuite) TestHomePrefersEnviron(c *C) {
err := os.Setenv(s.env1, "algo")
c.Assert(err, IsNil)
defer os.Setenv(s.env1, "")
h := s.dir.Home()
c.Check(h, Equals, "algo")
}
func (s *xdgdSuite) TestHomeUsesDefault(c *C) {
h := s.dir.Home()
c.Check(h, Matches, s.home+".*"+s.val1)
}
func (s *xdgdSuite) TestDirsPrefersEnviron(c *C) {
err := os.Setenv(s.env1, "cero")
c.Assert(err, IsNil)
defer os.Setenv(s.env1, "")
err = os.Setenv(s.env2, "uno:dos")
c.Assert(err, IsNil)
defer os.Setenv(s.env2, "")
hs := s.dir.Dirs()
c.Check(hs, DeepEquals, []string{"cero", "uno", "dos"})
}
func (s *xdgdSuite) TestDirsSkipsEmpty(c *C) {
err := os.Setenv(s.env2, "::")
c.Assert(err, IsNil)
defer os.Setenv(s.env2, "")
hs := s.dir.Dirs()
c.Check(hs, HasLen, 1)
}
func (s *xdgdSuite) TestDirsUsesDefault(c *C) {
hs := s.dir.Dirs()
c.Assert(hs, HasLen, 4)
c.Check(hs[1:], DeepEquals, strings.Split(s.val2, ":"))
c.Check(hs[0], Matches, s.home+".*"+s.val1)
}
// now repeat all the tests, but without the HOME environ.
type xdgdNoHomeSuite struct {
xdgdSuite
}
var _ = Suite(&xdgdNoHomeSuite{})
func (s *xdgdNoHomeSuite) SetUpTest(c *C) {
s.xdgdSuite.SetUpTest(c)
os.Setenv("HOME", "")
}
func (s *xdgdNoHomeSuite) TearDownTest(c *C) {
os.Setenv("HOME", s.home)
}
// and for these tests, an entirely fake HOME
type xdgdFHSuite struct {
xdgdSuite
real_home string
}
var _ = Suite(&xdgdFHSuite{})
func (s *xdgdFHSuite) SetUpTest(c *C) {
s.real_home = os.Getenv("HOME")
home := c.MkDir()
os.Setenv("HOME", home)
s.xdgdSuite.SetUpTest(c)
s.val2 = c.MkDir() + ":" + c.MkDir() + ":" + c.MkDir()
s.dir = &XDGDir{s.env1, s.val1, s.env2, s.val2}
}
func (s *xdgdFHSuite) TearDownTest(c *C) {
os.Setenv("HOME", s.real_home)
}
func (s *xdgdFHSuite) TestFind(c *C) {
vs := strings.Split(s.val2, ":")
res1 := "stuff"
exp1 := filepath.Join(s.home, s.val1, res1)
res2 := "things/that"
exp2 := filepath.Join(vs[1], res2)
res3 := "more"
exp3 := filepath.Join(vs[2], res3)
for _, d := range []string{exp1, exp2, exp3} {
err := os.MkdirAll(d, 0700)
c.Assert(err, IsNil, Commentf(d))
}
for _, it := range []struct {
res string
exp string
}{{res1, exp1}, {res2, exp2}, {res3, exp3}} {
rv, err := s.dir.Find(it.res)
c.Assert(err, IsNil)
c.Check(rv, Equals, it.exp)
}
_, err := s.dir.Find("missing")
c.Check(err, NotNil)
}
func (s *xdgdFHSuite) TestEnsureFirst(c *C) {
// creates it if missing
rv1, err := s.dir.Ensure("missing/file")
c.Assert(err, IsNil)
_, err = os.Stat(rv1)
c.Check(err, IsNil)
c.Check(rv1, Matches, s.home+".*"+"missing/file")
// just gets it if existing
rv2, err := s.dir.Ensure("missing/file")
c.Assert(err, IsNil)
c.Check(rv2, Equals, rv1)
}
func (s *xdgdFHSuite) TestEnsureFirstFailures(c *C) {
_, err := s.dir.Ensure(strings.Repeat("*", 1<<9) + "/" + strings.Repeat("*", 1<<9))
c.Assert(err, NotNil)
}
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
luaj-core/src/main/java/org/luaj/vm2/lib/OsLib.java
|
/*******************************************************************************
* Copyright (c) 2009 Luaj.org. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
******************************************************************************/
package org.luaj.vm2.lib;
import java.io.IOException;
import java.time.format.TextStyle;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import org.luaj.vm2.Buffer;
import org.luaj.vm2.Globals;
import org.luaj.vm2.LuaTable;
import org.luaj.vm2.LuaValue;
import org.luaj.vm2.Varargs;
/**
* Subclass of {@link LibFunction} which implements the standard lua {@code os}
* library.
* <p>
* It is a usable base with simplified stub functions for library functions that
* cannot be implemented uniformly on Jse and Jme.
* <p>
* This can be installed as-is on either platform, or extended and refined to be
* used in a complete Jse implementation.
* <p>
* Because the nature of the {@code os} library is to encapsulate os-specific
* features, the behavior of these functions varies considerably from their
* counterparts in the C platform.
* <p>
* The following functions have limited implementations of features that are not
* supported well on Jme:
* <ul>
* <li>{@code execute()}</li>
* <li>{@code remove()}</li>
* <li>{@code rename()}</li>
* <li>{@code tmpname()}</li>
* </ul>
* <p>
* Typically, this library is included as part of a call to either
* {@link org.luaj.vm2.lib.jse.JsePlatform#standardGlobals()} or
* {@link org.luaj.vm2.lib.jme.JmePlatform#standardGlobals()}
*
* <pre>
* {
* @code
* Globals globals = JsePlatform.standardGlobals();
* System.out.println(globals.get("os").get("time").call());
* }
* </pre>
*
* In this example the platform-specific {@link org.luaj.vm2.lib.jse.JseOsLib}
* library will be loaded, which will include the base functionality provided by
* this class.
* <p>
* To instantiate and use it directly, link it into your globals table via
* {@link LuaValue#load(LuaValue)} using code such as:
*
* <pre>
* {
* @code
* Globals globals = new Globals();
* globals.load(new JseBaseLib());
* globals.load(new PackageLib());
* globals.load(new OsLib());
* System.out.println(globals.get("os").get("time").call());
* }
* </pre>
* <p>
*
* @see LibFunction
* @see org.luaj.vm2.lib.jse.JseOsLib
* @see org.luaj.vm2.lib.jse.JsePlatform
* @see org.luaj.vm2.lib.jme.JmePlatform
* @see <a href=
* "http://www.lua.org/manual/5.1/manual.html#5.8">http://www.lua.org/manual/5.1/manual.html#5.8</a>
*/
public class OsLib extends TwoArgFunction {
public static final String TMP_PREFIX = ".luaj";
public static final String TMP_SUFFIX = "tmp";
private static final int CLOCK = 0;
private static final int DATE = 1;
private static final int DIFFTIME = 2;
private static final int EXECUTE = 3;
private static final int EXIT = 4;
private static final int GETENV = 5;
private static final int REMOVE = 6;
private static final int RENAME = 7;
private static final int SETLOCALE = 8;
private static final int TIME = 9;
private static final int TMPNAME = 10;
private static final String[] NAMES = { "clock", "date", "difftime", "execute", "exit", "getenv", "remove",
"rename", "setlocale", "time", "tmpname", };
private static final long t0 = System.currentTimeMillis();
private static long tmpnames = t0;
protected Globals globals;
/**
* Create an OsLib instance.
*/
public OsLib() {
}
/**
* Perform one-time initialization on the library by creating a table
* containing the library functions, adding that table to the supplied
* environment, adding the table to package.loaded, and returning table as
* the return value.
*
* @param modname the module name supplied if this is loaded via 'require'.
* @param env the environment to load into, typically a Globals
* instance.
*/
@Override
public LuaValue call(LuaValue modname, LuaValue env) {
globals = env.checkglobals();
LuaTable os = new LuaTable();
for (int i = 0; i < NAMES.length; ++i)
os.set(NAMES[i], new OsLibFunc(i, NAMES[i]));
env.set("os", os);
if (!env.get("package").isnil())
env.get("package").get("loaded").set("os", os);
return os;
}
class OsLibFunc extends VarArgFunction {
public OsLibFunc(int opcode, String name) {
this.opcode = opcode;
this.name = name;
}
@Override
public Varargs invoke(Varargs args) {
try {
switch (opcode) {
case CLOCK:
return valueOf(clock());
case DATE: {
String s = args.optjstring(1, "%c");
long t = args.isnumber(2)? args.tolong(2): time(null);
if (s.equals("*t")) {
Calendar d = Calendar.getInstance();
d.setTime(new Date(t*1000));
LuaTable tbl = LuaValue.tableOf();
tbl.set("year", LuaValue.valueOf(d.get(Calendar.YEAR)));
tbl.set("month", LuaValue.valueOf(d.get(Calendar.MONTH)+1));
tbl.set("day", LuaValue.valueOf(d.get(Calendar.DAY_OF_MONTH)));
tbl.set("hour", LuaValue.valueOf(d.get(Calendar.HOUR_OF_DAY)));
tbl.set("min", LuaValue.valueOf(d.get(Calendar.MINUTE)));
tbl.set("sec", LuaValue.valueOf(d.get(Calendar.SECOND)));
tbl.set("wday", LuaValue.valueOf(d.get(Calendar.DAY_OF_WEEK)));
tbl.set("yday", LuaValue.valueOf(d.get(0x6))); // Day of year
tbl.set("isdst", LuaValue.valueOf(isDaylightSavingsTime(d)));
return tbl;
}
return valueOf(date(s, t == -1? time(null): t));
}
case DIFFTIME:
return valueOf(difftime(args.checkdouble(1), args.checkdouble(2)));
case EXECUTE:
return execute(args.optjstring(1, null));
case EXIT:
exit(args.optint(1, 0));
return NONE;
case GETENV: {
final String val = getenv(args.checkjstring(1));
return val != null? valueOf(val): NIL;
}
case REMOVE:
remove(args.checkjstring(1));
return LuaValue.TRUE;
case RENAME:
rename(args.checkjstring(1), args.checkjstring(2));
return LuaValue.TRUE;
case SETLOCALE: {
String s = setlocale(args.optjstring(1, null), args.optjstring(2, "all"));
return s != null? valueOf(s): NIL;
}
case TIME:
return valueOf(time(args.opttable(1, null)));
case TMPNAME:
return valueOf(tmpname());
}
return NONE;
} catch (IOException e) {
return varargsOf(NIL, valueOf(e.getMessage()));
}
}
}
/**
* @return an approximation of the amount in seconds of CPU time used by the
* program. For luaj this simply returns the elapsed time since the
* OsLib class was loaded.
*/
protected double clock() {
return (System.currentTimeMillis()-t0)/1000.;
}
/**
* Returns the number of seconds from time t1 to time t2. In POSIX, Windows,
* and some other systems, this value is exactly t2-t1.
*
* @param t2
* @param t1
* @return difference in time values, in seconds
*/
protected double difftime(double t2, double t1) {
return t2-t1;
}
/**
* If the time argument is present, this is the time to be formatted (see
* the os.time function for a description of this value). Otherwise, date
* formats the current time.
*
* Date returns the date as a string, formatted according to the same rules
* as ANSI C strftime, but without support for %g, %G, or %V.
*
* When called without arguments, date returns a reasonable date and time
* representation that depends on the host system and on the current locale
* (that is, os.date() is equivalent to os.date("%c")).
*
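* Illustrative examples of this implementation's behavior: date("%Y-%m-%d %H:%M:%S", t)
* renders the local date and time for epoch second t, and a leading "!" in the
* format string renders the same fields adjusted to UTC.
*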
* @param format
* @param timeInSec time since epoch, or -1 if not supplied
* @return a LString or a LTable containing date and time, formatted
* according to the given string format.
*/
private static String date(String format, long timeInSec) {
Calendar d = Calendar.getInstance();
d.setTime(new Date(timeInSec*1000));
if (format.startsWith("!")) {
timeInSec -= timeZoneOffset(d);
d.setTime(new Date(timeInSec*1000));
format = format.substring(1);
}
byte[] fmt = format.getBytes();
final int n = fmt.length;
Buffer result = new Buffer(n);
for (int i = 0; i < n; i++) {
byte c = fmt[i];
switch (c) {
case '\n':
result.append("\n");
break;
case '%':
if (++i >= n)
break;
String conv = Character.toString((char) fmt[i]);
if (CONVERTERS.containsKey(conv)) {
result.append(CONVERTERS.get(conv).convert(d));
} else {
LuaValue.argerror(1, "invalid conversion specifier '%" + conv + "'");
}
break;
default:
result.append(c);
break;
}
}
return result.tojstring();
}
private static final String[] WeekdayNameAbbrev = { "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
private static final String[] WeekdayName = { "Sunday", "Monday", "Tuesday", "Wednesday", "Thursday",
"Friday", "Saturday" };
private static final String[] MonthNameAbbrev = { "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep",
"Oct", "Nov", "Dec" };
private static final String[] MonthName = { "January", "February", "March", "April", "May", "June", "July",
"August", "September", "October", "November", "December" };
private static interface DateConversion {
public String convert(Calendar d);
}
private static final Map<String, DateConversion> CONVERTERS = new HashMap<>();
static {
CONVERTERS.put("%", d -> "%");
CONVERTERS.put("a", d -> WeekdayNameAbbrev[d.get(Calendar.DAY_OF_WEEK)-1]);
CONVERTERS.put("A", d -> WeekdayName[d.get(Calendar.DAY_OF_WEEK)-1]);
CONVERTERS.put("b", d -> MonthNameAbbrev[d.get(Calendar.MONTH)]);
CONVERTERS.put("B", d -> MonthName[d.get(Calendar.MONTH)]);
CONVERTERS.put("c", d -> date("%a %b %e %H:%M:%S %Y", d.getTimeInMillis()/1000L));
CONVERTERS.put("C", d -> String.valueOf(d.get(Calendar.YEAR)).substring(0, 2));
CONVERTERS.put("d", d -> String.valueOf(100+d.get(Calendar.DAY_OF_MONTH)).substring(1));
CONVERTERS.put("D", d -> date("%m/%d/%y", d.getTimeInMillis()/1000L));
CONVERTERS.put("e", d -> String.format("%2d", d.get(Calendar.DAY_OF_MONTH)));
CONVERTERS.put("F", d -> date("%Y-%m-%d", d.getTimeInMillis()/1000L));
CONVERTERS.put("g", d -> String.valueOf(d.get(Calendar.YEAR)).substring(2));
CONVERTERS.put("G", d -> String.valueOf(d.get(Calendar.YEAR)));
CONVERTERS.put("h", d -> MonthNameAbbrev[d.get(Calendar.MONTH)]);
CONVERTERS.put("H", d -> String.valueOf(100+d.get(Calendar.HOUR_OF_DAY)).substring(1));
CONVERTERS.put("I", d -> String.valueOf(100+d.get(Calendar.HOUR_OF_DAY)%12).substring(1));
// day of year
CONVERTERS.put("j", d -> {
Calendar y0 = beginningOfYear(d);
int dayOfYear = (int) ((d.getTimeInMillis()-y0.getTimeInMillis())/(24*3600L*1000L));
return String.valueOf(1001+dayOfYear).substring(1);
});
CONVERTERS.put("m", d -> String.valueOf(101+d.get(Calendar.MONTH)).substring(1));
CONVERTERS.put("M", d -> String.valueOf(100+d.get(Calendar.MINUTE)).substring(1));
CONVERTERS.put("n", d -> "\n");
CONVERTERS.put("p", d -> d.get(Calendar.HOUR_OF_DAY) < 12? "AM": "PM");
CONVERTERS.put("r", d -> date("%I:%M:%S %p", d.getTimeInMillis()/1000L));
CONVERTERS.put("R", d -> date("%H:%M", d.getTimeInMillis()/1000L));
CONVERTERS.put("S", d -> String.valueOf(100+d.get(Calendar.SECOND)).substring(1));
CONVERTERS.put("t", d -> "\t");
CONVERTERS.put("T", d -> date("%H:%M:%S", d.getTimeInMillis()/1000L));
CONVERTERS.put("u", d -> String.valueOf((d.get(Calendar.DAY_OF_WEEK)+6)%7));
CONVERTERS.put("U", d -> String.valueOf(weekNumber(d, 0)));
CONVERTERS.put("V", d -> String.valueOf(weekNumber(d, 0)));
CONVERTERS.put("w", d -> String.valueOf((d.get(Calendar.DAY_OF_WEEK)+6)%7));
CONVERTERS.put("W", d -> String.valueOf(weekNumber(d, 1)));
CONVERTERS.put("x", d -> date("%m/%d/%y", d.getTimeInMillis()/1000L));
CONVERTERS.put("X", d -> date("%H:%M:%S", d.getTimeInMillis()/1000L));
CONVERTERS.put("y", d -> String.valueOf(d.get(Calendar.YEAR)).substring(2));
CONVERTERS.put("Y", d -> String.valueOf(d.get(Calendar.YEAR)));
CONVERTERS.put("z", d -> {
final int tzo = timeZoneOffset(d)/60;
final int a = Math.abs(tzo);
final String h = String.valueOf(100+a/60).substring(1);
final String m = String.valueOf(100+a%60).substring(1);
return (tzo >= 0? "+": "-")+h+m;
});
CONVERTERS.put("Z", d -> d.getTimeZone().toZoneId().getDisplayName(TextStyle.SHORT, Locale.getDefault()));
}
private static Calendar beginningOfYear(Calendar d) {
Calendar y0 = Calendar.getInstance();
y0.setTime(d.getTime());
y0.set(Calendar.MONTH, 0);
y0.set(Calendar.DAY_OF_MONTH, 1);
y0.set(Calendar.HOUR_OF_DAY, 0);
y0.set(Calendar.MINUTE, 0);
y0.set(Calendar.SECOND, 0);
y0.set(Calendar.MILLISECOND, 0);
return y0;
}
private static int weekNumber(Calendar d, int startDay) {
Calendar y0 = beginningOfYear(d);
y0.set(Calendar.DAY_OF_MONTH, 1+(startDay+8-y0.get(Calendar.DAY_OF_WEEK))%7);
if (y0.after(d)) {
y0.set(Calendar.YEAR, y0.get(Calendar.YEAR)-1);
y0.set(Calendar.DAY_OF_MONTH, 1+(startDay+8-y0.get(Calendar.DAY_OF_WEEK))%7);
}
long dt = d.getTime().getTime()-y0.getTime().getTime();
return 1+(int) (dt/(7L*24L*3600L*1000L));
}
private static int timeZoneOffset(Calendar d) {
int localStandardTimeMillis = (d.get(Calendar.HOUR_OF_DAY)*3600+d.get(Calendar.MINUTE)*60+d.get(Calendar.SECOND))
*1000;
return d.getTimeZone().getOffset(1, d.get(Calendar.YEAR), d.get(Calendar.MONTH), d.get(Calendar.DAY_OF_MONTH),
d.get(Calendar.DAY_OF_WEEK), localStandardTimeMillis)/1000;
}
private boolean isDaylightSavingsTime(Calendar d) {
return timeZoneOffset(d) != d.getTimeZone().getRawOffset()/1000;
}
/**
* This function is equivalent to the C function system. It passes command
* to be executed by an operating system shell. It returns a status code,
* which is system-dependent. If command is absent, then it returns nonzero
* if a shell is available and zero otherwise.
*
* @param command command to pass to the system
*/
protected Varargs execute(String command) {
return varargsOf(NIL, valueOf("exit"), ONE);
}
/**
* Calls the C function exit, with an optional code, to terminate the host
* program.
*
* @param code
*/
protected void exit(int code) {
System.exit(code);
}
/**
* Returns the value of the process environment variable varname, or the
* System property value for varname, or null if the variable is not defined
* in either environment.
*
* The default implementation, which is used by the JmePlatform, only
* queries System.getProperty().
*
* The JsePlatform overrides this behavior and returns the environment
* variable value using System.getenv() if it exists, or the System property
* value if it does not.
*
* A SecurityException may be thrown if access is not allowed for 'varname'.
*
* @param varname
* @return String value, or null if not defined
*/
protected String getenv(String varname) {
return System.getProperty(varname);
}
/**
* Deletes the file or directory with the given name. Directories must be
* empty to be removed. If this function fails, it throws an IOException.
*
* @param filename
* @throws IOException if it fails
*/
protected void remove(String filename) throws IOException {
throw new IOException("not implemented");
}
/**
* Renames the file or directory named oldname to newname. If this function
* fails, it throws an IOException.
*
* @param oldname old file name
* @param newname new file name
* @throws IOException if it fails
*/
protected void rename(String oldname, String newname) throws IOException {
throw new IOException("not implemented");
}
/**
* Sets the current locale of the program. locale is a string specifying a
* locale; category is an optional string describing which category to
* change: "all", "collate", "ctype", "monetary", "numeric", or "time"; the
* default category is "all".
*
* If locale is the empty string, the current locale is set to an
* implementation-defined native locale. If locale is the string "C", the
* current locale is set to the standard C locale.
*
* When called with null as the first argument, this function only returns
* the name of the current locale for the given category.
*
* @param locale
* @param category
* @return the name of the new locale, or null if the request cannot be
* honored.
*/
protected String setlocale(String locale, String category) {
return "C";
}
/**
* Returns the current time when called without arguments, or a time
* representing the date and time specified by the given table. This table
* must have fields year, month, and day, and may have fields hour, min,
* sec, and isdst (for a description of these fields, see the os.date
* function).
*
* @param table
* @return long value for the time
*/
protected long time(LuaTable table) {
java.util.Date d;
if (table == null) {
d = new java.util.Date();
} else {
Calendar c = Calendar.getInstance();
c.set(Calendar.YEAR, table.get("year").checkint());
c.set(Calendar.MONTH, table.get("month").checkint()-1);
c.set(Calendar.DAY_OF_MONTH, table.get("day").checkint());
c.set(Calendar.HOUR_OF_DAY, table.get("hour").optint(12));
c.set(Calendar.MINUTE, table.get("min").optint(0));
c.set(Calendar.SECOND, table.get("sec").optint(0));
c.set(Calendar.MILLISECOND, 0);
d = c.getTime();
}
return d.getTime()/1000L;
}
/**
* Returns a string with a file name that can be used for a temporary file.
* The file must be explicitly opened before its use and explicitly removed
* when no longer needed.
*
* On some systems (POSIX), this function also creates a file with that
* name, to avoid security risks. (Someone else might create the file with
* wrong permissions in the time between getting the name and creating the
* file.) You still have to open the file to use it and to remove it (even
* if you do not use it).
*
* @return String filename to use
*/
protected String tmpname() {
synchronized (OsLib.class) {
return TMP_PREFIX + (tmpnames++) + TMP_SUFFIX;
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
figure/s2/abcd.py
|
import os
import glob
import numpy as np
import nibabel as nb
import scipy.io as sio
from scipy.stats import pearsonr
PH_SERVER_ROOT = os.environ.get('PH_SERVER_ROOT')
def zscore(data, axis):
data -= data.mean(axis=axis, keepdims=True)
data /= data.std(axis=axis, keepdims=True)
return np.nan_to_num(data, copy=False)
def correlation(matrix1, matrix2):
d1 = matrix1.shape[-1]
d2 = matrix2.shape[-1]
assert d1 == d2
assert matrix1.ndim <= 2
assert matrix2.ndim <= 2
matrix1 = zscore(matrix1.astype(float), matrix1.ndim - 1) / np.sqrt(d1)
matrix2 = zscore(matrix2.astype(float), matrix2.ndim - 1) / np.sqrt(d2)
if matrix1.ndim >= matrix2.ndim:
return np.dot(matrix1, matrix2.T)
else:
return np.dot(matrix2, matrix1.T)
def get_motion_params(file, pipeline = 'cpac'):
data = np.genfromtxt(file).T
if pipeline == 'abcd':
data = np.vstack((data[3:,:],data[:3,:]))
data = np.vstack((data[2,:]*180/np.pi,
data[0,:]*180/np.pi,
-data[1,:]*180/np.pi,
data[5,:],
data[3,:],
-data[4,:]))
else:
data = np.vstack((data[2,:]*180/np.pi,
data[0,:]*180/np.pi,
-data[1,:]*180/np.pi,
data[5,:],
data[3,:],
-data[4,:]))
return data
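# Descriptive note: genfromtxt(...).T yields one row per motion parameter. The
# 'abcd' branch first moves the last three rows ahead of the first three so that
# both branches can treat rows 0-2 as rotations (converted from radians to degrees,
# reordered and sign-flipped) and rows 3-5 as translations; the underlying column
# convention of each pipeline's .par file is assumed here, not verified.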
path1 = f'{os.environ.get("PH_SERVER_WORKING_ROOT")}/CPAC_XCP/ABCD/preprocessed/data'
path2 = f'{os.environ.get("DATA_INPUT_DIR")}/cpac_abcd'
sub_list = list(range(25427,25457))
sub_list.remove(25430)
sub_list.remove(25448)
var_list = ['anat mask', 'CSF', 'GM', 'WM', 'func mask', 'motion',
'anat-mni abcd', 'anat-mni cpac', 'func-mni abcd', 'func-mni cpac',
'func-t1 abcd', 'func-t1 cpac', 'anat-mni', 'func-mni', 'func-t1']
if 'motion' in var_list:
motion_index = var_list.index('motion')
corrs = np.zeros((len(sub_list), len(var_list)+5))
for num_sub, sub in enumerate(sub_list):
sub = '00'+str(sub)
path_list1 = [path1+'/sub-'+sub+'/ses-1/files/T1w/brainmask_fs.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_0.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_1.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_2.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/task-rest01/brainmask_fs.2.0.nii.gz',
path1+'/sub-'+sub+'/ses-1/files/task-rest01/MotionCorrection/task-rest01_mc.par',
# path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/DCANBOLDProc_v4.0.0/FD.mat',
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/T1w_restore_brain.nii.gz', # ABCD anat to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-template_desc-brain_T1w.nii.gz', # C-PAC anat to standard
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/task-rest01_mean.nii.gz', # ABCD func to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/func/sub-'+sub+'a_ses-1_task-rest_run-1_space-template_desc-mean_bold.nii.gz', # C-PAC func to standard
path1+'/sub-'+sub+'/ses-1/files/task-rest01/Scout2T1w_masked.nii.gz', # ABCD func in T1 space
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/func_to_anat_FLIRT_*/_*/linear_func_to_anat/*flirt.nii.gz')[0], # C-PAC func in T1 space
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/T1w_restore_brain.nii.gz', # ABCD anat to standard
path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/task-rest01_mean.nii.gz', # ABCD func to standard
path1+'/sub-'+sub+'/ses-1/files/task-rest01/Scout2T1w_masked.nii.gz'] # ABCD func in T1 space
path_list2 = [path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-T1w_desc-brain_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-CSF_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-GM_mask.nii.gz',
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-WM_mask.nii.gz',
path2+'/working/cpac_sub-'+sub+'a_ses-1/resample_anat_brain_mask_in_standard_125/wmparc_maths_fill_holes_maths_warp_warp_warp.nii.gz',
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/_*/*mcflirt_122/*par')[0],
# glob.glob(path2+'/sub-'+sub+'/output/*/sub-'+sub+ses+'_ses-1/frame_wise_displacement_power/*/FD.1D')[0], # TODO find FD, only max/rel disp
# Note: this template is from DCAN-HCP GitHub: https://github.com/DCAN-Labs/DCAN-HCP/tree/master/global/templates/MNI152_T1_1mm_brain.nii.gz
f'{PH_SERVER_ROOT}/freesurfer/DCAN-HCP/global/templates/MNI152_T1_1mm_brain.nii.gz', # ABCD anat template
f'{PH_SERVER_ROOT}/freesurfer/DCAN-HCP/global/templates/MNI152_T1_1mm_brain.nii.gz', # C-PAC anat template
# Note: this template is from FSL standard template distribution
'/usr/local/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz', # ABCD func template on Lisa
'/usr/local/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz', # C-PAC func template on Lisa
# '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz', # ABCD func template on Ned
# '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz', # C-PAC func template on Ned
path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_acpc_dc_restore_brain.nii.gz', # ABCD T1
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/brain_extraction_*/*.nii.gz')[0], # C-PAC T1
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-template_desc-brain_T1w.nii.gz', # C-PAC anat to standard
path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/func/sub-'+sub+'a_ses-1_task-rest_run-1_space-template_desc-mean_bold.nii.gz', # C-PAC func to standard
glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/func_to_anat_FLIRT_*/_*/linear_func_to_anat/*flirt.nii.gz')[0]] # C-PAC func in T1 space
for num_var, var in enumerate(var_list):
file1 = path_list1[num_var]
file2 = path_list2[num_var]
if '.nii.gz' in file1:
img1 = nb.load(file1)
data1 = img1.get_fdata()
# data1 = img1.get_data()
img2 = nb.load(file2)
data2 = img2.get_fdata()
# data2 = img2.get_data()
elif '.par' in file1:
data1 = get_motion_params(file1, 'abcd')
data2 = get_motion_params(file2)
elif '.mat' in file1:
data1 = sio.loadmat(file1)['FD']
data2 = np.expand_dims(np.loadtxt(file2)[1:], axis=1)
if var == 'motion':
motion_params = correlation(data1, data2)
corr = motion_params.diagonal()
elif isinstance(data1, np.ndarray) and data1.shape == data2.shape:
corr, _ = pearsonr(data1.flatten(), data2.flatten())
print(sub + ' ' + str(num_var) + ' ' + var)
print(corr)
if num_var < motion_index:
corrs[num_sub][num_var] = round(corr, 3)
elif num_var == motion_index:
corrs[num_sub][num_var:num_var+6] = corr
elif num_var > motion_index:
corrs[num_sub][num_var+5] = round(corr, 3)
print(corrs)
np.save(f'{os.environ.get("SCRIPT_DIR")}/abcd_corrs.npy', corrs)
| [] | [] | ["PH_SERVER_ROOT", "SCRIPT_DIR", "PH_SERVER_WORKING_ROOT", "DATA_INPUT_DIR"] | [] | ["PH_SERVER_ROOT", "SCRIPT_DIR", "PH_SERVER_WORKING_ROOT", "DATA_INPUT_DIR"] | python | 4 | 0 | |
prow/pod-utils/clone/clone_test.go
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clone
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"reflect"
"testing"
"time"
prowapi "k8s.io/test-infra/prow/apis/prowjobs/v1"
"github.com/google/go-cmp/cmp"
)
func TestPathForRefs(t *testing.T) {
var testCases = []struct {
name string
refs prowapi.Refs
expected string
}{
{
name: "literal override",
refs: prowapi.Refs{
PathAlias: "alias",
},
expected: "base/src/alias",
},
{
name: "default generated",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
},
expected: "base/src/github.com/org/repo",
},
}
for _, testCase := range testCases {
if actual, expected := PathForRefs("base", testCase.refs), testCase.expected; actual != expected {
t.Errorf("%s: expected path %q, got %q", testCase.name, expected, actual)
}
}
}
func TestCommandsForRefs(t *testing.T) {
fakeTimestamp := 100200300
var testCases = []struct {
name string
refs prowapi.Refs
dir, gitUserName, gitUserEmail, cookiePath string
env []string
expectedBase []runnable
expectedPull []runnable
authUser string
authToken string
}{
{
name: "simplest case, minimal refs",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "simple case, root dir",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
},
dir: "/",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/src/github.com/org/repo"}},
cloneCommand{dir: "/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "minimal refs with git user name",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
},
gitUserName: "user",
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"config", "user.name", "user"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "minimal refs with git user email",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
},
gitUserEmail: "[email protected]",
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"config", "user.email", "[email protected]"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "minimal refs with http cookie file (skip submodules)",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
SkipSubmodules: true,
},
cookiePath: "/cookie.txt",
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"config", "http.cookiefile", "/cookie.txt"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
},
{
name: "minimal refs with http cookie file",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
},
cookiePath: "/cookie.txt",
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "minimal refs with no submodules",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
SkipSubmodules: true,
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: nil,
},
{
name: "minimal refs with oauth token",
authToken: "12345678",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://12345678:[email protected]/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://12345678:[email protected]/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "minimal refs with GitHub App user and token",
authUser: "x-access-token",
authToken: "xxxxx",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://x-access-token:[email protected]/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://x-access-token:[email protected]/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with clone URI override",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
CloneURI: "internet.com",
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "internet.com", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "internet.com", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with clone URI override and oauth token specified",
authToken: "12345678",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
CloneURI: "https://internet.com",
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://12345678:[email protected]", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://12345678:[email protected]", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with path alias",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
PathAlias: "my/favorite/dir",
RepoLink: "https://github.com/org/repo",
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/my/favorite/dir"}},
cloneCommand{dir: "/go/src/my/favorite/dir", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/my/favorite/dir", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/my/favorite/dir", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/my/favorite/dir", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/my/favorite/dir", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/my/favorite/dir", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/my/favorite/dir", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with specific base sha",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
BaseSHA: "abcdef",
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "abcdef"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "abcdef"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "abcdef"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with simple pr ref",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
Pulls: []prowapi.Pull{
{Number: 1},
},
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "pull/1/head"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"merge", "--no-ff", "FETCH_HEAD"}, env: gitTimestampEnvs(fakeTimestamp + 1)},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with simple pr ref, sha takes precedence over virtual pull ref",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
Pulls: []prowapi.Pull{
{Number: 1, SHA: "pull-1-sha"},
},
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "pull-1-sha"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"merge", "--no-ff", "pull-1-sha"}, env: gitTimestampEnvs(fakeTimestamp + 1)},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with pr ref override",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
Pulls: []prowapi.Pull{
{Number: 1, Ref: "pull-me"},
},
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "pull-me"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"merge", "--no-ff", "FETCH_HEAD"}, env: gitTimestampEnvs(fakeTimestamp + 1)},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with pr ref with specific sha",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
Pulls: []prowapi.Pull{
{Number: 1, SHA: "abcdef"},
},
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "abcdef"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"merge", "--no-ff", "abcdef"}, env: gitTimestampEnvs(fakeTimestamp + 1)},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with multiple simple pr refs",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
Pulls: []prowapi.Pull{
{Number: 1},
{Number: 2},
},
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.com/org/repo"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "master"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "FETCH_HEAD"}},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "pull/1/head"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"merge", "--no-ff", "FETCH_HEAD"}, env: gitTimestampEnvs(fakeTimestamp + 1)},
retryCommand{
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"fetch", "https://github.com/org/repo.git", "pull/2/head"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"merge", "--no-ff", "FETCH_HEAD"}, env: gitTimestampEnvs(fakeTimestamp + 2)},
cloneCommand{dir: "/go/src/github.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "refs with repo link",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
BaseSHA: "abcdef",
RepoLink: "https://github.enterprise.com/org/repo",
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.enterprise.com/org/repo"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"fetch", "https://github.enterprise.com/org/repo.git", "--tags", "--prune"}},
fetchRetries,
},
retryCommand{
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"fetch", "https://github.enterprise.com/org/repo.git", "abcdef"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"checkout", "abcdef"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "abcdef"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "support fetching repo with multiple heads",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
BaseSHA: "abcdef",
RepoLink: "https://github.enterprise.com/org/repo",
SkipFetchHead: true, // no single HEAD
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.enterprise.com/org/repo"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"fetch", "https://github.enterprise.com/org/repo.git", "abcdef"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"checkout", "abcdef"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "abcdef"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
{
name: "support shallow fetching repo with multiple heads",
refs: prowapi.Refs{
Org: "org",
Repo: "repo",
BaseRef: "master",
BaseSHA: "abcdef",
RepoLink: "https://github.enterprise.com/org/repo",
SkipFetchHead: true, // no single HEAD
CloneDepth: 2,
},
dir: "/go",
expectedBase: []runnable{
cloneCommand{dir: "/", command: "mkdir", args: []string{"-p", "/go/src/github.enterprise.com/org/repo"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"init"}},
retryCommand{
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"fetch", "--depth", "2", "https://github.enterprise.com/org/repo.git", "abcdef"}},
fetchRetries,
},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"checkout", "abcdef"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"branch", "--force", "master", "abcdef"}},
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"checkout", "master"}},
},
expectedPull: []runnable{
cloneCommand{dir: "/go/src/github.enterprise.com/org/repo", command: "git", args: []string{"submodule", "update", "--init", "--recursive"}},
},
},
}
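// cloneCommand and retryCommand have unexported fields, so cmp must be told
// explicitly that it is allowed to compare them.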
allow := cmp.AllowUnexported(retryCommand{}, cloneCommand{})
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
g := gitCtxForRefs(testCase.refs, testCase.dir, testCase.env, testCase.authUser, testCase.authToken)
actualBase := g.commandsForBaseRef(testCase.refs, testCase.gitUserName, testCase.gitUserEmail, testCase.cookiePath)
if diff := cmp.Diff(actualBase, testCase.expectedBase, allow); diff != "" {
t.Errorf("commandsForBaseRef() got unexpected diff (-got, +want):\n%s", diff)
}
actualPull := g.commandsForPullRefs(testCase.refs, fakeTimestamp)
if diff := cmp.Diff(actualPull, testCase.expectedPull, allow); diff != "" {
t.Errorf("commandsForPullRefs() got unexpected diff (-got, +want):\n%s", diff)
}
})
}
}
func TestGitHeadTimestamp(t *testing.T) {
fakeTimestamp := 987654321
fakeGitDir, err := makeFakeGitRepo(fakeTimestamp)
if err != nil {
t.Errorf("error creating fake git dir: %v", err)
}
defer func() {
if err := os.RemoveAll(fakeGitDir); err != nil {
t.Errorf("error cleaning up fake git dir: %v", err)
}
}()
var testCases = []struct {
name string
dir string
noPath bool
expected int
expectError bool
}{
{
name: "root - no git",
dir: "/",
expected: 0,
expectError: true,
},
{
name: "fake git repo",
dir: fakeGitDir,
expected: fakeTimestamp,
expectError: false,
},
{
name: "fake git repo but no git binary",
dir: fakeGitDir,
noPath: true,
expected: 0,
expectError: true,
},
}
origCwd, err := os.Getwd()
if err != nil {
t.Errorf("failed getting cwd: %v", err)
}
origPath := os.Getenv("PATH")
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
if err := os.Chdir(testCase.dir); err != nil {
t.Errorf("%s: failed to chdir to %s: %v", testCase.name, testCase.dir, err)
}
if testCase.noPath {
if err := os.Unsetenv("PATH"); err != nil {
t.Errorf("%s: failed to unset PATH: %v", testCase.name, err)
}
}
g := gitCtx{
cloneDir: testCase.dir,
}
timestamp, err := g.gitHeadTimestamp()
if timestamp != testCase.expected {
t.Errorf("%s: timestamp %d does not match expected timestamp %d", testCase.name, timestamp, testCase.expected)
}
if (err == nil && testCase.expectError) || (err != nil && !testCase.expectError) {
t.Errorf("%s: expect error is %v but received error %v", testCase.name, testCase.expectError, err)
}
if err := os.Chdir(origCwd); err != nil {
t.Errorf("%s: failed to chdir to original cwd %s: %v", testCase.name, origCwd, err)
}
if testCase.noPath {
if err := os.Setenv("PATH", origPath); err != nil {
t.Errorf("%s: failed to set PATH to original: %v", testCase.name, err)
}
}
})
}
}
// makeFakeGitRepo creates a fake git repo with a constant digest and timestamp.
func makeFakeGitRepo(fakeTimestamp int) (string, error) {
fakeGitDir, err := ioutil.TempDir("", "fakegit")
if err != nil {
return "", err
}
cmds := [][]string{
{"git", "init"},
{"git", "config", "user.email", "[email protected]"},
{"git", "config", "user.name", "test test"},
{"touch", "a_file"},
{"git", "add", "a_file"},
{"git", "commit", "-m", "adding a_file"},
}
for _, cmd := range cmds {
c := exec.Command(cmd[0], cmd[1:]...)
c.Dir = fakeGitDir
c.Env = append(os.Environ(), gitTimestampEnvs(fakeTimestamp)...)
if err := c.Run(); err != nil {
return fakeGitDir, err
}
}
return fakeGitDir, nil
}
func TestCensorToken(t *testing.T) {
testCases := []struct {
id string
token string
msg string
expected string
}{
{
id: "no token",
msg: "git fetch https://github.com/kubernetes/test-infra.git",
expected: "git fetch https://github.com/kubernetes/test-infra.git",
},
{
id: "with token",
token: "123456789",
msg: "git fetch 123456789:x-oauth-basic@https://github.com/kubernetes/test-infra.git",
expected: "git fetch CENSORED:x-oauth-basic@https://github.com/kubernetes/test-infra.git",
},
{
id: "git output with token",
token: "123456789",
msg: `
Cloning into 'test-infa'...
remote: Invalid username or password.
fatal: Authentication failed for 'https://123456789@github.com/kubernetes/test-infa/'
`,
expected: `
Cloning into 'test-infa'...
remote: Invalid username or password.
fatal: Authentication failed for 'https://CENSORED@github.com/kubernetes/test-infa/'
`,
},
}
for _, tc := range testCases {
t.Run(tc.id, func(t *testing.T) {
censoredMsg := censorToken(tc.msg, tc.token)
if !reflect.DeepEqual(censoredMsg, tc.expected) {
t.Fatalf("expected: %s got %s", tc.expected, censoredMsg)
}
})
}
}
// fakeRunner decrements calls on every run() invocation and succeeds only on
// the call that brings calls to 0 (i.e. when it is invoked with calls == 1).
type fakeRunner struct {
calls int
}
func (fr *fakeRunner) run() (string, string, error) {
fr.calls--
if fr.calls == 0 {
return "command", "output", nil
}
return "command", "output", fmt.Errorf("calls: %d", fr.calls)
}
func TestGitFetch(t *testing.T) {
const short = time.Nanosecond
command := func(calls int, retries ...time.Duration) retryCommand {
return retryCommand{
runnable: &fakeRunner{calls},
retries: retries,
}
}
cases := []struct {
name string
retryCommand
err bool
}{
{
name: "works without retries",
retryCommand: command(1),
},
{
name: "errors if first call fails without retries",
retryCommand: command(0),
err: true,
},
{
name: "works with retries (without retrying)",
retryCommand: command(1, short),
},
{
name: "works with retries (retrying)",
retryCommand: command(2, short),
},
{
name: "errors without retries if first call fails",
retryCommand: command(2),
err: true,
},
{
name: "errors with retries when all retries are consumed",
retryCommand: command(3, short),
err: true,
},
}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
_, _, err := tc.run()
switch {
case err != nil:
if !tc.err {
t.Errorf("unexpected error: %v", err)
}
case tc.err:
t.Error("failed to received expected error")
}
})
}
}
func TestCloneCommandString(t *testing.T) {
tests := []struct {
name string
cc cloneCommand
want string
}{
{
name: "empty",
cc: cloneCommand{},
want: "PWD= ",
},
{
name: "base",
cc: cloneCommand{
dir: "abc",
env: []string{"d=e", "f=g"},
command: "echo",
args: []string{"hij klm"},
},
want: "PWD=abc d=e f=g echo hij klm",
},
}
for _, tc := range tests {
tc := tc
t.Run(tc.name, func(t *testing.T) {
want, got := tc.want, tc.cc.String()
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("mismatch. want(-), got(+):\n%s", diff)
}
})
}
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 | |
cmd/server/pipeline_stopwords.go
|
// Copyright 2022 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package main
import (
"os"
"strconv"
"strings"
"github.com/moov-io/watchman/pkg/ofac"
"github.com/abadojack/whatlanggo"
"github.com/bbalet/stopwords"
"github.com/pariz/gountries"
)
const (
minConfidence = 0.50
)
var (
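// keepStopwords is read once at startup from the KEEP_STOPWORDS environment
// variable; an empty or unparsable value falls back to false.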
keepStopwords = func(raw string) bool {
if raw == "" {
raw = "false"
}
keep, _ := strconv.ParseBool(raw)
return keep
}(os.Getenv("KEEP_STOPWORDS"))
)
type stopwordsStep struct{}
func (s *stopwordsStep) apply(in *Name) error {
if in == nil {
return nil
}
switch {
case in.sdn != nil && !strings.EqualFold(in.sdn.SDNType, "individual"):
in.Processed = removeStopwords(in.Processed, detectLanguage(in.Processed, in.addrs))
case in.ssi != nil && !strings.EqualFold(in.ssi.Type, "individual"):
in.Processed = removeStopwords(in.Processed, detectLanguage(in.Processed, nil))
case in.alt != nil:
in.Processed = removeStopwords(in.Processed, detectLanguage(in.Processed, nil))
}
return nil
}
func removeStopwords(in string, lang whatlanggo.Lang) string {
if keepStopwords {
return in
}
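// stopwords.CleanString removes stop words for the given ISO 639-1 language code;
// the final false argument disables the library's HTML stripping.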
return strings.TrimSpace(stopwords.CleanString(strings.ToLower(in), lang.Iso6391(), false))
}
// detectLanguage will return a guess as to the appropriate language a given SDN's name
// is written in. The addresses must be linked to the SDN whose name is detected.
func detectLanguage(in string, addrs []*ofac.Address) whatlanggo.Lang {
info := whatlanggo.Detect(in)
if info.IsReliable() {
// Return the detected language if whatlanggo is confident enough
return info.Lang
}
if len(addrs) == 0 {
// If no addresses are associated to this text blob then fallback to English
return whatlanggo.Eng
}
// Return the primary language of the country associated with the primary address for this SDN.
//
// TODO(adam): Should we do this only if there's one address? If there are multiple should we
// fallback to English or a mixed set?
country, err := gountries.New().FindCountryByName(addrs[0].Country)
if len(country.Languages) == 0 || err != nil {
return whatlanggo.Eng
}
// If the language is spoken in the country and we're somewhat confident in the original detection
// then return that language.
if info.Confidence > minConfidence {
for key := range country.Languages {
if strings.EqualFold(key, info.Lang.Iso6393()) {
return info.Lang
}
}
}
if len(country.Languages) == 1 {
for key := range country.Languages {
return whatlanggo.CodeToLang(key)
}
}
// How should we pick the language for countries with multiple languages? A hardcoded map?
// What if we found the language whose name is closest to the country's name and returned that?
//
// Should this fallback be the mixed set that contains stop words from several popular languages
// in the various data sets?
return whatlanggo.Eng
}
| ["\"KEEP_STOPWORDS\""] | [] | ["KEEP_STOPWORDS"] | [] | ["KEEP_STOPWORDS"] | go | 1 | 0 | |
bake/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bake.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
day6/task1.py
|
import math
def read_int():
return int(input())
def normal_cdf(x, mean=0, std_dev=1) -> float:
''' Returns the normal CDF (cumulative distribution function) of x
given the mean and standard deviation.
'''
return 0.5 * (1 + math.erf((x - mean) / (std_dev * math.sqrt(2))))
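# Example: normal_cdf(0) == 0.5 for the standard normal distribution.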
max_weight = read_int()
n_boxes = read_int()
mean_weight = read_int()
std_dev = read_int()
# By the central limit theorem, the total weight of n_boxes boxes is approximately
# normal with mean n_boxes * mean_weight and standard deviation sqrt(n_boxes) * std_dev.
p = normal_cdf(max_weight, n_boxes * mean_weight, math.sqrt(n_boxes) * std_dev)
print(f"{p:.4f}")
| [] | [] | [] | [] | [] | python | null | null | null
bin/load_plate.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# plate_plans_db.py
#
# Originally created by Demitri Muna in 2004
# Rewritten by José Sánchez-Gallego on 14 Jun 2017.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import hashlib
import os
import six
import warnings
import peewee
from astropy import table
from sdssdb.peewee.operationsdb import platedb
from sdssdb.peewee.operationsdb import database
import numpy as np
import fitsio
@platedb.database.atomic()
def _load_design(design_id, definition, overwrite=True):
"""Loads a design into the DB."""
design_dbo, created = platedb.Design.get_or_create(pk=design_id)
if not created:
print('found design {0} in the DB.'.format(design_id))
if overwrite:
warnings.warn('overwriting design for design_id={0}.'.format(design_id),
UserWarning)
else:
return
else:
print('creating new Design for design_id={0}.'.format(design_id))
# definition = definition_from_id(design_id)
# Delete old values (if present; easier than syncing).
for xx in design_dbo.values:
xx.delete_instance()
for key in definition.keys():
design_value_dbo = platedb.DesignValue()
design_value_dbo.design = design_dbo
design_value_dbo.value = definition[key]
design_field_dbo, created = platedb.DesignField.get_or_create(label=key)
if created:
print('created new row in DesignField with value label={0!r}'.format(key))
design_value_dbo.field = design_field_dbo
design_value_dbo.save()
pointing_dbo, created = platedb.Pointing.get_or_create(design_pk=design_dbo.pk,
pointing_no=1)
pointing_dbo.design = design_dbo
pointing_dbo.center_ra = definition['racen']
pointing_dbo.center_dec = definition['deccen']
pointing_dbo.pointing_no = 1
pointing_dbo.save()
print("!! pointing!", pointing_dbo)
# Handle inputs (PlateInput)
# Delete any old inputs if present.
# for xx in design_dbo.inputs:
# xx.delete_instance()
# priority_list = 'priority' in definition and list(map(int, definition['priority'].split()))
# for key in definition:
# if not key.startswith('plateinput'):
# continue
# input_number = int(key.strip('plateinput'))
# priority = priority_list[input_number - 1] if priority_list else None
# filepath = definition[key]
# plate_input_dbo = platedb.PlateInput()
# plate_input_dbo.design = design_dbo
# plate_input_dbo.input_number = input_number
# plate_input_dbo.priority = priority
# plate_input_dbo.filepath = filepath
# plate_input_full_path = os.path.join(os.environ['PLATELIST_DIR'], 'inputs', filepath)
# if not os.path.exists(plate_input_full_path):
# warnings.warn('cannot find plateInput {0}. '
# 'MD5 check sum will be null.'.format(filepath), UserWarning)
# else:
# plate_input_dbo.md5_checksum = hashlib.md5(
# open(plate_input_full_path).read().encode('utf-8')).hexdigest()
# print('added plateInput file {0}'.format(filepath))
# plate_input_dbo.save()
# # Create Pointings
# no_pointings = int(definition['npointings'])
# # If the design already has pointings it must be because we are loading a
# # plate from a design already loaded. In that case we check that the number
# # of pointings loaded matches.
# if len(design_dbo.pointings) > 0 and len(design_dbo.pointings) != no_pointings:
# # If the number of pointins disagree but they do not have plate_pointings
# # associated, we can remove the pointings and start from scratch.
# no_plate_pointings = np.sum([len(pointing.plate_pointings)
# for pointing in design_dbo.pointings])
# if no_plate_pointings > 0:
# raise RuntimeError('design_id {0} has pointings with '
# 'already created plate_pointings. '
# 'This requires manual intervention.'
# .format(design_id))
# else:
# for pointing_dbo in design_dbo.pointings:
# pointing_dbo.delete_instance()
# for pno in range(1, no_pointings + 1):
# pointing_dbo, created = platedb.Pointing.get_or_create(design_pk=design_dbo.pk,
# pointing_no=pno)
# pointing_dbo.design = design_dbo
# pointing_dbo.center_ra = definition['racen'].split()[pno - 1]
# pointing_dbo.center_dec = definition['deccen'].split()[pno - 1]
# pointing_dbo.pointing_no = pno
# pointing_dbo.save()
# if created:
# print('created pointing #{0} for design {1}'.format(pno, design_id))
# else:
# print('found pointing #{0} for design {1} in DB'.format(pno, design_id))
@platedb.database.atomic()
def _load_plate(plate_id, plateplans_line, overwrite=True):
"""Loads a plate and plate_pointing info to the DB."""
# Does this plate exist in the database?
plate_dbo, created = platedb.Plate.get_or_create(plate_id=plate_id)
if not created:
print('found plate {0} in the DB.'.format(plate_id))
if overwrite:
warnings.warn('overwriting plate for plate_id={0}.'.format(plate_id),
UserWarning)
else:
return
else:
print('creating new Plate for plate_id={0}.'.format(plate_id))
plate_dbo.location_id = plateplans_line['locationid']
plate_dbo.temperature = 15.0
plate_dbo.epoch = round(2020.75, 6)
plate_dbo.center_ra = round(plateplans_line['raCen'], 6)
plate_dbo.center_dec = round(plateplans_line['decCen'], 6)
# plate_dbo.rerun = plateplans_line['rerun']
# plate_dbo.chunk = plateplans_line['chunk']
plate_dbo.chunk = "2020.8.a.mwm-bhm-fake"
if plateplans_line['name'] != "''" and len(plateplans_line['name']) > 0:
plate_dbo.name = plateplans_line['name']
# plate_dbo.comment = plateplans_line['comments']
plate_dbo.comment = "999"
# Tile info
# tileid = plateplans_line['tileid']
# if tileid > -1:
# plate_dbo.tile_id = tileid
# tile_dbo, created = platedb.Tile.get_or_create(id=tileid)
# if not created:
# print('found tile {0} in the DB'.format(tileid))
# else:
# print('created new tile with id={0}'.format(tileid))
# tile_dbo.save()
# plate_dbo.tile = tile_dbo
# plate_dbo.epoch = round(plateplans_line['epoch'], 6)
# plate_dbo.center_ra = round(plateplans_line['raCen'], 6)
# plate_dbo.center_dec = round(plateplans_line['decCen'], 6)
# Assigns the platerun
try:
platerun_dbo = platedb.PlateRun.get(label="2020.8.a.mwm-bhm-fake")
except peewee.DoesNotExist:
raise ValueError('cannot find a PlateRun row for plate {0}. '
'The plate run should already be in the DB.'.format(plate_id))
plate_dbo.plate_run = platerun_dbo
# Sets the plate status to design.
design_status = platedb.PlateStatus.get(label='Design')
plate_dbo.statuses.clear() # First remove statuses
plate_dbo.statuses.add([design_status])
# Handle survey relationship
plate_dbo.surveys.clear() # First remove surveys
for survey in six.u(plateplans_line['survey']).split('-'):
plate_dbo.surveys.add([platedb.Survey.get(plateplan_name=survey)])
# Ensure "design" foreign key constraint is met (lookup design from db).
design_id = plateplans_line['designid']
try:
# Look for existing design in the database.
design_dbo = platedb.Design.get(pk=design_id)
print('found design {0} for plate {1}.'.format(design_id, plate_id))
except peewee.DoesNotExist:
raise ValueError('cannot find a Design for plate {0}. '
'The design should already be in the DB.'.format(plate_id))
plate_dbo.design = design_dbo
# The default survey mode key needs to also be written to the plate table
defaultsurveymode_dbo = platedb.DesignValue.select().join(
platedb.DesignField).where((platedb.DesignValue.design_pk == design_dbo.pk) &
(platedb.DesignField.label == 'defaultsurveymode'))
if len(defaultsurveymode_dbo) == 0:
warnings.warn('cannot find defaultsurveymode for '
'design {0} for plate {1}. '
'Not setting current_survey_mode'.format(design_dbo.pk, plate_id))
else:
defaultsurveymode = defaultsurveymode_dbo[0].value
survey_mode_pk = platedb.SurveyMode.select(platedb.SurveyMode.pk).where(
platedb.SurveyMode.definition_label ** defaultsurveymode).scalar()
if not survey_mode_pk:
raise RuntimeError('The database is missing an entry in \'survey_mode\' '
'for the entry {0!r}.'.format(defaultsurveymode))
plate_dbo.current_survey_mode_pk = survey_mode_pk
plate_dbo.save()
# PlatePointings
# The actual instance of a telescope pointing - the parameters of Pointing
# plus an actual plate and hour angle.
for pointing_dbo in plate_dbo.design.pointings:
plate_pointing_dbo, created = platedb.PlatePointing.get_or_create(
pointing_pk=pointing_dbo.pk, plate_pk=plate_dbo.pk,
defaults={'pointing_name': 'A'})
if not created:
print('found plate_pointing for plate_id={0} in DB.'.format(plate_id))
return
pno = pointing_dbo.pointing_no
plate_pointing_dbo.design = design_dbo
plate_pointing_dbo.pointing = pointing_dbo
plate_pointing_dbo.plate = plate_dbo
plate_pointing_dbo.hour_angle = plateplans_line['ha']
plate_pointing_dbo.ha_observable_min = plateplans_line['ha_min']
plate_pointing_dbo.ha_observable_max = plateplans_line['ha_max']
# pointing_name = platedb.DesignValue.select().join(
# platedb.DesignField).where((platedb.DesignValue.design_pk == design_dbo.pk) &
# (platedb.DesignField.label == 'pointing_name'))
# if len(pointing_name) == 0:
# raise ValueError('cannot find pointing_name for '
# 'design {0} for plate {1}'.format(design_dbo.pk, plate_id))
# plate_pointing_dbo.pointing_name = pointing_name[0].value.split()[pno - 1]
# Sets the priority to 5 for all surveys (originally 5 for MaNGA and APOGEE, 2 for eBOSS)
survey = six.u(plateplans_line['survey'])
if 'manga' in survey or 'apogee' in survey:
plate_pointing_dbo.priority = 5
else:
plate_pointing_dbo.priority = 5
plate_pointing_dbo.save()
print('created plate_pointing for plate_id={}.'
.format(plate_id))
def plate_plans_db(plate_id, design_id, plate_line, design_defs, overwrite=True):
platerun = "2020.8.a.mwm-bhm-fake"
# Checks the connection
conn_status = platedb.database.connected
if conn_status:
print('database connection is open.')
else:
raise RuntimeError('cannot connect to the database. Review your connection settings.')
pr, created = platedb.PlateRun.get_or_create(label=platerun, year=2020)
if not created:
print('platerun {0} is already in the DB.'.format(platerun))
else:
print('added platerun {0} to the plate_run table.'.format(platerun))
# design_ids = np.unique(run_lines['designid'])
# design_ids = [9100]
# for design_id in design_ids:
print('loading design_id={0}'.format(design_id))
_load_design(design_id, design_defs, overwrite=overwrite)
# if load_addenda:
# log.important('loading plateDefinitionAddendas ...')
# plate_addenda_db(design_ids, design_mode=True, log=log)
# plate_ids = np.unique(run_lines['plateid'])
# for plate_id in plate_ids:
# plate_line = run_lines[run_lines['plateid'] == plate_id][0]
print('loading plate_id={0}'.format(plate_id))
_load_plate(plate_id, plate_line, overwrite=overwrite)
# log.important('populating observing ranges for {0} ... '.format(platerun))
# populate_obs_range(plate_ids, log=log)
def fitsToDb(line, designid):
plateplans_line = {}
if line["CADENCE"] in ["YSO", "RV6", "RV12", "GG"]:
survey = "mwm"
else:
survey = "bhm"
plateplans_line['survey'] = survey
plateplans_line['ha'] = line["HA"]
plateplans_line['ha_min'] = line["HA_MIN"]
plateplans_line['ha_max'] = line["HA_MAX"]
plateplans_line['designid'] = designid
plateplans_line['locationid'] = designid
plateplans_line['raCen'] = line["RA"]
plateplans_line['decCen'] = line["DEC"]
plateplans_line['name'] = line["FIELD"]
design_defs = {"cadence": line["CADENCE"],
"racen": line["RA"],
"deccen": line["DEC"]}
plate_plans_db(line["PLATE_ID"], designid, plateplans_line, design_defs)
if __name__ == "__main__":
database.connect_from_parameters(dbname="apodb", user="sdssdb_admin",
host="localhost", port="5500")
plates = fitsio.read("/home/john/Downloads/first_plates.fits")
for i, p in enumerate(plates):
fitsToDb(p, 91000+i)
| [] | [] | ["PLATELIST_DIR"] | [] | ["PLATELIST_DIR"] | python | 1 | 0 | |
otkafka/writer_test.go
|
package otkafka
import (
"context"
"os"
"strings"
"testing"
"github.com/go-kit/log"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/mocktracer"
"github.com/segmentio/kafka-go"
"github.com/stretchr/testify/assert"
)
func TestWriter(t *testing.T) {
if os.Getenv("KAFKA_ADDR") == "" {
t.Skip("set KAFKA_ADDR to run TestModule_ProvideRunGroup")
return
}
addrs := strings.Split(os.Getenv("KAFKA_ADDR"), ",")
{
ctx := context.Background()
kw := kafka.Writer{
Addr: kafka.TCP(addrs...),
Topic: "trace",
}
tracer := mocktracer.New()
w := Trace(&kw, tracer, WithLogger(log.NewNopLogger()))
span, ctx := opentracing.StartSpanFromContextWithTracer(ctx, tracer, "test")
span.SetBaggageItem("foo", "bar")
err := w.WriteMessages(ctx, kafka.Message{Value: []byte(`hello`)})
assert.NoError(t, err)
assert.Len(t, tracer.FinishedSpans(), 1)
span.Finish()
}
{
ctx := context.Background()
kr := kafka.NewReader(kafka.ReaderConfig{Brokers: addrs, Topic: "trace", GroupID: "test", MinBytes: 1, MaxBytes: 1})
tracer := mocktracer.New()
msg, err := kr.ReadMessage(ctx)
assert.NoError(t, err)
assert.Equal(t, "hello", string(msg.Value))
span, _, err := SpanFromMessage(ctx, tracer, &msg)
assert.NoError(t, err)
foo := span.BaggageItem("foo")
assert.Equal(t, "bar", foo)
span.Finish()
}
}
func Test_fromWriterConfig(t *testing.T) {
writer := fromWriterConfig(WriterConfig{})
assert.Equal(t, "127.0.0.1:9092", writer.Addr.String())
}
| ["\"KAFKA_ADDR\"", "\"KAFKA_ADDR\""] | [] | ["KAFKA_ADDR"] | [] | ["KAFKA_ADDR"] | go | 1 | 0 | |
tests/test_modeling_flax_common.py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import random
import tempfile
import unittest
from typing import List, Tuple
import numpy as np
import transformers
from huggingface_hub import HfApi
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import (
ENDPOINT_STAGING,
PASS,
USER,
CaptureLogger,
is_pt_flax_cross_test,
is_staging_test,
require_flax,
slow,
)
from transformers.utils import logging
if is_flax_available():
import os
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import (
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
FLAX_MODEL_MAPPING,
FlaxAutoModelForSequenceClassification,
FlaxBertModel,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8
if is_torch_available():
import torch
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key:
setattr(configs_no_init, key, 1e-10)
return configs_no_init
def ids_tensor(shape, vocab_size, rng=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = np.array(values, dtype=jnp.int32).reshape(shape)
return output
def floats_tensor(shape, scale=1.0, rng=None, name=None):
"""Creates a random float32 tensor"""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.random() * scale)
return np.array(values, dtype=jnp.float32).reshape(shape)
def random_attention_mask(shape, rng=None):
attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
@require_flax
class FlaxModelTesterMixin:
model_tester = None
all_model_classes = ()
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class):
inputs_dict = copy.deepcopy(inputs_dict)
# hack for now until we have AutoModel classes
if "ForMultipleChoice" in model_class.__name__:
inputs_dict = {
k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1]))
if isinstance(v, (jnp.ndarray, np.ndarray))
else v
for k, v in inputs_dict.items()
}
return inputs_dict
def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
diff = np.abs((a - b)).max()
self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assert_almost_equals(
set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), 1e-5
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@is_pt_flax_cross_test
def test_equivalence_pt_to_flax(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option, and the cache is not returned by default.
# So we disable `use_cache` here for the PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
fx_model.params = fx_state
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs, pt_outputs):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
self.assertEqual(
len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
)
for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2)
@is_pt_flax_cross_test
def test_equivalence_flax_to_pt(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
# prepare inputs
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
pt_model = pt_model_class(config).eval()
# Flax models don't use the `use_cache` option, and the cache is not returned by default.
# So we disable `use_cache` here for the PyTorch model.
pt_model.config.use_cache = False
fx_model = model_class(config, dtype=jnp.float32)
pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
pt_outputs = pt_model(**pt_inputs).to_tuple()
fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
for fx_output, pt_output in zip(fx_outputs, pt_outputs):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname)
pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
with torch.no_grad():
pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
self.assertEqual(
len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
)
for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2)
def test_from_pretrained_save_pretrained(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
model = model_class(config)
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**prepared_inputs_dict).to_tuple()
# verify that normal save_pretrained works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
# verify that save_pretrained for distributed training
# with `params=params` works as expected
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, params=model.params)
model_loaded = model_class.from_pretrained(tmpdirname)
outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple()
for output_loaded, output in zip(outputs_loaded, outputs):
self.assert_almost_equals(output_loaded, output, 1e-3)
def test_save_load_from_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = flatten_dict(unfreeze(model.params))
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname)
base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix]))
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_save_load_to_base(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_from_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = base_class(config)
base_params = flatten_dict(unfreeze(model.params))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
# save pt model
pt_model.save_pretrained(tmpdirname)
head_model = model_class.from_pretrained(tmpdirname, from_pt=True)
base_param_from_head = flatten_dict(unfreeze(head_model.params[head_model.base_model_prefix]))
for key in base_param_from_head.keys():
max_diff = (base_params[key] - base_param_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@is_pt_flax_cross_test
def test_save_load_to_base_pt(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
base_class = FLAX_MODEL_MAPPING[config.__class__]
for model_class in self.all_model_classes:
if model_class == base_class:
continue
model = model_class(config)
base_params_from_head = flatten_dict(unfreeze(model.params[model.base_model_prefix]))
# convert Flax model to PyTorch model
pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning
pt_model = pt_model_class(config).eval()
pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params)
# check that all base model weights are loaded correctly
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname)
base_model = base_class.from_pretrained(tmpdirname, from_pt=True)
base_params = flatten_dict(unfreeze(base_model.params))
for key in base_params_from_head.keys():
max_diff = (base_params[key] - base_params_from_head[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
@slow
def test_jit_compilation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@jax.jit
def model_jitted(input_ids, attention_mask=None, **kwargs):
return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
with self.subTest("JIT Enabled"):
jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict).to_tuple()
self.assertEqual(len(outputs), len(jitted_outputs))
for jitted_output, output in zip(jitted_outputs, outputs):
self.assertEqual(jitted_output.shape, output.shape)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids", "attention_mask"]
self.assertListEqual(arg_names[:2], expected_arg_names)
def test_naming_convention(self):
for model_class in self.all_model_classes:
model_class_name = model_class.__name__
module_class_name = (
model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module"
)
bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name])
module_cls = getattr(bert_modeling_flax_module, module_class_name)
self.assertIsNotNone(module_cls)
def test_hidden_states_output(self):
def check_hidden_states_output(inputs_dict, config, model_class):
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
if hasattr(self.model_tester, "encoder_seq_length"):
seq_length = self.model_tester.encoder_seq_length
else:
seq_length = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[seq_length, self.model_tester.hidden_size],
)
if config.is_encoder_decoder:
hidden_states = outputs.decoder_hidden_states
self.assertIsInstance(hidden_states, (list, tuple))
self.assertEqual(len(hidden_states), expected_num_layers)
seq_len = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[decoder_seq_length, self.model_tester.hidden_size],
)
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(inputs_dict, config, model_class)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(inputs_dict, config, model_class)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_length = getattr(self.model_tester, "seq_length", None)
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length)
decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 5
# Question Answering model returns start_logits and end_logits
if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
correct_outlen += 1 # start_logits and end_logits instead of only 1 output
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_load_with_mismatched_shapes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
continue
with self.subTest(msg=f"Testing {model_class}"):
with tempfile.TemporaryDirectory() as tmp_dir:
model = model_class(config)
model.save_pretrained(tmp_dir)
# Fails when we don't set ignore_mismatched_sizes=True
with self.assertRaises(ValueError):
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42)
logger = logging.get_logger("transformers.modeling_flax_utils")
with CaptureLogger(logger) as cl:
new_model = FlaxAutoModelForSequenceClassification.from_pretrained(
tmp_dir, num_labels=42, ignore_mismatched_sizes=True
)
self.assertIn("the shapes did not match", cl.out)
logits = new_model(**inputs_dict)["logits"]
self.assertEqual(logits.shape[1], 42)
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._api = HfApi(endpoint=ENDPOINT_STAGING)
cls._token = cls._api.login(username=USER, password=PASS)
@classmethod
def tearDownClass(cls):
try:
cls._api.delete_repo(token=cls._token, name="test-model-flax")
except HTTPError:
pass
try:
cls._api.delete_repo(token=cls._token, name="test-model-flax-org", organization="valid_org")
except HTTPError:
pass
def test_push_to_hub(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
os.path.join(tmp_dir, "test-model-flax"), push_to_hub=True, use_auth_token=self._token
)
new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def test_push_to_hub_in_organization(self):
config = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
)
model = FlaxBertModel(config)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
os.path.join(tmp_dir, "test-model-flax-org"),
push_to_hub=True,
use_auth_token=self._token,
organization="valid_org",
)
new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")
base_params = flatten_dict(unfreeze(model.params))
new_params = flatten_dict(unfreeze(new_model.params))
for key in base_params.keys():
max_diff = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
|
[] |
[] |
[
"XLA_PYTHON_CLIENT_MEM_FRACTION"
] |
[]
|
["XLA_PYTHON_CLIENT_MEM_FRACTION"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"context"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/pkg/errors"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/cache"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/rancher/wharfie/pkg/credentialprovider/plugin"
"github.com/rancher/wharfie/pkg/extract"
"github.com/rancher/wharfie/pkg/registries"
"github.com/rancher/wharfie/pkg/tarfile"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
var (
version = "v0.0.0"
)
func main() {
app := cli.NewApp()
app.Name = "wharfie"
app.Usage = "pulls and unpacks a container image to the local filesystem"
app.Description = "Supports K3s/RKE2 style repository rewrites, endpoint overrides, and auth configuration. Supports optional loading from local image tarballs or layer cache. Supports Kubelet credential provider plugins."
app.ArgsUsage = "<image> [<destination>|<source:destination>] [<source:destination>]"
app.Version = version
app.Action = run
app.Flags = []cli.Flag{
cli.StringFlag{
Name: "private-registry",
Usage: "Private registry configuration file",
Value: "/etc/rancher/common/registries.yaml",
},
cli.StringFlag{
Name: "images-dir",
Usage: "Images tarball directory",
},
cli.BoolFlag{
Name: "cache",
Usage: "Enable layer cache when image is not available locally",
},
cli.StringFlag{
Name: "cache-dir",
Usage: "Layer cache directory",
Value: "$XDG_CACHE_HOME/rancher/wharfie",
},
cli.StringFlag{
Name: "image-credential-provider-config",
Usage: "Image credential provider configuration file",
},
cli.StringFlag{
Name: "image-credential-provider-bin-dir",
Usage: "Image credential provider binary directory",
},
cli.BoolFlag{
Name: "debug",
Usage: "Enable debug logging",
},
cli.StringFlag{
Name: "arch",
Usage: "Override the machine architecture",
Value: runtime.GOARCH,
},
cli.StringFlag{
Name: "os",
Usage: "Override the machine operating system",
Value: runtime.GOOS,
},
}
if os.Getenv("XDG_CACHE_HOME") == "" && os.Getenv("HOME") != "" {
os.Setenv("XDG_CACHE_HOME", os.ExpandEnv("$HOME/.cache"))
}
if err := app.Run(os.Args); err != nil {
if !errors.Is(err, context.Canceled) {
logrus.Fatalf("Error: %v", err)
}
}
}
func run(clx *cli.Context) error {
var img v1.Image
if len(clx.Args()) < 2 {
fmt.Fprintf(clx.App.Writer, "Incorrect Usage. <image> and <destination> are required arguments.\n\n")
cli.ShowAppHelpAndExit(clx, 1)
}
if clx.Bool("debug") {
logrus.SetLevel(logrus.TraceLevel)
}
ref, err := name.ParseReference(clx.Args().Get(0))
if err != nil {
return err
}
// destination is one or more bare local paths to extract to on the host, or
// image-path:local-path pairs if the content should be extracted to specific
// locations.
dirs := map[string]string{}
for i := 1; i < clx.NArg(); i++ {
var source, destination string
destination = clx.Args().Get(i)
parts := strings.SplitN(destination, ":", 2)
if len(parts) == 2 {
source, destination = parts[0], parts[1]
} else {
source, destination = "/", parts[0]
}
destination, err := filepath.Abs(os.ExpandEnv(destination))
if err != nil {
return err
}
logrus.Infof("Extract mapping %s => %s", source, destination)
dirs[source] = destination
}
if clx.IsSet("images-dir") {
imagesDir, err := filepath.Abs(os.ExpandEnv(clx.String("images-dir")))
if err != nil {
return err
}
i, err := tarfile.FindImage(imagesDir, ref)
if err != nil && !errors.Is(err, tarfile.ErrNotFound) {
return err
}
img = i
}
if img == nil {
registry, err := registries.GetPrivateRegistries(clx.String("private-registry"))
if err != nil {
return err
}
// Next check Kubelet image credential provider plugins, if configured
if clx.IsSet("image-credential-provider-config") && clx.IsSet("image-credential-provider-bin-dir") {
plugins, err := plugin.RegisterCredentialProviderPlugins(clx.String("image-credential-provider-config"), clx.String("image-credential-provider-bin-dir"))
if err != nil {
return err
}
registry.DefaultKeychain = plugins
} else {
// The kubelet image credential provider plugin also falls back to checking legacy Docker credentials, so only
// explicitly set up the go-containerregistry DefaultKeychain if plugins are not configured.
// DefaultKeychain tries to read config from the home dir, and will error if HOME isn't set, so also gate on that.
if os.Getenv("HOME") != "" {
registry.DefaultKeychain = authn.DefaultKeychain
}
}
logrus.Infof("Pulling image reference %s", ref.Name())
img, err = registry.Image(ref, remote.WithPlatform(v1.Platform{Architecture: clx.String("arch"), OS: clx.String("os")}))
if err != nil {
return errors.Wrapf(err, "failed to get image reference %s", ref.Name())
}
if clx.Bool("cache") {
cacheDir, err := filepath.Abs(os.ExpandEnv(clx.String("cache-dir")))
if err != nil {
return err
}
logrus.Infof("Using layer cache %s", cacheDir)
imageCache := cache.NewFilesystemCache(cacheDir)
img = cache.Image(img, imageCache)
}
}
return extract.ExtractDirs(img, dirs)
}
|
[
"\"XDG_CACHE_HOME\"",
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME",
"XDG_CACHE_HOME"
] |
[]
|
["HOME", "XDG_CACHE_HOME"]
|
go
| 2 | 0 | |
certbot-dns-dnsimple/setup.py
|
from distutils.version import LooseVersion
import os
import sys
from setuptools import __version__ as setuptools_version
from setuptools import find_packages
from setuptools import setup
version = '1.12.0.dev0'
# Remember to update local-oldest-requirements.txt when changing the minimum
# acme/certbot version.
install_requires = [
'setuptools>=39.0.1',
'zope.interface',
]
if not os.environ.get('SNAP_BUILD'):
install_requires.extend([
'acme>=0.31.0',
'certbot>=1.1.0',
])
elif 'bdist_wheel' in sys.argv[1:]:
raise RuntimeError('Unset SNAP_BUILD when building wheels '
'to include certbot dependencies.')
if os.environ.get('SNAP_BUILD'):
install_requires.append('packaging')
setuptools_known_environment_markers = (LooseVersion(setuptools_version) >= LooseVersion('36.2'))
if setuptools_known_environment_markers:
install_requires.append('mock ; python_version < "3.3"')
elif 'bdist_wheel' in sys.argv[1:]:
raise RuntimeError('Error, you are trying to build certbot wheels using an old version '
'of setuptools. Version 36.2+ of setuptools is required.')
elif sys.version_info < (3,3):
install_requires.append('mock')
# This package normally depends on dns-lexicon>=3.2.1 to address the
# problem described in https://github.com/AnalogJ/lexicon/issues/387,
# however, the fix there has been backported to older versions of
# lexicon found in various Linux distros. This conditional helps us test
# that we've maintained compatibility with these versions of lexicon
# which allows us to potentially upgrade our packages in these distros
# as necessary.
if os.environ.get('CERTBOT_OLDEST') == '1':
install_requires.append('dns-lexicon>=2.2.1')
else:
install_requires.append('dns-lexicon>=3.2.1')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='certbot-dns-dnsimple',
version=version,
description="DNSimple DNS Authenticator plugin for Certbot",
url='https://github.com/certbot/certbot',
author="Certbot Project",
author_email='[email protected]',
license='Apache License 2.0',
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
},
entry_points={
'certbot.plugins': [
'dns-dnsimple = certbot_dns_dnsimple._internal.dns_dnsimple:Authenticator',
],
},
)
|
[] |
[] |
[
"CERTBOT_OLDEST",
"SNAP_BUILD"
] |
[]
|
["CERTBOT_OLDEST", "SNAP_BUILD"]
|
python
| 2 | 0 | |
integration/subcommand_test.go
|
package integration_test
import (
"io/ioutil"
"os"
"path/filepath"
"strings"
. "github.com/hackrish007/ginkgo"
"github.com/hackrish007/ginkgo/types"
. "github.com/hackrish007/gomega"
"github.com/hackrish007/gomega/gexec"
)
var _ = Describe("Subcommand", func() {
Describe("ginkgo bootstrap", func() {
var pkgPath string
BeforeEach(func() {
pkgPath = tmpPath("foo")
os.Mkdir(pkgPath, 0777)
})
It("should generate a bootstrap file, as long as one does not exist", func() {
session := startGinkgo(pkgPath, "bootstrap")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_suite_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_test"))
Ω(content).Should(ContainSubstring("func TestFoo(t *testing.T) {"))
Ω(content).Should(ContainSubstring("RegisterFailHandler"))
Ω(content).Should(ContainSubstring("RunSpecs"))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/hackrish007/gomega"`))
session = startGinkgo(pkgPath, "bootstrap")
Eventually(session).Should(gexec.Exit(1))
output = session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_suite_test.go already exists"))
})
It("should import nodot declarations when told to", func() {
session := startGinkgo(pkgPath, "bootstrap", "--nodot")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_suite_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_test"))
Ω(content).Should(ContainSubstring("func TestFoo(t *testing.T) {"))
Ω(content).Should(ContainSubstring("RegisterFailHandler"))
Ω(content).Should(ContainSubstring("RunSpecs"))
Ω(content).Should(ContainSubstring("var It = ginkgo.It"))
Ω(content).Should(ContainSubstring("var Ω = gomega.Ω"))
Ω(content).Should(ContainSubstring("\t" + `"github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring("\t" + `"github.com/hackrish007/gomega"`))
})
It("should generate an agouti bootstrap file when told to", func() {
session := startGinkgo(pkgPath, "bootstrap", "--agouti")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_suite_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_test"))
Ω(content).Should(ContainSubstring("func TestFoo(t *testing.T) {"))
Ω(content).Should(ContainSubstring("RegisterFailHandler"))
Ω(content).Should(ContainSubstring("RunSpecs"))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/hackrish007/gomega"`))
Ω(content).Should(ContainSubstring("\t" + `"github.com/sclevine/agouti"`))
})
It("should generate a bootstrap file using a template when told to", func() {
templateFile := filepath.Join(pkgPath, ".bootstrap")
ioutil.WriteFile(templateFile, []byte(`package {{.Package}}
import (
{{.GinkgoImport}}
{{.GomegaImport}}
"testing"
"binary"
)
func Test{{.FormattedName}}(t *testing.T) {
// This is a {{.Package}} test
}`), 0666)
session := startGinkgo(pkgPath, "bootstrap", "--template", ".bootstrap")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_suite_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_test"))
Ω(content).Should(ContainSubstring(`. "github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring(`. "github.com/hackrish007/gomega"`))
Ω(content).Should(ContainSubstring(`"binary"`))
Ω(content).Should(ContainSubstring("// This is a foo_test test"))
})
It("should generate a bootstrap file using a template that contains functions when told to", func() {
templateFile := filepath.Join(pkgPath, ".bootstrap")
ioutil.WriteFile(templateFile, []byte(`package {{.Package}}
import (
{{.GinkgoImport}}
{{.GomegaImport}}
"testing"
"binary"
)
func Test{{.FormattedName}}(t *testing.T) {
// This is a {{.Package | repeat 3}} test
}`), 0666)
session := startGinkgo(pkgPath, "bootstrap", "--template", ".bootstrap")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_suite_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_test"))
Ω(content).Should(ContainSubstring(`. "github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring(`. "github.com/hackrish007/gomega"`))
Ω(content).Should(ContainSubstring(`"binary"`))
Ω(content).Should(ContainSubstring("// This is a foo_testfoo_testfoo_test test"))
})
})
Describe("nodot", func() {
It("should update the declarations in the bootstrap file", func() {
pkgPath := tmpPath("foo")
os.Mkdir(pkgPath, 0777)
session := startGinkgo(pkgPath, "bootstrap", "--nodot")
Eventually(session).Should(gexec.Exit(0))
byteContent, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
content := string(byteContent)
content = strings.Replace(content, "var It =", "var MyIt =", -1)
content = strings.Replace(content, "var Ω = gomega.Ω\n", "", -1)
err = ioutil.WriteFile(filepath.Join(pkgPath, "foo_suite_test.go"), []byte(content), os.ModePerm)
Ω(err).ShouldNot(HaveOccurred())
session = startGinkgo(pkgPath, "nodot")
Eventually(session).Should(gexec.Exit(0))
byteContent, err = ioutil.ReadFile(filepath.Join(pkgPath, "foo_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(byteContent).Should(ContainSubstring("var MyIt = ginkgo.It"))
Ω(byteContent).ShouldNot(ContainSubstring("var It = ginkgo.It"))
Ω(byteContent).Should(ContainSubstring("var Ω = gomega.Ω"))
})
})
Describe("ginkgo generate", func() {
var pkgPath string
BeforeEach(func() {
pkgPath = tmpPath("foo_bar")
os.Mkdir(pkgPath, 0777)
})
Context("with no arguments", func() {
It("should generate a test file named after the package", func() {
session := startGinkgo(pkgPath, "generate")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_bar_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`var _ = Describe("FooBar", func() {`))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/hackrish007/gomega"`))
session = startGinkgo(pkgPath, "generate")
Eventually(session).Should(gexec.Exit(1))
output = session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_bar_test.go already exists"))
})
})
Context("with template argument", func() {
It("should generate a test file using a template", func() {
templateFile := filepath.Join(pkgPath, ".generate")
ioutil.WriteFile(templateFile, []byte(`package {{.Package}}
import (
{{if .IncludeImports}}. "github.com/hackrish007/ginkgo"{{end}}
{{if .IncludeImports}}. "github.com/hackrish007/gomega"{{end}}
{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
)
var _ = Describe("{{.Subject}}", func() {
// This is a {{.Package}} test
})`), 0666)
session := startGinkgo(pkgPath, "generate", "--template", ".generate")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_bar_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`. "github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring(`. "github.com/hackrish007/gomega"`))
Ω(content).Should(ContainSubstring(`/foo_bar"`))
Ω(content).Should(ContainSubstring("// This is a foo_bar_test test"))
})
It("should generate a test file using a template that contains functions", func() {
templateFile := filepath.Join(pkgPath, ".generate")
ioutil.WriteFile(templateFile, []byte(`package {{.Package}}
import (
{{if .IncludeImports}}. "github.com/hackrish007/ginkgo"{{end}}
{{if .IncludeImports}}. "github.com/hackrish007/gomega"{{end}}
{{if .ImportPackage}}"{{.PackageImportPath}}"{{end}}
)
var _ = Describe("{{.Subject}}", func() {
// This is a {{.Package | repeat 3 }} test
})`), 0666)
session := startGinkgo(pkgPath, "generate", "--template", ".generate")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_bar_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`. "github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring(`. "github.com/hackrish007/gomega"`))
Ω(content).Should(ContainSubstring(`/foo_bar"`))
Ω(content).Should(ContainSubstring("// This is a foo_bar_testfoo_bar_testfoo_bar_test test"))
})
})
Context("with an argument of the form: foo", func() {
It("should generate a test file named after the argument", func() {
session := startGinkgo(pkgPath, "generate", "baz_buzz")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
})
})
Context("with an argument of the form: foo.go", func() {
It("should generate a test file named after the argument", func() {
session := startGinkgo(pkgPath, "generate", "baz_buzz.go")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
})
})
Context("with an argument of the form: foo_test", func() {
It("should generate a test file named after the argument", func() {
session := startGinkgo(pkgPath, "generate", "baz_buzz_test")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
})
})
Context("with an argument of the form: foo-test", func() {
It("should generate a test file named after the argument", func() {
session := startGinkgo(pkgPath, "generate", "baz-buzz-test")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
})
})
Context("with an argument of the form: foo_test.go", func() {
It("should generate a test file named after the argument", func() {
session := startGinkgo(pkgPath, "generate", "baz_buzz_test.go")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("baz_buzz_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_buzz_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`var _ = Describe("BazBuzz", func() {`))
})
})
Context("with multiple arguments", func() {
It("should generate a test file named after the argument", func() {
session := startGinkgo(pkgPath, "generate", "baz", "buzz")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("baz_test.go"))
Ω(output).Should(ContainSubstring("buzz_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "baz_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`var _ = Describe("Baz", func() {`))
content, err = ioutil.ReadFile(filepath.Join(pkgPath, "buzz_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring(`var _ = Describe("Buzz", func() {`))
})
})
Context("with nodot", func() {
It("should not import ginkgo or gomega", func() {
session := startGinkgo(pkgPath, "generate", "--nodot")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_bar_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).ShouldNot(ContainSubstring("\t" + `. "github.com/hackrish007/ginkgo"`))
Ω(content).ShouldNot(ContainSubstring("\t" + `. "github.com/hackrish007/gomega"`))
})
})
Context("with agouti", func() {
It("should generate an agouti test file", func() {
session := startGinkgo(pkgPath, "generate", "--agouti")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("foo_bar_test.go"))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "foo_bar_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package foo_bar_test"))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/hackrish007/ginkgo"`))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/hackrish007/gomega"`))
Ω(content).Should(ContainSubstring("\t" + `. "github.com/sclevine/agouti/matchers"`))
Ω(content).Should(ContainSubstring("\t" + `"github.com/sclevine/agouti"`))
Ω(content).Should(ContainSubstring("page, err = agoutiDriver.NewPage()"))
})
})
})
Describe("ginkgo bootstrap/generate", func() {
var pkgPath string
BeforeEach(func() {
pkgPath = tmpPath("some-crazy-thing")
os.Mkdir(pkgPath, 0777)
})
Context("when the working directory is empty", func() {
It("generates correctly named bootstrap and generate files with a package name derived from the directory", func() {
session := startGinkgo(pkgPath, "bootstrap")
Eventually(session).Should(gexec.Exit(0))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "some_crazy_thing_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package some_crazy_thing_test"))
Ω(content).Should(ContainSubstring("SomeCrazyThing Suite"))
session = startGinkgo(pkgPath, "generate")
Eventually(session).Should(gexec.Exit(0))
content, err = ioutil.ReadFile(filepath.Join(pkgPath, "some_crazy_thing_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package some_crazy_thing_test"))
Ω(content).Should(ContainSubstring("SomeCrazyThing"))
})
})
Context("when the working directory contains a file with a package name", func() {
BeforeEach(func() {
Ω(ioutil.WriteFile(filepath.Join(pkgPath, "foo.go"), []byte("package main\n\nfunc main() {}"), 0777)).Should(Succeed())
})
It("generates correctly named bootstrap and generate files with the package name", func() {
session := startGinkgo(pkgPath, "bootstrap")
Eventually(session).Should(gexec.Exit(0))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "some_crazy_thing_suite_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package main_test"))
Ω(content).Should(ContainSubstring("SomeCrazyThing Suite"))
session = startGinkgo(pkgPath, "generate")
Eventually(session).Should(gexec.Exit(0))
content, err = ioutil.ReadFile(filepath.Join(pkgPath, "some_crazy_thing_test.go"))
Ω(err).ShouldNot(HaveOccurred())
Ω(content).Should(ContainSubstring("package main_test"))
Ω(content).Should(ContainSubstring("SomeCrazyThing"))
})
})
})
Describe("Go module and sginkgo bootstrap/generate", func() {
var (
pkgPath string
savedGoPath string
)
BeforeEach(func() {
pkgPath = tmpPath("myamazingmodule")
os.Mkdir(pkgPath, 0777)
Expect(ioutil.WriteFile(filepath.Join(pkgPath, "go.mod"), []byte("module fake.com/me/myamazingmodule\n"), 0777)).To(Succeed())
savedGoPath = os.Getenv("GOPATH")
Expect(os.Setenv("GOPATH", "")).To(Succeed())
Expect(os.Setenv("GO111MODULE", "on")).To(Succeed()) // needed pre-Go 1.13
})
AfterEach(func() {
Expect(os.Setenv("GOPATH", savedGoPath)).To(Succeed())
Expect(os.Setenv("GO111MODULE", "")).To(Succeed())
})
It("generates correctly named bootstrap and generate files with the module name", func() {
session := startGinkgo(pkgPath, "bootstrap")
Eventually(session).Should(gexec.Exit(0))
content, err := ioutil.ReadFile(filepath.Join(pkgPath, "myamazingmodule_suite_test.go"))
Expect(err).NotTo(HaveOccurred())
Expect(content).To(ContainSubstring("package myamazingmodule_test"), string(content))
Expect(content).To(ContainSubstring("Myamazingmodule Suite"), string(content))
session = startGinkgo(pkgPath, "generate")
Eventually(session).Should(gexec.Exit(0))
content, err = ioutil.ReadFile(filepath.Join(pkgPath, "myamazingmodule_test.go"))
Expect(err).NotTo(HaveOccurred())
Expect(content).To(ContainSubstring("package myamazingmodule_test"), string(content))
Expect(content).To(ContainSubstring("fake.com/me/myamazingmodule"), string(content))
Expect(content).To(ContainSubstring("Myamazingmodule"), string(content))
})
})
Describe("ginkgo blur", func() {
It("should unfocus tests", func() {
pathToTest := tmpPath("focused")
fixture := fixturePath("focused_fixture")
copyIn(fixture, pathToTest, true)
session := startGinkgo(pathToTest, "--noColor", "-r")
Eventually(session).Should(gexec.Exit(types.GINKGO_FOCUS_EXIT_CODE))
output := session.Out.Contents()
Ω(string(output)).Should(ContainSubstring("Detected Programmatic Focus"))
session = startGinkgo(pathToTest, "blur")
Eventually(session).Should(gexec.Exit(0))
output = session.Out.Contents()
Ω(string(output)).ShouldNot(ContainSubstring("expected 'package'"))
session = startGinkgo(pathToTest, "--noColor", "-r")
Eventually(session).Should(gexec.Exit(0))
output = session.Out.Contents()
Ω(string(output)).Should(ContainSubstring("Ginkgo ran 2 suites"))
Ω(string(output)).Should(ContainSubstring("Test Suite Passed"))
Ω(string(output)).ShouldNot(ContainSubstring("Detected Programmatic Focus"))
Expect(sameFile(filepath.Join(pathToTest, "README.md"), filepath.Join(fixture, "README.md"))).To(BeTrue())
})
It("should ignore the 'vendor' folder", func() {
pathToTest := tmpPath("focused_fixture_with_vendor")
copyIn(fixturePath("focused_fixture_with_vendor"), pathToTest, true)
session := startGinkgo(pathToTest, "blur")
Eventually(session).Should(gexec.Exit(0))
session = startGinkgo(pathToTest, "--noColor")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Expect(string(output)).To(ContainSubstring("13 Passed"))
Expect(string(output)).To(ContainSubstring("0 Skipped"))
vendorPath := fixturePath("focused_fixture_with_vendor/vendor")
otherVendorPath := filepath.Join(pathToTest, "vendor")
Expect(sameFolder(vendorPath, otherVendorPath)).To(BeTrue())
})
})
Describe("ginkgo version", func() {
It("should print out the version info", func() {
session := startGinkgo("", "version")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(MatchRegexp(`Ginkgo Version \d+\.\d+\.\d+`))
})
})
Describe("ginkgo help", func() {
It("should print out usage information", func() {
session := startGinkgo("", "help")
Eventually(session).Should(gexec.Exit(0))
output := string(session.Out.Contents())
Ω(output).Should(MatchRegexp(`Ginkgo Version \d+\.\d+\.\d+`))
Ω(output).Should(ContainSubstring("ginkgo watch"))
Ω(output).Should(ContainSubstring("-succinct"))
Ω(output).Should(ContainSubstring("-nodes"))
Ω(output).Should(ContainSubstring("ginkgo generate"))
Ω(output).Should(ContainSubstring("ginkgo help <COMMAND>"))
})
})
})
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
vendor/knative.dev/eventing/test/rekt/features/broker/control_plane.go
|
/*
Copyright 2021 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package broker
import (
"context"
"encoding/json"
"fmt"
"strings"
conformanceevent "github.com/cloudevents/conformance/pkg/event"
cetest "github.com/cloudevents/sdk-go/v2/test"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/ptr"
"knative.dev/reconciler-test/pkg/environment"
"knative.dev/reconciler-test/pkg/feature"
"knative.dev/reconciler-test/pkg/state"
"knative.dev/reconciler-test/resources/svc"
v1 "knative.dev/eventing/pkg/apis/duck/v1"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
eventingclientsetv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1"
eventingclient "knative.dev/eventing/pkg/client/injection/client"
"knative.dev/eventing/test/rekt/features/knconf"
triggerfeatures "knative.dev/eventing/test/rekt/features/trigger"
"knative.dev/eventing/test/rekt/resources/broker"
brokerresources "knative.dev/eventing/test/rekt/resources/broker"
"knative.dev/eventing/test/rekt/resources/delivery"
triggerresources "knative.dev/eventing/test/rekt/resources/trigger"
)
func ControlPlaneConformance(brokerName string) *feature.FeatureSet {
fs := &feature.FeatureSet{
Name: "Knative Broker Specification - Control Plane",
Features: []*feature.Feature{
ControlPlaneBroker(brokerName),
ControlPlaneTrigger_GivenBroker(brokerName),
ControlPlaneTrigger_GivenBrokerTriggerReady(brokerName),
ControlPlaneTrigger_WithBrokerLifecycle(),
ControlPlaneTrigger_WithValidFilters(brokerName),
ControlPlaneTrigger_WithInvalidFilters(brokerName),
},
}
// Add each feature of event routing and Delivery tests as a new feature
addControlPlaneEventRouting(fs)
addControlPlaneDelivery(fs)
// TODO: This is not a control plane test, or at best it is a blend with data plane.
// Must("Events that pass the attributes filter MUST include context or extension attributes that match all key-value pairs exactly.", todo)
return fs
}
func setBrokerName(name string) feature.StepFn {
return func(ctx context.Context, t feature.T) {
state.SetOrFail(ctx, t, BrokerNameKey, name)
}
}
func ControlPlaneBroker(brokerName string) *feature.Feature {
f := feature.NewFeatureNamed("Broker")
bName := feature.MakeRandomK8sName("broker")
sink := feature.MakeRandomK8sName("sink")
f.Setup("Set Broker Name", setBrokerName(bName))
f.Setup("install a service", svc.Install(sink, "app", "rekt"))
brokerOpts := append(brokerresources.WithEnvConfig(), delivery.WithDeadLetterSink(svc.AsKReference(sink), ""))
f.Setup("update broker", broker.Install(bName, brokerOpts...))
f.Setup("broker goes ready", broker.IsReady(bName))
f.Stable("Conformance").
Should("Broker objects SHOULD include a Ready condition in their status",
knconf.KResourceHasReadyInConditions(brokerresources.GVR(), brokerName)).
Should("The Broker SHOULD indicate Ready=True when its ingress is available to receive events.",
readyBrokerHasIngressAvailable).
Should("While a Broker is Ready, it SHOULD be a valid Addressable and its `status.address.url` field SHOULD indicate the address of its ingress.",
readyBrokerIsAddressable).
Should("The class of a Broker object SHOULD be immutable.",
brokerClassIsImmutable).
Should("Set the Broker status.deadLetterSinkURI if there is a valid spec.delivery.deadLetterSink defined",
BrokerStatusDLSURISet)
return f
}
func ControlPlaneTrigger_GivenBroker(brokerName string) *feature.Feature {
f := feature.NewFeatureNamed("Trigger, Given Broker")
f.Setup("Set Broker Name", setBrokerName(brokerName))
subscriberName := feature.MakeRandomK8sName("sub")
f.Setup("Install Subscriber", svc.Install(subscriberName, "bad", "svc"))
triggerName := feature.MakeRandomK8sName("trigger")
f.Setup("Create a Trigger", triggerresources.Install(triggerName, brokerName,
triggerresources.WithSubscriber(svc.AsKReference(subscriberName), ""),
))
f.Setup("Set Trigger Name", triggerfeatures.SetTriggerName(triggerName))
f.Stable("Conformance").
Should("Triggers SHOULD include a Ready condition in their status.",
triggerHasReadyInConditions).
Should("The Trigger SHOULD indicate Ready=True when events can be delivered to its subscriber.",
readyTriggerCanDeliver).
Must("Triggers MUST be assigned to exactly one Broker.",
triggerHasOneBroker).
Must("The assigned Broker of a Trigger SHOULD be immutable.",
triggerSpecBrokerIsImmutable)
return f
}
func ControlPlaneTrigger_GivenBrokerTriggerReady(brokerName string) *feature.Feature {
f := feature.NewFeatureNamed("Trigger, Given Broker")
f.Setup("Set Broker Name", setBrokerName(brokerName))
subscriberName := feature.MakeRandomK8sName("sub")
f.Setup("Install Subscriber", svc.Install(subscriberName, "bad", "svc"))
triggerName := feature.MakeRandomK8sName("trigger")
f.Setup("Create a Trigger", triggerresources.Install(triggerName, brokerName,
triggerresources.WithSubscriber(svc.AsKReference(subscriberName), ""),
))
f.Setup("Set Trigger Name", triggerfeatures.SetTriggerName(triggerName))
f.Requirement("The Trigger is Ready", triggerresources.IsReady(triggerName))
f.Stable("Conformance").
Should("While a Trigger is Ready, it SHOULD indicate its subscriber's URI via the `status.subscriberUri` field.",
readyTriggerHasSubscriberURI)
return f
}
func ControlPlaneTrigger_WithBrokerLifecycle() *feature.Feature {
f := feature.NewFeatureNamed("Trigger, With Broker Lifecycle")
subscriberName := feature.MakeRandomK8sName("sub")
f.Setup("Install Subscriber", svc.Install(subscriberName, "bad", "svc"))
brokerName := feature.MakeRandomK8sName("broker")
triggerName := feature.MakeRandomK8sName("trigger")
f.Setup("Create a Trigger", triggerresources.Install(triggerName, brokerName,
triggerresources.WithSubscriber(svc.AsKReference(subscriberName), ""),
))
f.Setup("Set Trigger Name", triggerfeatures.SetTriggerName(triggerName))
f.Stable("Conformance").
May("A Trigger MAY be created before its assigned Broker exists.",
triggerHasOneBroker).
Should("A Trigger SHOULD progress to Ready when its assigned Broker exists and is Ready.",
func(ctx context.Context, t feature.T) {
brokerresources.Install(brokerName, brokerresources.WithEnvConfig()...)(ctx, t) // Default broker from Env.
brokerresources.IsReady(brokerName)(ctx, t)
triggerresources.IsReady(triggerName)(ctx, t)
})
return f
}
func ControlPlaneTrigger_WithValidFilters(brokerName string) *feature.Feature {
f := feature.NewFeatureNamed("Trigger, With Filters")
f.Setup("Set Broker Name", setBrokerName(brokerName))
subscriberName := feature.MakeRandomK8sName("sub")
f.Setup("Install Subscriber", svc.Install(subscriberName, "bad", "svc"))
// CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z') or digits ('0' to '9') from the ASCII character set. Attribute names SHOULD be descriptive and terse and SHOULD NOT exceed 20 characters in length.
filters := map[string]string{
"source": "a source",
"id": "an id",
"specversion": "the spec version",
"type": "the type",
"subject": "a subject",
"time": "a time",
"datacontenttype": "a datacontenttype",
"dataschema": "a dataschema",
"aaa": "bbb",
"c1d2e3": "123",
"abcdefghijklmnopqrst": "max length",
}
triggerName := feature.MakeRandomK8sName("trigger")
f.Setup("Create a Trigger", triggerresources.Install(triggerName, brokerName,
triggerresources.WithSubscriber(svc.AsKReference(subscriberName), ""),
triggerresources.WithFilter(filters),
))
f.Setup("Set Trigger Name", triggerfeatures.SetTriggerName(triggerName))
f.Stable("Conformance").
Must("The attributes filter specifying a list of key-value pairs MUST be supported by Trigger.",
// Compare the passed filters with what is found on the control plane.
func(ctx context.Context, t feature.T) {
trigger := triggerfeatures.GetTrigger(ctx, t)
got := make(map[string]string)
for k, v := range trigger.Spec.Filter.Attributes {
got[k] = v
}
want := filters
if diff := cmp.Diff(want, got, cmpopts.SortMaps(func(a, b string) bool {
return a < b
})); diff != "" {
t.Error("Filters do not match (-want, +got) =", diff)
}
})
return f
}
func ControlPlaneTrigger_WithInvalidFilters(brokerName string) *feature.Feature {
f := feature.NewFeatureNamed("Trigger, With Filters")
f.Setup("Set Broker Name", setBrokerName(brokerName))
subscriberName := feature.MakeRandomK8sName("sub")
f.Setup("Install Subscriber", svc.Install(subscriberName, "bad", "svc"))
// CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z') or digits ('0' to '9') from the ASCII character set. Attribute names SHOULD be descriptive and terse and SHOULD NOT exceed 20 characters in length.
filters := map[string]string{
"SOURCE": "not lower case letters, all",
"Source": "not lower case letters, first",
"souRce": "not lower case letters, not first",
"s pace s": "no spaces",
"s_pace_s": "no underscores",
"s-pace-s": "no dashes",
"123": "just numbers",
"😊": "unicode not supported",
"!@#$%^&*()-_=_`~+\\": "other non-(a-z,0-9) type chars, top row",
"{}[];':\"<>,./?": "other non-(a-z,0-9) type chars, brackets",
}
triggerName := feature.MakeRandomK8sName("trigger")
f.Setup("Create a Trigger", triggerresources.Install(triggerName, brokerName,
triggerresources.WithSubscriber(svc.AsKReference(subscriberName), ""),
))
f.Setup("Set Trigger Name", triggerfeatures.SetTriggerName(triggerName))
asserter := f.Stable("Conformance - Negatives - The attributes filter specifying a list of key-value pairs MUST be supported by Trigger.")
for key, value := range filters {
k := key
v := value
asserter.Must("Reject invalid filter - "+k+" - "+v,
// Compare the passed filters with what is found on the control plane.
func(ctx context.Context, t feature.T) {
trigger := triggerfeatures.GetTrigger(ctx, t)
if trigger.Spec.Filter == nil {
trigger.Spec.Filter = &eventingv1.TriggerFilter{
Attributes: map[string]string{},
}
} else if trigger.Spec.Filter.Attributes == nil {
trigger.Spec.Filter.Attributes = map[string]string{}
}
trigger.Spec.Filter.Attributes[k] = v
_, err := Client(ctx).Triggers.Update(ctx, trigger, metav1.UpdateOptions{})
if err != nil {
// We expect an error.
// Success!
} else {
t.Error("expected Trigger to reject the spec.filter update.")
}
})
}
return f
}
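// addControlPlaneDelivery registers conformance features that exercise the
// delivery-spec precedence rules between BrokerSpec.Delivery and
// TriggerSpec.Delivery (retry counts and dead-letter routing).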
func addControlPlaneDelivery(fs *feature.FeatureSet) {
for i, tt := range []struct {
name string
brokerDS *v1.DeliverySpec
// Trigger 1 Delivery spec
t1DS *v1.DeliverySpec
// How many events to fail before succeeding
t1FailCount uint
// Trigger 2 Delivery spec
t2DS *v1.DeliverySpec
// How many events to fail before succeeding
t2FailCount uint
}{{
name: "When `BrokerSpec.Delivery` and `TriggerSpec.Delivery` are both not configured, no delivery spec SHOULD be used.",
}, {
name: "When `BrokerSpec.Delivery` is configured, but not the specific `TriggerSpec.Delivery`, then the `BrokerSpec.Delivery` SHOULD be used. (Retry)",
brokerDS: &v1.DeliverySpec{
DeadLetterSink: new(duckv1.Destination),
Retry: ptr.Int32(3),
},
t1FailCount: 3, // Should get event.
t2FailCount: 4, // Should end up in DLQ.
}, {
name: "When `TriggerSpec.Delivery` is configured, then `TriggerSpec.Delivery` SHOULD be used. (Retry)",
brokerDS: &v1.DeliverySpec{ // Disable delivery spec defaulting
Retry: ptr.Int32(0),
},
t1DS: &v1.DeliverySpec{
DeadLetterSink: new(duckv1.Destination),
Retry: ptr.Int32(3),
},
t2DS: &v1.DeliverySpec{
Retry: ptr.Int32(1),
},
t1FailCount: 3, // Should get event.
t2FailCount: 2, // Should be dropped.
}, {
name: "When both `BrokerSpec.Delivery` and `TriggerSpec.Delivery` is configured, then `TriggerSpec.Delivery` SHOULD be used. (Retry)",
brokerDS: &v1.DeliverySpec{
DeadLetterSink: new(duckv1.Destination),
Retry: ptr.Int32(1),
},
t1DS: &v1.DeliverySpec{
DeadLetterSink: new(duckv1.Destination),
Retry: ptr.Int32(3),
},
t1FailCount: 3, // Should get event.
t2FailCount: 2, // Should end up in DLQ.
}, {
name: "When both `BrokerSpec.Delivery` and `TriggerSpec.Delivery` is configured, then `TriggerSpec.Delivery` SHOULD be used. (Retry+DLQ)",
brokerDS: &v1.DeliverySpec{
DeadLetterSink: new(duckv1.Destination),
Retry: ptr.Int32(1),
},
t1DS: &v1.DeliverySpec{
DeadLetterSink: new(duckv1.Destination),
Retry: ptr.Int32(3),
},
t1FailCount: 4, // Should end up in Trigger DLQ.
t2FailCount: 2, // Should end up in Broker DLQ.
}} {
// TODO: Each of these creates quite a few resources. We need to figure out a way
// to delete the resources for each Feature once the test completes. Today it's
// not easy (if at all possible) to do this, since Environment contains the References
// to created resources, but it's not granular enough.
brokerName := fmt.Sprintf("dlq-test-%d", i)
f := feature.NewFeatureNamed(fmt.Sprintf("Delivery Spec - %s", brokerName))
cfg := []triggerCfg{{
delivery: tt.t1DS,
failCount: tt.t1FailCount,
}, {
delivery: tt.t2DS,
failCount: tt.t2FailCount,
}}
prober := createBrokerTriggerTopology(f, brokerName, tt.brokerDS, cfg)
// Send an event into the matrix and hope for the best
prober.SenderFullEvents(1)
f.Setup("install source", prober.SenderInstall("source"))
f.Requirement("sender is finished", prober.SenderDone("source"))
// All events have been sent, time to look at the specs and confirm we got them.
expectedEvents := createExpectedEventPatterns(tt.brokerDS, cfg)
f.Requirement("wait until done", func(ctx context.Context, t feature.T) {
interval, timeout := environment.PollTimingsFromContext(ctx)
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
gtg := true
for prefix, want := range expectedEvents {
events := prober.ReceivedOrRejectedBy(ctx, prefix)
if len(events) != len(want.Success) {
gtg = false
}
}
return gtg, nil
})
if err != nil {
t.Error("polling for expected events failed:", err)
}
})
f.Stable("Conformance").Should(tt.name, knconf.AssertEventPatterns(prober, expectedEvents))
f.Teardown("Delete feature resources", f.DeleteResources)
fs.Features = append(fs.Features, f)
}
}
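// addControlPlaneEventRouting registers conformance features that verify how
// events are routed to Triggers based on their filters, including reply events.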
func addControlPlaneEventRouting(fs *feature.FeatureSet) {
fullEvent := cetest.FullEvent()
replyEvent := cetest.FullEvent()
replyEvent.SetType("com.example.ReplyEvent")
for i, tt := range []struct {
name string
config []triggerCfg
inEvents []conformanceevent.Event
}{{
name: "One trigger, no filter, gets event",
config: []triggerCfg{{}},
inEvents: []conformanceevent.Event{knconf.EventToEvent(&fullEvent)},
}, {
name: "One trigger, with filter, does not get event",
config: []triggerCfg{
{
filter: &eventingv1.TriggerFilter{
Attributes: eventingv1.TriggerFilterAttributes{
"type": "mytype",
},
},
},
},
inEvents: []conformanceevent.Event{
{
Attributes: conformanceevent.ContextAttributes{
Type: "notmytype",
},
},
},
}, {
name: "One trigger, with filter, gets the event",
config: []triggerCfg{
{
filter: &eventingv1.TriggerFilter{
Attributes: eventingv1.TriggerFilterAttributes{
"type": "com.example.FullEvent",
},
},
},
},
inEvents: []conformanceevent.Event{knconf.EventToEvent(&fullEvent)},
}, {
// name: "Two triggers, with filter, both get the event",
config: []triggerCfg{
{
filter: &eventingv1.TriggerFilter{
Attributes: eventingv1.TriggerFilterAttributes{
"type": "com.example.FullEvent",
},
},
}, {
filter: &eventingv1.TriggerFilter{
Attributes: eventingv1.TriggerFilterAttributes{
"type": "com.example.FullEvent",
},
},
},
},
inEvents: []conformanceevent.Event{knconf.EventToEvent(&fullEvent)},
}, {
name: "Two triggers, with filter, only matching one gets the event",
config: []triggerCfg{
{
filter: &eventingv1.TriggerFilter{
Attributes: eventingv1.TriggerFilterAttributes{
"type": "notmytype",
},
},
}, {
filter: &eventingv1.TriggerFilter{
Attributes: eventingv1.TriggerFilterAttributes{
"type": "com.example.FullEvent",
},
},
},
},
inEvents: []conformanceevent.Event{knconf.EventToEvent(&fullEvent)},
}, {
name: "Two triggers, with filter, first one matches incoming event, creates reply, which matches the second one",
config: []triggerCfg{
{
filter: &eventingv1.TriggerFilter{
Attributes: eventingv1.TriggerFilterAttributes{
"type": "com.example.FullEvent",
},
},
reply: func() *conformanceevent.Event {
reply := knconf.EventToEvent(&replyEvent)
reply.Attributes.DataContentType = "application/json" // EventsHub defaults all data to this.
return &reply
}(),
}, {
filter: &eventingv1.TriggerFilter{
Attributes: eventingv1.TriggerFilterAttributes{
"type": "com.example.ReplyEvent",
},
},
},
},
inEvents: []conformanceevent.Event{knconf.EventToEvent(&fullEvent)},
}, {
name: "Two triggers, with no filters, both get the event",
config: []triggerCfg{{}, {}},
inEvents: []conformanceevent.Event{knconf.EventToEvent(&fullEvent)},
}} {
brokerName := fmt.Sprintf("routing-test-%d", i)
f := feature.NewFeatureNamed(fmt.Sprintf("Event Routing Spec - %s", brokerName))
f.Setup("Set Broker Name", setBrokerName(brokerName))
prober := createBrokerTriggerTopology(f, brokerName, nil, tt.config)
// Send an event into the matrix and hope for the best
// TODO: We need to do some work to get the event types into the Prober.
// All the events generated are currently hardcoded into the com.example.FullEvent
// so once prober supports more configuration, wire it up here.
prober.SenderFullEvents(1)
f.Setup("install source", prober.SenderInstall("source"))
f.Requirement("sender is finished", prober.SenderDone("source"))
// All events have been sent, time to look at the specs and confirm we got them.
expectedEvents := createExpectedEventRoutingMap(tt.config, tt.inEvents)
f.Requirement("wait until done", func(ctx context.Context, t feature.T) {
interval, timeout := environment.PollTimingsFromContext(ctx)
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
gtg := true
for prefix, want := range expectedEvents {
events := prober.ReceivedOrRejectedBy(ctx, prefix)
if len(events) != len(want) {
gtg = false
}
}
return gtg, nil
})
if err != nil {
t.Error("polling for expected events failed:", err)
}
})
f.Stable("Conformance").Should(tt.name, assertExpectedRoutedEvents(prober, expectedEvents))
f.Teardown("Delete feature resources", f.DeleteResources)
fs.Features = append(fs.Features, f)
}
}
type EventingClient struct {
Brokers eventingclientsetv1.BrokerInterface
Triggers eventingclientsetv1.TriggerInterface
}
func Client(ctx context.Context) *EventingClient {
ec := eventingclient.Get(ctx).EventingV1()
env := environment.FromContext(ctx)
return &EventingClient{
Brokers: ec.Brokers(env.Namespace()),
Triggers: ec.Triggers(env.Namespace()),
}
}
const (
BrokerNameKey = "brokerName"
)
func getBroker(ctx context.Context, t feature.T) *eventingv1.Broker {
c := Client(ctx)
name := state.GetStringOrFail(ctx, t, BrokerNameKey)
broker, err := c.Brokers.Get(ctx, name, metav1.GetOptions{})
if err != nil {
t.Errorf("failed to get Broker, %v", err)
}
return broker
}
func readyBrokerHasIngressAvailable(ctx context.Context, t feature.T) {
// TODO: I am not sure how to test this from the outside.
}
func readyBrokerIsAddressable(ctx context.Context, t feature.T) {
broker := getBroker(ctx, t)
if broker.IsReady() {
if broker.Status.Address.URL == nil {
t.Errorf("broker is not addressable")
}
// Success!
} else {
t.Errorf("broker was not ready, reason: %s", broker.Status.GetTopLevelCondition().Reason)
}
}
func BrokerStatusDLSURISet(ctx context.Context, t feature.T) {
broker := getBroker(ctx, t)
if broker.IsReady() {
if broker.Status.DeadLetterSinkURI == nil {
t.Errorf("broker DLS not resolved but resource reported ready")
}
// Success!
} else {
t.Errorf("broker was not ready")
}
}
func brokerClassIsImmutable(ctx context.Context, t feature.T) {
broker := getBroker(ctx, t)
if broker.Annotations == nil {
broker.Annotations = map[string]string{}
}
// update annotations
broker.Annotations[eventingv1.BrokerClassAnnotationKey] = "Rekt.brokerClassIsImmutable"
if _, err := Client(ctx).Brokers.Update(ctx, broker, metav1.UpdateOptions{}); err != nil {
// Success!
t.Log("broker class is immutable")
} else {
t.Errorf("broker class is mutable")
}
}
func triggerHasReadyInConditions(ctx context.Context, t feature.T) {
name := state.GetStringOrFail(ctx, t, triggerfeatures.TriggerNameKey)
knconf.KResourceHasReadyInConditions(triggerresources.GVR(), name)(ctx, t)
}
func readyTriggerCanDeliver(ctx context.Context, t feature.T) {
trigger := triggerfeatures.GetTrigger(ctx, t)
_ = trigger
// TODO: I am not sure how to test this from the outside.
}
func readyTriggerHasSubscriberURI(ctx context.Context, t feature.T) {
trigger := triggerfeatures.GetTrigger(ctx, t)
if trigger.Status.IsReady() {
if trigger.Status.SubscriberURI == nil {
t.Errorf("trigger did not have subscriber uri in status")
}
// Success!
} else {
j, _ := json.Marshal(trigger)
t.Errorf("trigger was not ready, \n%s", string(j))
}
}
func triggerHasOneBroker(ctx context.Context, t feature.T) {
trigger := triggerfeatures.GetTrigger(ctx, t)
if trigger.Spec.Broker == "" {
t.Error("broker is empty")
}
if strings.Contains(trigger.Spec.Broker, ",") {
t.Errorf("more than one broker specified: %q", trigger.Spec.Broker)
}
}
func triggerSpecBrokerIsImmutable(ctx context.Context, t feature.T) {
trigger := triggerfeatures.GetTrigger(ctx, t)
// Update spec.broker
trigger.Spec.Broker = "Rekt.BrokerImmutable"
if _, err := Client(ctx).Triggers.Update(ctx, trigger, metav1.UpdateOptions{}); err != nil {
// Success!
t.Log("Trigger spec.broker is immutable")
} else {
t.Errorf("Trigger spec.broker is mutable")
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
config/Index.go
|
package config
import (
"../util"
"../integrate/logger"
"../exceptions"
"os"
"path/filepath"
"log"
"strings"
"reflect"
)
var packageJson map[string]interface{}
func checkError(err error) {
if nil != err {
logger.Error("service", err.Error())
os.Exit(101)
return
}
}
/**
 * Get the directory the program is running from
 */
func getCurrentDirectory() string {
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
log.Fatal(err)
}
return strings.Replace(dir, "\\", "/", -1)
}
/**
 * Package initialization function
 */
func init() {
dir := getCurrentDirectory()
configInfo, err := util.ReadRealFile(filepath.Join(dir, "./package.json"))
checkError(err)
pkg, err := util.FormatToStruct(&configInfo)
checkError(err)
if 0 == len(pkg) {
checkError(&exceptions.Error{Msg: "read json fail", Code: 500})
}
packageJson = pkg
readEnv() // override package.json values with environment variables
}
/**
readEnv reads environment variables and overrides the corresponding package.json values
*/
func readEnv() {
redis_addr := os.Getenv("REDIS_ADDR")
redis_port := os.Getenv("REDIS_PORT")
server := packageJson["server"]
if "" != redis_addr {
setByTarget(GetByTarget(server, "cache"), "addr", redis_addr)
}
if "" != redis_port {
setByTarget(GetByTarget(server, "cache"), "port", redis_port)
}
}
/**
Get a configuration item
@param key string configuration key
@return interface{} configuration value
*/
func Get(key string) interface{} {
return packageJson[key]
}
/**
Set a map entry via reflection
*/
func setByTarget(target, key, value interface{}) {
reflect.ValueOf(target).SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value))
}
/**
Get a map entry via reflection
*/
func GetByTarget(target interface{}, key interface{}) interface{} {
v := reflect.ValueOf(target)
value := v.MapIndex(reflect.ValueOf(key))
if !value.IsValid() {
return nil
}
return value.Interface()
}
|
[
"\"REDIS_ADDR\"",
"\"REDIS_PORT\""
] |
[] |
[
"REDIS_PORT",
"REDIS_ADDR"
] |
[]
|
["REDIS_PORT", "REDIS_ADDR"]
|
go
| 2 | 0 | |
demos/form/app.py
|
# -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li
:license: MIT, see LICENSE for more details.
"""
import os
import uuid
from flask import Flask, render_template, flash, redirect, url_for, request, send_from_directory, session
from flask_ckeditor import CKEditor, upload_success, upload_fail
from flask_dropzone import Dropzone
from flask_wtf.csrf import validate_csrf
from wtforms import ValidationError
from ppp.forms import LoginForm, FortyTwoForm, NewPostForm, UploadForm, MultiUploadForm, SigninForm, \
RegisterForm, SigninForm2, RegisterForm2, RichTextForm
app = Flask(__name__)
# print(__name__)
app.secret_key = os.getenv('SECRET_KEY', 'secret string')
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
# Custom config
app.config['UPLOAD_PATH'] = os.path.join(app.root_path, 'uploads')
app.config['ALLOWED_EXTENSIONS'] = ['png', 'jpg', 'jpeg', 'gif']
# Flask config
# set request body's max length
app.config['MAX_CONTENT_LENGTH'] = 3 * 1024 * 1024 # 3 MB
# Flask-CKEditor config
app.config['CKEDITOR_SERVE_LOCAL'] = True
app.config['CKEDITOR_FILE_UPLOADER'] = 'upload_for_ckeditor'
# Flask-Dropzone config
app.config['DROPZONE_ALLOWED_FILE_TYPE'] = 'image'
app.config['DROPZONE_MAX_FILE_SIZE'] = 3
app.config['DROPZONE_MAX_FILES'] = 30
ckeditor = CKEditor(app)
dropzone = Dropzone(app)
@app.route('/', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@app.route('/html', methods=['GET', 'POST'])
def html():
form = LoginForm()
if request.method == 'POST':
username = request.form.get('username')
flash('Welcome home, %s!' % username)
return redirect(url_for('index'))
return render_template('pure_html.html')
@app.route('/basic', methods=['GET', 'POST'])
def basic():
form = LoginForm()
if form.validate_on_submit():
username = form.username.data
# flash('Welcome home, %s!' % username)
flash(f'Welcome home, {username}!')
return redirect(url_for('index'))
return render_template('basic.html', form=form)
@app.route('/bootstrap', methods=['GET', 'POST'])
def bootstrap():
form = LoginForm()
if form.validate_on_submit():
username = form.username.data
flash('Welcome home, %s!' % username)
return redirect(url_for('index'))
return render_template('bootstrap.html', form=form)
@app.route('/custom-validator', methods=['GET', 'POST'])
def custom_validator():
form = FortyTwoForm()
if form.validate_on_submit():
flash('Bingo!')
return redirect(url_for('index'))
return render_template('custom_validator.html', form=form)
@app.route('/uploads/<path:filename>') # <[converter:]variable_name> path: like string but also accepts slashes
def get_file(filename):
return send_from_directory(app.config['UPLOAD_PATH'], filename)
@app.route('/uploaded-images')
def show_images():
return render_template('uploaded.html')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS'] # 'rsplit': Returns a list of the words in the string, separated by the delimiter string (starting from right)
def random_filename(filename):
ext = os.path.splitext(filename)[1]
new_filename = uuid.uuid4().hex + ext
return new_filename
@app.route('/upload', methods=['GET', 'POST'])
def upload():
form = UploadForm()
if form.validate_on_submit():
f = form.photo.data
filename = random_filename(f.filename)
f.save(os.path.join(app.config['UPLOAD_PATH'], filename))
flash('Upload success.')
session['filenames'] = [filename]
return redirect(url_for('show_images'))
return render_template('upload.html', form=form)
@app.route('/multi-upload', methods=['GET', 'POST'])
def multi_upload():
form = MultiUploadForm()
if request.method == 'POST':
filenames = []
# check csrf token
try:
validate_csrf(form.csrf_token.data)
except ValidationError:
flash('CSRF token error.')
return redirect(url_for('multi_upload'))
# check if the post request has the file part
if 'photo' not in request.files: # 'photo' is an attribute of the 'MultiUploadForm' class in form.py
flash('This field is required.')
return redirect(url_for('multi_upload'))
for f in request.files.getlist('photo'):
# if the user does not select a file, the browser may still
# submit an empty part without a filename
# if f.filename == '':
# flash('No selected file.')
# return redirect(url_for('multi_upload'))
# check the file extension
if f and allowed_file(f.filename):
filename = random_filename(f.filename)
f.save(os.path.join(
app.config['UPLOAD_PATH'], filename
))
filenames.append(filename)
else:
flash('Invalid file type.')
return redirect(url_for('multi_upload'))
flash('Upload success.')
session['filenames'] = filenames
return redirect(url_for('show_images'))
return render_template('upload.html', form=form)
@app.route('/dropzone-upload', methods=['GET', 'POST'])
def dropzone_upload():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
return 'This field is required.', 400
f = request.files.get('file')
if f and allowed_file(f.filename):
filename = random_filename(f.filename)
f.save(os.path.join(
app.config['UPLOAD_PATH'], filename
))
else:
return 'Invalid file type.', 400
return render_template('dropzone.html')
@app.route('/two-submits', methods=['GET', 'POST'])
def two_submits():
form = NewPostForm()
if form.validate_on_submit():
if form.save.data:
# save it...
flash('You click the "Save" button.')
elif form.publish.data:
# publish it...
flash('You click the "Publish" button.')
return redirect(url_for('index'))
return render_template('2submit.html', form=form)
@app.route('/multi-form', methods=['GET', 'POST'])
def multi_form():
signin_form = SigninForm()
register_form = RegisterForm()
if signin_form.submit1.data and signin_form.validate():
username = signin_form.username.data
flash('%s, you just submit the Signin Form.' % username)
return redirect(url_for('index'))
if register_form.submit2.data and register_form.validate():
username = register_form.username.data
flash('%s, you just submit the Register Form.' % username)
return redirect(url_for('index'))
return render_template('2form.html', signin_form=signin_form, register_form=register_form)
@app.route('/multi-form-multi-view')
def multi_form_multi_view():
signin_form = SigninForm2()
register_form = RegisterForm2()
return render_template('2form2view.html', signin_form=signin_form, register_form=register_form)
@app.route('/handle-signin', methods=['POST'])
def handle_signin():
signin_form = SigninForm2()
register_form = RegisterForm2()
if signin_form.validate_on_submit():
username = signin_form.username.data
flash('%s, you just submit the Signin Form.' % username)
return redirect(url_for('index'))
return render_template('2form2view.html', signin_form=signin_form, register_form=register_form)
@app.route('/handle-register', methods=['POST'])
def handle_register():
signin_form = SigninForm2()
register_form = RegisterForm2()
if register_form.validate_on_submit():
username = register_form.username.data
flash('%s, you just submit the Register Form.' % username)
return redirect(url_for('index'))
return render_template('2form2view.html', signin_form=signin_form, register_form=register_form)
@app.route('/ckeditor', methods=['GET', 'POST'])
def integrate_ckeditor():
form = RichTextForm()
if form.validate_on_submit():
title = form.title.data
body = form.body.data
flash('Your post is published!')
return render_template('post.html', title=title, body=body)
return render_template('ckeditor.html', form=form)
# handle image upload for ckeditor
@app.route('/upload-ck', methods=['POST'])
def upload_for_ckeditor():
f = request.files.get('upload')
if not allowed_file(f.filename):
return upload_fail('Image only!')
f.save(os.path.join(app.config['UPLOAD_PATH'], f.filename))
url = url_for('get_file', filename=f.filename)
return upload_success(url, f.filename)
# Shutdown the simple server
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if not func:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/shutdown', methods=['GET'])
def shutdown():
shutdown_server()
return 'Server shutting down...'
|
[] |
[] |
[
"SECRET_KEY"
] |
[]
|
["SECRET_KEY"]
|
python
| 1 | 0 | |
state/indexer/sink/psql/psql_test.go
|
package psql
import (
"context"
"database/sql"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/signal"
"testing"
"time"
"github.com/adlio/schema"
"github.com/gogo/protobuf/proto"
"github.com/ory/dockertest"
"github.com/ory/dockertest/docker"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/types"
// Register the Postgres database driver.
_ "github.com/lib/pq"
)
// Verify that the type satisfies the EventSink interface.
var _ indexer.EventSink = (*EventSink)(nil)
var (
doPauseAtExit = flag.Bool("pause-at-exit", false,
"If true, pause the test until interrupted at shutdown, to allow debugging")
// A hook that test cases can call to obtain the shared database instance
// used for testing the sink. This is initialized in TestMain (see below).
testDB func() *sql.DB
)
const (
user = "postgres"
password = "secret"
port = "5432"
dsn = "postgres://%s:%s@localhost:%s/%s?sslmode=disable"
dbName = "postgres"
chainID = "test-chainID"
viewBlockEvents = "block_events"
viewTxEvents = "tx_events"
)
func TestMain(m *testing.M) {
flag.Parse()
// Set up docker and start a container running PostgreSQL.
pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL"))
if err != nil {
log.Fatalf("Creating docker pool: %v", err)
}
resource, err := pool.RunWithOptions(&dockertest.RunOptions{
Repository: "postgres",
Tag: "13",
Env: []string{
"POSTGRES_USER=" + user,
"POSTGRES_PASSWORD=" + password,
"POSTGRES_DB=" + dbName,
"listen_addresses = '*'",
},
ExposedPorts: []string{port},
}, func(config *docker.HostConfig) {
// set AutoRemove to true so that stopped container goes away by itself
config.AutoRemove = true
config.RestartPolicy = docker.RestartPolicy{
Name: "no",
}
})
if err != nil {
log.Fatalf("Starting docker pool: %v", err)
}
if *doPauseAtExit {
log.Print("Pause at exit is enabled, containers will not expire")
} else {
const expireSeconds = 60
_ = resource.Expire(expireSeconds)
log.Printf("Container expiration set to %d seconds", expireSeconds)
}
// Connect to the database, clear any leftover data, and install the
// indexing schema.
conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName)
var db *sql.DB
if err := pool.Retry(func() error {
sink, err := NewEventSink(conn, chainID)
if err != nil {
return err
}
db = sink.DB() // set global for test use
return db.Ping()
}); err != nil {
log.Fatalf("Connecting to database: %v", err)
}
if err := resetDatabase(db); err != nil {
log.Fatalf("Flushing database: %v", err)
}
sm, err := readSchema()
if err != nil {
log.Fatalf("Reading schema: %v", err)
} else if err := schema.NewMigrator().Apply(db, sm); err != nil {
log.Fatalf("Applying schema: %v", err)
}
// Set up the hook for tests to get the shared database handle.
testDB = func() *sql.DB { return db }
// Run the selected test cases.
code := m.Run()
// Clean up and shut down the database container.
if *doPauseAtExit {
log.Print("Testing complete, pausing for inspection. Send SIGINT to resume teardown")
waitForInterrupt()
log.Print("(resuming)")
}
log.Print("Shutting down database")
if err := pool.Purge(resource); err != nil {
log.Printf("WARNING: Purging pool failed: %v", err)
}
if err := db.Close(); err != nil {
log.Printf("WARNING: Closing database failed: %v", err)
}
os.Exit(code)
}
func TestType(t *testing.T) {
psqlSink := &EventSink{store: testDB(), chainID: chainID}
assert.Equal(t, indexer.PSQL, psqlSink.Type())
}
func TestIndexing(t *testing.T) {
t.Run("IndexBlockEvents", func(t *testing.T) {
indexer := &EventSink{store: testDB(), chainID: chainID}
require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader()))
verifyBlock(t, 1)
verifyBlock(t, 2)
verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(1) })
verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) })
verifyNotImplemented(t, "block search", func() (bool, error) {
v, err := indexer.SearchBlockEvents(context.Background(), nil)
return v != nil, err
})
require.NoError(t, verifyTimeStamp(tableBlocks))
// Attempting to reindex the same events should gracefully succeed.
require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader()))
})
t.Run("IndexTxEvents", func(t *testing.T) {
indexer := &EventSink{store: testDB(), chainID: chainID}
txResult := txResultWithEvents([]abci.Event{
makeIndexedEvent("account.number", "1"),
makeIndexedEvent("account.owner", "Ivan"),
makeIndexedEvent("account.owner", "Yulieta"),
{Type: "", Attributes: []abci.EventAttribute{{Key: "not_allowed", Value: "Vlad", Index: true}}},
})
require.NoError(t, indexer.IndexTxEvents([]*abci.TxResult{txResult}))
txr, err := loadTxResult(types.Tx(txResult.Tx).Hash())
require.NoError(t, err)
assert.Equal(t, txResult, txr)
require.NoError(t, verifyTimeStamp(tableTxResults))
require.NoError(t, verifyTimeStamp(viewTxEvents))
verifyNotImplemented(t, "getTxByHash", func() (bool, error) {
txr, err := indexer.GetTxByHash(types.Tx(txResult.Tx).Hash())
return txr != nil, err
})
verifyNotImplemented(t, "tx search", func() (bool, error) {
txr, err := indexer.SearchTxEvents(context.Background(), nil)
return txr != nil, err
})
// try to insert the duplicate tx events.
err = indexer.IndexTxEvents([]*abci.TxResult{txResult})
require.NoError(t, err)
})
}
func TestStop(t *testing.T) {
indexer := &EventSink{store: testDB()}
require.NoError(t, indexer.Stop())
}
// newTestBlockHeader constructs a fresh copy of a block header containing
// known test values to exercise the indexer.
func newTestBlockHeader() types.EventDataNewBlockHeader {
return types.EventDataNewBlockHeader{
Header: types.Header{Height: 1},
ResultBeginBlock: abci.ResponseBeginBlock{
Events: []abci.Event{
makeIndexedEvent("begin_event.proposer", "FCAA001"),
makeIndexedEvent("thingy.whatzit", "O.O"),
},
},
ResultEndBlock: abci.ResponseEndBlock{
Events: []abci.Event{
makeIndexedEvent("end_event.foo", "100"),
makeIndexedEvent("thingy.whatzit", "-.O"),
},
},
}
}
// readSchema loads the indexing database schema file
func readSchema() ([]*schema.Migration, error) {
const filename = "schema.sql"
contents, err := ioutil.ReadFile(filename)
if err != nil {
return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err)
}
return []*schema.Migration{{
ID: time.Now().Local().String() + " db schema",
Script: string(contents),
}}, nil
}
// resetDatabase drops all the data from the test database.
func resetDatabase(db *sql.DB) error {
_, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`)
if err != nil {
return fmt.Errorf("dropping tables: %v", err)
}
_, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`)
if err != nil {
return fmt.Errorf("dropping views: %v", err)
}
return nil
}
// txResultWithEvents constructs a fresh transaction result with fixed values
// for testing, that includes the specified events.
func txResultWithEvents(events []abci.Event) *abci.TxResult {
return &abci.TxResult{
Height: 1,
Index: 0,
Tx: types.Tx("HELLO WORLD"),
Result: abci.ResponseDeliverTx{
Data: []byte{0},
Code: abci.CodeTypeOK,
Log: "",
Events: events,
},
}
}
func loadTxResult(hash []byte) (*abci.TxResult, error) {
hashString := fmt.Sprintf("%X", hash)
var resultData []byte
if err := testDB().QueryRow(`
SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
`, hashString).Scan(&resultData); err != nil {
return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err)
}
txr := new(abci.TxResult)
if err := proto.Unmarshal(resultData, txr); err != nil {
return nil, fmt.Errorf("unmarshaling txr: %v", err)
}
return txr, nil
}
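// verifyTimeStamp checks that the named table or view contains at least one
// row whose created_at timestamp falls within the last couple of seconds.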
func verifyTimeStamp(tableName string) error {
return testDB().QueryRow(fmt.Sprintf(`
SELECT DISTINCT %[1]s.created_at
FROM %[1]s
WHERE %[1]s.created_at >= $1;
`, tableName), time.Now().Add(-2*time.Second)).Err()
}
func verifyBlock(t *testing.T, height int64) {
// Check that the blocks table contains an entry for this height.
if err := testDB().QueryRow(`
SELECT height FROM `+tableBlocks+` WHERE height = $1;
`, height).Err(); err == sql.ErrNoRows {
t.Errorf("No block found for height=%d", height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
// Verify the presence of begin_block and end_block events.
if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
if err := testDB().QueryRow(`
SELECT type, height, chain_id FROM `+viewBlockEvents+`
WHERE height = $1 AND type = $2 AND chain_id = $3;
`, height, types.EventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows {
t.Errorf("No %q event found for height=%d", types.EventTypeEndBlock, height)
} else if err != nil {
t.Fatalf("Database query failed: %v", err)
}
}
// verifyNotImplemented calls f and verifies that it returns both a
// false-valued flag and a non-nil error whose string matches the expected
// "not supported" message with the label prefixed.
func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) {
t.Helper()
t.Logf("Verifying that %q reports it is not implemented", label)
want := label + " is not supported via the postgres event sink"
ok, err := f()
assert.False(t, ok)
require.NotNil(t, err)
assert.Equal(t, want, err.Error())
}
// waitForInterrupt blocks until a SIGINT is received by the process.
func waitForInterrupt() {
ch := make(chan os.Signal, 1)
signal.Notify(ch, os.Interrupt)
<-ch
}
|
[
"\"DOCKER_URL\""
] |
[] |
[
"DOCKER_URL"
] |
[]
|
["DOCKER_URL"]
|
go
| 1 | 0 | |
tools/visualize_actmap.py
|
"""Visualizes CNN activation maps to see where the CNN focuses on to extract features.
Reference:
- Zagoruyko and Komodakis. Paying more attention to attention: Improving the
performance of convolutional neural networks via attention transfer. ICLR, 2017
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
"""
import numpy as np
import os.path as osp
import os
import argparse
import cv2
import torch
from torch.nn import functional as F
import torchreid
from torchreid.utils import (
check_isfile, mkdir_if_missing, load_pretrained_weights
)
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
GRID_SPACING = 10
@torch.no_grad()
def visactmap(
model,
test_loader,
save_dir,
width,
height,
use_gpu,
img_mean=None,
img_std=None
):
if img_mean is None or img_std is None:
# use imagenet mean and std
img_mean = IMAGENET_MEAN
img_std = IMAGENET_STD
model.eval()
for target in list(test_loader.keys()):
data_loader = test_loader[target]['query'] # only process query images
# original images and activation maps are saved individually
actmap_dir = osp.join(save_dir, 'actmap_' + target)
mkdir_if_missing(actmap_dir)
print('Visualizing activation maps for {} ...'.format(target))
for batch_idx, data in enumerate(data_loader):
imgs, paths = data[0], data[3]
if use_gpu:
imgs = imgs.cuda()
# forward to get convolutional feature maps
try:
outputs = model(imgs)
outputs = outputs[3]['after'][0]
except TypeError:
raise TypeError(
'forward() got unexpected keyword argument "return_featuremaps". '
'Please add return_featuremaps as an input argument to forward(). When '
'return_featuremaps=True, return feature maps only.'
)
if outputs.dim() != 4:
raise ValueError(
'The model output is supposed to have '
'shape of (b, c, h, w), i.e. 4 dimensions, but got {} dimensions. '
'Please make sure you set the model output at eval mode '
'to be the last convolutional feature maps'.format(
outputs.dim()
)
)
# compute activation maps
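# (sum of squared channel activations, then L2-normalized over spatial positions)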
outputs = (outputs**2).sum(1)
b, h, w = outputs.size()
outputs = outputs.view(b, h * w)
outputs = F.normalize(outputs, p=2, dim=1)
outputs = outputs.view(b, h, w)
if use_gpu:
imgs, outputs = imgs.cpu(), outputs.cpu()
for j in range(outputs.size(0)):
# get image name
p = paths[j]
imname = p.split('/')
imname = imname[-1]
imdir = p.split('/')
imdir = imdir[-2]
# RGB image
img = imgs[j, ...]
for t, m, s in zip(img, img_mean, img_std):
t.mul_(s).add_(m).clamp_(0, 1)
img_np = np.uint8(np.floor(img.numpy() * 255))
img_np = img_np.transpose((1, 2, 0)) # (c, h, w) -> (h, w, c)
# activation map
am = outputs[j, ...].numpy()
am = cv2.resize(am, (width, height))
am = 255 * (am - np.min(am)) / (
np.max(am) - np.min(am) + 1e-12
)
am = np.uint8(np.floor(am))
am = cv2.applyColorMap(am, cv2.COLORMAP_JET)
# overlapped
overlapped = img_np*0.3 + am*0.7
overlapped[overlapped > 255] = 255
overlapped = overlapped.astype(np.uint8)
# save images in a single figure (add white spacing between images)
# from left to right: original image, activation map, overlapped image
grid_img = 255 * np.ones(
(height, 3*width + 2*GRID_SPACING, 3), dtype=np.uint8
)
grid_img[:, :width, :] = img_np[:, :, ::-1]
grid_img[:,
width + GRID_SPACING:2*width + GRID_SPACING, :] = am
grid_img[:, 2*width + 2*GRID_SPACING:, :] = overlapped
if not osp.exists(osp.join(actmap_dir, imdir)):
mkdir_if_missing(osp.join(actmap_dir, imdir))
cv2.imwrite(osp.join(actmap_dir, imdir, imname), grid_img)
if (batch_idx+1) % 10 == 0:
print(
'- done batch {}/{}'.format(
batch_idx + 1, len(data_loader)
)
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--root', type=str, default='/media/ddj2/ce611f70-968b-4316-9547-9bc9cf931d32/V20200108/zhejiang_train')
parser.add_argument('-d', '--dataset', type=str, default='rock_dataset')
parser.add_argument('-m', '--model', type=str, default='abd_resnet')
parser.add_argument('--weights', type=str)
parser.add_argument('--save-dir', type=str, default='log/resnet50_cam')
parser.add_argument('--height', type=int, default=672)
parser.add_argument('--width', type=int, default=672)
args = parser.parse_args()
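# restrict CUDA to a single GPU (device index 3)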
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
use_gpu = torch.cuda.is_available()
torchreid.data.register_image_dataset('rock_dataset', torchreid.data.datasets.image.rock_dataset.RockDataSet)
datamanager = torchreid.data.ImageDataManager(
root=args.root,
sources=args.dataset,
height=args.height,
width=args.width,
batch_size_train=4,
batch_size_test=4,
transforms=None,
train_sampler='SequentialSampler'
)
test_loader = datamanager.test_loader
model = torchreid.models.build_model(
name=args.model,
num_classes=datamanager.num_train_pids,
use_gpu=use_gpu
)
if use_gpu:
model = model.cuda()
if args.weights and check_isfile(args.weights):
load_pretrained_weights(model, args.weights)
visactmap(
model, test_loader, args.save_dir, args.width, args.height, use_gpu
)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
pkg/sync/sync.go
|
package sync
import (
"archive/tar"
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/convox/changes"
"github.com/fsouza/go-dockerclient"
)
type Stream chan string
type Sync struct {
Container string
Local string
Remote string
docker *docker.Client
lock sync.Mutex
ignores []string
incoming chan changes.Change
outgoing chan changes.Change
incomingBlocks map[string]int
outgoingBlocks map[string]int
}
type execState struct {
Running bool
Error error
ExitCode int
}
func NewSync(container, local, remote string, ignores []string) (*Sync, error) {
l, err := filepath.Abs(local)
if err != nil {
return nil, err
}
sync := &Sync{
Container: container,
Local: l,
Remote: remote,
ignores: ignores,
}
sync.docker, _ = docker.NewClientFromEnv()
sync.incoming = make(chan changes.Change, 1000)
sync.outgoing = make(chan changes.Change, 1000)
sync.incomingBlocks = make(map[string]int)
sync.outgoingBlocks = make(map[string]int)
return sync, nil
}
func (s *Sync) Contains(t Sync) bool {
if !filepath.HasPrefix(t.Local, s.Local) {
return false
}
lr, err := filepath.Rel(s.Local, t.Local)
if err != nil {
return false
}
rr, err := filepath.Rel(s.Remote, t.Remote)
if err != nil {
return false
}
return lr == rr
}
func (s *Sync) Start(st Stream) error {
s.waitForContainer()
if !filepath.IsAbs(s.Remote) {
wdb, err := Docker("inspect", "--format", "'{{.Config.WorkingDir}}'", s.Container).Output()
if err != nil {
return err
}
swdb := string(wdb)
swdb = strings.TrimSpace(swdb)
swdb = strings.TrimPrefix(swdb, "'")
swdb = strings.TrimSuffix(swdb, "'")
s.Remote = filepath.Join(swdb, s.Remote)
}
go s.watchIncoming(st)
go s.watchOutgoing(st)
incoming := []changes.Change{}
outgoing := []changes.Change{}
tick := time.Tick(1 * time.Second)
for {
select {
case c := <-s.incoming:
incoming = append(incoming, c)
case c := <-s.outgoing:
outgoing = append(outgoing, c)
case <-tick:
if len(incoming) > 0 {
a, r := changes.Partition(incoming)
s.syncIncomingAdds(a, st)
s.syncIncomingRemoves(r, st)
incoming = []changes.Change{}
}
if len(outgoing) > 0 {
a, r := changes.Partition(outgoing)
s.syncOutgoingAdds(a, st)
s.syncOutgoingRemoves(r, st)
outgoing = []changes.Change{}
}
}
}
return nil
}
func (s *Sync) syncIncomingAdds(adds []changes.Change, st Stream) {
if len(adds) == 0 {
return
}
tar := []string{"tar", "czf", "-"}
// docker exec can fail if the argument list is too long
// limit to 2000 files per exec
for i := 0; i < len(adds); i += 2000 {
max := i + 2000
if max > len(adds) {
max = len(adds)
}
cmd := tar
batch := adds[i:max]
for _, a := range batch {
cmd = append(cmd, filepath.Join(s.Remote, a.Path))
}
if err := s.execTar(cmd, st); err == nil {
st <- fmt.Sprintf("%d files downloaded", len(batch))
} else {
st <- fmt.Sprintf("error: %s", err)
return
}
}
if os.Getenv("CONVOX_DEBUG") != "" {
for _, a := range adds {
st <- fmt.Sprintf("<- %s", filepath.Join(a.Base, a.Path))
}
}
}
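// execTar runs a tar command inside the container and streams the resulting
// tarball back to the local filesystem, retrying a few times if the exec
// appears stalled.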
func (s *Sync) execTar(cmd []string, st Stream) error {
retries := 0
success := false
for {
exec, err := s.docker.CreateExec(docker.CreateExecOptions{
AttachStdout: true,
Container: s.Container,
Cmd: cmd,
})
if err != nil {
return err
}
r, w := io.Pipe()
cw, err := s.docker.StartExecNonBlocking(exec.ID, docker.StartExecOptions{
OutputStream: w,
})
if err != nil {
if cw != nil {
cw.Close()
}
return err
}
done := make(chan struct{})
state := make(chan execState)
wait := make(chan error)
go func() {
wait <- cw.Wait()
}()
go tgzReader(s, r, st)
go inspectExec(exec.ID, s, state, done)
select {
case err := <-wait:
if err != nil {
return err
}
close(done)
success = true
case es := <-state:
cw.Close()
if retries < 3 {
retries++
if es.Error == nil && es.ExitCode == 0 {
success = true
}
} else {
if es.Error != nil {
return es.Error
}
return fmt.Errorf("failed to sync after retries")
}
}
if success {
break
}
}
return nil
}
func (s *Sync) syncIncomingRemoves(removes []changes.Change, st Stream) {
// do not sync removes out from the container for safety
}
func (s *Sync) syncOutgoingAdds(adds []changes.Change, st Stream) {
if len(adds) == 0 {
return
}
var buf bytes.Buffer
tgz := tar.NewWriter(&buf)
for _, a := range adds {
local := filepath.Join(a.Base, a.Path)
info, err := os.Stat(local)
if err != nil {
continue
}
remote := filepath.Join(s.Remote, a.Path)
s.lock.Lock()
s.incomingBlocks[a.Path]++
s.lock.Unlock()
tgz.WriteHeader(&tar.Header{
Name: remote,
Mode: 0644,
Size: info.Size(),
ModTime: info.ModTime(),
})
fd, err := os.Open(local)
if err != nil {
st <- fmt.Sprintf("error: %s", err)
continue
}
io.Copy(tgz, fd)
fd.Close()
}
st <- fmt.Sprintf("%d files uploaded", len(adds))
if os.Getenv("CONVOX_DEBUG") != "" {
for _, a := range adds {
st <- fmt.Sprintf("%s -> %s:%s", filepath.Join(a.Base, a.Path), s.Container, filepath.Join(s.Remote, a.Path))
}
}
tgz.Close()
err := s.docker.UploadToContainer(s.Container, docker.UploadToContainerOptions{
InputStream: &buf,
Path: "/",
})
if err != nil {
st <- fmt.Sprintf("error: %s", err)
}
}
func (s *Sync) syncOutgoingRemoves(removes []changes.Change, st Stream) {
if len(removes) == 0 {
return
}
cmd := []string{"rm", "-f"}
for _, r := range removes {
cmd = append(cmd, filepath.Join(s.Remote, r.Path))
}
res, err := s.docker.CreateExec(docker.CreateExecOptions{
Container: s.Container,
Cmd: cmd,
})
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
err = s.docker.StartExec(res.ID, docker.StartExecOptions{
Detach: true,
})
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
st <- fmt.Sprintf("%d files removed", len(removes))
}
func (s *Sync) uploadChangesDaemon(st Stream) {
var buf bytes.Buffer
tgz := tar.NewWriter(&buf)
data, err := Asset("changed")
if err != nil {
st <- fmt.Sprintf("error: %s", err)
}
tgz.WriteHeader(&tar.Header{
Name: "changed",
Mode: 0755,
Size: int64(len(data)),
})
tgz.Write(data)
tgz.Close()
err = s.docker.UploadToContainer(s.Container, docker.UploadToContainerOptions{
InputStream: &buf,
Path: "/",
})
if err != nil {
st <- fmt.Sprintf("error: %s", err)
}
}
func (s *Sync) waitForContainer() {
for {
if res, err := s.docker.InspectContainer(s.Container); err == nil && res.State.Running {
return
}
time.Sleep(1 * time.Second)
}
}
func (s *Sync) watchIncoming(st Stream) {
s.uploadChangesDaemon(st)
res, err := s.docker.CreateExec(docker.CreateExecOptions{
AttachStdout: true,
Container: s.Container,
Cmd: []string{"/changed", s.Remote},
})
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
r, w := io.Pipe()
go func() {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
text := scanner.Text()
parts := strings.SplitN(text, "|", 3)
if len(parts) != 3 {
continue
}
// skip incoming removes for now. they make sync hard and not sure we want
// the container deleting local files anyway
if parts[0] == "remove" {
continue
}
s.lock.Lock()
if s.incomingBlocks[parts[2]] > 0 {
s.incomingBlocks[parts[2]]--
s.lock.Unlock()
} else {
s.lock.Unlock()
s.incoming <- changes.Change{
Operation: parts[0],
Base: parts[1],
Path: parts[2],
}
}
}
if err := scanner.Err(); err != nil {
st <- fmt.Sprintf("error: %s", err)
}
}()
err = s.docker.StartExec(res.ID, docker.StartExecOptions{
OutputStream: w,
})
if err != nil {
st <- fmt.Sprintf("error: %s", err)
}
}
func (s *Sync) watchOutgoing(st Stream) {
ch := make(chan changes.Change, 1)
go func() {
if err := changes.Watch(s.Local, ch, changes.WatchOptions{Ignores: s.ignores}); err != nil {
st <- fmt.Sprintf("error: %s", err)
}
}()
for c := range ch {
s.lock.Lock()
if s.outgoingBlocks[c.Path] > 0 {
s.outgoingBlocks[c.Path]--
s.lock.Unlock()
} else {
s.lock.Unlock()
s.outgoing <- c
}
}
}
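// inspectExec checks the state of a docker exec after a short delay (unless
// the exec already completed) so execTar can detect and retry a stalled transfer.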
func inspectExec(id string, s *Sync, state chan execState, done chan struct{}) {
es := execState{}
select {
case <-time.After(5 * time.Second):
i, err := s.docker.InspectExec(id)
if err != nil {
es.Error = err
state <- es
return
}
es.Error = nil
es.ExitCode = i.ExitCode
es.Running = i.Running
state <- es
case <-done:
return
}
}
func tgzReader(s *Sync, r io.Reader, st Stream) {
gz, err := gzip.NewReader(r)
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
tr := tar.NewReader(gz)
for {
header, err := tr.Next()
if err == io.EOF {
break
}
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
switch header.Typeflag {
case tar.TypeReg:
rel, err := filepath.Rel(s.Remote, filepath.Join("/", header.Name))
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
local := filepath.Join(s.Local, rel)
s.lock.Lock()
s.outgoingBlocks[rel]++
s.lock.Unlock()
err = os.MkdirAll(filepath.Dir(local), 0755)
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
lf, err := os.Create(local)
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
_, err = io.Copy(lf, tr)
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
err = lf.Sync()
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
err = lf.Close()
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
err = os.Chmod(local, os.FileMode(header.Mode))
if err != nil {
st <- fmt.Sprintf("error: %s", err)
return
}
}
}
}
|
[
"\"CONVOX_DEBUG\"",
"\"CONVOX_DEBUG\""
] |
[] |
[
"CONVOX_DEBUG"
] |
[]
|
["CONVOX_DEBUG"]
|
go
| 1 | 0 | |
metrics_test.go
|
package devstats
import (
"database/sql"
"fmt"
"io/ioutil"
"math/rand"
"os"
"reflect"
"strconv"
"strings"
"testing"
"time"
lib "github.com/cncf/devstatscode"
testlib "github.com/cncf/devstatscode/test"
yaml "gopkg.in/yaml.v2"
)
// metricTestCase - used to test single metric
// Setups are called to create database entries for metric to return results
// metric - metrics/{{metric}}.sql file is used to run metric, inside file {{from}} and {{to}} are replaced with from, to
// from, to - used as data range when calling metric
// expected - we're expecting this result from metric, it can either be a single row with single column numeric value
// or multiple rows, each containing metric name and its numeric value
type metricTestCase struct {
Setups []reflect.Value
Metric string `yaml:"metric"`
SQL string `yaml:"sql"` // When empty or not specified, 'Metric' is used as SQL name (default)
From time.Time `yaml:"from"` // used by non-histogram metrics
To time.Time `yaml:"to"` // used by non-histogram metrics
Period string `yaml:"period"` // used by histogram metrics
N int `yaml:"n"` // used by metrics that use moving periods
DebugDB bool `yaml:"debug"` // if set, test will not drop database at the end and will return after such test, so You can run metric manually via `runq` or directly on DB
Replaces [][]string `yaml:"replaces"`
Expected [][]interface{} `yaml:"expected"`
SetupNames []string `yaml:"additional_setup_funcs"`
SetupArgs []string `yaml:"additional_setup_args"`
DataName string `yaml:"data"`
}
// Tests set for single project
type projectMetricTestCase struct {
ProjectName string `yaml:"project_name"`
Tests []metricTestCase `yaml:"tests"`
}
// Test YAML struct (for all projects)
type metricTests struct {
Projects []projectMetricTestCase `yaml:"projects"`
Data map[string]map[string][][]interface{} `yaml:"data"`
}
// Tests all metrics
func TestMetrics(t *testing.T) {
rand.Seed(time.Now().UnixNano())
// Environment context parse
var ctx lib.Ctx
ctx.Init()
ctx.TestMode = true
// Do not allow to run tests in "gha" database
if ctx.PgDB != "dbtest" {
t.Errorf("tests can only be run on \"dbtest\" database")
return
}
// We need to know project to test
if ctx.Project == "" {
t.Errorf("you need to set project via GHA2DB_PROJECT=project_name (one of projects from projects.yaml)")
}
// Load test cases
var tests metricTests
data, err := lib.ReadFile(&ctx, ctx.TestsYaml)
if err != nil {
lib.FatalOnError(err)
return
}
lib.FatalOnError(yaml.Unmarshal(data, &tests))
// Read per project test cases
testCases := []metricTestCase{}
for _, project := range tests.Projects {
if project.ProjectName == ctx.Project {
testCases = project.Tests
break
}
}
if len(testCases) < 1 {
t.Errorf("no tests defined for '%s' project", ctx.Project)
}
// Only selected metrics?
testMetrics := os.Getenv("TEST_METRICS")
selected := false
selectedMetrics := make(map[string]struct{})
if testMetrics != "" {
selected = true
ary := strings.Split(testMetrics, ",")
for _, m := range ary {
selectedMetrics[m] = struct{}{}
found := false
for _, test := range testCases {
if test.Metric == m {
found = true
break
}
}
if !found {
t.Errorf("no such test case '%s'", m)
}
}
}
// Execute test cases
for index, test := range testCases {
if selected {
_, ok := selectedMetrics[test.Metric]
if !ok {
continue
}
}
prepareMetricTestCase(&test)
got, err := executeMetricTestCase(&test, &tests, &ctx)
if err != nil {
t.Errorf("test number %d (%s): %v", index+1, test.Metric, err.Error())
}
if !testlib.CompareSlices2D(test.Expected, got) {
t.Errorf("test number %d (%s), expected:\n%+v\n%+v\ngot test case: %+v", index+1, test.Metric, test.Expected, got, test)
}
if test.DebugDB {
t.Errorf("returning due to debugDB mode")
return
}
}
}
// This prepares raw YAML metric test to be executed:
// Binds additional setup function(s)
// if test uses "additional_setup_funcs", "additional_setup_args" section(s)
func prepareMetricTestCase(testMetric *metricTestCase) {
if len(testMetric.SetupNames) < 1 {
return
}
reflectTestMetric := reflect.ValueOf(*testMetric)
for _, setupName := range testMetric.SetupNames {
method := reflectTestMetric.MethodByName(setupName)
testMetric.Setups = append(testMetric.Setups, method)
}
}
// This prepares raw metric test to be executed:
// Generates data if test uses "data" section
func dataForMetricTestCase(con *sql.DB, ctx *lib.Ctx, testMetric *metricTestCase, tests *metricTests) (err error) {
if testMetric.DataName != "" {
data, ok := tests.Data[testMetric.DataName]
if !ok {
err = fmt.Errorf("No data key for \"%s\" in \"data\" section of \"%s\"", testMetric.DataName, ctx.TestsYaml)
return
}
events, ok := data["events"]
if ok {
// Add events
for _, event := range events {
err = addEvent(con, ctx, event...)
if err != nil {
return
}
}
}
repos, ok := data["repos"]
if ok {
// Add repos
for _, repo := range repos {
err = addRepo(con, ctx, repo...)
if err != nil {
return
}
}
}
iels, ok := data["issues_events_labels"]
if ok {
for _, iel := range iels {
err = addIssueEventLabel(con, ctx, iel...)
if err != nil {
return
}
}
}
texts, ok := data["texts"]
if ok {
textsAppend, okAppend := data["texts_append"]
for idx, text := range texts {
if okAppend {
text = append(text, textsAppend[idx%len(textsAppend)]...)
}
err = addText(con, ctx, text...)
if err != nil {
return
}
}
}
prs, ok := data["prs"]
if ok {
prsAppend, okAppend := data["prs_append"]
for idx, pr := range prs {
if okAppend {
pr = append(pr, prsAppend[idx%len(prsAppend)]...)
}
err = addPR(con, ctx, pr...)
if err != nil {
return
}
}
}
issuesLabels, ok := data["issues_labels"]
if ok {
for _, issueLabel := range issuesLabels {
err = addIssueLabel(con, ctx, issueLabel...)
if err != nil {
return
}
}
}
issues, ok := data["issues"]
if ok {
issuesAppend, okAppend := data["issues_append"]
for idx, issue := range issues {
if okAppend {
issue = append(issue, issuesAppend[idx%len(issuesAppend)]...)
}
err = addIssue(con, ctx, issue...)
if err != nil {
return
}
}
}
comments, ok := data["comments"]
if ok {
commentsAppend, okAppend := data["comments_append"]
for idx, comment := range comments {
if okAppend {
comment = append(comment, commentsAppend[idx%len(commentsAppend)]...)
}
err = addComment(con, ctx, comment...)
if err != nil {
return
}
}
}
commits, ok := data["commits"]
if ok {
commitsAppend, okAppend := data["commits_append"]
for idx, commit := range commits {
if okAppend {
commit = append(commit, commitsAppend[idx%len(commitsAppend)]...)
}
err = addCommit(con, ctx, commit...)
if err != nil {
return
}
}
}
affiliations, ok := data["affiliations"]
if ok {
for _, affiliation := range affiliations {
err = addActorAffiliation(con, ctx, affiliation...)
if err != nil {
return
}
}
}
actors, ok := data["actors"]
if ok {
actorsAppend, okAppend := data["actors_append"]
for idx, actor := range actors {
if okAppend {
actor = append(actor, actorsAppend[idx%len(actorsAppend)]...)
}
err = addActor(con, ctx, actor...)
if err != nil {
return
}
}
}
companies, ok := data["companies"]
if ok {
for _, company := range companies {
err = addCompany(con, ctx, company...)
if err != nil {
return
}
}
}
iprs, ok := data["issues_prs"]
if ok {
for _, ipr := range iprs {
err = addIssuePR(con, ctx, ipr...)
if err != nil {
return
}
}
}
payloads, ok := data["payloads"]
if ok {
for _, payload := range payloads {
err = addPayload(con, ctx, payload...)
if err != nil {
return
}
}
}
forkees, ok := data["forkees"]
if ok {
for _, forkee := range forkees {
err = addForkee(con, ctx, forkee...)
if err != nil {
return
}
}
}
ecfs, ok := data["events_commits_files"]
if ok {
for _, ecf := range ecfs {
err = addEventCommitFile(con, ctx, ecf...)
if err != nil {
return
}
}
}
milestones, ok := data["milestones"]
if ok {
for _, milestone := range milestones {
err = addMilestone(con, ctx, milestone...)
if err != nil {
return
}
}
}
}
return
}
// This executes test of single metric
// All metric data is defined in "testMetric" argument
// Single metric test is dropping & creating database from scratch (to avoid junky database)
// It also creates full DB structure - without indexes - they're not needed in
// small databases - like the ones created by test coverage tools
func executeMetricTestCase(testMetric *metricTestCase, tests *metricTests, ctx *lib.Ctx) (result [][]interface{}, err error) {
// Drop database if exists
lib.DropDatabaseIfExists(ctx)
// Create database if needed
createdDatabase := lib.CreateDatabaseIfNeededExtended(
ctx,
"lc_collate = 'en_US.UTF-8' lc_ctype = 'en_US.UTF-8' encoding = 'UTF8' template = 'template0'",
)
if !createdDatabase {
err = fmt.Errorf("failed to create database \"%s\"", ctx.PgDB)
return
}
// Drop database after tests
if !testMetric.DebugDB {
// Drop database after tests
defer func() { lib.DropDatabaseIfExists(ctx) }()
}
// Connect to Postgres DB
c := lib.PgConn(ctx)
defer func() { lib.FatalOnError(c.Close()) }()
// Create DB structure
lib.Structure(ctx)
// Setup test data
err = dataForMetricTestCase(c, ctx, testMetric, tests)
if err != nil {
return
}
// Execute metrics additional setup(s) function
lenArgs := len(testMetric.SetupArgs)
for index, setup := range testMetric.Setups {
setupArgs := ""
if index < lenArgs {
setupArgs = testMetric.SetupArgs[index]
}
args := []reflect.Value{reflect.ValueOf(c), reflect.ValueOf(ctx), reflect.ValueOf(setupArgs), reflect.ValueOf(testMetric.Replaces)}
switch ret := setup.Call(args)[0].Interface().(type) {
case error:
err = ret
}
if err != nil {
return
}
}
// Execute metric and get its results
result, err = executeMetric(
c,
ctx,
testMetric.Metric,
testMetric.SQL,
testMetric.From,
testMetric.To,
testMetric.Period,
testMetric.N,
testMetric.Replaces,
)
return
}
// random string
func randString() string {
return fmt.Sprintf("%d", rand.Uint64())
}
// execute metric metrics/{{metric}}.sql with {{from}} and {{to}} replaced by from/YMDHMS, to/YMDHMS
// end result slice of slices of any type
func executeMetric(c *sql.DB, ctx *lib.Ctx, metric, msql string, from, to time.Time, period string, n int, replaces [][]string) (result [][]interface{}, err error) {
// Metric file name
if msql == "" {
msql = metric
}
sqlFile := fmt.Sprintf("metrics/%s/%s.sql", ctx.Project, msql)
// Read and transform SQL file.
bytes, err := lib.ReadFile(ctx, sqlFile)
if err != nil {
return
}
sqlQuery := string(bytes)
if from.Year() >= 1980 {
sqlQuery = strings.Replace(sqlQuery, "{{from}}", lib.ToYMDHMSDate(from), -1)
}
if to.Year() >= 1980 {
sqlQuery = strings.Replace(sqlQuery, "{{to}}", lib.ToYMDHMSDate(to), -1)
}
sqlQuery = strings.Replace(sqlQuery, "{{period}}", period, -1)
sqlQuery = strings.Replace(sqlQuery, "{{n}}", strconv.Itoa(n)+".0", -1)
sqlQuery = strings.Replace(
sqlQuery,
"{{exclude_bots}}",
"not like all(array['googlebot', 'rktbot', 'coveralls', 'k8s-%', '%-bot', '%-robot', "+
"'bot-%', 'robot-%', '%[bot]%', '%-jenkins', '%-ci%bot', '%-testing', 'codecov-%'])",
-1,
)
for _, replace := range replaces {
if len(replace) != 2 {
err = fmt.Errorf("replace(s) should have length 2, invalid: %+v", replace)
return
}
sqlQuery = strings.Replace(sqlQuery, replace[0], replace[1], -1)
}
qrFrom := ""
qrTo := ""
if from.Year() >= 1980 {
qrFrom = lib.ToYMDHMSDate(from)
}
if to.Year() >= 1980 {
qrTo = lib.ToYMDHMSDate(to)
}
sHours := ""
sqlQuery, sHours = lib.PrepareQuickRangeQuery(sqlQuery, period, qrFrom, qrTo)
sqlQuery = strings.Replace(sqlQuery, "{{range}}", sHours, -1)
sqlQuery = strings.Replace(sqlQuery, "{{project_scale}}", "1.0", -1)
sqlQuery = strings.Replace(sqlQuery, "{{rnd}}", randString(), -1)
// Execute SQL
rows, err := lib.QuerySQL(c, ctx, sqlQuery)
if err != nil {
lib.Printf("Failed: metric: %s, sql: %s\n", metric, msql)
lib.FatalOnError(err)
}
defer func() { lib.FatalOnError(rows.Close()) }()
	// Now read an unknown number of rows, with unknown column types
columns, err := rows.Columns()
if err != nil {
return
}
// Vals to hold any type as []interface{}
vals := make([]interface{}, len(columns))
for i := range columns {
vals[i] = new(sql.RawBytes)
}
// Get results into slices of slices of any type
var results [][]interface{}
for rows.Next() {
err = rows.Scan(vals...)
if err != nil {
return
}
// We need to iterate row and get columns types
rowSlice := []interface{}{}
for _, val := range vals {
var value interface{}
if val != nil {
value = string(*val.(*sql.RawBytes))
iValue, err := strconv.Atoi(value.(string))
if err == nil {
value = iValue
}
}
rowSlice = append(rowSlice, value)
}
results = append(results, rowSlice)
}
err = rows.Err()
if err != nil {
return
}
result = results
return
}
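// Example (illustrative only): a metric SQL containing
//   select count(*) from gha_events where created_at >= '{{from}}' and created_at < '{{to}}'
// executed with from=2017-09-01 and to=2017-10-01 has both placeholders replaced by their
// YMDHMS dates, with {{period}}, {{n}}, {{exclude_bots}}, {{range}} and the other markers
// substituted as shown above, before the query is run.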
// Add event
// eid, etype, aid, rid, public, created_at, aname, rname, orgid
func addEvent(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 9 {
err = fmt.Errorf("addEvent: expects 9 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_events("+
"id, type, actor_id, repo_id, public, created_at, "+
"dup_actor_login, dup_repo_name, org_id) "+lib.NValues(9),
args...,
)
return
}
// Add repo
// id, name, org_id, org_login, repo_group
func addRepo(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 5 {
err = fmt.Errorf("addRepo: expects 5 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_repos(id, name, org_id, org_login, repo_group) "+lib.NValues(5),
args...,
)
return
}
// Add forkee
// forkee_id, event_id, name, full_name, owner_id, created_at, updated_at
// org, stargazers/watchers, forks, open_issues,
// actor_id, actor_login, repo_id, repo_name, type, owner_login
func addForkee(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 17 {
err = fmt.Errorf("addForkee: expects 17 variadic parameters, got %d %+v", len(args), args)
return
}
newArgs := lib.AnyArray{
args[0], // forkee_id
args[1], // event_id
args[2], // name
args[3], // full_name
args[4], // owner_id
"description",
false, // fork
args[5], // created_at
args[6], // updated_at
time.Now(), // pushed_at
"www.homepage.com",
1, // size
"Golang", // language
args[7], // org
args[8], // stargazers
true, // has_issues
nil, // has_projects
true, // has_downloads
true, // has_wiki
nil, // has_pages
args[9], // forks
"master", // default_branch
args[10], // open_issues
args[8], // watchers
false, // private
args[11], // dup_actor_id
args[12], // dup_actor_login
args[13], // dup_repo_id
args[14], // dup_repo_name
args[15], // dup_type
args[5], // dup_created_at
args[16], // dup_owner_login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_forkees("+
"id, event_id, name, full_name, owner_id, description, fork, "+
"created_at, updated_at, pushed_at, homepage, size, language, organization, "+
"stargazers_count, has_issues, has_projects, has_downloads, "+
"has_wiki, has_pages, forks, default_branch, open_issues, watchers, public, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_owner_login) "+lib.NValues(32),
newArgs...,
)
return
}
// Add company
// name
func addCompany(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 1 {
err = fmt.Errorf("addCompany: expects 1 variadic parameter, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_companies(name) "+lib.NValues(1),
args...,
)
return
}
// Add actor
// id, login, name, country_id, country_name, tz, tz_offset, sex, sex_prob, age
func addActor(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 10 {
err = fmt.Errorf("addActor: expects 10 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_actors(id, login, name, country_id, country_name, tz, tz_offset, sex, sex_prob, age) "+lib.NValues(10),
args...,
)
return
}
// Add actor affiliation
// actor_id, company_name, original_company_name, dt_from, dt_to
func addActorAffiliation(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 5 {
err = fmt.Errorf("addActorAffiliation: expects 5 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_actors_affiliations(actor_id, company_name, original_company_name, dt_from, dt_to) "+lib.NValues(5),
args...,
)
return
}
// Add issue event label
// iid, eid, lid, lname, created_at, repo_id, repo_name, actor_id, actor_login, type, issue_number
func addIssueEventLabel(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 11 {
err = fmt.Errorf("addIssueEventLabel: expects 11 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_issues_events_labels("+
"issue_id, event_id, label_id, label_name, created_at, "+
"repo_id, repo_name, actor_id, actor_login, type, issue_number"+
") "+lib.NValues(11),
args...,
)
return
}
// Add events commits files
// sha, eid, path, size, dt, repo_group,
// dup_repo_id, dup_repo_name, dup_type, dup_created_at
func addEventCommitFile(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 10 {
err = fmt.Errorf("addEventCommitFile: expects 10 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_events_commits_files("+
"sha, event_id, path, size, dt, repo_group, "+
"dup_repo_id, dup_repo_name, dup_type, dup_created_at, ext"+
") values($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, "+
"regexp_replace(lower($3), '^.*\\.', ''))",
args...,
)
return
}
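// Note: the "ext" column is derived in SQL via regexp_replace(lower($3), '^.*\.', ''),
// so, for example, a path of "cmd/main.go" is stored with ext "go".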
// Add issue label
// iid, eid, lid, actor_id, actor_login, repo_id, repo_name,
// ev_type, ev_created_at, issue_number, label_name
func addIssueLabel(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 11 {
err = fmt.Errorf("addIssueLabel: expects 11 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_issues_labels(issue_id, event_id, label_id, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_issue_number, dup_label_name"+
") "+lib.NValues(11),
args...,
)
return
}
// Add text
// eid, body, created_at
// repo_id, repo_name, actor_id, actor_login, type
func addText(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 8 {
err = fmt.Errorf("addText: expects 8 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_texts("+
"event_id, body, created_at, "+
"repo_id, repo_name, actor_id, actor_login, type"+
") "+lib.NValues(8),
args...,
)
return
}
// Add commit
// sha, event_id, author_name, encrypted_email, message, dup_actor_id, dup_actor_login,
// dup_repo_id, dup_repo_name, dup_type, dup_created_at,
// author_id, committer_id, dup_author_login, dup_committer_login
func addCommit(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 15 {
err = fmt.Errorf("addCommit: expects 15 variadic parameters, got %d %+v", len(args), args)
return
}
// New args
newArgs := lib.AnyArray{
args[0], // sha
args[1], // event_id
args[2], // author_name
args[3], // encrypted_email
args[4], // message
true, // is_distinct
args[5], // dup_actor_id
args[6], // dup_actor_login
args[7], // dup_repo_id
args[8], // dup_repo_name
args[9], // dup_type
args[10], // dup_created_at
args[11], // author_id
args[12], // committer_id
args[13], // dup_author_login
args[14], // dup_committer_login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_commits("+
"sha, event_id, author_name, encrypted_email, message, is_distinct, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"author_id, committer_id, dup_author_login, dup_committer_login"+
") "+lib.NValues(16),
newArgs...,
)
return
}
// Add comment
// id, event_id, body, created_at, user_id, repo_id, repo_name, actor_id, actor_login, type, user_login
func addComment(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 11 {
err = fmt.Errorf("addComment: expects 11 variadic parameters, got %d %+v", len(args), args)
return
}
// New args
newArgs := lib.AnyArray{
args[0], // id
args[1], // event_id
args[2], // body
args[3], // created_at
time.Now(), // updated_at
args[4], // user_id
nil, // commit_id
nil, // original_commit_id
nil, // diff_hunk
nil, // position
nil, // original_position
nil, // path
nil, // pull_request_review_id
nil, // line
args[7], // actor_id
args[8], // actor_login
args[5], // repo_id
args[6], // repo_name
args[9], // type
args[3], // dup_created_at
args[10], // dup_user_login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_comments("+
"id, event_id, body, created_at, updated_at, user_id, "+
"commit_id, original_commit_id, diff_hunk, position, "+
"original_position, path, pull_request_review_id, line, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_user_login) "+lib.NValues(21),
newArgs...,
)
return
}
// Add payload
// event_id, issue_id, pull_request_id, comment_id, number, forkee_id, release_id, member_id
// actor_id, actor_login, repo_id, repo_name, event_type, event_created_at
func addPayload(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 14 {
err = fmt.Errorf("addPayload: expects 14 variadic parameters, got %d %+v", len(args), args)
return
}
newArgs := lib.AnyArray{
args[0], // event_id
nil, // push_id, size, ref, head, befor
nil,
nil,
nil,
nil,
"created", // action
args[1], // issue_id
args[2], // pull_request_id
args[3], // comment_id
nil, // ref_type, master_branch, commit
nil,
nil,
"desc", // description
args[4], // number
args[5], // forkee_id
args[6], // release_id
args[7], // member_id
args[8], // actor.ID
args[9], // actor.Login
args[10], // repo.ID
args[11], // repo.Name
args[12], // event.Type
args[13], // event.CreatedAt
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_payloads("+
"event_id, push_id, size, ref, head, befor, action, "+
"issue_id, pull_request_id, comment_id, ref_type, master_branch, commit, "+
"description, number, forkee_id, release_id, member_id, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at"+
") "+lib.NValues(24),
newArgs...,
)
return
}
// Add PR
// prid, eid, uid, merged_id, assignee_id, num, state, title, body, created_at, closed_at, merged_at, merged
// repo_id, repo_name, actor_id, actor_login, updated_at
func addPR(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 18 {
err = fmt.Errorf("addPR: expects 18 variadic parameters, got %d %+v", len(args), args)
return
}
newArgs := lib.AnyArray{
args[0], // PR.id
args[1], // event.ID
args[2], // user.ID
"250aac33d5aae922aac08bba4f06bd139c1c8994", // base SHA
"9c31bcbc683a491c3d4122adcfe4caaab6e2d0fc", // head SHA
args[3], // MergedBy.ID
args[4], // Assignee.ID
nil,
args[5], // PR.Number
args[6], // PR.State (open, closed)
false, // PR.Locked
args[7], // PR.Title
args[8], // PR.Body
args[9], // PR.CreatedAt
args[17], // PR.UpdatedAt
args[10], // PR.ClosedAt
args[11], // PR.MergedAt
"9c31bcbc683a491c3d4122adcfe4caaab6e2d0fc", // PR.MergeCommitSHA
args[12], // PR.Merged
true, // PR.Mergeable
true, // PR.Rebaseable
"clean", // PR.MergeableState (nil, unknown, clean, unstable, dirty)
1, // PR.Comments
1, // PR.ReviewComments
true, // PR.MaintainerCanModify
1, // PR.Commits
1, // PR.additions
1, // PR.Deletions
1, // PR.ChangedFiles
args[15], // Duplicate data starts here: ev.Actor.ID
args[16], // ev.Actor.Login
args[13], // ev.Repo.ID
args[14], // ev.Repo.Name
"T", // ev.Type
time.Now(), // ev.CreatedAt
args[16], // PR.User.Login
nil, // PR.Assignee.Login
nil, // PR.MergedBy.Login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_pull_requests("+
"id, event_id, user_id, base_sha, head_sha, merged_by_id, assignee_id, milestone_id, "+
"number, state, locked, title, body, created_at, updated_at, closed_at, merged_at, "+
"merge_commit_sha, merged, mergeable, rebaseable, mergeable_state, comments, "+
"review_comments, maintainer_can_modify, commits, additions, deletions, changed_files, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_user_login, dupn_assignee_login, dupn_merged_by_login) "+lib.NValues(38),
newArgs...,
)
return
}
// Add Issue PR
// issue_id, pr_id, number, repo_id, repo_name, created_at
func addIssuePR(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 6 {
err = fmt.Errorf("addIssuePR: expects 6 variadic parameters, got %d %+v", len(args), args)
return
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_issues_pull_requests("+
"issue_id, pull_request_id, number, repo_id, repo_name, created_at"+
") "+lib.NValues(6),
args...,
)
return
}
// Add Issue
// id, event_id, assignee_id, body, closed_at, created_at, number, state, title, updated_at
// user_id, dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type,
// is_pull_request, milestone_id, dup_created_at
func addIssue(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 19 {
err = fmt.Errorf("addIssue: expects 19 variadic parameters, got %d %+v", len(args), args)
return
}
newArgs := lib.AnyArray{
args[0], // id
args[1], // event_id
args[2], // assignee_id
args[3], // body
args[4], // closed_at
0, // comments
args[5], // created_at
false, // locked
args[17], // milestone_id
args[6], // number
args[7], // state
args[8], // title
args[9], // updated_at
args[10], // user_id
args[11], // dup_actor_id
args[12], // dup_actor_login
args[13], // dup_repo_id
args[14], // dup_repo_name
args[15], // dup_type
args[18], // dup_created_at
args[12], // dup_user_login
"", // dup_assignee_login
args[16], // is_pull_request
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_issues("+
"id, event_id, assignee_id, body, closed_at, comments, created_at, "+
"locked, milestone_id, number, state, title, updated_at, user_id, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dup_user_login, dupn_assignee_login, is_pull_request) "+lib.NValues(23),
newArgs...,
)
return
}
// Add Milestone
// id, event_id, closed_at, created_at, actor_id, due_on, number, state, title, updated_at
// dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at
func addMilestone(con *sql.DB, ctx *lib.Ctx, args ...interface{}) (err error) {
if len(args) != 16 {
err = fmt.Errorf("addMilestone: expects 16 variadic parameters, got %d %+v", len(args), args)
return
}
newArgs := lib.AnyArray{
args[0], // id
args[1], // event_id
args[2], // closed_at
0, // closed issues
args[3], // created_at
args[4], // actor_id
"", // description
args[5], // due_on
args[6], // number
0, // open issues
args[7], // state
args[8], // title
args[9], // updated_at
args[10], // dup_actor_id
args[11], // dup_actor_login
args[12], // dup_repo_id
args[13], // dup_repo_name
args[14], // dup_type
args[15], // dup_created_at
"", // dup_creator_login
}
_, err = lib.ExecSQL(
con,
ctx,
"insert into gha_milestones("+
"id, event_id, closed_at, closed_issues, created_at, creator_id, "+
"description, due_on, number, open_issues, state, title, updated_at, "+
"dup_actor_id, dup_actor_login, dup_repo_id, dup_repo_name, dup_type, dup_created_at, "+
"dupn_creator_login) "+lib.NValues(20),
newArgs...,
)
return
}
// Helper function - save data structure to YAML
// Used when migrating test coverage from Go source to a YAML file
func interfaceToYaml(fn string, i *[][]interface{}) (err error) {
yml, err := yaml.Marshal(i)
lib.FatalOnError(err)
lib.FatalOnError(ioutil.WriteFile(fn, yml, 0644))
return
}
// Set dynamic dates after loading static YAML data
func (metricTestCase) SetDates(con *sql.DB, ctx *lib.Ctx, arg string, replaces [][]string) (err error) {
//err = fmt.Errorf("got '%s'", arg)
//return
updates := strings.Split(arg, ",")
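// Each update has the form "table;column;value[;date]", e.g.
// "gha_issues;updated_at;now();2017-09-01" produces:
//   update gha_issues set updated_at = now() where date(updated_at) = '2017-09-01'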
for _, update := range updates {
ary := strings.Split(update, ";")
dt := "1980-01-01"
if len(ary) > 3 {
dt = ary[3]
}
query := fmt.Sprintf(
"update %s set %s = %s where date(%s) = '%s'",
ary[0],
ary[1],
ary[2],
ary[1],
dt,
)
_, err = lib.ExecSQL(
con,
ctx,
query,
)
}
return
}
// Sets Repo alias to be the same as Name on all repos
func (metricTestCase) UpdateRepoAliasFromName(con *sql.DB, ctx *lib.Ctx, arg string, replaces [][]string) (err error) {
_, err = lib.ExecSQL(con, ctx, "update gha_repos set alias = name")
lib.FatalOnError(err)
return
}
// Create dynamic tags data after loading static YAML data
func (metricTestCase) RunTags(con *sql.DB, ctx *lib.Ctx, arg string, replaces [][]string) (err error) {
if arg == "" {
return fmt.Errorf("empty tags definition")
}
dataPrefix := ctx.DataDir
if ctx.Local {
dataPrefix = "./"
}
// Read tags to generate
data, err := lib.ReadFile(ctx, dataPrefix+ctx.TagsYaml)
if err != nil {
return err
}
var allTags lib.Tags
err = yaml.Unmarshal(data, &allTags)
if err != nil {
return err
}
tagsAry := strings.Split(arg, ",")
tagMap := make(map[string]bool)
for _, tag := range tagsAry {
tagMap[tag] = false
}
for _, tag := range allTags.Tags {
name := tag.Name
found, ok := tagMap[name]
if ok && !found {
lib.ProcessTag(con, nil, ctx, &tag, replaces)
tagMap[name] = true
}
}
for tag, found := range tagMap {
if !found {
return fmt.Errorf("tag: %s not found", tag)
}
}
return
}
// Create dynamic data for the affiliations metric after loading static YAML data
func (metricTestCase) AffiliationsTestHelper(con *sql.DB, ctx *lib.Ctx, arg string, replaces [][]string) (err error) {
ft := testlib.YMDHMS
// Activities counted
etypes := []string{
"PullRequestReviewCommentEvent",
"PushEvent",
"PullRequestEvent",
"IssuesEvent",
"IssueCommentEvent",
"CommitCommentEvent",
}
// Date ranges (two dates are outside metric area)
dates := []time.Time{}
dt := ft(2017, 8, 31)
dtTo := ft(2017, 10, 2)
for dt.Before(dtTo) || dt.Equal(dtTo) {
dates = append(dates, dt)
dt = lib.NextDayStart(dt)
}
// Will hold all events generated
events := [][]interface{}{}
commits := [][]interface{}{}
eid := 1
cid := 1
rid := 1
for aidx, aid := range []string{"1", "2", "3"} {
for _, etype := range etypes {
for _, dt := range dates {
// Event to add
// eid, etype, aid, rid, public, created_at, aname, rname, orgid
events = append(events, []interface{}{eid, etype, aid, rid, true, dt, "A" + aid, "R", nil})
if etype == "PushEvent" {
// Commit to add
// sha, event_id, author_name, encrypted_email, message, dup_actor_id, dup_actor_login,
// dup_repo_id, dup_repo_name, dup_type, dup_created_at,
// author_id, committer_id, dup_author_login, dup_committer_login
commits = append(
commits,
[]interface{}{
strconv.Itoa(cid), eid, "AN" + aid, "", "commit ", aidx + 1, "A" + aid,
rid, "R" + strconv.Itoa(rid), etype, dt,
aidx + 2, aidx + 3, "AU" + aid, "AC" + aid,
},
)
cid++
}
eid++
rid++
if rid > 4 {
rid = 1
}
}
}
}
// Add events
for _, event := range events {
err = addEvent(con, ctx, event...)
if err != nil {
fmt.Printf("error adding event: %v", err)
return
}
}
// Add commits
for _, commit := range commits {
err = addCommit(con, ctx, commit...)
if err != nil {
fmt.Printf("error adding commit: %v", err)
return
}
}
return
}
|
[
"\"TEST_METRICS\""
] |
[] |
[
"TEST_METRICS"
] |
[]
|
["TEST_METRICS"]
|
go
| 1 | 0 | |
src/main/java/klone/kafka/Options.java
|
/*
Copyright 2019 Willem Ferguson.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package klone.kafka;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.record.CompressionType;
import picocli.CommandLine.Option;
class Options {
@Option(names = {"--bootstrap-server" }, description = "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster", defaultValue = "localhost:9200")
public String[] bootstrapServers = { "localhost:9200" };
@Option(names = {"--client-id" }, description = "An id string to pass to the server when making requests", defaultValue = "${env:HOSTNAME}")
public String clientId = System.getenv("HOSTNAME");
@Option(names = {"--property" }, description = "Kafka client properties", arity = "0..*")
public Map<String, String> properties = new HashMap<>();
Properties properties() {
final var props = new Properties();
props.putAll(properties);
props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, String.join(",", bootstrapServers));
props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId);
return props;
}
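// For example, passing --bootstrap-server twice (host1:9092 and host2:9092) results in
// bootstrap.servers=host1:9092,host2:9092 in the returned Properties.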
static class Source extends Options {
@Option(names = {"--topic"}, description = "The list of topics that will be cloned", required = true, arity = "1..*")
public String[] topics;
@Option(names = {"--group-id" }, description = "A unique string that identifies the consumer group this consumer belongs to", required = true)
public String groupId;
@Override Properties properties() {
final var props = super.properties();
props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
return props;
}
}
static class Destination extends Options {
@Option(names = {"--topic" }, description = "Map source topic to destination topics", arity = "0..*")
public Map<String, String> topic = new HashMap<>();
@Option(names = {"--transactional-id"}, description = "The TransactionalId to use for transactional delivery", defaultValue = "${env:HOSTNAME}")
public String transactionalId = System.getenv("HOSTNAME");
@Option(names = {"--compression-type"}, description = "The compression type for all data generated by the producer", defaultValue = "zstd")
public String compressionType = CompressionType.ZSTD.name();
@Override Properties properties() {
final var props = super.properties();
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId);
props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType);
return props;
}
}
}
|
[
"\"HOSTNAME\"",
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
java
| 1 | 0 | |
datamover/pkg/service.go
|
// Copyright 2019 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pkg
import (
"os"
"strings"
"github.com/opensds/multi-cloud/dataflow/pkg/utils"
"github.com/opensds/multi-cloud/datamover/pkg/db"
"github.com/opensds/multi-cloud/datamover/pkg/kafka"
log "github.com/sirupsen/logrus"
)
var dataMoverGroup = "datamover"
func InitDatamoverService() error {
host := os.Getenv("DB_HOST")
dbstor := utils.Database{Credential: "unkonwn", Driver: "mongodb", Endpoint: host}
db.Init(&dbstor)
addrs := []string{}
config := strings.Split(os.Getenv("KAFKA_ADVERTISED_LISTENERS"), ";")
for i := 0; i < len(config); i++ {
addr := strings.Split(config[i], "//")
if len(addr) != 2 {
log.Info("invalid addr:", config[i])
} else {
addrs = append(addrs, addr[1])
}
}
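// For example, KAFKA_ADVERTISED_LISTENERS="PLAINTEXT://kafka1:9092;PLAINTEXT://kafka2:9092"
// yields addrs = ["kafka1:9092", "kafka2:9092"]; entries without "//" are logged as invalid and skipped.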
topics := []string{"migration", "lifecycle"}
err := kafka.Init(addrs, dataMoverGroup, topics)
if err != nil {
log.Info("init kafka consumer failed.")
return nil
}
go kafka.LoopConsume()
datamoverID := os.Getenv("HOSTNAME")
log.Infof("init datamover[ID#%s] finished.\n", datamoverID)
return nil
}
|
[
"\"DB_HOST\"",
"\"KAFKA_ADVERTISED_LISTENERS\"",
"\"HOSTNAME\""
] |
[] |
[
"KAFKA_ADVERTISED_LISTENERS",
"HOSTNAME",
"DB_HOST"
] |
[]
|
["KAFKA_ADVERTISED_LISTENERS", "HOSTNAME", "DB_HOST"]
|
go
| 3 | 0 | |
internal/repository/phrase.go
|
package repository
import (
"context"
"database/sql"
"errors"
"fmt"
"os"
"strconv"
"strings"
"github.com/ditointernet/tradulab-service/driven"
"github.com/ditointernet/tradulab-service/internal/core/domain"
"github.com/google/uuid"
)
type Phrase struct {
cli *sql.DB
}
func MustNewPhrase(db *sql.DB) *Phrase {
return &Phrase{
cli: db,
}
}
func (p *Phrase) CreateOrUpdatePhraseTx(ctx context.Context, phrases []*domain.Phrase) error {
tx, err := p.cli.BeginTx(ctx, nil)
if err != nil {
return err
}
for _, value := range phrases {
dto := &driven.Phrase{
Id: uuid.New().String(),
FileId: value.FileId,
Key: value.Key,
Content: value.Content,
}
_, err := tx.ExecContext(
ctx,
`INSERT into phrases (id, file_id, key, content)
VALUES ($1, $2, $3, $4)
ON CONFLICT (key, file_id)
DO UPDATE SET content = $4`,
dto.Id,
dto.FileId,
dto.Key,
dto.Content,
)
if err != nil {
tx.Rollback()
return err
}
}
err = tx.Commit()
if err != nil {
return err
}
return err
}
func (p *Phrase) GetByFileId(ctx context.Context, id string) (domain.Phrase, error) {
var phrase domain.Phrase
err := p.cli.QueryRowContext(
ctx,
"SELECT key FROM phrases WHERE file_id = $1",
id).Scan(&phrase.Key)
if err != nil {
return domain.Phrase{}, err
}
return phrase, nil
}
func (p *Phrase) DeletePhrases(ctx context.Context, phrasesKey []string, fileId string) error {
var phrasesFormat []string
for _, valueKey := range phrasesKey {
phrasesFormat = append(phrasesFormat, fmt.Sprintf("'%s'", valueKey))
}
list := strings.Join(phrasesFormat[:], ", ")
query := fmt.Sprintf("DELETE FROM phrases WHERE file_id = $1 AND key NOT IN (%s)", list)
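// For phrasesKey = ["greeting", "farewell"] this builds:
//   DELETE FROM phrases WHERE file_id = $1 AND key NOT IN ('greeting', 'farewell')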
_, err := p.cli.ExecContext(
ctx,
query,
fileId,
)
if err != nil {
return err
}
return nil
}
func (p *Phrase) GetPhrasesById(ctx context.Context, phraseId string) (domain.Phrase, error) {
var phrase domain.Phrase
err := p.cli.QueryRowContext(
ctx,
"SELECT id, file_id, content, key FROM phrases WHERE id = $1",
phraseId).Scan(&phrase.Id, &phrase.FileId, &phrase.Content, &phrase.Key)
if err != nil {
if err == sql.ErrNoRows {
return domain.Phrase{}, domain.NewNotFoundError("phrase not found")
}
return domain.Phrase{}, err
}
return phrase, nil
}
func (p *Phrase) GetFilePhrases(ctx context.Context, fileId string, page int) ([]domain.Phrase, error) {
if page <= 0 {
return nil, errors.New("must be bigger zero")
}
limit, err := strconv.Atoi(os.Getenv("PAGINATION_LIMIT"))
if err != nil {
return nil, err
}
offset := limit * (page - 1)
var phrases []domain.Phrase
allPhrases, err := p.cli.QueryContext(ctx, "SELECT id, file_id, key, content FROM phrases WHERE file_id = $1 OFFSET $2 LIMIT $3", fileId, offset, limit)
if err != nil {
return nil, err
}
defer allPhrases.Close()
for allPhrases.Next() {
var phrase domain.Phrase
err = allPhrases.Scan(&phrase.Id, &phrase.FileId, &phrase.Key, &phrase.Content)
if err != nil {
return nil, err
}
phrases = append(phrases, phrase)
}
return phrases, nil
}
func (p *Phrase) CountPhrases(ctx context.Context, fileId string) (int, error) {
var totalPhrases int
err := p.cli.QueryRowContext(
ctx,
"SELECT COUNT (*) FROM phrases WHERE file_id = $1",
fileId).Scan(&totalPhrases)
if err != nil {
return 0, err
}
return totalPhrases, nil
}
|
[
"\"PAGINATION_LIMIT\""
] |
[] |
[
"PAGINATION_LIMIT"
] |
[]
|
["PAGINATION_LIMIT"]
|
go
| 1 | 0 | |
cmd/buildah/common.go
|
package main
import (
"context"
"os"
"sort"
"strings"
"time"
"github.com/containers/buildah"
"github.com/containers/buildah/util"
is "github.com/containers/image/storage"
"github.com/containers/image/types"
lu "github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
var needToShutdownStore = false
func getStore(c *cli.Context) (storage.Store, error) {
options, _, err := lu.GetDefaultStoreOptions()
if err != nil {
return nil, err
}
if c.GlobalIsSet("root") || c.GlobalIsSet("runroot") {
options.GraphRoot = c.GlobalString("root")
options.RunRoot = c.GlobalString("runroot")
}
if err := os.Setenv("XDG_RUNTIME_DIR", options.RunRoot); err != nil {
return nil, errors.New("could not set XDG_RUNTIME_DIR")
}
if c.GlobalIsSet("storage-driver") {
options.GraphDriverName = c.GlobalString("storage-driver")
// If any options were set up in config, they should be dropped if the user overrode the driver
options.GraphDriverOptions = []string{}
}
if c.GlobalIsSet("storage-opt") {
opts := c.GlobalStringSlice("storage-opt")
if len(opts) > 0 {
options.GraphDriverOptions = opts
}
}
if c.GlobalIsSet("userns-uid-map") && c.GlobalIsSet("userns-gid-map") {
uopts := c.GlobalStringSlice("userns-uid-map")
gopts := c.GlobalStringSlice("userns-gid-map")
if len(uopts) == 0 {
return nil, errors.New("--userns-uid-map used with no mappings?")
}
if len(gopts) == 0 {
return nil, errors.New("--userns-gid-map used with no mappings?")
}
uidmap, gidmap, err := util.ParseIDMappings(uopts, gopts)
if err != nil {
return nil, err
}
options.UIDMap = uidmap
options.GIDMap = gidmap
} else if c.GlobalIsSet("userns-uid-map") {
return nil, errors.Errorf("--userns-uid-map requires --userns-gid-map")
} else if c.GlobalIsSet("userns-gid-map") {
return nil, errors.Errorf("--userns-gid-map requires --userns-uid-map")
}
store, err := storage.GetStore(options)
if store != nil {
is.Transport.SetStore(store)
}
needToShutdownStore = true
return store, err
}
func openBuilder(ctx context.Context, store storage.Store, name string) (builder *buildah.Builder, err error) {
if name != "" {
builder, err = buildah.OpenBuilder(store, name)
if os.IsNotExist(err) {
options := buildah.ImportOptions{
Container: name,
}
builder, err = buildah.ImportBuilder(ctx, store, options)
}
}
if err != nil {
return nil, errors.Wrapf(err, "error reading build container")
}
if builder == nil {
return nil, errors.Errorf("error finding build container")
}
return builder, nil
}
func openBuilders(store storage.Store) (builders []*buildah.Builder, err error) {
return buildah.OpenAllBuilders(store)
}
func openImage(ctx context.Context, sc *types.SystemContext, store storage.Store, name string) (builder *buildah.Builder, err error) {
options := buildah.ImportFromImageOptions{
Image: name,
SystemContext: sc,
}
builder, err = buildah.ImportBuilderFromImage(ctx, store, options)
if err != nil {
return nil, errors.Wrapf(err, "error reading image")
}
if builder == nil {
return nil, errors.Errorf("error mocking up build configuration")
}
return builder, nil
}
func getDateAndDigestAndSize(ctx context.Context, image storage.Image, store storage.Store) (time.Time, string, int64, error) {
created := time.Time{}
is.Transport.SetStore(store)
storeRef, err := is.Transport.ParseStoreReference(store, image.ID)
if err != nil {
return created, "", -1, err
}
img, err := storeRef.NewImage(ctx, nil)
if err != nil {
return created, "", -1, err
}
defer img.Close()
imgSize, sizeErr := img.Size()
if sizeErr != nil {
imgSize = -1
}
manifest, _, manifestErr := img.Manifest(ctx)
manifestDigest := ""
if manifestErr == nil && len(manifest) > 0 {
manifestDigest = digest.Canonical.FromBytes(manifest).String()
}
inspectInfo, inspectErr := img.Inspect(ctx)
if inspectErr == nil && inspectInfo != nil {
created = *inspectInfo.Created
}
if sizeErr != nil {
err = sizeErr
} else if manifestErr != nil {
err = manifestErr
} else if inspectErr != nil {
err = inspectErr
}
return created, manifestDigest, imgSize, err
}
// getContext returns a context.TODO
func getContext() context.Context {
return context.TODO()
}
var userFlags = []cli.Flag{
cli.StringFlag{
Name: "user",
Usage: "`user[:group]` to run the command as",
},
}
func defaultFormat() string {
format := os.Getenv("BUILDAH_FORMAT")
if format != "" {
return format
}
return buildah.OCI
}
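// For example, with BUILDAH_FORMAT=docker this returns "docker"; when the variable
// is unset or empty, the OCI format is returned instead.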
// imageIsParent goes through the layers in the store and checks if i.TopLayer is
// the parent of any other layer in store. Double check that image with that
// layer exists as well.
func imageIsParent(store storage.Store, topLayer string) (bool, error) {
children, err := getChildren(store, topLayer)
if err != nil {
return false, err
}
return len(children) > 0, nil
}
// getParent returns the image ID of the parent. Return nil if a parent is not found.
func getParent(store storage.Store, topLayer string) (*storage.Image, error) {
images, err := store.Images()
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve images from store")
}
layer, err := store.Layer(topLayer)
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve layers from store")
}
for _, img := range images {
if img.TopLayer == layer.Parent {
return &img, nil
}
}
return nil, nil
}
// getChildren returns a list of the imageIDs that depend on the image
func getChildren(store storage.Store, topLayer string) ([]string, error) {
var children []string
images, err := store.Images()
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve images from store")
}
layers, err := store.Layers()
if err != nil {
return nil, errors.Wrapf(err, "unable to retrieve layers from store")
}
for _, layer := range layers {
if layer.Parent == topLayer {
if imageID := getImageOfTopLayer(images, layer.ID); len(imageID) > 0 {
children = append(children, imageID...)
}
}
}
return children, nil
}
// getImageOfTopLayer returns the image ID where layer is the top layer of the image
func getImageOfTopLayer(images []storage.Image, layer string) []string {
var matches []string
for _, img := range images {
if img.TopLayer == layer {
matches = append(matches, img.ID)
}
}
return matches
}
func getFormat(c *cli.Context) (string, error) {
format := strings.ToLower(c.String("format"))
switch format {
case buildah.OCI:
return buildah.OCIv1ImageManifest, nil
case buildah.DOCKER:
return buildah.Dockerv2ImageManifest, nil
default:
return "", errors.Errorf("unrecognized image type %q", format)
}
}
func sortFlags(flags []cli.Flag) []cli.Flag {
sort.Slice(flags, func(i, j int) bool {
return strings.Compare(flags[i].GetName(), flags[j].GetName()) < 0
})
return flags
}
|
[
"\"BUILDAH_FORMAT\""
] |
[] |
[
"BUILDAH_FORMAT"
] |
[]
|
["BUILDAH_FORMAT"]
|
go
| 1 | 0 | |
litex/soc/integration/export.py
|
#
# This file is part of LiteX.
#
# This file is Copyright (c) 2013-2014 Sebastien Bourdeauducq <[email protected]>
# This file is Copyright (c) 2014-2019 Florent Kermarrec <[email protected]>
# This file is Copyright (c) 2018 Dolu1990 <[email protected]>
# This file is Copyright (c) 2019 Gabriel L. Somlo <[email protected]>
# This file is Copyright (c) 2018 Jean-François Nguyen <[email protected]>
# This file is Copyright (c) 2019 Antmicro <www.antmicro.com>
# This file is Copyright (c) 2013 Robert Jordens <[email protected]>
# This file is Copyright (c) 2018 Sean Cross <[email protected]>
# This file is Copyright (c) 2018 Sergiusz Bazanski <[email protected]>
# This file is Copyright (c) 2018-2016 Tim 'mithro' Ansell <[email protected]>
# This file is Copyright (c) 2015 whitequark <[email protected]>
# This file is Copyright (c) 2018 William D. Jones <[email protected]>
# This file is Copyright (c) 2020 Piotr Esden-Tempski <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import json
import time
import datetime
import inspect
from shutil import which
from sysconfig import get_platform
from migen import *
from litex.soc.interconnect.csr import CSRStatus
from litex.build.tools import generated_banner
from litex.soc.doc.rst import reflow
from litex.soc.doc.module import gather_submodules, ModuleNotDocumented, DocumentedModule, DocumentedInterrupts
from litex.soc.doc.csr import DocumentedCSRRegion
from litex.soc.interconnect.csr import _CompoundCSR
# CPU files ----------------------------------------------------------------------------------------
def get_cpu_mak(cpu, compile_software):
# Select between CLANG and GCC.
clang = os.getenv("CLANG", "")
if clang != "":
clang = bool(int(clang))
else:
clang = None
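# CLANG="" (or unset) leaves the choice to the defaults below; CLANG=0 forces GCC and
# CLANG=1 forces clang (which raises if the selected CPU has no clang_triple).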
if cpu.clang_triple is None:
if clang:
raise ValueError(cpu.name + " is not supported with CLANG.")
else:
clang = False
else:
# Default to gcc unless told otherwise.
if clang is None:
clang = False
assert isinstance(clang, bool)
if clang:
triple = cpu.clang_triple
flags = cpu.clang_flags
else:
triple = cpu.gcc_triple
flags = cpu.gcc_flags
# Select triple when more than one.
def select_triple(triple):
r = None
if not isinstance(triple, tuple):
triple = (triple,)
override = os.getenv("LITEX_ENV_CC_TRIPLE")
if override:
triple = (override,) + triple
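# When LITEX_ENV_CC_TRIPLE is set, that triple is tried first, before the CPU's default triple(s).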
p = get_platform()
for i in range(len(triple)):
t = triple[i]
# Use native toolchain if host and target platforms are the same.
if t == 'riscv64-unknown-elf' and p == 'linux-riscv64':
r = '--native--'
break
if which(t+"-gcc"):
r = t
break
if r is None:
if not compile_software:
return "--not-found--"
msg = "Unable to find any of the cross compilation toolchains:\n"
for i in range(len(triple)):
msg += "- " + triple[i] + "\n"
raise OSError(msg)
return r
# Return information.
return [
("TRIPLE", select_triple(triple)),
("CPU", cpu.name),
("CPUFLAGS", flags),
("CPUENDIANNESS", cpu.endianness),
("CLANG", str(int(clang))),
("CPU_DIRECTORY", os.path.dirname(inspect.getfile(cpu.__class__))),
]
def get_linker_output_format(cpu):
return f"OUTPUT_FORMAT(\"{cpu.linker_output_format}\")\n"
def get_linker_regions(regions):
r = "MEMORY {\n"
for name, region in regions.items():
r += f"\t{name} : ORIGIN = 0x{region.origin:08x}, LENGTH = 0x{region.length:08x}\n"
r += "}\n"
return r
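# For example, a region "sram" at origin 0x10000000 with length 0x2000 is emitted as:
#   sram : ORIGIN = 0x10000000, LENGTH = 0x00002000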
# C Export -----------------------------------------------------------------------------------------
def get_git_header():
from litex.build.tools import get_migen_git_revision, get_litex_git_revision
r = generated_banner("//")
r += "#ifndef __GENERATED_GIT_H\n#define __GENERATED_GIT_H\n\n"
r += f"#define MIGEN_GIT_SHA1 \"{get_migen_git_revision()}\"\n"
r += f"#define LITEX_GIT_SHA1 \"{get_litex_git_revision()}\"\n"
r += "#endif\n"
return r
def get_mem_header(regions):
r = generated_banner("//")
r += "#ifndef __GENERATED_MEM_H\n#define __GENERATED_MEM_H\n\n"
for name, region in regions.items():
r += f"#ifndef {name.upper()}_BASE\n"
r += f"#define {name.upper()}_BASE 0x{region.origin:08x}L\n"
r += f"#define {name.upper()}_SIZE 0x{region.length:08x}\n"
r += "#endif\n\n"
r += "#ifndef MEM_REGIONS\n"
r += "#define MEM_REGIONS \"";
for name, region in regions.items():
r += f"{name.upper()} {' '*(8-len(name))} 0x{region.origin:08x} 0x{region.size:x} \\n"
r = r[:-2]
r += "\"\n"
r += "#endif\n"
r += "#endif\n"
return r
def get_soc_header(constants, with_access_functions=True):
r = generated_banner("//")
r += "#ifndef __GENERATED_SOC_H\n#define __GENERATED_SOC_H\n"
funcs = ""
for name, value in constants.items():
if value is None:
r += "#define "+name+"\n"
continue
if isinstance(value, str):
value = "\"" + value + "\""
ctype = "const char *"
else:
value = str(value)
ctype = "int"
r += "#define "+name+" "+value+"\n"
if with_access_functions:
funcs += "static inline "+ctype+" "+name.lower()+"_read(void) {\n"
funcs += "\treturn "+value+";\n}\n"
if with_access_functions:
r += "\n#ifndef __ASSEMBLER__\n"
r += funcs
r += "#endif // !__ASSEMBLER__\n"
r += "\n#endif\n"
return r
def _get_rw_functions_c(reg_name, reg_base, nwords, busword, alignment, read_only, with_access_functions):
r = ""
addr_str = f"CSR_{reg_name.upper()}_ADDR"
size_str = f"CSR_{reg_name.upper()}_SIZE"
r += f"#define {addr_str} (CSR_BASE + {hex(reg_base)}L)\n"
r += f"#define {size_str} {nwords}\n"
size = nwords*busword//8
if size > 8:
# Downstream should select appropriate `csr_[rd|wr]_buf_uintX()` pair!
return r
elif size > 4:
ctype = "uint64_t"
elif size > 2:
ctype = "uint32_t"
elif size > 1:
ctype = "uint16_t"
else:
ctype = "uint8_t"
stride = alignment//8
if with_access_functions:
r += f"static inline {ctype} {reg_name}_read(void) {{\n"
if nwords > 1:
r += f"\t{ctype} r = csr_read_simple(CSR_BASE + {reg_base}L);\n"
for sub in range(1, nwords):
r += f"\tr <<= {busword};\n"
r += f"\tr |= csr_read_simple(CSR_BASE + {hex(reg_base+sub*stride)}L);\n"
r += "\treturn r;\n}\n"
else:
r += f"\treturn csr_read_simple(CSR_BASE + {hex(reg_base)}L);\n}}\n"
if not read_only:
r += f"static inline void {reg_name}_write({ctype} v) {{\n"
for sub in range(nwords):
shift = (nwords-sub-1)*busword
if shift:
v_shift = "v >> {}".format(shift)
else:
v_shift = "v"
r += f"\tcsr_write_simple({v_shift}, CSR_BASE + {hex(reg_base+sub*stride)}L);\n"
r += "}\n"
return r
def get_csr_header(regions, constants, csr_base=None, with_access_functions=True):
alignment = constants.get("CONFIG_CSR_ALIGNMENT", 32)
r = generated_banner("//")
if with_access_functions: # FIXME
r += "#include <generated/soc.h>\n"
r += "#ifndef __GENERATED_CSR_H\n#define __GENERATED_CSR_H\n"
if with_access_functions:
r += "#include <stdint.h>\n"
r += "#include <system.h>\n"
r += "#ifndef CSR_ACCESSORS_DEFINED\n"
r += "#include <hw/common.h>\n"
r += "#endif /* ! CSR_ACCESSORS_DEFINED */\n"
csr_base = csr_base if csr_base is not None else regions[next(iter(regions))].origin
r += "#ifndef CSR_BASE\n"
r += f"#define CSR_BASE {hex(csr_base)}L\n"
r += "#endif\n"
for name, region in regions.items():
origin = region.origin - csr_base
r += "\n/* "+name+" */\n"
r += f"#define CSR_{name.upper()}_BASE (CSR_BASE + {hex(origin)}L)\n"
if not isinstance(region.obj, Memory):
for csr in region.obj:
nr = (csr.size + region.busword - 1)//region.busword
r += _get_rw_functions_c(name + "_" + csr.name, origin, nr, region.busword, alignment,
getattr(csr, "read_only", False), with_access_functions)
origin += alignment//8*nr
if hasattr(csr, "fields"):
for field in csr.fields.fields:
offset = str(field.offset)
size = str(field.size)
r += f"#define CSR_{name.upper()}_{csr.name.upper()}_{field.name.upper()}_OFFSET {offset}\n"
r += f"#define CSR_{name.upper()}_{csr.name.upper()}_{field.name.upper()}_SIZE {size}\n"
if with_access_functions and csr.size <= 32: # FIXME: Implement extract/read functions for csr.size > 32-bit.
reg_name = name + "_" + csr.name.lower()
field_name = reg_name + "_" + field.name.lower()
r += "static inline uint32_t " + field_name + "_extract(uint32_t oldword) {\n"
r += "\tuint32_t mask = ((1 << " + size + ")-1);\n"
r += "\treturn ( (oldword >> " + offset + ") & mask );\n}\n"
r += "static inline uint32_t " + field_name + "_read(void) {\n"
r += "\tuint32_t word = " + reg_name + "_read();\n"
r += "\treturn " + field_name + "_extract(word);\n"
r += "}\n"
if not getattr(csr, "read_only", False):
r += "static inline uint32_t " + field_name + "_replace(uint32_t oldword, uint32_t plain_value) {\n"
r += "\tuint32_t mask = ((1 << " + size + ")-1);\n"
r += "\treturn (oldword & (~(mask << " + offset + "))) | (mask & plain_value)<< " + offset + " ;\n}\n"
r += "static inline void " + field_name + "_write(uint32_t plain_value) {\n"
r += "\tuint32_t oldword = " + reg_name + "_read();\n"
r += "\tuint32_t newword = " + field_name + "_replace(oldword, plain_value);\n"
r += "\t" + reg_name + "_write(newword);\n"
r += "}\n"
r += "\n#endif\n"
return r
# JSON Export --------------------------------------------------------------------------------------
def get_csr_json(csr_regions={}, constants={}, mem_regions={}):
alignment = constants.get("CONFIG_CSR_ALIGNMENT", 32)
d = {
"csr_bases": {},
"csr_registers": {},
"constants": {},
"memories": {},
}
for name, region in csr_regions.items():
d["csr_bases"][name] = region.origin
region_origin = region.origin
if not isinstance(region.obj, Memory):
for csr in region.obj:
_size = (csr.size + region.busword - 1)//region.busword
_type = "rw"
if isinstance(csr, CSRStatus) and not hasattr(csr, "r"):
_type = "ro"
d["csr_registers"][name + "_" + csr.name] = {
"addr": region_origin,
"size": _size,
"type": _type
}
region_origin += alignment//8*_size
for name, value in constants.items():
d["constants"][name.lower()] = value.lower() if isinstance(value, str) else value
for name, region in mem_regions.items():
d["memories"][name.lower()] = {
"base": region.origin,
"size": region.length,
"type": region.type,
}
return json.dumps(d, indent=4)
# CSV Export --------------------------------------------------------------------------------------
def get_csr_csv(csr_regions={}, constants={}, mem_regions={}):
d = json.loads(get_csr_json(csr_regions, constants, mem_regions))
r = generated_banner("#")
for name, value in d["csr_bases"].items():
r += "csr_base,{},0x{:08x},,\n".format(name, value)
for name in d["csr_registers"].keys():
r += "csr_register,{},0x{:08x},{},{}\n".format(name,
d["csr_registers"][name]["addr"],
d["csr_registers"][name]["size"],
d["csr_registers"][name]["type"])
for name, value in d["constants"].items():
r += "constant,{},{},,\n".format(name, value)
for name in d["memories"].keys():
r += "memory_region,{},0x{:08x},{:d},{:s}\n".format(name,
d["memories"][name]["base"],
d["memories"][name]["size"],
d["memories"][name]["type"],
)
return r
# SVD Export --------------------------------------------------------------------------------------
def get_csr_svd(soc, vendor="litex", name="soc", description=None):
def sub_csr_bit_range(busword, csr, offset):
nwords = (csr.size + busword - 1)//busword
i = nwords - offset - 1
nbits = min(csr.size - i*busword, busword) - 1
name = (csr.name + str(i) if nwords > 1 else csr.name).upper()
origin = i*busword
return (origin, nbits, name)
def print_svd_register(csr, csr_address, description, length, svd):
svd.append(' <register>')
svd.append(' <name>{}</name>'.format(csr.short_numbered_name))
if description is not None:
svd.append(' <description><![CDATA[{}]]></description>'.format(description))
svd.append(' <addressOffset>0x{:04x}</addressOffset>'.format(csr_address))
svd.append(' <resetValue>0x{:02x}</resetValue>'.format(csr.reset_value))
svd.append(' <size>{}</size>'.format(length))
# svd.append(' <access>{}</access>'.format(csr.access)) # 'access' is a lie: "read-only" registers can legitimately change state based on a write, and is in fact used to handle the "pending" field in events
csr_address = csr_address + 4
svd.append(' <fields>')
if hasattr(csr, "fields") and len(csr.fields) > 0:
for field in csr.fields:
svd.append(' <field>')
svd.append(' <name>{}</name>'.format(field.name))
svd.append(' <msb>{}</msb>'.format(field.offset +
field.size - 1))
svd.append(' <bitRange>[{}:{}]</bitRange>'.format(
field.offset + field.size - 1, field.offset))
svd.append(' <lsb>{}</lsb>'.format(field.offset))
svd.append(' <description><![CDATA[{}]]></description>'.format(
reflow(field.description)))
svd.append(' </field>')
else:
field_size = csr.size
field_name = csr.short_name.lower()
# Strip off "ev_" from eventmanager fields
if field_name == "ev_enable":
field_name = "enable"
elif field_name == "ev_pending":
field_name = "pending"
elif field_name == "ev_status":
field_name = "status"
svd.append(' <field>')
svd.append(' <name>{}</name>'.format(field_name))
svd.append(' <msb>{}</msb>'.format(field_size - 1))
svd.append(' <bitRange>[{}:{}]</bitRange>'.format(field_size - 1, 0))
svd.append(' <lsb>{}</lsb>'.format(0))
svd.append(' </field>')
svd.append(' </fields>')
svd.append(' </register>')
interrupts = {}
for csr, irq in sorted(soc.irq.locs.items()):
interrupts[csr] = irq
documented_regions = []
for region_name, region in soc.csr.regions.items():
documented_regions.append(DocumentedCSRRegion(
name = region_name,
region = region,
csr_data_width = soc.csr.data_width)
)
svd = []
svd.append('<?xml version="1.0" encoding="utf-8"?>')
svd.append('')
svd.append('<device schemaVersion="1.1" xmlns:xs="http://www.w3.org/2001/XMLSchema-instance" xs:noNamespaceSchemaLocation="CMSIS-SVD.xsd" >')
svd.append(' <vendor>{}</vendor>'.format(vendor))
svd.append(' <name>{}</name>'.format(name.upper()))
if description is not None:
svd.append(' <description><![CDATA[{}]]></description>'.format(reflow(description)))
else:
fmt = "%Y-%m-%d %H:%M:%S"
build_time = datetime.datetime.fromtimestamp(time.time()).strftime(fmt)
svd.append(' <description><![CDATA[{}]]></description>'.format(reflow("Litex SoC " + build_time)))
svd.append('')
svd.append(' <addressUnitBits>8</addressUnitBits>')
svd.append(' <width>32</width>')
svd.append(' <size>32</size>')
svd.append(' <access>read-write</access>')
svd.append(' <resetValue>0x00000000</resetValue>')
svd.append(' <resetMask>0xFFFFFFFF</resetMask>')
svd.append('')
svd.append(' <peripherals>')
for region in documented_regions:
csr_address = 0
svd.append(' <peripheral>')
svd.append(' <name>{}</name>'.format(region.name.upper()))
svd.append(' <baseAddress>0x{:08X}</baseAddress>'.format(region.origin))
svd.append(' <groupName>{}</groupName>'.format(region.name.upper()))
if len(region.sections) > 0:
svd.append(' <description><![CDATA[{}]]></description>'.format(
reflow(region.sections[0].body())))
svd.append(' <registers>')
for csr in region.csrs:
description = None
if hasattr(csr, "description"):
description = csr.description
if isinstance(csr, _CompoundCSR) and len(csr.simple_csrs) > 1:
is_first = True
for i in range(len(csr.simple_csrs)):
(start, length, name) = sub_csr_bit_range(
region.busword, csr, i)
if length > 0:
bits_str = "Bits {}-{} of `{}`.".format(
start, start+length, csr.name)
else:
bits_str = "Bit {} of `{}`.".format(
start, csr.name)
if is_first:
if description is not None:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str + " " + description, length, svd)
else:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str, length, svd)
is_first = False
else:
print_svd_register(
csr.simple_csrs[i], csr_address, bits_str, length, svd)
csr_address = csr_address + 4
else:
length = ((csr.size + region.busword - 1) //
region.busword) * region.busword
print_svd_register(
csr, csr_address, description, length, svd)
csr_address = csr_address + 4
svd.append(' </registers>')
svd.append(' <addressBlock>')
svd.append(' <offset>0</offset>')
svd.append(' <size>0x{:x}</size>'.format(csr_address))
svd.append(' <usage>registers</usage>')
svd.append(' </addressBlock>')
if region.name in interrupts:
svd.append(' <interrupt>')
svd.append(' <name>{}</name>'.format(region.name))
svd.append(' <value>{}</value>'.format(interrupts[region.name]))
svd.append(' </interrupt>')
svd.append(' </peripheral>')
svd.append(' </peripherals>')
svd.append(' <vendorExtensions>')
if len(soc.mem_regions) > 0:
svd.append(' <memoryRegions>')
for region_name, region in soc.mem_regions.items():
svd.append(' <memoryRegion>')
svd.append(' <name>{}</name>'.format(region_name.upper()))
svd.append(' <baseAddress>0x{:08X}</baseAddress>'.format(region.origin))
svd.append(' <size>0x{:08X}</size>'.format(region.size))
svd.append(' </memoryRegion>')
svd.append(' </memoryRegions>')
svd.append(' <constants>')
for name, value in soc.constants.items():
svd.append(' <constant name="{}" value="{}" />'.format(name, value))
svd.append(' </constants>')
svd.append(' </vendorExtensions>')
svd.append('</device>')
return "\n".join(svd)
# Memory.x Export ----------------------------------------------------------------------------------
def get_memory_x(soc):
r = get_linker_regions(soc.mem_regions)
r += '\n'
r += 'REGION_ALIAS("REGION_TEXT", spiflash);\n'
r += 'REGION_ALIAS("REGION_RODATA", spiflash);\n'
r += 'REGION_ALIAS("REGION_DATA", sram);\n'
r += 'REGION_ALIAS("REGION_BSS", sram);\n'
r += 'REGION_ALIAS("REGION_HEAP", sram);\n'
r += 'REGION_ALIAS("REGION_STACK", sram);\n\n'
r += '/* CPU reset location. */\n'
r += '_stext = {:#08x};\n'.format(soc.cpu.reset_address)
return r
|
[] |
[] |
[
"CLANG",
"LITEX_ENV_CC_TRIPLE"
] |
[]
|
["CLANG", "LITEX_ENV_CC_TRIPLE"]
|
python
| 2 | 0 | |
server/server.go
|
package server
import (
"context"
"encoding/base64"
"fmt"
"math/rand"
"net"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/go-kit/kit/metrics/provider"
"github.com/oklog/run"
"github.com/owenthereal/upterm/host/api"
"github.com/owenthereal/upterm/utils"
"github.com/owenthereal/upterm/ws"
log "github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
)
const (
tcpDialTimeout = 1 * time.Second
)
type Opt struct {
SSHAddr string
WSAddr string
NodeAddr string
KeyFiles []string
Hostnames []string
Network string
NetworkOpt []string
MetricAddr string
Debug bool
}
func Start(opt Opt) error {
rand.Seed(time.Now().UTC().UnixNano())
// must always have an SSH addr
if opt.SSHAddr == "" {
return fmt.Errorf("must specify a ssh address")
}
network := networks.Get(opt.Network)
if network == nil {
return fmt.Errorf("unsupport network provider %q", opt.Network)
}
opts := parseNetworkOpt(opt.NetworkOpt)
if err := network.SetOpts(opts); err != nil {
return fmt.Errorf("network provider option error: %s", err)
}
privateKeys, err := utils.ReadFiles(opt.KeyFiles)
if err != nil {
return err
}
if pp := os.Getenv("PRIVATE_KEY"); pp != "" {
privateKeys = append(privateKeys, []byte(pp))
}
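// PRIVATE_KEY, when set, is treated as one additional private key appended to those
// read from opt.KeyFiles before signers are created.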
signers, err := utils.CreateSigners(privateKeys)
if err != nil {
return err
}
var hostSigners []ssh.Signer
for _, s := range signers {
hs := HostCertSigner{
Hostnames: opt.Hostnames,
}
ss, err := hs.SignCert(s)
if err != nil {
return err
}
hostSigners = append(hostSigners, ss)
}
l := log.New()
if opt.Debug {
l.SetLevel(log.DebugLevel)
}
logger := l.WithFields(log.Fields{"app": "uptermd", "network": opt.Network, "network-opt": opt.NetworkOpt})
var (
sshln net.Listener
wsln net.Listener
)
if opt.SSHAddr != "" {
sshln, err = net.Listen("tcp", opt.SSHAddr)
if err != nil {
return err
}
logger = logger.WithField("ssh-addr", sshln.Addr())
}
if opt.WSAddr != "" {
wsln, err = net.Listen("tcp", opt.WSAddr)
if err != nil {
return err
}
logger = logger.WithField("ws-addr", wsln.Addr())
}
// fallback node addr to ssh addr or ws addr if empty
nodeAddr := opt.NodeAddr
if nodeAddr == "" && sshln != nil {
nodeAddr = sshln.Addr().String()
}
if nodeAddr == "" && wsln != nil {
nodeAddr = wsln.Addr().String()
}
if nodeAddr == "" {
return fmt.Errorf("node address can't by empty")
}
logger = logger.WithField("node-addr", nodeAddr)
var g run.Group
{
var mp provider.Provider
if opt.MetricAddr == "" {
mp = provider.NewDiscardProvider()
} else {
mp = provider.NewPrometheusProvider("upterm", "uptermd")
}
s := &Server{
NodeAddr: nodeAddr,
HostSigners: hostSigners,
Signers: signers,
NetworkProvider: network,
Logger: logger.WithField("com", "server"),
MetricsProvider: mp,
}
g.Add(func() error {
return s.ServeWithContext(context.Background(), sshln, wsln)
}, func(err error) {
s.Shutdown()
})
}
{
if opt.MetricAddr != "" {
logger = logger.WithField("metric-addr", opt.MetricAddr)
m := &metricServer{}
g.Add(func() error {
return m.ListenAndServe(opt.MetricAddr)
}, func(err error) {
_ = m.Shutdown(context.Background())
})
}
}
logger.Info("starting server")
defer logger.Info("shutting down server")
return g.Run()
}
func parseNetworkOpt(opts []string) NetworkOptions {
result := make(NetworkOptions)
for _, opt := range opts {
split := strings.SplitN(opt, "=", 2)
result[split[0]] = split[1]
}
return result
}
type Server struct {
NodeAddr string
HostSigners []ssh.Signer
Signers []ssh.Signer
NetworkProvider NetworkProvider
MetricsProvider provider.Provider
Logger log.FieldLogger
sshln net.Listener
wsln net.Listener
mux sync.Mutex
ctx context.Context
cancel func()
}
func (s *Server) Shutdown() {
s.mux.Lock()
defer s.mux.Unlock()
if s.cancel != nil {
s.cancel()
}
if s.sshln != nil {
s.sshln.Close()
}
if s.wsln != nil {
s.wsln.Close()
}
}
func (s *Server) ServeWithContext(ctx context.Context, sshln net.Listener, wsln net.Listener) error {
s.mux.Lock()
s.sshln, s.wsln = sshln, wsln
s.ctx, s.cancel = context.WithCancel(ctx)
s.mux.Unlock()
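// The network provider supplies dialers/listeners for the internal sshd and for host sessions.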
sshdDialListener := s.NetworkProvider.SSHD()
sessionDialListener := s.NetworkProvider.Session()
sessRepo := newSessionRepo()
var g run.Group
{
g.Add(func() error {
<-s.ctx.Done()
return s.ctx.Err()
}, func(err error) {
s.cancel()
})
}
{
if sshln != nil {
cd := sidewayConnDialer{
NodeAddr: s.NodeAddr,
SSHDDialListener: sshdDialListener,
SessionDialListener: sessionDialListener,
NeighbourDialer: tcpConnDialer{},
Logger: s.Logger.WithField("com", "ssh-conn-dialer"),
}
sp := &sshProxy{
HostSigners: s.HostSigners,
Signers: s.Signers,
NodeAddr: s.NodeAddr,
ConnDialer: cd,
SessionRepo: sessRepo,
Logger: s.Logger.WithField("com", "ssh-proxy"),
MetricsProvider: s.MetricsProvider,
}
g.Add(func() error {
return sp.Serve(sshln)
}, func(err error) {
_ = sp.Shutdown()
})
}
}
{
if wsln != nil {
var cd connDialer
if sshln == nil {
cd = sidewayConnDialer{
NodeAddr: s.NodeAddr,
SSHDDialListener: sshdDialListener,
SessionDialListener: sessionDialListener,
NeighbourDialer: wsConnDialer{},
Logger: s.Logger.WithField("com", "ws-conn-dialer"),
}
} else {
// If sshln is not nil, always dial to SSHProxy.
// So Host/Client -> WSProxy -> SSHProxy -> sshd/Session
// This makes sure that SSHProxy terminates all SSH requests
// which provides a consistent authentication mechanism.
cd = sshProxyDialer{
sshProxyAddr: sshln.Addr().String(),
Logger: s.Logger.WithField("com", "ws-sshproxy-dialer"),
}
}
ws := &webSocketProxy{
ConnDialer: cd,
Logger: s.Logger.WithField("com", "ws-proxy"),
}
g.Add(func() error {
return ws.Serve(wsln)
}, func(err error) {
_ = ws.Shutdown()
})
}
}
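// Serve the internal sshd on the network provider's listener; host connections are routed here.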
{
ln, err := sshdDialListener.Listen()
if err != nil {
return err
}
sshd := sshd{
SessionRepo: sessRepo,
HostSigners: s.HostSigners, // TODO: use different host keys
NodeAddr: s.NodeAddr,
SessionDialListener: sessionDialListener,
Logger: s.Logger.WithField("com", "sshd"),
}
g.Add(func() error {
return sshd.Serve(ln)
}, func(err error) {
_ = sshd.Shutdown()
})
}
return g.Run()
}
type connDialer interface {
Dial(id *api.Identifier) (net.Conn, error)
}
type sshProxyDialer struct {
sshProxyAddr string
Logger log.FieldLogger
}
func (d sshProxyDialer) Dial(id *api.Identifier) (net.Conn, error) {
// If it's a host request, dial to SSHProxy in the same node.
// Otherwise, dial to the specified SSHProxy.
if id.Type == api.Identifier_HOST {
d.Logger.WithFields(log.Fields{"host": id.Id, "sshproxy-addr": d.sshProxyAddr}).Info("dialing sshproxy sshd")
return net.DialTimeout("tcp", d.sshProxyAddr, tcpDialTimeout)
}
d.Logger.WithFields(log.Fields{"session": id.Id, "sshproxy-addr": d.sshProxyAddr, "addr": id.NodeAddr}).Info("dialing sshproxy session")
return net.DialTimeout("tcp", id.NodeAddr, tcpDialTimeout)
}
type tcpConnDialer struct {
}
func (d tcpConnDialer) Dial(id *api.Identifier) (net.Conn, error) {
return net.DialTimeout("tcp", id.NodeAddr, tcpDialTimeout)
}
type wsConnDialer struct {
}
func (d wsConnDialer) Dial(id *api.Identifier) (net.Conn, error) {
u, err := url.Parse("ws://" + id.NodeAddr)
if err != nil {
return nil, err
}
encodedNodeAddr := base64.StdEncoding.EncodeToString([]byte(id.NodeAddr))
u.User = url.UserPassword(id.Id, encodedNodeAddr)
return ws.NewWSConn(u, true)
}
type sidewayConnDialer struct {
NodeAddr string
SSHDDialListener SSHDDialListener
SessionDialListener SessionDialListener
NeighbourDialer connDialer
Logger log.FieldLogger
}
func (cd sidewayConnDialer) Dial(id *api.Identifier) (net.Conn, error) {
if id.Type == api.Identifier_HOST {
cd.Logger.WithFields(log.Fields{"host": id.Id, "node": cd.NodeAddr}).Info("dialing sshd")
return cd.SSHDDialListener.Dial()
} else {
host, port, ee := net.SplitHostPort(id.NodeAddr)
if ee != nil {
return nil, fmt.Errorf("host address %s is malformed: %w", id.NodeAddr, ee)
}
addr := net.JoinHostPort(host, port)
// If the current node matches, dial to the session.
// Otherwise, dial to the neighbour node.
if cd.NodeAddr == addr {
cd.Logger.WithFields(log.Fields{"session": id.Id, "node": cd.NodeAddr, "addr": addr}).Info("dialing session")
return cd.SessionDialListener.Dial(id.Id)
}
cd.Logger.WithFields(log.Fields{"session": id.Id, "node": cd.NodeAddr, "addr": addr}).Info("dialing neighbour")
return cd.NeighbourDialer.Dial(id)
}
}
|
[
"\"PRIVATE_KEY\""
] |
[] |
[
"PRIVATE_KEY"
] |
[]
|
["PRIVATE_KEY"]
|
go
| 1 | 0 | |
examples/process.py
|
from cloudbutton.multiprocessing import Process
def f(name):
import os
print(os.environ)
print('hello', name)
if __name__ == '__main__':
p = Process(target=f, args=('bob',))
p.start()
p.join()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
simulation_assets/thesis_gazebo/launch/launch_simulation.launch.py
|
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription, DeclareLaunchArgument, OpaqueFunction, ExecuteProcess
from launch.substitutions import LaunchConfiguration
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
from launch.conditions import IfCondition
def launch_setup(context, *args, **kwargs):
# Define input variables
use_sim_time = LaunchConfiguration('use_sim_time')
turtlebot3_model = LaunchConfiguration('turtlebot3_model').perform(context)
world_name = LaunchConfiguration('world').perform(context)
world = os.path.join(get_package_share_directory('thesis_gazebo'),
'worlds', world_name, turtlebot3_model, turtlebot3_model + '.model')
# Add the turtlebot3 model
models_dir = os.path.join(get_package_share_directory('thesis_gazebo'), 'models')
os.environ["TURTLEBOT3_MODEL"] = turtlebot3_model
env1 = os.path.join(models_dir, 'factory', 'models') + ":"
os.environ["GAZEBO_MODEL_PATH"] = models_dir + ":" + env1 + "$GAZEBO_MODEL_PATH"
print(os.getenv("GAZEBO_MODEL_PATH"))
# Robot state publisher
urdf_file_name = 'turtlebot3_' + turtlebot3_model + '.urdf'
urdf = os.path.join(
get_package_share_directory('thesis_gazebo'),'models','turtlebot3_' + turtlebot3_model,
'urdf',
urdf_file_name)
state_publisher = Node(
package='robot_state_publisher',
executable='robot_state_publisher',
name='robot_state_publisher',
output='screen',
parameters=[{'use_sim_time': use_sim_time}],
arguments=[urdf])
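# Note: this node is defined but not returned below; robot_state_publisher.launch.py is included instead.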
launch_file_dir = os.path.join(get_package_share_directory('thesis_gazebo'), 'launch')
pkg_gazebo_ros = get_package_share_directory('gazebo_ros')
# Add the gazebo launch file
gazebo_launch = IncludeLaunchDescription(
PythonLaunchDescriptionSource(
os.path.join(pkg_gazebo_ros, 'launch', 'gzserver.launch.py')
),
launch_arguments={'world': world}.items(),
)
# Add the gzclient execution file
gzclient_launch = ExecuteProcess(
cmd=['gzclient'],
output='screen',
condition=IfCondition(LaunchConfiguration('gui')))
# Add the robot state publisher
robot_state_pub_launch = IncludeLaunchDescription(
PythonLaunchDescriptionSource([launch_file_dir, '/robot_state_publisher.launch.py']),
launch_arguments={'use_sim_time': use_sim_time}.items(),
)
return [gazebo_launch, gzclient_launch, robot_state_pub_launch]
#return [robot_state_pub_launch]
def generate_launch_description():
return LaunchDescription([
DeclareLaunchArgument('use_sim_time', default_value='true'),
DeclareLaunchArgument('gui', default_value='true'),
DeclareLaunchArgument('turtlebot3_model', default_value='burger'),
DeclareLaunchArgument('world', default_value='new_house'),
OpaqueFunction(function = launch_setup)
])
|
[] |
[] |
[
"GAZEBO_MODEL_PATH",
"TURTLEBOT3_MODEL"
] |
[]
|
["GAZEBO_MODEL_PATH", "TURTLEBOT3_MODEL"]
|
python
| 2 | 0 | |
jh_server/settings.py
|
"""
Django settings for jh_server project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "django-insecure-zb!f9@0$9w4v$2f0ngwur@=_9-%s#&7-$i4j98lfzz3i&&o6qo"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["jidou-hikki-demo.herokuapp.com"]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"jh_server.apps.jidou_hikki",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "jh_server.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "jh_server.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / "db.sqlite3",
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
AUTH_USER_MODEL = "jidou_hikki.JidouHikkiUser"
TOKENIZER_CLASS = "jh_server.apps.jidou_hikki.tokenizer.sudachi.SudachiTokenizer"
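# Demo mode flag, read from the DEMO_ONLY environment variable ("true"/"false").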
DEMO_ONLY = os.getenv("DEMO_ONLY", "false")
DEMO_ONLY = DEMO_ONLY.lower() == "true"
|
[] |
[] |
[
"DEMO_ONLY"
] |
[]
|
["DEMO_ONLY"]
|
python
| 1 | 0 | |
api/api.py
|
import os
import time
from dotenv import load_dotenv
import json
def run():
# Load environment variables
load_dotenv(dotenv_path='api/variables.env')
# Init database
from database import Database
database = Database(os.getenv('DATABASE_NAME'))
# Create default access
error, results = database.find(os.getenv('CLIENTS_COLLECTION'), '')
if results and len(list(results)) == 0:
default_client = json.load(open(os.getenv('DEFAULT_TOKEN')))
error = database.insert_one(os.getenv('CLIENTS_COLLECTION'), default_client)
if error:
print('Default client not created!')
return
print('Default client created!')
# Create necessary folders
for dir in [os.getenv('DATASETS_PATH'), os.getenv('INPUT_PATH'), os.getenv('OUTPUT_PATH'), os.getenv('ZIP_PATH'), os.getenv('STORAGE_PATH')]:
if not os.path.exists(dir):
os.makedirs(dir)
# Start server
from server import server
server(database)
if __name__ == "__main__":
run()
|
[] |
[] |
[
"DATABASE_NAME",
"DEFAULT_TOKEN",
"INPUT_PATH",
"DATASETS_PATH",
"STORAGE_PATH",
"ZIP_PATH",
"OUTPUT_PATH",
"CLIENTS_COLLECTION"
] |
[]
|
["DATABASE_NAME", "DEFAULT_TOKEN", "INPUT_PATH", "DATASETS_PATH", "STORAGE_PATH", "ZIP_PATH", "OUTPUT_PATH", "CLIENTS_COLLECTION"]
|
python
| 8 | 0 | |
src/main/java/cn/myafx/cache/CacheKey.java
|
package cn.myafx.cache;
import java.io.File;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import javax.xml.parsers.*;
import org.w3c.dom.*;
/**
* Cache key configuration.
*/
public class CacheKey implements ICacheKey {
private ArrayList<CacheKeyConfig> list;
/**
* env:xmlCacheKeyFile or src/main/resources/cache-key.xml
* @throws Exception
*/
public CacheKey() throws Exception {
String xmlFile = System.getenv("xmlCacheKeyFile");
if(xmlFile == null || xmlFile.isEmpty()){
xmlFile = "src/main/resources/cache-key.xml";
}
this.load(xmlFile);
}
/**
* Initialize from an XML file path.
* @param xmlFile
*/
public CacheKey(String xmlFile) throws Exception {
if (xmlFile == null || xmlFile.isEmpty()) throw new Exception("cache-key.xml is not found!");
String path = xmlFile;
if(xmlFile.startsWith("classpath:")){
path = "src/main/resources/" + xmlFile.substring("classpath:".length());
}
else if("env:xmlCacheKeyFile".equals(xmlFile)){
path = System.getenv("xmlCacheKeyFile");
}
this.load(path);
}
/**
* CacheKey
* @param stream cache-key.xml
* @throws Exception
*/
public CacheKey(InputStream stream) throws Exception {
this.load(stream);
}
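/**
 * Parse a db attribute such as "0,2,5-8" into a list of db indexes.
 */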
private static List<Integer> getDbList(String val) {
ArrayList<Integer> list = null;
if (val!= null && !val.isEmpty()) {
list = new ArrayList<>();
String[] arr = val.split(",");
for (int i=0; i< arr.length; i++) {
String ss = arr[i].trim();
if (ss != null && !ss.isEmpty()) {
if (ss.contains("-")) {
String[] ssarr = ss.split("-");
if (ssarr.length == 2) {
String bs = ssarr[0].trim();
String es = ssarr[1].trim();
Integer bv = 0;
Integer ev = 0;
try{
bv = Integer.parseInt(bs);
ev = Integer.parseInt(es);
if (bv <= ev) {
while (bv < ev)
{
list.add(bv++);
}
list.add(ev);
}
}catch(Exception ex){}
}
}
else
{
try{
Integer v = Integer.parseInt(ss);
list.add(v);
}catch(Exception ex){}
}
}
}
list.trimToSize();
}
return list;
}
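/**
 * Parse an expire attribute such as "ss", "mm:ss", "hh:mm:ss" or "dd:hh:mm:ss" into seconds.
 */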
private static Integer parseExpire(String str){
Integer expire = null;
if (str != null && !str.isEmpty()) {
var arr = str.split(":");
for(var j = 0; j < arr.length / 2; j++){
var v = arr[arr.length-j-1];
arr[arr.length-j-1] = arr[j];
arr[j] = v;
}
try{
// seconds
Integer v = Integer.parseInt(arr[0]);
// minutes
if(arr.length > 1) v += Integer.parseInt(arr[1]) * 60;
// hours
if(arr.length > 2) v += Integer.parseInt(arr[2]) * 60 * 60;
// days
if(arr.length > 3) v += Integer.parseInt(arr[3]) * 60 * 60 * 24;
expire = v;
}
catch(Exception ex) { }
}
return expire;
}
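/**
 * Build the config list from the XML document; each node may define default db/expire
 * values that its child items inherit unless they override them.
 */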
private void load(Document doc) throws Exception{
var rootElement = doc.getDocumentElement();
var nodes = rootElement.getChildNodes();
this.list = new ArrayList<>();
for(int i=0; i< nodes.getLength(); i++){
Node n = nodes.item(i);
if(!(n instanceof Element node)) continue;
List<Integer> node_db = getDbList(node.getAttribute("db"));
if(node_db == null) node_db = new ArrayList<>(0);
Integer node_expire = parseExpire(node.getAttribute("expire"));
NodeList child = node.getChildNodes();
for(int j=0; j<child.getLength(); j++){
Node in = child.item(j);
if(!(in instanceof Element item)) continue;
var key = item.getAttribute("key");
var db = getDbList(item.getAttribute("db"));
if(db == null) db = node_db;
var expire = parseExpire(item.getAttribute("expire"));
if(expire == null) expire = node_expire;
this.list.add(new CacheKeyConfig(node.getNodeName(), item.getNodeName(), key, expire, db));
}
}
this.list.trimToSize();
}
private void load(String xmlFile) throws Exception {
File f = new File(xmlFile);
if (!f.exists() || !f.isFile()) throw new Exception("xmlFile(" + xmlFile + ") not found!");
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder builder = factory.newDocumentBuilder();
Document doc = builder.parse(f);
this.load(doc);
}
private void load(InputStream stream) throws Exception {
if (stream == null) throw new Exception("stream is null!");
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
DocumentBuilder builder = factory.newDocumentBuilder();
Document doc = builder.parse(stream);
this.load(doc);
}
/**
* Get the config entry for the given node and item.
* @param node node name
* @param item item name
* @return
*/
@Override
public CacheKeyConfig get(String node, String item) {
CacheKeyConfig m = null;
for (var q : this.list) {
if(java.util.Objects.equals(q.Node, node) && java.util.Objects.equals(q.Item, item)){
m = q;
break;
}
}
return m;
}
/**
* Get the cache key.
* @param node node name
* @param item item name
* @return key
*/
@Override
public String getKey(String node, String item){
var m = this.get(node, item);
return m != null ? m.Key : null;
}
/**
* Get the expiration time, in seconds.
* @param node node name
* @param item item name
* @return expiration time, in seconds
*/
@Override
public Integer getExpire(String node, String item){
var m = this.get(node, item);
return m != null ? m.Expire : null;
}
/**
* Get the db list.
* @param node node name
* @param item item name
* @return db list
*/
@Override
public List<Integer> getDb(String node, String item){
var m = this.get(node, item);
return m != null ? m.Db : null;
}
}
|
[
"\"xmlCacheKeyFile\"",
"\"xmlCacheKeyFile\""
] |
[] |
[
"xmlCacheKeyFile"
] |
[]
|
["xmlCacheKeyFile"]
|
java
| 1 | 0 | |
platform-tools/systrace/catapult/devil/devil/android/perf/perf_control_devicetest.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0212
import os
import sys
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from devil.android import device_test_case
from devil.android import device_utils
from devil.android.perf import perf_control
class TestPerfControl(device_test_case.DeviceTestCase):
def setUp(self):
super(TestPerfControl, self).setUp()
if not os.getenv('BUILDTYPE'):
os.environ['BUILDTYPE'] = 'Debug'
self._device = device_utils.DeviceUtils(self.serial)
def testHighPerfMode(self):
perf = perf_control.PerfControl(self._device)
try:
perf.SetPerfProfilingMode()
cpu_info = perf.GetCpuInfo()
self.assertEquals(len(perf._cpu_files), len(cpu_info))
for _, online, governor in cpu_info:
self.assertTrue(online)
self.assertEquals('performance', governor)
finally:
perf.SetDefaultPerfMode()
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"BUILDTYPE"
] |
[]
|
["BUILDTYPE"]
|
python
| 1 | 0 | |
common/types.go
|
/*
Copyright 2016-present Wenhui Shen <www.webx.top>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bytes"
"errors"
"html/template"
"io/fs"
"log"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"github.com/coscms/forms/config"
"github.com/webx-top/tagfast"
"golang.org/x/sync/singleflight"
)
// Available form themes
const (
BASE = "base"
BOOTSTRAP = "bootstrap3"
)
var (
tmplDirs = map[string]string{
BASE: "templates",
BOOTSTRAP: "templates",
}
LabelFn = func(s string) string {
return s
}
//private
cachedTemplate = make(map[string]*template.Template)
cachedConfig = make(map[string]*config.Config)
lockTemplate = new(sync.RWMutex)
lockConfig = new(sync.RWMutex)
lockTmplDir = new(sync.RWMutex)
sg singleflight.Group
)
const (
PACKAGE_NAME = "github.com/coscms/forms"
)
// Input field types
const (
BUTTON = "button"
CHECKBOX = "checkbox"
COLOR = "color"
DATE = "date"
DATETIME = "datetime"
DATETIME_LOCAL = "datetime-local"
EMAIL = "email"
FILE = "file"
HIDDEN = "hidden"
IMAGE = "image"
MONTH = "month"
NUMBER = "number"
PASSWORD = "password"
RADIO = "radio"
RANGE = "range"
RESET = "reset"
SEARCH = "search"
SUBMIT = "submit"
TEL = "tel"
TEXT = "text"
TIME = "time"
URL = "url"
WEEK = "week"
TEXTAREA = "textarea"
SELECT = "select"
STATIC = "static"
)
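// SetTmplDir registers the template directory used by the given theme.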
func SetTmplDir(theme, tmplDir string) {
lockTmplDir.Lock()
tmplDirs[theme] = tmplDir
lockTmplDir.Unlock()
}
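// TmplDir returns the template directory registered for the given theme.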
func TmplDir(theme string) (tmplDir string) {
lockTmplDir.RLock()
tmplDir = tmplDirs[theme]
lockTmplDir.RUnlock()
return
}
// LookupPath creates the complete path of the desired widget template
func LookupPath(widget string) string {
if !FileSystem.IsEmpty() {
fp, err := FileSystem.Open(widget)
if err != nil {
if !errors.Is(err, fs.ErrNotExist) {
log.Println(err.Error())
return widget
}
} else {
defer fp.Close()
fi, err := fp.Stat()
if err == nil && !fi.IsDir() {
return widget
}
}
}
if !TmplExists(widget) {
return filepath.Join(os.Getenv("GOPATH"), "src", PACKAGE_NAME, `defaults`, widget)
}
return widget
}
func TmplExists(tmpl string) bool {
_, err := os.Stat(tmpl)
return !os.IsNotExist(err)
}
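// GetOrSetCachedTemplate returns the cached template for cachedKey, building and caching it via generator on a miss (deduplicated with singleflight).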
func GetOrSetCachedTemplate(cachedKey string, generator func() (*template.Template, error)) (c *template.Template, err error) {
var ok bool
lockTemplate.RLock()
c, ok = cachedTemplate[cachedKey]
lockTemplate.RUnlock()
if ok {
return c, nil
}
getValue, getErr, _ := sg.Do(cachedKey, func() (interface{}, error) {
c, err = generator()
if err != nil {
return nil, err
}
lockTemplate.Lock()
cachedTemplate[cachedKey] = c
lockTemplate.Unlock()
return c, nil
})
if getErr != nil {
return nil, getErr
}
return getValue.(*template.Template), nil
}
func ClearCachedTemplate() {
lockTemplate.Lock()
cachedTemplate = make(map[string]*template.Template)
lockTemplate.Unlock()
}
func DelCachedTemplate(key string) bool {
lockTemplate.Lock()
defer lockTemplate.Unlock()
if _, ok := cachedTemplate[key]; ok {
delete(cachedTemplate, key)
return true
}
return false
}
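// GetOrSetCachedConfig returns the cached config for cachedKey, building and caching it via generator on a miss (deduplicated with singleflight).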
func GetOrSetCachedConfig(cachedKey string, generator func() (*config.Config, error)) (c *config.Config, err error) {
var ok bool
lockConfig.RLock()
c, ok = cachedConfig[cachedKey]
lockConfig.RUnlock()
if ok {
return c, nil
}
getValue, getErr, _ := sg.Do(cachedKey, func() (interface{}, error) {
c, err = generator()
if err != nil {
return nil, err
}
lockConfig.Lock()
cachedConfig[cachedKey] = c
lockConfig.Unlock()
return c, nil
})
if getErr != nil {
return nil, getErr
}
return getValue.(*config.Config), nil
}
func ClearCachedConfig() {
lockConfig.Lock()
cachedConfig = make(map[string]*config.Config)
lockConfig.Unlock()
}
func DelCachedConfig(key string) bool {
lockConfig.Lock()
defer lockConfig.Unlock()
if _, ok := cachedConfig[key]; ok {
delete(cachedConfig, key)
return true
}
return false
}
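// ParseTmpl renders the given templates with data, caching the parsed template set per template combination.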
func ParseTmpl(data interface{},
fn_tpl template.FuncMap,
fn_fixTpl func(tpls ...string) ([]string, error),
tpls ...string) string {
buf := bytes.NewBuffer(nil)
tpf := strings.Join(tpls, `|`)
tpl, err := GetOrSetCachedTemplate(tpf, func() (*template.Template, error) {
c := template.New(filepath.Base(tpls[0]))
if fn_tpl != nil {
c.Funcs(fn_tpl)
}
var err error
if fn_fixTpl != nil {
tpls, err = fn_fixTpl(tpls...)
if err != nil {
return nil, err
}
}
if !FileSystem.IsEmpty() {
return c.ParseFS(FileSystem, tpls...)
}
return c.ParseFiles(tpls...)
})
if err != nil {
return err.Error()
}
err = tpl.Execute(buf, data)
if err != nil {
return err.Error()
}
return buf.String()
}
func TagVal(t reflect.Type, fieldNo int, tagName string) string {
return tagfast.Value(t, t.Field(fieldNo), tagName)
}
func Tag(t reflect.Type, f reflect.StructField, tagName string) (value string, tf tagfast.Faster) {
return tagfast.Tag(t, f, tagName)
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
web/web.go
|
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package web
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
stdlog "log"
"math"
"net"
"net/http"
"net/http/pprof"
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
template_text "text/template"
"time"
"github.com/alecthomas/units"
"github.com/go-kit/kit/log"
"github.com/go-kit/kit/log/level"
conntrack "github.com/mwitkow/go-conntrack"
"github.com/opentracing-contrib/go-stdlib/nethttp"
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
io_prometheus_client "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
"github.com/prometheus/common/route"
"github.com/prometheus/common/server"
"github.com/soheilhy/cmux"
"go.uber.org/atomic"
"golang.org/x/net/netutil"
"google.golang.org/grpc"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/notifier"
"github.com/prometheus/prometheus/promql"
"github.com/prometheus/prometheus/rules"
"github.com/prometheus/prometheus/scrape"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/template"
"github.com/prometheus/prometheus/tsdb"
"github.com/prometheus/prometheus/tsdb/index"
"github.com/prometheus/prometheus/util/httputil"
api_v1 "github.com/prometheus/prometheus/web/api/v1"
api_v2 "github.com/prometheus/prometheus/web/api/v2"
"github.com/prometheus/prometheus/web/ui"
)
// Paths that are handled by the React / Reach router that should all be served the main React app's index.html.
var reactRouterPaths = []string{
"/",
"/alerts",
"/config",
"/flags",
"/graph",
"/rules",
"/service-discovery",
"/status",
"/targets",
"/tsdb-status",
"/version",
}
// withStackTracer logs the stack trace in case the request panics. The function
// will re-raise the error which will then be handled by the net/http package.
// It is needed because the go-kit log package doesn't manage properly the
// panics from net/http (see https://github.com/go-kit/kit/issues/233).
func withStackTracer(h http.Handler, l log.Logger) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer func() {
if err := recover(); err != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
level.Error(l).Log("msg", "panic while serving request", "client", r.RemoteAddr, "url", r.URL, "err", err, "stack", buf)
panic(err)
}
}()
h.ServeHTTP(w, r)
})
}
type metrics struct {
requestCounter *prometheus.CounterVec
requestDuration *prometheus.HistogramVec
responseSize *prometheus.HistogramVec
}
func newMetrics(r prometheus.Registerer) *metrics {
m := &metrics{
requestCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "prometheus_http_requests_total",
Help: "Counter of HTTP requests.",
},
[]string{"handler", "code"},
),
requestDuration: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "prometheus_http_request_duration_seconds",
Help: "Histogram of latencies for HTTP requests.",
Buckets: []float64{.1, .2, .4, 1, 3, 8, 20, 60, 120},
},
[]string{"handler"},
),
responseSize: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "prometheus_http_response_size_bytes",
Help: "Histogram of response size for HTTP requests.",
Buckets: prometheus.ExponentialBuckets(100, 10, 8),
},
[]string{"handler"},
),
}
if r != nil {
r.MustRegister(m.requestCounter, m.requestDuration, m.responseSize)
registerFederationMetrics(r)
}
return m
}
func (m *metrics) instrumentHandlerWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return m.instrumentHandler(prefix+handlerName, handler)
}
}
func (m *metrics) instrumentHandler(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return promhttp.InstrumentHandlerCounter(
m.requestCounter.MustCurryWith(prometheus.Labels{"handler": handlerName}),
promhttp.InstrumentHandlerDuration(
m.requestDuration.MustCurryWith(prometheus.Labels{"handler": handlerName}),
promhttp.InstrumentHandlerResponseSize(
m.responseSize.MustCurryWith(prometheus.Labels{"handler": handlerName}),
handler,
),
),
)
}
// PrometheusVersion contains build information about Prometheus.
type PrometheusVersion = api_v1.PrometheusVersion
type LocalStorage interface {
storage.Storage
api_v1.TSDBAdminStats
}
// Handler serves various HTTP endpoints of the Prometheus server
type Handler struct {
logger log.Logger
gatherer prometheus.Gatherer
metrics *metrics
scrapeManager *scrape.Manager
ruleManager *rules.Manager
queryEngine *promql.Engine
lookbackDelta time.Duration
context context.Context
storage storage.Storage
localStorage LocalStorage
notifier *notifier.Manager
apiV1 *api_v1.API
router *route.Router
quitCh chan struct{}
reloadCh chan chan error
options *Options
config *config.Config
versionInfo *PrometheusVersion
birth time.Time
cwd string
flagsMap map[string]string
mtx sync.RWMutex
now func() model.Time
ready atomic.Uint32 // ready is uint32 rather than boolean to be able to use atomic functions.
}
// ApplyConfig updates the config field of the Handler struct
func (h *Handler) ApplyConfig(conf *config.Config) error {
h.mtx.Lock()
defer h.mtx.Unlock()
h.config = conf
return nil
}
// Options for the web Handler.
type Options struct {
Context context.Context
TSDBRetentionDuration model.Duration
TSDBDir string
TSDBMaxBytes units.Base2Bytes
LocalStorage LocalStorage
Storage storage.Storage
QueryEngine *promql.Engine
LookbackDelta time.Duration
ScrapeManager *scrape.Manager
RuleManager *rules.Manager
Notifier *notifier.Manager
Version *PrometheusVersion
Flags map[string]string
ListenAddress string
CORSOrigin *regexp.Regexp
ReadTimeout time.Duration
MaxConnections int
ExternalURL *url.URL
RoutePrefix string
UseLocalAssets bool
UserAssetsPath string
ConsoleTemplatesPath string
ConsoleLibrariesPath string
EnableLifecycle bool
EnableAdminAPI bool
PageTitle string
RemoteReadSampleLimit int
RemoteReadConcurrencyLimit int
RemoteReadBytesInFrame int
Gatherer prometheus.Gatherer
Registerer prometheus.Registerer
}
// New initializes a new web Handler.
func New(logger log.Logger, o *Options) *Handler {
if logger == nil {
logger = log.NewNopLogger()
}
m := newMetrics(o.Registerer)
router := route.New().
WithInstrumentation(m.instrumentHandler).
WithInstrumentation(setPathWithPrefix(""))
cwd, err := os.Getwd()
if err != nil {
cwd = "<error retrieving current working directory>"
}
h := &Handler{
logger: logger,
gatherer: o.Gatherer,
metrics: m,
router: router,
quitCh: make(chan struct{}),
reloadCh: make(chan chan error),
options: o,
versionInfo: o.Version,
birth: time.Now().UTC(),
cwd: cwd,
flagsMap: o.Flags,
context: o.Context,
scrapeManager: o.ScrapeManager,
ruleManager: o.RuleManager,
queryEngine: o.QueryEngine,
lookbackDelta: o.LookbackDelta,
storage: o.Storage,
localStorage: o.LocalStorage,
notifier: o.Notifier,
now: model.Now,
}
h.ready.Store(0)
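// Retriever factories hand the v1 API fresh references to the scrape manager, notifier and rule manager.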
factoryTr := func(_ context.Context) api_v1.TargetRetriever { return h.scrapeManager }
factoryAr := func(_ context.Context) api_v1.AlertmanagerRetriever { return h.notifier }
FactoryRr := func(_ context.Context) api_v1.RulesRetriever { return h.ruleManager }
h.apiV1 = api_v1.NewAPI(h.queryEngine, h.storage, factoryTr, factoryAr,
func() config.Config {
h.mtx.RLock()
defer h.mtx.RUnlock()
return *h.config
},
o.Flags,
api_v1.GlobalURLOptions{
ListenAddress: o.ListenAddress,
Host: o.ExternalURL.Host,
Scheme: o.ExternalURL.Scheme,
},
h.testReady,
h.options.LocalStorage,
h.options.TSDBDir,
h.options.EnableAdminAPI,
logger,
FactoryRr,
h.options.RemoteReadSampleLimit,
h.options.RemoteReadConcurrencyLimit,
h.options.RemoteReadBytesInFrame,
h.options.CORSOrigin,
h.runtimeInfo,
h.versionInfo,
)
if o.RoutePrefix != "/" {
// If the prefix is missing for the root path, prepend it.
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, o.RoutePrefix, http.StatusFound)
})
router = router.WithPrefix(o.RoutePrefix)
}
readyf := h.testReady
router.Get("/", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
})
router.Get("/alerts", readyf(h.alerts))
router.Get("/graph", readyf(h.graph))
router.Get("/status", readyf(h.status))
router.Get("/flags", readyf(h.flags))
router.Get("/config", readyf(h.serveConfig))
router.Get("/rules", readyf(h.rules))
router.Get("/targets", readyf(h.targets))
router.Get("/version", readyf(h.version))
router.Get("/service-discovery", readyf(h.serviceDiscovery))
router.Get("/metrics", promhttp.Handler().ServeHTTP)
router.Get("/federate", readyf(httputil.CompressionHandler{
Handler: http.HandlerFunc(h.federation),
}.ServeHTTP))
router.Get("/consoles/*filepath", readyf(h.consoles))
router.Get("/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath"))
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
// Make sure that "<path-prefix>/new" is redirected to "<path-prefix>/new/" and
// not just the naked "/new/", which would be the default behavior of the router
// with the "RedirectTrailingSlash" option (https://godoc.org/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash),
// and which breaks users with a --web.route-prefix that deviates from the path derived
// from the external URL.
router.Get("/new", func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, path.Join(o.ExternalURL.Path, "new")+"/", http.StatusFound)
})
router.Get("/new/*filepath", func(w http.ResponseWriter, r *http.Request) {
p := route.Param(r.Context(), "filepath")
// For paths that the React/Reach router handles, we want to serve the
// index.html, but with replaced path prefix placeholder.
for _, rp := range reactRouterPaths {
if p != rp {
continue
}
f, err := ui.Assets.Open("/static/react/index.html")
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error opening React index.html: %v", err)
return
}
idx, err := ioutil.ReadAll(f)
if err != nil {
w.WriteHeader(http.StatusInternalServerError)
fmt.Fprintf(w, "Error reading React index.html: %v", err)
return
}
replacedIdx := bytes.ReplaceAll(idx, []byte("PATH_PREFIX_PLACEHOLDER"), []byte(o.ExternalURL.Path))
replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("CONSOLES_LINK_PLACEHOLDER"), []byte(h.consolesPath()))
replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("TITLE_PLACEHOLDER"), []byte(h.options.PageTitle))
w.Write(replacedIdx)
return
}
// For all other paths, serve auxiliary assets.
r.URL.Path = path.Join("/static/react/", p)
fs := server.StaticFileServer(ui.Assets)
fs.ServeHTTP(w, r)
})
if o.UserAssetsPath != "" {
router.Get("/user/*filepath", route.FileServe(o.UserAssetsPath))
}
if o.EnableLifecycle {
router.Post("/-/quit", h.quit)
router.Put("/-/quit", h.quit)
router.Post("/-/reload", h.reload)
router.Put("/-/reload", h.reload)
} else {
forbiddenAPINotEnabled := func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte("Lifecycle API is not enabled."))
}
router.Post("/-/quit", forbiddenAPINotEnabled)
router.Put("/-/quit", forbiddenAPINotEnabled)
router.Post("/-/reload", forbiddenAPINotEnabled)
router.Put("/-/reload", forbiddenAPINotEnabled)
}
router.Get("/-/quit", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write([]byte("Only POST or PUT requests allowed"))
})
router.Get("/-/reload", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusMethodNotAllowed)
w.Write([]byte("Only POST or PUT requests allowed"))
})
router.Get("/debug/*subpath", serveDebug)
router.Post("/debug/*subpath", serveDebug)
router.Get("/-/healthy", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Prometheus is Healthy.\n")
})
router.Get("/-/ready", readyf(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
fmt.Fprintf(w, "Prometheus is Ready.\n")
}))
return h
}
func serveDebug(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
subpath := route.Param(ctx, "subpath")
if subpath == "/pprof" {
http.Redirect(w, req, req.URL.Path+"/", http.StatusMovedPermanently)
return
}
if !strings.HasPrefix(subpath, "/pprof/") {
http.NotFound(w, req)
return
}
subpath = strings.TrimPrefix(subpath, "/pprof/")
switch subpath {
case "cmdline":
pprof.Cmdline(w, req)
case "profile":
pprof.Profile(w, req)
case "symbol":
pprof.Symbol(w, req)
case "trace":
pprof.Trace(w, req)
default:
req.URL.Path = "/debug/pprof/" + subpath
pprof.Index(w, req)
}
}
// Ready sets Handler to be ready.
func (h *Handler) Ready() {
h.ready.Store(1)
}
// Verifies whether the server is ready or not.
func (h *Handler) isReady() bool {
return h.ready.Load() > 0
}
// Checks if server is ready, calls f if it is, returns 503 if it is not.
func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if h.isReady() {
f(w, r)
} else {
w.WriteHeader(http.StatusServiceUnavailable)
fmt.Fprintf(w, "Service Unavailable")
}
}
}
// Checks if server is ready, calls f if it is, returns 503 if it is not.
func (h *Handler) testReadyHandler(f http.Handler) http.HandlerFunc {
return h.testReady(f.ServeHTTP)
}
// Quit returns the receive-only quit channel.
func (h *Handler) Quit() <-chan struct{} {
return h.quitCh
}
// Reload returns the receive-only channel that signals configuration reload requests.
func (h *Handler) Reload() <-chan chan error {
return h.reloadCh
}
// Run serves the HTTP endpoints.
func (h *Handler) Run(ctx context.Context) error {
level.Info(h.logger).Log("msg", "Start listening for connections", "address", h.options.ListenAddress)
listener, err := net.Listen("tcp", h.options.ListenAddress)
if err != nil {
return err
}
listener = netutil.LimitListener(listener, h.options.MaxConnections)
// Monitor incoming connections with conntrack.
listener = conntrack.NewListener(listener,
conntrack.TrackWithName("http"),
conntrack.TrackWithTracing())
var (
m = cmux.New(listener)
// See https://github.com/grpc/grpc-go/issues/2636 for why we need to use MatchWithWriters().
grpcl = m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
httpl = m.Match(cmux.HTTP1Fast())
grpcSrv = grpc.NewServer()
)
// Prevent open connections to block the shutdown of the handler.
m.SetReadTimeout(h.options.ReadTimeout)
av2 := api_v2.New(
h.options.LocalStorage,
h.options.TSDBDir,
h.options.EnableAdminAPI,
)
av2.RegisterGRPC(grpcSrv)
hh, err := av2.HTTPHandler(ctx, h.options.ListenAddress)
if err != nil {
return err
}
hhFunc := h.testReadyHandler(hh)
operationName := nethttp.OperationNameFunc(func(r *http.Request) string {
return fmt.Sprintf("%s %s", r.Method, r.URL.Path)
})
mux := http.NewServeMux()
mux.Handle("/", h.router)
apiPath := "/api"
if h.options.RoutePrefix != "/" {
apiPath = h.options.RoutePrefix + apiPath
level.Info(h.logger).Log("msg", "Router prefix", "prefix", h.options.RoutePrefix)
}
av1 := route.New().
WithInstrumentation(h.metrics.instrumentHandlerWithPrefix("/api/mysqlconfig")).
WithInstrumentation(setPathWithPrefix(apiPath + "/mysqlconfig"))
h.apiV1.Register(av1)
mux.Handle(apiPath+"/mysqlconfig/", http.StripPrefix(apiPath+"/mysqlconfig", av1))
mux.Handle(apiPath+"/", http.StripPrefix(apiPath,
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httputil.SetCORS(w, h.options.CORSOrigin, r)
hhFunc(w, r)
}),
))
errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0)
httpSrv := &http.Server{
Handler: withStackTracer(nethttp.Middleware(opentracing.GlobalTracer(), mux, operationName), h.logger),
ErrorLog: errlog,
ReadTimeout: h.options.ReadTimeout,
}
errCh := make(chan error)
go func() {
errCh <- httpSrv.Serve(httpl)
}()
go func() {
errCh <- grpcSrv.Serve(grpcl)
}()
go func() {
errCh <- m.Serve()
}()
select {
case e := <-errCh:
return e
case <-ctx.Done():
httpSrv.Shutdown(ctx)
stopGRPCSrv(grpcSrv)
return nil
}
}
// stopGRPCSrv stops a given GRPC server. An attempt to stop the server
// gracefully is made first. After 15s, the server is forced to stop.
func stopGRPCSrv(srv *grpc.Server) {
stop := make(chan struct{})
go func() {
srv.GracefulStop()
close(stop)
}()
select {
case <-time.After(15 * time.Second):
srv.Stop()
case <-stop:
}
}
func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) {
var groups []*rules.Group
for _, group := range h.ruleManager.RuleGroups() {
if group.HasAlertingRules() {
groups = append(groups, group)
}
}
alertStatus := AlertStatus{
Groups: groups,
AlertStateToRowClass: map[rules.AlertState]string{
rules.StateInactive: "success",
rules.StatePending: "warning",
rules.StateFiring: "danger",
},
Counts: alertCounts(groups),
}
h.executeTemplate(w, "alerts.html", alertStatus)
}
func alertCounts(groups []*rules.Group) AlertByStateCount {
result := AlertByStateCount{}
for _, group := range groups {
for _, alert := range group.AlertingRules() {
switch alert.State() {
case rules.StateInactive:
result.Inactive++
case rules.StatePending:
result.Pending++
case rules.StateFiring:
result.Firing++
}
}
}
return result
}
func (h *Handler) consoles(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
name := route.Param(ctx, "filepath")
file, err := http.Dir(h.options.ConsoleTemplatesPath).Open(name)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
defer file.Close()
text, err := ioutil.ReadAll(file)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
ctx = httputil.ContextFromRequest(ctx, r)
// Provide URL parameters as a map for easy use. Advanced users may have need for
// parameters beyond the first, so provide RawParams.
rawParams, err := url.ParseQuery(r.URL.RawQuery)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
params := map[string]string{}
for k, v := range rawParams {
params[k] = v[0]
}
externalLabels := map[string]string{}
h.mtx.RLock()
els := h.config.GlobalConfig.ExternalLabels
h.mtx.RUnlock()
for _, el := range els {
externalLabels[el.Name] = el.Value
}
// Inject some convenience variables that are easier to remember for users
// who are not used to Go's templating system.
defs := []string{
"{{$rawParams := .RawParams }}",
"{{$params := .Params}}",
"{{$path := .Path}}",
"{{$externalLabels := .ExternalLabels}}",
}
data := struct {
RawParams url.Values
Params map[string]string
Path string
ExternalLabels map[string]string
}{
RawParams: rawParams,
Params: params,
Path: strings.TrimLeft(name, "/"),
ExternalLabels: externalLabels,
}
tmpl := template.NewTemplateExpander(
ctx,
strings.Join(append(defs, string(text)), ""),
"__console_"+name,
data,
h.now(),
template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
h.options.ExternalURL,
)
filenames, err := filepath.Glob(h.options.ConsoleLibrariesPath + "/*.lib")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
result, err := tmpl.ExpandHTML(filenames)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.WriteString(w, result)
}
func (h *Handler) graph(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "graph.html", nil)
}
func (h *Handler) status(w http.ResponseWriter, r *http.Request) {
status := struct {
Birth time.Time
CWD string
Version *PrometheusVersion
Alertmanagers []*url.URL
GoroutineCount int
GOMAXPROCS int
GOGC string
GODEBUG string
CorruptionCount int64
ChunkCount int64
TimeSeriesCount int64
LastConfigTime time.Time
ReloadConfigSuccess bool
StorageRetention string
NumSeries uint64
MaxTime int64
MinTime int64
Stats *index.PostingsStats
Duration string
}{
Birth: h.birth,
CWD: h.cwd,
Version: h.versionInfo,
Alertmanagers: h.notifier.Alertmanagers(),
GoroutineCount: runtime.NumGoroutine(),
GOMAXPROCS: runtime.GOMAXPROCS(0),
GOGC: os.Getenv("GOGC"),
GODEBUG: os.Getenv("GODEBUG"),
}
if h.options.TSDBRetentionDuration != 0 {
status.StorageRetention = h.options.TSDBRetentionDuration.String()
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()
if err != nil {
http.Error(w, fmt.Sprintf("error gathering runtime status: %s", err), http.StatusInternalServerError)
return
}
for _, mF := range metrics {
switch *mF.Name {
case "prometheus_tsdb_head_chunks":
status.ChunkCount = int64(toFloat64(mF))
case "prometheus_tsdb_head_series":
status.TimeSeriesCount = int64(toFloat64(mF))
case "prometheus_tsdb_wal_corruptions_total":
status.CorruptionCount = int64(toFloat64(mF))
case "prometheus_config_last_reload_successful":
status.ReloadConfigSuccess = toFloat64(mF) != 0
case "prometheus_config_last_reload_success_timestamp_seconds":
status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0).UTC()
}
}
startTime := time.Now().UnixNano()
s, err := h.localStorage.Stats("__name__")
if err != nil {
if errors.Cause(err) == tsdb.ErrNotReady {
http.Error(w, tsdb.ErrNotReady.Error(), http.StatusServiceUnavailable)
return
}
http.Error(w, fmt.Sprintf("error gathering local storage statistics: %s", err), http.StatusInternalServerError)
return
}
status.Duration = fmt.Sprintf("%.3f", float64(time.Now().UnixNano()-startTime)/float64(1e9))
status.Stats = s.IndexPostingStats
status.NumSeries = s.NumSeries
status.MaxTime = s.MaxTime
status.MinTime = s.MinTime
h.executeTemplate(w, "status.html", status)
}
func (h *Handler) runtimeInfo() (api_v1.RuntimeInfo, error) {
status := api_v1.RuntimeInfo{
StartTime: h.birth,
CWD: h.cwd,
GoroutineCount: runtime.NumGoroutine(),
GOMAXPROCS: runtime.GOMAXPROCS(0),
GOGC: os.Getenv("GOGC"),
GODEBUG: os.Getenv("GODEBUG"),
}
if h.options.TSDBRetentionDuration != 0 {
status.StorageRetention = h.options.TSDBRetentionDuration.String()
}
if h.options.TSDBMaxBytes != 0 {
if status.StorageRetention != "" {
status.StorageRetention = status.StorageRetention + " or "
}
status.StorageRetention = status.StorageRetention + h.options.TSDBMaxBytes.String()
}
metrics, err := h.gatherer.Gather()
if err != nil {
return status, errors.Errorf("error gathering runtime status: %s", err)
}
for _, mF := range metrics {
switch *mF.Name {
case "prometheus_tsdb_head_chunks":
status.ChunkCount = int64(toFloat64(mF))
case "prometheus_tsdb_head_series":
status.TimeSeriesCount = int64(toFloat64(mF))
case "prometheus_tsdb_wal_corruptions_total":
status.CorruptionCount = int64(toFloat64(mF))
case "prometheus_config_last_reload_successful":
status.ReloadConfigSuccess = toFloat64(mF) != 0
case "prometheus_config_last_reload_success_timestamp_seconds":
status.LastConfigTime = time.Unix(int64(toFloat64(mF)), 0).UTC()
}
}
return status, nil
}
func toFloat64(f *io_prometheus_client.MetricFamily) float64 {
m := *f.Metric[0]
if m.Gauge != nil {
return m.Gauge.GetValue()
}
if m.Counter != nil {
return m.Counter.GetValue()
}
if m.Untyped != nil {
return m.Untyped.GetValue()
}
return math.NaN()
}
func (h *Handler) flags(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "flags.html", h.flagsMap)
}
func (h *Handler) serveConfig(w http.ResponseWriter, r *http.Request) {
h.mtx.RLock()
defer h.mtx.RUnlock()
h.executeTemplate(w, "config.html", h.config.String())
}
func (h *Handler) rules(w http.ResponseWriter, r *http.Request) {
h.executeTemplate(w, "rules.html", h.ruleManager)
}
func (h *Handler) serviceDiscovery(w http.ResponseWriter, r *http.Request) {
var index []string
targets := h.scrapeManager.TargetsAll()
for job := range targets {
index = append(index, job)
}
sort.Strings(index)
scrapeConfigData := struct {
Index []string
Targets map[string][]*scrape.Target
Active []int
Dropped []int
Total []int
}{
Index: index,
Targets: make(map[string][]*scrape.Target),
Active: make([]int, len(index)),
Dropped: make([]int, len(index)),
Total: make([]int, len(index)),
}
for i, job := range scrapeConfigData.Index {
scrapeConfigData.Targets[job] = make([]*scrape.Target, 0, len(targets[job]))
scrapeConfigData.Total[i] = len(targets[job])
for _, target := range targets[job] {
// Do not display more than 100 dropped targets per job to avoid
// returning too much data to the clients.
if target.Labels().Len() == 0 {
scrapeConfigData.Dropped[i]++
if scrapeConfigData.Dropped[i] > 100 {
continue
}
} else {
scrapeConfigData.Active[i]++
}
scrapeConfigData.Targets[job] = append(scrapeConfigData.Targets[job], target)
}
}
h.executeTemplate(w, "service-discovery.html", scrapeConfigData)
}
func (h *Handler) targets(w http.ResponseWriter, r *http.Request) {
tps := h.scrapeManager.TargetsActive()
for _, targets := range tps {
sort.Slice(targets, func(i, j int) bool {
iJobLabel := targets[i].Labels().Get(model.JobLabel)
jJobLabel := targets[j].Labels().Get(model.JobLabel)
if iJobLabel == jJobLabel {
return targets[i].Labels().Get(model.InstanceLabel) < targets[j].Labels().Get(model.InstanceLabel)
}
return iJobLabel < jJobLabel
})
}
h.executeTemplate(w, "targets.html", struct {
TargetPools map[string][]*scrape.Target
}{
TargetPools: tps,
})
}
func (h *Handler) version(w http.ResponseWriter, r *http.Request) {
dec := json.NewEncoder(w)
if err := dec.Encode(h.versionInfo); err != nil {
http.Error(w, fmt.Sprintf("error encoding JSON: %s", err), http.StatusInternalServerError)
}
}
func (h *Handler) quit(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Requesting termination... Goodbye!")
close(h.quitCh)
}
func (h *Handler) reload(w http.ResponseWriter, r *http.Request) {
rc := make(chan error)
h.reloadCh <- rc
if err := <-rc; err != nil {
http.Error(w, fmt.Sprintf("failed to reload config: %s", err), http.StatusInternalServerError)
}
}
func (h *Handler) consolesPath() string {
if _, err := os.Stat(h.options.ConsoleTemplatesPath + "/index.html"); !os.IsNotExist(err) {
return h.options.ExternalURL.Path + "/consoles/index.html"
}
if h.options.UserAssetsPath != "" {
if _, err := os.Stat(h.options.UserAssetsPath + "/index.html"); !os.IsNotExist(err) {
return h.options.ExternalURL.Path + "/user/index.html"
}
}
return ""
}
func tmplFuncs(consolesPath string, opts *Options) template_text.FuncMap {
return template_text.FuncMap{
"since": func(t time.Time) time.Duration {
return time.Since(t) / time.Millisecond * time.Millisecond
},
"unixToTime": func(i int64) time.Time {
t := time.Unix(i/int64(time.Microsecond), 0).UTC()
return t
},
"consolesPath": func() string { return consolesPath },
"pathPrefix": func() string { return opts.ExternalURL.Path },
"pageTitle": func() string { return opts.PageTitle },
"buildVersion": func() string { return opts.Version.Revision },
"globalURL": func(u *url.URL) *url.URL {
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
return u
}
for _, lhr := range api_v1.LocalhostRepresentations {
if host == lhr {
_, ownPort, err := net.SplitHostPort(opts.ListenAddress)
if err != nil {
return u
}
if port == ownPort {
// Only in the case where the target is on localhost and its port is
// the same as the one we're listening on, we know for sure that
// we're monitoring our own process and that we need to change the
// scheme, hostname, and port to the externally reachable ones as
// well. We shouldn't need to touch the path at all, since if a
// path prefix is defined, the path under which we scrape ourselves
// should already contain the prefix.
u.Scheme = opts.ExternalURL.Scheme
u.Host = opts.ExternalURL.Host
} else {
// Otherwise, we only know that localhost is not reachable
// externally, so we replace only the hostname by the one in the
// external URL. It could be the wrong hostname for the service on
// this port, but it's still the best possible guess.
host, _, err := net.SplitHostPort(opts.ExternalURL.Host)
if err != nil {
return u
}
u.Host = host + ":" + port
}
break
}
}
return u
},
"numHealthy": func(pool []*scrape.Target) int {
alive := len(pool)
for _, p := range pool {
if p.Health() != scrape.HealthGood {
alive--
}
}
return alive
},
"targetHealthToClass": func(th scrape.TargetHealth) string {
switch th {
case scrape.HealthUnknown:
return "warning"
case scrape.HealthGood:
return "success"
default:
return "danger"
}
},
"ruleHealthToClass": func(rh rules.RuleHealth) string {
switch rh {
case rules.HealthUnknown:
return "warning"
case rules.HealthGood:
return "success"
default:
return "danger"
}
},
"alertStateToClass": func(as rules.AlertState) string {
switch as {
case rules.StateInactive:
return "success"
case rules.StatePending:
return "warning"
case rules.StateFiring:
return "danger"
default:
panic("unknown alert state")
}
},
}
}
func (h *Handler) getTemplate(name string) (string, error) {
var tmpl string
appendf := func(name string) error {
f, err := ui.Assets.Open(path.Join("/templates", name))
if err != nil {
return err
}
defer f.Close()
b, err := ioutil.ReadAll(f)
if err != nil {
return err
}
tmpl += string(b)
return nil
}
err := appendf("_base.html")
if err != nil {
return "", errors.Wrap(err, "error reading base template")
}
err = appendf(name)
if err != nil {
return "", errors.Wrapf(err, "error reading page template %s", name)
}
return tmpl, nil
}
func (h *Handler) executeTemplate(w http.ResponseWriter, name string, data interface{}) {
text, err := h.getTemplate(name)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
tmpl := template.NewTemplateExpander(
h.context,
text,
name,
data,
h.now(),
template.QueryFunc(rules.EngineQueryFunc(h.queryEngine, h.storage)),
h.options.ExternalURL,
)
tmpl.Funcs(tmplFuncs(h.consolesPath(), h.options))
result, err := tmpl.ExpandHTML(nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
io.WriteString(w, result)
}
// AlertStatus bundles alerting rules and the mapping of alert states to row classes.
type AlertStatus struct {
Groups []*rules.Group
AlertStateToRowClass map[rules.AlertState]string
Counts AlertByStateCount
}
type AlertByStateCount struct {
Inactive int32
Pending int32
Firing int32
}
func setPathWithPrefix(prefix string) func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(handlerName string, handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
handler(w, r.WithContext(httputil.ContextWithPath(r.Context(), prefix+r.URL.Path)))
}
}
}
|
[
"\"GOGC\"",
"\"GODEBUG\"",
"\"GOGC\"",
"\"GODEBUG\""
] |
[] |
[
"GOGC",
"GODEBUG"
] |
[]
|
["GOGC", "GODEBUG"]
|
go
| 2 | 0 | |
paho.mqtt.python-master/test/lib/01-no-clean-session.py
|
#!/usr/bin/env python
# Test whether a client produces a correct connect with clean session not set.
# The client should connect to port 1888 with keepalive=60, clean session not
# set, and client id 01-no-clean-session.
import inspect
import os
import subprocess
import socket
import sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import paho_test
rc = 1
keepalive = 60
connect_packet = paho_test.gen_connect("01-no-clean-session", clean_session=False, keepalive=keepalive)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(10)
sock.bind(('', 1888))
sock.listen(5)
client_args = sys.argv[1:]
env = dict(os.environ)
try:
pp = env['PYTHONPATH']
except KeyError:
pp = ''
env['PYTHONPATH'] = '../../src:'+pp
client = subprocess.Popen(client_args, env=env)
try:
(conn, address) = sock.accept()
conn.settimeout(10)
if paho_test.expect_packet(conn, "connect", connect_packet):
rc = 0
conn.close()
finally:
client.terminate()
client.wait()
sock.close()
exit(rc)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
kolibri/core/analytics/test/test_utils.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import csv
import datetime
import io
import os
import random
import uuid
from django.test import TransactionTestCase
from le_utils.constants import content_kinds
from kolibri.core.analytics.constants.nutrition_endpoints import PINGBACK
from kolibri.core.analytics.constants.nutrition_endpoints import STATISTICS
from kolibri.core.analytics.models import PingbackNotification
from kolibri.core.analytics.utils import create_and_update_notifications
from kolibri.core.analytics.utils import extract_channel_statistics
from kolibri.core.analytics.utils import extract_facility_statistics
from kolibri.core.auth.constants import facility_presets
from kolibri.core.auth.constants import role_kinds
from kolibri.core.auth.test.helpers import create_superuser
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.models import File
from kolibri.core.content.models import LocalFile
from kolibri.core.exams.models import Exam
from kolibri.core.lessons.models import Lesson
from kolibri.core.logger.models import AttemptLog
from kolibri.core.logger.models import ContentSessionLog
from kolibri.core.logger.models import ContentSummaryLog
from kolibri.core.logger.models import ExamAttemptLog
from kolibri.core.logger.models import ExamLog
from kolibri.core.logger.models import UserSessionLog
from kolibri.core.logger.utils import user_data
USER_CSV_PATH = "kolibri/core/logger/management/commands/user_data.csv"
class BaseDeviceSetupMixin(object):
def setUp(self):
# create dummy channel
channel_id = uuid.uuid4().hex
root = ContentNode.objects.create(
id=uuid.uuid4().hex,
title="root",
channel_id=channel_id,
content_id=uuid.uuid4().hex,
)
min_timestamp = datetime.datetime(2018, 10, 11)
self.channel = ChannelMetadata.objects.create(
id=channel_id, name="channel", last_updated=min_timestamp, root=root
)
lf = LocalFile.objects.create(
id=uuid.uuid4().hex, available=True, file_size=1048576 # 1 MB
)
File.objects.create(id=uuid.uuid4().hex, contentnode=root, local_file=lf)
# Load in the user data from the csv file to give a predictable source of user data
data_path = os.path.join(USER_CSV_PATH)
with io.open(data_path, mode="r", encoding="utf-8") as f:
users = [data for data in csv.DictReader(f)]
n_facilities = 1
n_classes = 1 # 1 class x 1 facility = 1 class
n_users = 20 # 20 users x 1 facility = 20 users
max_timestamp = datetime.datetime(2019, 10, 11)
self.facilities = user_data.get_or_create_facilities(n_facilities=n_facilities)
for facility in self.facilities:
dataset = facility.dataset
# create superuser and login session
superuser = create_superuser(facility=facility)
facility.add_role(superuser, role_kinds.ADMIN)
UserSessionLog.objects.create(
user=superuser,
start_timestamp=min_timestamp,
last_interaction_timestamp=max_timestamp,
)
# create lesson and exam for facility
Lesson.objects.create(
created_by=superuser, title="lesson", collection=facility
)
exam = Exam.objects.create(
creator=superuser, title="exam", question_count=1, collection=facility
)
classrooms = user_data.get_or_create_classrooms(
n_classes=n_classes, facility=facility
)
# Get all the user data at once so that it is distinct across classrooms
facility_user_data = random.sample(users, n_classes * n_users)
# create random content id for the session logs
self.content_id = uuid.uuid4().hex
for i, classroom in enumerate(classrooms):
classroom_user_data = facility_user_data[
i * n_users : (i + 1) * n_users
]
users = user_data.get_or_create_classroom_users(
n_users=n_users,
classroom=classroom,
user_data=classroom_user_data,
facility=facility,
)
# create 1 of each type of log per user
for user in users:
for _ in range(1):
sessionlog = ContentSessionLog.objects.create(
user=user,
start_timestamp=min_timestamp,
end_timestamp=max_timestamp,
content_id=self.content_id,
channel_id=self.channel.id,
time_spent=60, # 1 minute
kind=content_kinds.EXERCISE,
)
AttemptLog.objects.create(
item="item",
start_timestamp=min_timestamp,
end_timestamp=max_timestamp,
completion_timestamp=max_timestamp,
correct=1,
sessionlog=sessionlog,
)
# create 1 anon log per user session log
ContentSessionLog.objects.create(
dataset=dataset,
user=None,
start_timestamp=min_timestamp,
end_timestamp=max_timestamp,
content_id=self.content_id,
channel_id=self.channel.id,
time_spent=60, # 1 minute,
kind=content_kinds.VIDEO,
)
for _ in range(1):
UserSessionLog.objects.create(
user=user,
start_timestamp=min_timestamp,
last_interaction_timestamp=max_timestamp,
)
for _ in range(1):
ContentSummaryLog.objects.create(
user=user,
start_timestamp=min_timestamp,
end_timestamp=max_timestamp,
completion_timestamp=max_timestamp,
content_id=uuid.uuid4().hex,
channel_id=self.channel.id,
)
for _ in range(1):
examlog = ExamLog.objects.create(exam=exam, user=user)
ExamAttemptLog.objects.create(
examlog=examlog,
start_timestamp=min_timestamp,
end_timestamp=max_timestamp,
completion_timestamp=max_timestamp,
correct=1,
content_id=uuid.uuid4().hex,
)
class FacilityStatisticsTestCase(BaseDeviceSetupMixin, TransactionTestCase):
def test_extract_facility_statistics(self):
facility = self.facilities[0]
actual = extract_facility_statistics(facility)
facility_id_hash = actual.pop("fi")
# just assert the beginning hex values of the facility id don't match
self.assertFalse(facility_id_hash.startswith(facility.id[:3]))
expected = {
"s": {
"preset": facility_presets.default,
"learner_can_edit_username": True,
"learner_can_edit_name": True,
"learner_can_edit_password": True,
"learner_can_sign_up": True,
"learner_can_delete_account": True,
"learner_can_login_with_no_password": False,
"show_download_button_in_learn": True,
"allow_guest_access": True,
},
"lc": 20, # learners_count
"llc": 20, # learner_login_count
"cc": 1, # coaches_count
"clc": 1, # coach_login_count
"f": "2018-10-11", # first interaction
"l": "2019-10-11", # last interaction
"ss": 20, # summarylog_started
"sc": 20, # summarylog_complete
"sk": {content_kinds.EXERCISE: 20, content_kinds.VIDEO: 20}, # sess_kinds
"lec": 1, # lesson_count
"ec": 1, # exam_count
"elc": 20, # exam_log_count
"alc": 20, # att_log_count
"ealc": 20, # exam_att_log_count
"suc": 20, # sess_user_count
"sac": 20, # sess_anon_count
"sut": 20, # sess_user_time
"sat": 20, # sess_anon_time
}
assert actual == expected
def test_regression_4606_no_usersessions(self):
UserSessionLog.objects.all().delete()
facility = self.facilities[0]
# will raise an exception if we haven't addressed https://github.com/learningequality/kolibri/issues/4606
actual = extract_facility_statistics(facility)
assert actual["f"] == "2018-10-11"
assert actual["l"] == "2019-10-11"
def test_regression_4606_no_contentsessions(self):
ContentSessionLog.objects.all().delete()
facility = self.facilities[0]
# will raise an exception if we haven't addressed https://github.com/learningequality/kolibri/issues/4606
actual = extract_facility_statistics(facility)
assert actual["f"] == "2018-10-11"
assert actual["l"] == "2019-10-11"
def test_regression_4606_no_contentsessions_or_usersessions(self):
ContentSessionLog.objects.all().delete()
UserSessionLog.objects.all().delete()
facility = self.facilities[0]
# will raise an exception if we haven't addressed https://github.com/learningequality/kolibri/issues/4606
actual = extract_facility_statistics(facility)
assert actual["f"] is None
assert actual["l"] is None
class ChannelStatisticsTestCase(BaseDeviceSetupMixin, TransactionTestCase):
def test_extract_channel_statistics(self):
actual = extract_channel_statistics(self.channel)
expected = {
"ci": self.channel.id[:10], # channel_id
"v": 0, # version
"u": "2018-10-11", # updated
"pi": [self.content_id[:10]], # popular_ids
"pc": [40], # popular_counts
"s": 1, # storage
"ss": 20, # summ_started
"sc": 20, # summ_complete
"sk": {content_kinds.EXERCISE: 20, content_kinds.VIDEO: 20}, # sess_kinds
"suc": 20, # sess_user_count
"sac": 20, # sess_anon_count
"sut": 20, # sess_user_time
"sat": 20, # sess_anon_time
}
assert actual == expected
class CreateUpdateNotificationsTestCase(TransactionTestCase):
def setUp(self):
self.msg = {
"i18n": {},
"msg_id": "ping",
"link_url": "le.org",
"timestamp": datetime.date(2012, 12, 12),
"version_range": "<1.0.0",
}
self.messages = {"messages": []}
self.data = {
"i18n": {},
"id": "message",
"link_url": "le.org",
"timestamp": datetime.date(2012, 12, 12),
"version_range": "<1.0.0",
"source": PINGBACK,
}
PingbackNotification.objects.create(**self.data)
def test_no_messages_still_updates(self):
create_and_update_notifications(self.messages, PINGBACK)
self.assertFalse(PingbackNotification.objects.get(id="message").active)
def test_create_and_update_notification(self):
self.messages["messages"].append(self.msg)
original_count = PingbackNotification.objects.count()
create_and_update_notifications(self.messages, PINGBACK)
# deactivate all other messages for this source that are not included in the response
self.assertFalse(PingbackNotification.objects.get(id="message").active)
self.assertEqual(PingbackNotification.objects.count(), original_count + 1)
def test_update_same_notification(self):
self.data["msg_id"] = self.data["id"]
self.data["link_url"] = ""
pre_notification = PingbackNotification.objects.get(id="message")
self.messages["messages"].append(self.data)
create_and_update_notifications(self.messages, PINGBACK)
post_notification = PingbackNotification.objects.get(id="message")
# messages with same ID are overwritten
self.assertTrue(post_notification.active)
self.assertNotEqual(pre_notification.link_url, post_notification.link_url)
def test_update_other_source(self):
self.messages["messages"].append(self.msg)
create_and_update_notifications(self.messages, STATISTICS)
# messages from other source should not be modified
self.assertFalse(
PingbackNotification.objects.filter(source=PINGBACK, active=False).exists()
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
vendor/github.com/mongodb/mongo-go-driver/x/network/integration/pool_test.go
|
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package integration
import (
"context"
"os"
"strings"
"testing"
"time"
"github.com/mongodb/mongo-go-driver/x/network/address"
"github.com/mongodb/mongo-go-driver/x/network/command"
"github.com/mongodb/mongo-go-driver/x/network/connection"
)
func TestPool(t *testing.T) {
noerr := func(t *testing.T, err error) {
if err != nil {
t.Helper()
t.Errorf("Unepexted error: %v", err)
t.FailNow()
}
}
opts := []connection.Option{connection.WithAppName(func(string) string { return "mongo-go-driver-test" })}
opts = append(opts, connection.WithHandshaker(func(connection.Handshaker) connection.Handshaker {
return &command.Handshake{Client: command.ClientDoc("mongo-go-driver-test")}
}))
caFile := os.Getenv("MONGO_GO_DRIVER_CA_FILE")
if len(caFile) != 0 {
config := connection.NewTLSConfig()
err := config.AddCACertFromFile(caFile)
if err != nil {
t.Errorf("Unexpected error while adding ca file to config: %v", err)
t.FailNow()
}
config.SetInsecure(true)
opts = append(opts, connection.WithTLSConfig(func(*connection.TLSConfig) *connection.TLSConfig { return config }))
}
t.Run("Cannot Create Pool With Size Larger Than Capacity", func(t *testing.T) {
_, err := connection.NewPool(address.Address(""), 4, 2, opts...)
if err != connection.ErrSizeLargerThanCapacity {
t.Errorf("Should not be able to create a pool with size larger than capacity. got %v; want %v", err, connection.ErrSizeLargerThanCapacity)
}
})
t.Run("Reuses Connections", func(t *testing.T) {
// TODO(skriptble): make this a table test.
p, err := connection.NewPool(address.Address(*host), 2, 4, opts...)
if err != nil {
t.Errorf("Unexpected error while creating pool: %v", err)
}
err = p.Connect(context.TODO())
noerr(t, err)
c1, _, err := p.Get(context.Background())
noerr(t, err)
first := c1.ID()
err = c1.Close()
noerr(t, err)
c2, _, err := p.Get(context.Background())
noerr(t, err)
second := c2.ID()
if first != second {
t.Errorf("Pool does not reuse connections. The connection ids differ. first %s; second %s", first, second)
}
})
t.Run("Expired Connections Aren't Returned", func(t *testing.T) {
p, err := connection.NewPool(address.Address(*host), 2, 4,
append(opts, connection.WithIdleTimeout(func(time.Duration) time.Duration { return 10 * time.Millisecond }))...,
)
if err != nil {
t.Errorf("Unexpected error while creating pool: %v", err)
}
err = p.Connect(context.TODO())
noerr(t, err)
c1, _, err := p.Get(context.Background())
noerr(t, err)
first := c1.ID()
err = c1.Close()
noerr(t, err)
time.Sleep(400 * time.Millisecond)
c2, _, err := p.Get(context.Background())
noerr(t, err)
second := c2.ID()
if first == second {
t.Errorf("Pool does not expire connections. IDs of first and second connection match. first %s; second %s", first, second)
}
})
t.Run("Get With Done Context", func(t *testing.T) {
p, err := connection.NewPool(address.Address(*host), 2, 4, opts...)
if err != nil {
t.Errorf("Unexpected error while creating pool: %v", err)
}
err = p.Connect(context.TODO())
noerr(t, err)
ctx, cancel := context.WithCancel(context.Background())
cancel()
_, _, err = p.Get(ctx)
if !strings.Contains(err.Error(), "context canceled") {
t.Errorf("Expected context called error, but got: %v", err)
}
})
t.Run("Get Returns Error From Creating A Connection", func(t *testing.T) {
p, err := connection.NewPool(address.Address("localhost:0"), 2, 4, opts...)
if err != nil {
t.Errorf("Unexpected error while creating pool: %v", err)
}
err = p.Connect(context.TODO())
noerr(t, err)
_, _, err = p.Get(context.Background())
if !strings.Contains(err.Error(), "dial tcp") {
t.Errorf("Expected context called error, but got: %v", err)
}
})
t.Run("Get Returns An Error After Pool Is Closed", func(t *testing.T) {
p, err := connection.NewPool(address.Address(*host), 2, 4, opts...)
if err != nil {
t.Errorf("Unexpected error while creating pool: %v", err)
}
err = p.Connect(context.TODO())
noerr(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
err = p.Disconnect(ctx)
noerr(t, err)
_, _, err = p.Get(context.Background())
if err != connection.ErrPoolClosed {
t.Errorf("Did not get expected error. got %v; want %v", err, connection.ErrPoolClosed)
}
})
t.Run("Connection Close Does Not Error After Pool Is Closed", func(t *testing.T) {
p, err := connection.NewPool(address.Address(*host), 2, 4, opts...)
if err != nil {
t.Errorf("Unexpected error while creating pool: %v", err)
}
err = p.Connect(context.TODO())
noerr(t, err)
c1, _, err := p.Get(context.Background())
noerr(t, err)
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
defer cancel()
err = p.Disconnect(ctx)
noerr(t, err)
err = c1.Close()
if err != nil {
t.Errorf("Connection Close should not error after Pool is closed, but got error: %v", err)
}
})
t.Run("Connection Close Does Not Close Underlying Connection If Not Expired", func(t *testing.T) {
// Implement this once there is a more testable Dialer.
t.Skip()
})
t.Run("Connection Close Does Close Underlying Connection If Expired", func(t *testing.T) {
// Implement this once there is a more testable Dialer.
t.Skip()
})
t.Run("Connection Close Closes Underlying Connection When Size Is Exceeded", func(t *testing.T) {
// Implement this once there is a more testable Dialer.
t.Skip()
})
t.Run("Drain Expires Existing Checked Out Connections", func(t *testing.T) {
p, err := connection.NewPool(address.Address(*host), 2, 4, opts...)
if err != nil {
t.Errorf("Unexpected error while creating pool: %v", err)
}
err = p.Connect(context.TODO())
noerr(t, err)
c1, _, err := p.Get(context.Background())
noerr(t, err)
if c1.Expired() != false {
t.Errorf("Newly retrieved connection should not be expired.")
}
err = p.Drain()
noerr(t, err)
if c1.Expired() != true {
t.Errorf("Existing checkout out connections should be expired once pool is drained.")
}
})
t.Run("Drain Expires Idle Connections", func(t *testing.T) {
// Implement this once there is a more testable Dialer.
t.Skip()
})
t.Run("Pool Close Closes All Connections In A Pool", func(t *testing.T) {
// Implement this once there is a more testable Dialer.
t.Skip()
})
}
|
[
"\"MONGO_GO_DRIVER_CA_FILE\""
] |
[] |
[
"MONGO_GO_DRIVER_CA_FILE"
] |
[]
|
["MONGO_GO_DRIVER_CA_FILE"]
|
go
| 1 | 0 | |
backend/main.go
|
package main
import (
"fmt"
"log"
"net/http"
"os"
"time"
)
func main() {
handler := http.DefaultServeMux
handler.HandleFunc("/", HelloServer)
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
s := &http.Server {
Addr: ":" + port,
Handler: logging(handler),
ReadTimeout: 10 * time.Second,
WriteTimeout: 10 * time.Second,
}
log.Fatal(s.ListenAndServe())
}
type statusWriter struct {
http.ResponseWriter
status int
}
func (sw *statusWriter) WriteHeader(status int) {
sw.status = status
sw.ResponseWriter.WriteHeader(status)
}
func logging(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
log.Printf("[%s] %s %s", r.RemoteAddr, r.Method, r.URL)
sw := statusWriter{ResponseWriter: w, status: 200}
h.ServeHTTP(&sw, r)
log.Printf("[%s] -> %d", r.RemoteAddr, sw.status)
})
}
func HelloServer(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, %s!", r.URL.Path[1:])
}
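// Example run (illustrative; the port value and request path are assumptions):
//
//	$ PORT=9090 go run main.go
//	$ curl localhost:9090/gopher
//	Hello, gopher!
//
// The logging middleware prints two lines per request: the method and URL on
// arrival, and the status code captured by statusWriter once the handler
// returns.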
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
src/cmd/cgo/gcc.go
|
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Annotate Ref in Prog with C types by parsing gcc debug output.
// Conversion of debug output to Go types.
package main
import (
"bytes"
"debug/dwarf"
"debug/elf"
"debug/macho"
"debug/pe"
"encoding/binary"
"errors"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/token"
"internal/xcoff"
"math"
"os"
"strconv"
"strings"
"unicode"
"unicode/utf8"
)
var debugDefine = flag.Bool("debug-define", false, "print relevant #defines")
var debugGcc = flag.Bool("debug-gcc", false, "print gcc invocations")
var nameToC = map[string]string{
"schar": "signed char",
"uchar": "unsigned char",
"ushort": "unsigned short",
"uint": "unsigned int",
"ulong": "unsigned long",
"longlong": "long long",
"ulonglong": "unsigned long long",
"complexfloat": "float _Complex",
"complexdouble": "double _Complex",
}
// cname returns the C name to use for C.s.
// The expansions are listed in nameToC and also
// struct_foo becomes "struct foo", and similarly for
// union and enum.
func cname(s string) string {
if t, ok := nameToC[s]; ok {
return t
}
if strings.HasPrefix(s, "struct_") {
return "struct " + s[len("struct_"):]
}
if strings.HasPrefix(s, "union_") {
return "union " + s[len("union_"):]
}
if strings.HasPrefix(s, "enum_") {
return "enum " + s[len("enum_"):]
}
if strings.HasPrefix(s, "sizeof_") {
return "sizeof(" + cname(s[len("sizeof_"):]) + ")"
}
return s
}
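// For example (illustrative), cname("sizeof_struct_stat") recurses through the
// sizeof_ and struct_ cases above and returns "sizeof(struct stat)".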
// DiscardCgoDirectives processes the import C preamble, and discards
// all #cgo CFLAGS and LDFLAGS directives, so they don't make their
// way into _cgo_export.h.
func (f *File) DiscardCgoDirectives() {
linesIn := strings.Split(f.Preamble, "\n")
linesOut := make([]string, 0, len(linesIn))
for _, line := range linesIn {
l := strings.TrimSpace(line)
if len(l) < 5 || l[:4] != "#cgo" || !unicode.IsSpace(rune(l[4])) {
linesOut = append(linesOut, line)
} else {
linesOut = append(linesOut, "")
}
}
f.Preamble = strings.Join(linesOut, "\n")
}
// addToFlag appends args to flag. All flags are later written out onto the
// _cgo_flags file for the build system to use.
func (p *Package) addToFlag(flag string, args []string) {
p.CgoFlags[flag] = append(p.CgoFlags[flag], args...)
if flag == "CFLAGS" {
// We'll also need these when preprocessing for dwarf information.
// However, discard any -g options: we need to be able
// to parse the debug info, so stick to what we expect.
for _, arg := range args {
if !strings.HasPrefix(arg, "-g") {
p.GccOptions = append(p.GccOptions, arg)
}
}
}
}
// splitQuoted splits the string s around each instance of one or more consecutive
// white space characters while taking into account quotes and escaping, and
// returns an array of substrings of s or an empty list if s contains only white space.
// Single quotes and double quotes are recognized to prevent splitting within the
// quoted region, and are removed from the resulting substrings. If a quote in s
// isn't closed err will be set and r will have the unclosed argument as the
// last element. The backslash is used for escaping.
//
// For example, the following string:
//
// `a b:"c d" 'e''f' "g\""`
//
// Would be parsed as:
//
// []string{"a", "b:c d", "ef", `g"`}
//
func splitQuoted(s string) (r []string, err error) {
var args []string
arg := make([]rune, len(s))
escaped := false
quoted := false
quote := '\x00'
i := 0
for _, r := range s {
switch {
case escaped:
escaped = false
case r == '\\':
escaped = true
continue
case quote != 0:
if r == quote {
quote = 0
continue
}
case r == '"' || r == '\'':
quoted = true
quote = r
continue
case unicode.IsSpace(r):
if quoted || i > 0 {
quoted = false
args = append(args, string(arg[:i]))
i = 0
}
continue
}
arg[i] = r
i++
}
if quoted || i > 0 {
args = append(args, string(arg[:i]))
}
if quote != 0 {
err = errors.New("unclosed quote")
} else if escaped {
err = errors.New("unfinished escaping")
}
return args, err
}
// Translate rewrites f.AST, the original Go input, to remove
// references to the imported package C, replacing them with
// references to the equivalent Go types, functions, and variables.
func (p *Package) Translate(f *File) {
for _, cref := range f.Ref {
// Convert C.ulong to C.unsigned long, etc.
cref.Name.C = cname(cref.Name.Go)
}
var conv typeConv
conv.Init(p.PtrSize, p.IntSize)
p.loadDefines(f)
p.typedefs = map[string]bool{}
p.typedefList = nil
numTypedefs := -1
for len(p.typedefs) > numTypedefs {
numTypedefs = len(p.typedefs)
// Also ask about any typedefs we've seen so far.
for _, info := range p.typedefList {
n := &Name{
Go: info.typedef,
C: info.typedef,
}
f.Name[info.typedef] = n
f.NamePos[n] = info.pos
}
needType := p.guessKinds(f)
if len(needType) > 0 {
p.loadDWARF(f, &conv, needType)
}
// In godefs mode we're OK with the typedefs, which
// will presumably also be defined in the file, we
// don't want to resolve them to their base types.
if *godefs {
break
}
}
p.prepareNames(f)
if p.rewriteCalls(f) {
// Add `import _cgo_unsafe "unsafe"` after the package statement.
f.Edit.Insert(f.offset(f.AST.Name.End()), "; import _cgo_unsafe \"unsafe\"")
}
p.rewriteRef(f)
}
// loadDefines coerces gcc into spitting out the #defines in use
// in the file f and saves relevant renamings in f.Name[name].Define.
func (p *Package) loadDefines(f *File) {
var b bytes.Buffer
b.WriteString(builtinProlog)
b.WriteString(f.Preamble)
stdout := p.gccDefines(b.Bytes())
for _, line := range strings.Split(stdout, "\n") {
if len(line) < 9 || line[0:7] != "#define" {
continue
}
line = strings.TrimSpace(line[8:])
var key, val string
spaceIndex := strings.Index(line, " ")
tabIndex := strings.Index(line, "\t")
if spaceIndex == -1 && tabIndex == -1 {
continue
} else if tabIndex == -1 || (spaceIndex != -1 && spaceIndex < tabIndex) {
key = line[0:spaceIndex]
val = strings.TrimSpace(line[spaceIndex:])
} else {
key = line[0:tabIndex]
val = strings.TrimSpace(line[tabIndex:])
}
if key == "__clang__" {
p.GccIsClang = true
}
if n := f.Name[key]; n != nil {
if *debugDefine {
fmt.Fprintf(os.Stderr, "#define %s %s\n", key, val)
}
n.Define = val
}
}
}
// guessKinds tricks gcc into revealing the kind of each
// name xxx for the references C.xxx in the Go input.
// The kind is either a constant, type, or variable.
func (p *Package) guessKinds(f *File) []*Name {
// Determine kinds for names we already know about,
// like #defines or 'struct foo', before bothering with gcc.
var names, needType []*Name
optional := map[*Name]bool{}
for _, key := range nameKeys(f.Name) {
n := f.Name[key]
// If we've already found this name as a #define
// and we can translate it as a constant value, do so.
if n.Define != "" {
if i, err := strconv.ParseInt(n.Define, 0, 64); err == nil {
n.Kind = "iconst"
// Turn decimal into hex, just for consistency
// with enum-derived constants. Otherwise
// in the cgo -godefs output half the constants
// are in hex and half are in whatever the #define used.
n.Const = fmt.Sprintf("%#x", i)
} else if n.Define[0] == '\'' {
if _, err := parser.ParseExpr(n.Define); err == nil {
n.Kind = "iconst"
n.Const = n.Define
}
} else if n.Define[0] == '"' {
if _, err := parser.ParseExpr(n.Define); err == nil {
n.Kind = "sconst"
n.Const = n.Define
}
}
if n.IsConst() {
continue
}
}
// If this is a struct, union, or enum type name, no need to guess the kind.
if strings.HasPrefix(n.C, "struct ") || strings.HasPrefix(n.C, "union ") || strings.HasPrefix(n.C, "enum ") {
n.Kind = "type"
needType = append(needType, n)
continue
}
if goos == "darwin" && strings.HasSuffix(n.C, "Ref") {
// For FooRef, find out if FooGetTypeID exists.
s := n.C[:len(n.C)-3] + "GetTypeID"
n := &Name{Go: s, C: s}
names = append(names, n)
optional[n] = true
}
// Otherwise, we'll need to find out from gcc.
names = append(names, n)
}
// Bypass gcc if there's nothing left to find out.
if len(names) == 0 {
return needType
}
// Coerce gcc into telling us whether each name is a type, a value, or undeclared.
// For names, find out whether they are integer constants.
// We used to look at specific warning or error messages here, but that tied the
// behavior too closely to specific versions of the compilers.
// Instead, arrange that we can infer what we need from only the presence or absence
// of an error on a specific line.
//
// For each name, we generate these lines, where xxx is the index in toSniff plus one.
//
// #line xxx "not-declared"
// void __cgo_f_xxx_1(void) { __typeof__(name) *__cgo_undefined__1; }
// #line xxx "not-type"
// void __cgo_f_xxx_2(void) { name *__cgo_undefined__2; }
// #line xxx "not-int-const"
// void __cgo_f_xxx_3(void) { enum { __cgo_undefined__3 = (name)*1 }; }
// #line xxx "not-num-const"
// void __cgo_f_xxx_4(void) { static const double __cgo_undefined__4 = (name); }
// #line xxx "not-str-lit"
// void __cgo_f_xxx_5(void) { static const char __cgo_undefined__5[] = (name); }
//
// If we see an error at not-declared:xxx, the corresponding name is not declared.
// If we see an error at not-type:xxx, the corresponding name is a type.
// If we see an error at not-int-const:xxx, the corresponding name is not an integer constant.
// If we see an error at not-num-const:xxx, the corresponding name is not a number constant.
// If we see an error at not-str-lit:xxx, the corresponding name is not a string literal.
//
// The specific input forms are chosen so that they are valid C syntax regardless of
// whether name denotes a type or an expression.
var b bytes.Buffer
b.WriteString(builtinProlog)
b.WriteString(f.Preamble)
for i, n := range names {
fmt.Fprintf(&b, "#line %d \"not-declared\"\n"+
"void __cgo_f_%d_1(void) { __typeof__(%s) *__cgo_undefined__1; }\n"+
"#line %d \"not-type\"\n"+
"void __cgo_f_%d_2(void) { %s *__cgo_undefined__2; }\n"+
"#line %d \"not-int-const\"\n"+
"void __cgo_f_%d_3(void) { enum { __cgo_undefined__3 = (%s)*1 }; }\n"+
"#line %d \"not-num-const\"\n"+
"void __cgo_f_%d_4(void) { static const double __cgo_undefined__4 = (%s); }\n"+
"#line %d \"not-str-lit\"\n"+
"void __cgo_f_%d_5(void) { static const char __cgo_undefined__5[] = (%s); }\n",
i+1, i+1, n.C,
i+1, i+1, n.C,
i+1, i+1, n.C,
i+1, i+1, n.C,
i+1, i+1, n.C,
)
}
fmt.Fprintf(&b, "#line 1 \"completed\"\n"+
"int __cgo__1 = __cgo__2;\n")
stderr := p.gccErrors(b.Bytes())
if stderr == "" {
fatalf("%s produced no output\non input:\n%s", p.gccBaseCmd()[0], b.Bytes())
}
completed := false
sniff := make([]int, len(names))
const (
notType = 1 << iota
notIntConst
notNumConst
notStrLiteral
notDeclared
)
sawUnmatchedErrors := false
for _, line := range strings.Split(stderr, "\n") {
// Ignore warnings and random comments, with one
// exception: newer GCC versions will sometimes emit
// an error on a macro #define with a note referring
// to where the expansion occurs. We care about where
// the expansion occurs, so in that case treat the note
// as an error.
isError := strings.Contains(line, ": error:")
isErrorNote := strings.Contains(line, ": note:") && sawUnmatchedErrors
if !isError && !isErrorNote {
continue
}
c1 := strings.Index(line, ":")
if c1 < 0 {
continue
}
c2 := strings.Index(line[c1+1:], ":")
if c2 < 0 {
continue
}
c2 += c1 + 1
filename := line[:c1]
i, _ := strconv.Atoi(line[c1+1 : c2])
i--
if i < 0 || i >= len(names) {
if isError {
sawUnmatchedErrors = true
}
continue
}
switch filename {
case "completed":
// Strictly speaking, there is no guarantee that seeing the error at completed:1
// (at the end of the file) means we've seen all the errors from earlier in the file,
// but usually it does. Certainly if we don't see the completed:1 error, we did
// not get all the errors we expected.
completed = true
case "not-declared":
sniff[i] |= notDeclared
case "not-type":
sniff[i] |= notType
case "not-int-const":
sniff[i] |= notIntConst
case "not-num-const":
sniff[i] |= notNumConst
case "not-str-lit":
sniff[i] |= notStrLiteral
default:
if isError {
sawUnmatchedErrors = true
}
continue
}
sawUnmatchedErrors = false
}
if !completed {
fatalf("%s did not produce error at completed:1\non input:\n%s\nfull error output:\n%s", p.gccBaseCmd()[0], b.Bytes(), stderr)
}
for i, n := range names {
switch sniff[i] {
default:
if sniff[i]&notDeclared != 0 && optional[n] {
// Ignore optional undeclared identifiers.
// Don't report an error, and skip adding n to the needType array.
continue
}
error_(f.NamePos[n], "could not determine kind of name for C.%s", fixGo(n.Go))
case notStrLiteral | notType:
n.Kind = "iconst"
case notIntConst | notStrLiteral | notType:
n.Kind = "fconst"
case notIntConst | notNumConst | notType:
n.Kind = "sconst"
case notIntConst | notNumConst | notStrLiteral:
n.Kind = "type"
case notIntConst | notNumConst | notStrLiteral | notType:
n.Kind = "not-type"
}
needType = append(needType, n)
}
if nerrors > 0 {
// Check if compiling the preamble by itself causes any errors,
// because the messages we've printed out so far aren't helpful
// to users debugging preamble mistakes. See issue 8442.
preambleErrors := p.gccErrors([]byte(f.Preamble))
if len(preambleErrors) > 0 {
error_(token.NoPos, "\n%s errors for preamble:\n%s", p.gccBaseCmd()[0], preambleErrors)
}
fatalf("unresolved names")
}
return needType
}
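// Worked example (illustrative, not taken from a real compiler run): for a
// preamble containing `#define FOO 1.5`, the generated probes for FOO fail at
// the "not-type", "not-int-const" and "not-str-lit" lines but compile cleanly
// at "not-declared" and "not-num-const", so sniff[i] ends up as
// notType|notIntConst|notStrLiteral and the switch above assigns Kind "fconst".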
// loadDWARF parses the DWARF debug information generated
// by gcc to learn the details of the constants, variables, and types
// being referred to as C.xxx.
func (p *Package) loadDWARF(f *File, conv *typeConv, names []*Name) {
// Extract the types from the DWARF section of an object
// from a well-formed C program. Gcc only generates DWARF info
// for symbols in the object file, so it is not enough to print the
// preamble and hope the symbols we care about will be there.
// Instead, emit
// __typeof__(names[i]) *__cgo__i;
// for each entry in names and then dereference the type we
// learn for __cgo__i.
var b bytes.Buffer
b.WriteString(builtinProlog)
b.WriteString(f.Preamble)
b.WriteString("#line 1 \"cgo-dwarf-inference\"\n")
for i, n := range names {
fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i)
if n.Kind == "iconst" {
fmt.Fprintf(&b, "enum { __cgo_enum__%d = %s };\n", i, n.C)
}
}
// We create a data block initialized with the values,
// so we can read them out of the object file.
fmt.Fprintf(&b, "long long __cgodebug_ints[] = {\n")
for _, n := range names {
if n.Kind == "iconst" {
fmt.Fprintf(&b, "\t%s,\n", n.C)
} else {
fmt.Fprintf(&b, "\t0,\n")
}
}
// For the last entry, we cannot use 0: if all of
// __cgodebug_ints were zero initialized, LLVM-based gcc
// would place the array in the __DATA.__common
// zero-filled section, which our debug/macho package
// does not support.
fmt.Fprintf(&b, "\t1\n")
fmt.Fprintf(&b, "};\n")
// do the same work for floats.
fmt.Fprintf(&b, "double __cgodebug_floats[] = {\n")
for _, n := range names {
if n.Kind == "fconst" {
fmt.Fprintf(&b, "\t%s,\n", n.C)
} else {
fmt.Fprintf(&b, "\t0,\n")
}
}
fmt.Fprintf(&b, "\t1\n")
fmt.Fprintf(&b, "};\n")
// do the same work for strings.
for i, n := range names {
if n.Kind == "sconst" {
fmt.Fprintf(&b, "const char __cgodebug_str__%d[] = %s;\n", i, n.C)
fmt.Fprintf(&b, "const unsigned long long __cgodebug_strlen__%d = sizeof(%s)-1;\n", i, n.C)
}
}
d, ints, floats, strs := p.gccDebug(b.Bytes(), len(names))
// Scan DWARF info for top-level TagVariable entries with AttrName __cgo__i.
types := make([]dwarf.Type, len(names))
r := d.Reader()
for {
e, err := r.Next()
if err != nil {
fatalf("reading DWARF entry: %s", err)
}
if e == nil {
break
}
switch e.Tag {
case dwarf.TagVariable:
name, _ := e.Val(dwarf.AttrName).(string)
typOff, _ := e.Val(dwarf.AttrType).(dwarf.Offset)
if name == "" || typOff == 0 {
if e.Val(dwarf.AttrSpecification) != nil {
// Since we are reading all the DWARF,
// assume we will see the variable elsewhere.
break
}
fatalf("malformed DWARF TagVariable entry")
}
if !strings.HasPrefix(name, "__cgo__") {
break
}
typ, err := d.Type(typOff)
if err != nil {
fatalf("loading DWARF type: %s", err)
}
t, ok := typ.(*dwarf.PtrType)
if !ok || t == nil {
fatalf("internal error: %s has non-pointer type", name)
}
i, err := strconv.Atoi(name[7:])
if err != nil {
fatalf("malformed __cgo__ name: %s", name)
}
types[i] = t.Type
p.recordTypedefs(t.Type, f.NamePos[names[i]])
}
if e.Tag != dwarf.TagCompileUnit {
r.SkipChildren()
}
}
// Record types and typedef information.
for i, n := range names {
if strings.HasSuffix(n.Go, "GetTypeID") && types[i].String() == "func() CFTypeID" {
conv.getTypeIDs[n.Go[:len(n.Go)-9]] = true
}
}
for i, n := range names {
if types[i] == nil {
continue
}
pos := f.NamePos[n]
f, fok := types[i].(*dwarf.FuncType)
if n.Kind != "type" && fok {
n.Kind = "func"
n.FuncType = conv.FuncType(f, pos)
} else {
n.Type = conv.Type(types[i], pos)
switch n.Kind {
case "iconst":
if i < len(ints) {
if _, ok := types[i].(*dwarf.UintType); ok {
n.Const = fmt.Sprintf("%#x", uint64(ints[i]))
} else {
n.Const = fmt.Sprintf("%#x", ints[i])
}
}
case "fconst":
if i >= len(floats) {
break
}
switch base(types[i]).(type) {
case *dwarf.IntType, *dwarf.UintType:
// This has an integer type so it's
// not really a floating point
// constant. This can happen when the
// C compiler complains about using
// the value as an integer constant,
// but not as a general constant.
// Treat this as a variable of the
// appropriate type, not a constant,
// to get C-style type handling,
// avoiding the problem that C permits
// uint64(-1) but Go does not.
// See issue 26066.
n.Kind = "var"
default:
n.Const = fmt.Sprintf("%f", floats[i])
}
case "sconst":
if i < len(strs) {
n.Const = fmt.Sprintf("%q", strs[i])
}
}
}
conv.FinishType(pos)
}
}
// recordTypedefs remembers in p.typedefs all the typedefs used in dtypes and its children.
func (p *Package) recordTypedefs(dtype dwarf.Type, pos token.Pos) {
p.recordTypedefs1(dtype, pos, map[dwarf.Type]bool{})
}
func (p *Package) recordTypedefs1(dtype dwarf.Type, pos token.Pos, visited map[dwarf.Type]bool) {
if dtype == nil {
return
}
if visited[dtype] {
return
}
visited[dtype] = true
switch dt := dtype.(type) {
case *dwarf.TypedefType:
if strings.HasPrefix(dt.Name, "__builtin") {
// Don't look inside builtin types. There be dragons.
return
}
if !p.typedefs[dt.Name] {
p.typedefs[dt.Name] = true
p.typedefList = append(p.typedefList, typedefInfo{dt.Name, pos})
p.recordTypedefs1(dt.Type, pos, visited)
}
case *dwarf.PtrType:
p.recordTypedefs1(dt.Type, pos, visited)
case *dwarf.ArrayType:
p.recordTypedefs1(dt.Type, pos, visited)
case *dwarf.QualType:
p.recordTypedefs1(dt.Type, pos, visited)
case *dwarf.FuncType:
p.recordTypedefs1(dt.ReturnType, pos, visited)
for _, a := range dt.ParamType {
p.recordTypedefs1(a, pos, visited)
}
case *dwarf.StructType:
for _, f := range dt.Field {
p.recordTypedefs1(f.Type, pos, visited)
}
}
}
// prepareNames finalizes the Kind field of not-type names and sets
// the mangled name of all names.
func (p *Package) prepareNames(f *File) {
for _, n := range f.Name {
if n.Kind == "not-type" {
if n.Define == "" {
n.Kind = "var"
} else {
n.Kind = "macro"
n.FuncType = &FuncType{
Result: n.Type,
Go: &ast.FuncType{
Results: &ast.FieldList{List: []*ast.Field{{Type: n.Type.Go}}},
},
}
}
}
p.mangleName(n)
}
}
// mangleName does name mangling to translate names
// from the original Go source files to the names
// used in the final Go files generated by cgo.
func (p *Package) mangleName(n *Name) {
// When using gccgo variables have to be
// exported so that they become global symbols
// that the C code can refer to.
prefix := "_C"
if *gccgo && n.IsVar() {
prefix = "C"
}
n.Mangle = prefix + n.Kind + "_" + n.Go
}
func (f *File) isMangledName(s string) bool {
prefix := "_C"
if strings.HasPrefix(s, prefix) {
t := s[len(prefix):]
for _, k := range nameKinds {
if strings.HasPrefix(t, k+"_") {
return true
}
}
}
return false
}
// rewriteCalls rewrites all calls that pass pointers to check that
// they follow the rules for passing pointers between Go and C.
// This reports whether the package needs to import unsafe as _cgo_unsafe.
func (p *Package) rewriteCalls(f *File) bool {
needsUnsafe := false
// Walk backward so that in C.f1(C.f2()) we rewrite C.f2 first.
for _, call := range f.Calls {
if call.Done {
continue
}
start := f.offset(call.Call.Pos())
end := f.offset(call.Call.End())
str, nu := p.rewriteCall(f, call)
if str != "" {
f.Edit.Replace(start, end, str)
if nu {
needsUnsafe = true
}
}
}
return needsUnsafe
}
// rewriteCall rewrites one call to add pointer checks.
// If any pointer checks are required, we rewrite the call into a
// function literal that calls _cgoCheckPointer for each pointer
// argument and then calls the original function.
// This returns the rewritten call and whether the package needs to
// import unsafe as _cgo_unsafe.
// If it returns the empty string, the call did not need to be rewritten.
func (p *Package) rewriteCall(f *File, call *Call) (string, bool) {
// This is a call to C.xxx; set goname to "xxx".
// It may have already been mangled by rewriteName.
var goname string
switch fun := call.Call.Fun.(type) {
case *ast.SelectorExpr:
goname = fun.Sel.Name
case *ast.Ident:
goname = strings.TrimPrefix(fun.Name, "_C2func_")
goname = strings.TrimPrefix(goname, "_Cfunc_")
}
if goname == "" || goname == "malloc" {
return "", false
}
name := f.Name[goname]
if name == nil || name.Kind != "func" {
// Probably a type conversion.
return "", false
}
params := name.FuncType.Params
args := call.Call.Args
// Avoid a crash if the number of arguments is
// less than the number of parameters.
// This will be caught when the generated file is compiled.
if len(args) < len(params) {
return "", false
}
any := false
for i, param := range params {
if p.needsPointerCheck(f, param.Go, args[i]) {
any = true
break
}
}
if !any {
return "", false
}
// We need to rewrite this call.
//
// Rewrite C.f(p) to
// func() {
// _cgo0 := p
// _cgoCheckPointer(_cgo0)
// C.f(_cgo0)
// }()
// Using a function literal like this lets us evaluate the
// function arguments only once while doing pointer checks.
// This is particularly useful when passing additional arguments
// to _cgoCheckPointer, as done in checkIndex and checkAddr.
//
// When the function argument is a conversion to unsafe.Pointer,
// we unwrap the conversion before checking the pointer,
// and then wrap again when calling C.f. This lets us check
// the real type of the pointer in some cases. See issue #25941.
//
// When the call to C.f is deferred, we use an additional function
// literal to evaluate the arguments at the right time.
// defer func() func() {
// _cgo0 := p
// return func() {
// _cgoCheckPointer(_cgo0)
// C.f(_cgo0)
// }
// }()()
// This works because the defer statement evaluates the first
// function literal in order to get the function to call.
var sb bytes.Buffer
sb.WriteString("func() ")
if call.Deferred {
sb.WriteString("func() ")
}
needsUnsafe := false
result := false
twoResults := false
if !call.Deferred {
// Check whether this call expects two results.
for _, ref := range f.Ref {
if ref.Expr != &call.Call.Fun {
continue
}
if ref.Context == ctxCall2 {
sb.WriteString("(")
result = true
twoResults = true
}
break
}
// Add the result type, if any.
if name.FuncType.Result != nil {
rtype := p.rewriteUnsafe(name.FuncType.Result.Go)
if rtype != name.FuncType.Result.Go {
needsUnsafe = true
}
sb.WriteString(gofmtLine(rtype))
result = true
}
// Add the second result type, if any.
if twoResults {
if name.FuncType.Result == nil {
// An explicit void result looks odd but it
// seems to be how cgo has worked historically.
sb.WriteString("_Ctype_void")
}
sb.WriteString(", error)")
}
}
sb.WriteString("{ ")
// Define _cgoN for each argument value.
// Write _cgoCheckPointer calls to sbCheck.
var sbCheck bytes.Buffer
for i, param := range params {
origArg := args[i]
arg, nu := p.mangle(f, &args[i])
if nu {
needsUnsafe = true
}
// Use "var x T = ..." syntax to explicitly convert untyped
// constants to the parameter type, to avoid a type mismatch.
ptype := p.rewriteUnsafe(param.Go)
if !p.needsPointerCheck(f, param.Go, args[i]) || param.BadPointer {
if ptype != param.Go {
needsUnsafe = true
}
fmt.Fprintf(&sb, "var _cgo%d %s = %s; ", i,
gofmtLine(ptype), gofmtPos(arg, origArg.Pos()))
continue
}
// Check for &a[i].
if p.checkIndex(&sb, &sbCheck, arg, i) {
continue
}
// Check for &x.
if p.checkAddr(&sb, &sbCheck, arg, i) {
continue
}
fmt.Fprintf(&sb, "_cgo%d := %s; ", i, gofmtPos(arg, origArg.Pos()))
fmt.Fprintf(&sbCheck, "_cgoCheckPointer(_cgo%d); ", i)
}
if call.Deferred {
sb.WriteString("return func() { ")
}
// Write out the calls to _cgoCheckPointer.
sb.WriteString(sbCheck.String())
if result {
sb.WriteString("return ")
}
m, nu := p.mangle(f, &call.Call.Fun)
if nu {
needsUnsafe = true
}
sb.WriteString(gofmtLine(m))
sb.WriteString("(")
for i := range params {
if i > 0 {
sb.WriteString(", ")
}
fmt.Fprintf(&sb, "_cgo%d", i)
}
sb.WriteString("); ")
if call.Deferred {
sb.WriteString("}")
}
sb.WriteString("}")
if call.Deferred {
sb.WriteString("()")
}
sb.WriteString("()")
return sb.String(), needsUnsafe
}
// needsPointerCheck reports whether the type t needs a pointer check.
// This is true if t is a pointer and if the value to which it points
// might contain a pointer.
func (p *Package) needsPointerCheck(f *File, t ast.Expr, arg ast.Expr) bool {
// An untyped nil does not need a pointer check, and when
// _cgoCheckPointer returns the untyped nil the type assertion we
// are going to insert will fail. Easier to just skip nil arguments.
// TODO: Note that this fails if nil is shadowed.
if id, ok := arg.(*ast.Ident); ok && id.Name == "nil" {
return false
}
return p.hasPointer(f, t, true)
}
// hasPointer is used by needsPointerCheck. If top is true it returns
// whether t is or contains a pointer that might point to a pointer.
// If top is false it reports whether t is or contains a pointer.
// f may be nil.
func (p *Package) hasPointer(f *File, t ast.Expr, top bool) bool {
switch t := t.(type) {
case *ast.ArrayType:
if t.Len == nil {
if !top {
return true
}
return p.hasPointer(f, t.Elt, false)
}
return p.hasPointer(f, t.Elt, top)
case *ast.StructType:
for _, field := range t.Fields.List {
if p.hasPointer(f, field.Type, top) {
return true
}
}
return false
case *ast.StarExpr: // Pointer type.
if !top {
return true
}
// Check whether this is a pointer to a C union (or class)
// type that contains a pointer.
if unionWithPointer[t.X] {
return true
}
return p.hasPointer(f, t.X, false)
case *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
return true
case *ast.Ident:
// TODO: Handle types defined within function.
for _, d := range p.Decl {
gd, ok := d.(*ast.GenDecl)
if !ok || gd.Tok != token.TYPE {
continue
}
for _, spec := range gd.Specs {
ts, ok := spec.(*ast.TypeSpec)
if !ok {
continue
}
if ts.Name.Name == t.Name {
return p.hasPointer(f, ts.Type, top)
}
}
}
if def := typedef[t.Name]; def != nil {
return p.hasPointer(f, def.Go, top)
}
if t.Name == "string" {
return !top
}
if t.Name == "error" {
return true
}
if goTypes[t.Name] != nil {
return false
}
// We can't figure out the type. Conservative
// approach is to assume it has a pointer.
return true
case *ast.SelectorExpr:
if l, ok := t.X.(*ast.Ident); !ok || l.Name != "C" {
// Type defined in a different package.
// Conservative approach is to assume it has a
// pointer.
return true
}
if f == nil {
// Conservative approach: assume pointer.
return true
}
name := f.Name[t.Sel.Name]
if name != nil && name.Kind == "type" && name.Type != nil && name.Type.Go != nil {
return p.hasPointer(f, name.Type.Go, top)
}
// We can't figure out the type. Conservative
// approach is to assume it has a pointer.
return true
default:
error_(t.Pos(), "could not understand type %s", gofmt(t))
return true
}
}
// mangle replaces references to C names in arg with the mangled names,
// rewriting calls when it finds them.
// It removes the corresponding references in f.Ref and f.Calls, so that we
// don't try to do the replacement again in rewriteRef or rewriteCall.
func (p *Package) mangle(f *File, arg *ast.Expr) (ast.Expr, bool) {
needsUnsafe := false
f.walk(arg, ctxExpr, func(f *File, arg interface{}, context astContext) {
px, ok := arg.(*ast.Expr)
if !ok {
return
}
sel, ok := (*px).(*ast.SelectorExpr)
if ok {
if l, ok := sel.X.(*ast.Ident); !ok || l.Name != "C" {
return
}
for _, r := range f.Ref {
if r.Expr == px {
*px = p.rewriteName(f, r)
r.Done = true
break
}
}
return
}
call, ok := (*px).(*ast.CallExpr)
if !ok {
return
}
for _, c := range f.Calls {
if !c.Done && c.Call.Lparen == call.Lparen {
cstr, nu := p.rewriteCall(f, c)
if cstr != "" {
// Smuggle the rewritten call through an ident.
*px = ast.NewIdent(cstr)
if nu {
needsUnsafe = true
}
c.Done = true
}
}
}
})
return *arg, needsUnsafe
}
// checkIndex checks whether arg has the form &a[i], possibly inside
// type conversions. If so, then in the general case it writes
// _cgoIndexNN := a
// _cgoNN := &cgoIndexNN[i] // with type conversions, if any
// to sb, and writes
// _cgoCheckPointer(_cgoNN, _cgoIndexNN)
// to sbCheck, and returns true. If a is a simple variable or field reference,
// it writes
// _cgoIndexNN := &a
// and dereferences the uses of _cgoIndexNN. Taking the address avoids
// making a copy of an array.
//
// This tells _cgoCheckPointer to check the complete contents of the
// slice or array being indexed, but no other part of the memory allocation.
func (p *Package) checkIndex(sb, sbCheck *bytes.Buffer, arg ast.Expr, i int) bool {
// Strip type conversions.
x := arg
for {
c, ok := x.(*ast.CallExpr)
if !ok || len(c.Args) != 1 || !p.isType(c.Fun) {
break
}
x = c.Args[0]
}
u, ok := x.(*ast.UnaryExpr)
if !ok || u.Op != token.AND {
return false
}
index, ok := u.X.(*ast.IndexExpr)
if !ok {
return false
}
addr := ""
deref := ""
if p.isVariable(index.X) {
addr = "&"
deref = "*"
}
fmt.Fprintf(sb, "_cgoIndex%d := %s%s; ", i, addr, gofmtPos(index.X, index.X.Pos()))
origX := index.X
index.X = ast.NewIdent(fmt.Sprintf("_cgoIndex%d", i))
if deref == "*" {
index.X = &ast.StarExpr{X: index.X}
}
fmt.Fprintf(sb, "_cgo%d := %s; ", i, gofmtPos(arg, arg.Pos()))
index.X = origX
fmt.Fprintf(sbCheck, "_cgoCheckPointer(_cgo%d, %s_cgoIndex%d); ", i, deref, i)
return true
}
// checkAddr checks whether arg has the form &x, possibly inside type
// conversions. If so, it writes
// _cgoBaseNN := &x
// _cgoNN := _cgoBaseNN // with type conversions, if any
// to sb, and writes
// _cgoCheckPointer(_cgoBaseNN, true)
// to sbCheck, and returns true. This tells _cgoCheckPointer to check
// just the contents of the pointer being passed, not any other part
// of the memory allocation. This is run after checkIndex, which looks
// for the special case of &a[i], which requires different checks.
func (p *Package) checkAddr(sb, sbCheck *bytes.Buffer, arg ast.Expr, i int) bool {
// Strip type conversions.
px := &arg
for {
c, ok := (*px).(*ast.CallExpr)
if !ok || len(c.Args) != 1 || !p.isType(c.Fun) {
break
}
px = &c.Args[0]
}
if u, ok := (*px).(*ast.UnaryExpr); !ok || u.Op != token.AND {
return false
}
fmt.Fprintf(sb, "_cgoBase%d := %s; ", i, gofmtPos(*px, (*px).Pos()))
origX := *px
*px = ast.NewIdent(fmt.Sprintf("_cgoBase%d", i))
fmt.Fprintf(sb, "_cgo%d := %s; ", i, gofmtPos(arg, arg.Pos()))
*px = origX
// Use "0 == 0" to do the right thing in the unlikely event
// that "true" is shadowed.
fmt.Fprintf(sbCheck, "_cgoCheckPointer(_cgoBase%d, 0 == 0); ", i)
return true
}
// isType reports whether the expression is definitely a type.
// This is conservative--it returns false for an unknown identifier.
func (p *Package) isType(t ast.Expr) bool {
switch t := t.(type) {
case *ast.SelectorExpr:
id, ok := t.X.(*ast.Ident)
if !ok {
return false
}
if id.Name == "unsafe" && t.Sel.Name == "Pointer" {
return true
}
if id.Name == "C" && typedef["_Ctype_"+t.Sel.Name] != nil {
return true
}
return false
case *ast.Ident:
// TODO: This ignores shadowing.
switch t.Name {
case "unsafe.Pointer", "bool", "byte",
"complex64", "complex128",
"error",
"float32", "float64",
"int", "int8", "int16", "int32", "int64",
"rune", "string",
"uint", "uint8", "uint16", "uint32", "uint64", "uintptr":
return true
}
if strings.HasPrefix(t.Name, "_Ctype_") {
return true
}
case *ast.StarExpr:
return p.isType(t.X)
case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType,
*ast.MapType, *ast.ChanType:
return true
}
return false
}
// isVariable reports whether x is a variable, possibly with field references.
func (p *Package) isVariable(x ast.Expr) bool {
switch x := x.(type) {
case *ast.Ident:
return true
case *ast.SelectorExpr:
return p.isVariable(x.X)
}
return false
}
// rewriteUnsafe returns a version of t with references to unsafe.Pointer
// rewritten to use _cgo_unsafe.Pointer instead.
func (p *Package) rewriteUnsafe(t ast.Expr) ast.Expr {
switch t := t.(type) {
case *ast.Ident:
// We don't see a SelectorExpr for unsafe.Pointer;
// this is created by code in this file.
if t.Name == "unsafe.Pointer" {
return ast.NewIdent("_cgo_unsafe.Pointer")
}
case *ast.ArrayType:
t1 := p.rewriteUnsafe(t.Elt)
if t1 != t.Elt {
r := *t
r.Elt = t1
return &r
}
case *ast.StructType:
changed := false
fields := *t.Fields
fields.List = nil
for _, f := range t.Fields.List {
ft := p.rewriteUnsafe(f.Type)
if ft == f.Type {
fields.List = append(fields.List, f)
} else {
fn := *f
fn.Type = ft
fields.List = append(fields.List, &fn)
changed = true
}
}
if changed {
r := *t
r.Fields = &fields
return &r
}
case *ast.StarExpr: // Pointer type.
x1 := p.rewriteUnsafe(t.X)
if x1 != t.X {
r := *t
r.X = x1
return &r
}
}
return t
}
// rewriteRef rewrites all the C.xxx references in f.AST to refer to the
// Go equivalents, now that we have figured out the meaning of all
// the xxx. In *godefs mode, rewriteRef replaces the names
// with full definitions instead of mangled names.
func (p *Package) rewriteRef(f *File) {
// Keep a list of all the functions, to remove the ones
// only used as expressions and avoid generating bridge
// code for them.
functions := make(map[string]bool)
for _, n := range f.Name {
if n.Kind == "func" {
functions[n.Go] = false
}
}
// Now that we have all the name types filled in,
// scan through the Refs to identify the ones that
// are trying to do a ,err call. Also check that
// functions are only used in calls.
for _, r := range f.Ref {
if r.Name.IsConst() && r.Name.Const == "" {
error_(r.Pos(), "unable to find value of constant C.%s", fixGo(r.Name.Go))
}
if r.Name.Kind == "func" {
switch r.Context {
case ctxCall, ctxCall2:
functions[r.Name.Go] = true
}
}
expr := p.rewriteName(f, r)
if *godefs {
// Substitute definition for mangled type name.
if id, ok := expr.(*ast.Ident); ok {
if t := typedef[id.Name]; t != nil {
expr = t.Go
}
if id.Name == r.Name.Mangle && r.Name.Const != "" {
expr = ast.NewIdent(r.Name.Const)
}
}
}
// Copy position information from old expr into new expr,
// in case expression being replaced is first on line.
// See golang.org/issue/6563.
pos := (*r.Expr).Pos()
if x, ok := expr.(*ast.Ident); ok {
expr = &ast.Ident{NamePos: pos, Name: x.Name}
}
// Change AST, because some later processing depends on it,
// and also because -godefs mode still prints the AST.
old := *r.Expr
*r.Expr = expr
// Record source-level edit for cgo output.
if !r.Done {
// Prepend a space in case the earlier code ends
// with '/', which would give us a "//" comment.
repl := " " + gofmtPos(expr, old.Pos())
end := fset.Position(old.End())
// Subtract 1 from the column if we are going to
// append a close parenthesis. That will set the
// correct column for the following characters.
sub := 0
if r.Name.Kind != "type" {
sub = 1
}
if end.Column > sub {
repl = fmt.Sprintf("%s /*line :%d:%d*/", repl, end.Line, end.Column-sub)
}
if r.Name.Kind != "type" {
repl = "(" + repl + ")"
}
f.Edit.Replace(f.offset(old.Pos()), f.offset(old.End()), repl)
}
}
// Remove functions only used as expressions, so their respective
// bridge functions are not generated.
for name, used := range functions {
if !used {
delete(f.Name, name)
}
}
}
// rewriteName returns the expression used to rewrite a reference.
func (p *Package) rewriteName(f *File, r *Ref) ast.Expr {
var expr ast.Expr = ast.NewIdent(r.Name.Mangle) // default
switch r.Context {
case ctxCall, ctxCall2:
if r.Name.Kind != "func" {
if r.Name.Kind == "type" {
r.Context = ctxType
if r.Name.Type == nil {
error_(r.Pos(), "invalid conversion to C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
break
}
expr = r.Name.Type.Go
break
}
error_(r.Pos(), "call of non-function C.%s", fixGo(r.Name.Go))
break
}
if r.Context == ctxCall2 {
if r.Name.Go == "_CMalloc" {
error_(r.Pos(), "no two-result form for C.malloc")
break
}
// Invent new Name for the two-result function.
n := f.Name["2"+r.Name.Go]
if n == nil {
n = new(Name)
*n = *r.Name
n.AddError = true
n.Mangle = "_C2func_" + n.Go
f.Name["2"+r.Name.Go] = n
}
expr = ast.NewIdent(n.Mangle)
r.Name = n
break
}
case ctxExpr:
switch r.Name.Kind {
case "func":
if builtinDefs[r.Name.C] != "" {
error_(r.Pos(), "use of builtin '%s' not in function call", fixGo(r.Name.C))
}
// Function is being used in an expression, to e.g. pass around a C function pointer.
// Create a new Name for this Ref which causes the variable to be declared in Go land.
fpName := "fp_" + r.Name.Go
name := f.Name[fpName]
if name == nil {
name = &Name{
Go: fpName,
C: r.Name.C,
Kind: "fpvar",
Type: &Type{Size: p.PtrSize, Align: p.PtrSize, C: c("void*"), Go: ast.NewIdent("unsafe.Pointer")},
}
p.mangleName(name)
f.Name[fpName] = name
}
r.Name = name
// Rewrite into call to _Cgo_ptr to prevent assignments. The _Cgo_ptr
// function is defined in out.go and simply returns its argument. See
// issue 7757.
expr = &ast.CallExpr{
Fun: &ast.Ident{NamePos: (*r.Expr).Pos(), Name: "_Cgo_ptr"},
Args: []ast.Expr{ast.NewIdent(name.Mangle)},
}
case "type":
// Okay - might be new(T)
if r.Name.Type == nil {
error_(r.Pos(), "expression C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
break
}
expr = r.Name.Type.Go
case "var":
expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
case "macro":
expr = &ast.CallExpr{Fun: expr}
}
case ctxSelector:
if r.Name.Kind == "var" {
expr = &ast.StarExpr{Star: (*r.Expr).Pos(), X: expr}
} else {
error_(r.Pos(), "only C variables allowed in selector expression %s", fixGo(r.Name.Go))
}
case ctxType:
if r.Name.Kind != "type" {
error_(r.Pos(), "expression C.%s used as type", fixGo(r.Name.Go))
} else if r.Name.Type == nil {
// Use of C.enum_x, C.struct_x or C.union_x without C definition.
// GCC won't raise an error when using pointers to such unknown types.
error_(r.Pos(), "type C.%s: undefined C type '%s'", fixGo(r.Name.Go), r.Name.C)
} else {
expr = r.Name.Type.Go
}
default:
if r.Name.Kind == "func" {
error_(r.Pos(), "must call C.%s", fixGo(r.Name.Go))
}
}
return expr
}
// gofmtPos returns the gofmt-formatted string for an AST node,
// with a comment setting the position before the node.
func gofmtPos(n ast.Expr, pos token.Pos) string {
s := gofmtLine(n)
p := fset.Position(pos)
if p.Column == 0 {
return s
}
return fmt.Sprintf("/*line :%d:%d*/%s", p.Line, p.Column, s)
}
// gccBaseCmd returns the start of the compiler command line.
// It uses $CC if set, or else $GCC, or else the compiler recorded
// during the initial build as defaultCC.
// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
func (p *Package) gccBaseCmd() []string {
// Use $CC if set, since that's what the build uses.
if ret := strings.Fields(os.Getenv("CC")); len(ret) > 0 {
return ret
}
// Try $GCC if set, since that's what we used to use.
if ret := strings.Fields(os.Getenv("GCC")); len(ret) > 0 {
return ret
}
return strings.Fields(defaultCC(goos, goarch))
}
// gccMachine returns the gcc -m flag to use, either "-m32", "-m64" or "-marm".
func (p *Package) gccMachine() []string {
switch goarch {
case "amd64":
return []string{"-m64"}
case "386":
return []string{"-m32"}
case "arm":
return []string{"-marm"} // not thumb
case "s390":
return []string{"-m31"}
case "s390x":
return []string{"-m64"}
case "mips64", "mips64le":
return []string{"-mabi=64"}
case "mips", "mipsle":
return []string{"-mabi=32"}
}
return nil
}
func gccTmp() string {
return *objDir + "_cgo_.o"
}
// gccCmd returns the gcc command line to use for compiling
// the input.
func (p *Package) gccCmd() []string {
c := append(p.gccBaseCmd(),
"-w", // no warnings
"-Wno-error", // warnings are not errors
"-o"+gccTmp(), // write object to tmp
"-gdwarf-2", // generate DWARF v2 debugging symbols
"-c", // do not link
"-xc", // input language is C
)
if p.GccIsClang {
c = append(c,
"-ferror-limit=0",
// Apple clang version 1.7 (tags/Apple/clang-77) (based on LLVM 2.9svn)
// doesn't have -Wno-unneeded-internal-declaration, so we need yet another
// flag to disable the warning. Yes, really good diagnostics, clang.
"-Wno-unknown-warning-option",
"-Wno-unneeded-internal-declaration",
"-Wno-unused-function",
"-Qunused-arguments",
// Clang embeds prototypes for some builtin functions,
// like malloc and calloc, but all size_t parameters are
// incorrectly typed unsigned long. We work around that
// by disabling the builtin functions (this is safe as
// it won't affect the actual compilation of the C code).
// See: https://golang.org/issue/6506.
"-fno-builtin",
)
}
c = append(c, p.GccOptions...)
c = append(c, p.gccMachine()...)
if goos == "aix" {
c = append(c, "-maix64")
}
c = append(c, "-") //read input from standard input
return c
}
// gccDebug runs gcc -gdwarf-2 over the C program stdin and
// returns the corresponding DWARF data and, if present, debug data block.
func (p *Package) gccDebug(stdin []byte, nnames int) (d *dwarf.Data, ints []int64, floats []float64, strs []string) {
runGcc(stdin, p.gccCmd())
isDebugInts := func(s string) bool {
// Some systems use leading _ to denote non-assembly symbols.
return s == "__cgodebug_ints" || s == "___cgodebug_ints"
}
isDebugFloats := func(s string) bool {
// Some systems use leading _ to denote non-assembly symbols.
return s == "__cgodebug_floats" || s == "___cgodebug_floats"
}
indexOfDebugStr := func(s string) int {
// Some systems use leading _ to denote non-assembly symbols.
if strings.HasPrefix(s, "___") {
s = s[1:]
}
if strings.HasPrefix(s, "__cgodebug_str__") {
if n, err := strconv.Atoi(s[len("__cgodebug_str__"):]); err == nil {
return n
}
}
return -1
}
indexOfDebugStrlen := func(s string) int {
// Some systems use leading _ to denote non-assembly symbols.
if strings.HasPrefix(s, "___") {
s = s[1:]
}
if strings.HasPrefix(s, "__cgodebug_strlen__") {
if n, err := strconv.Atoi(s[len("__cgodebug_strlen__"):]); err == nil {
return n
}
}
return -1
}
strs = make([]string, nnames)
strdata := make(map[int]string, nnames)
strlens := make(map[int]int, nnames)
buildStrings := func() {
for n, strlen := range strlens {
data := strdata[n]
if len(data) <= strlen {
fatalf("invalid string literal")
}
strs[n] = data[:strlen]
}
}
if f, err := macho.Open(gccTmp()); err == nil {
defer f.Close()
d, err := f.DWARF()
if err != nil {
fatalf("cannot load DWARF output from %s: %v", gccTmp(), err)
}
bo := f.ByteOrder
if f.Symtab != nil {
for i := range f.Symtab.Syms {
s := &f.Symtab.Syms[i]
switch {
case isDebugInts(s.Name):
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
}
}
}
}
case isDebugFloats(s.Name):
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
}
}
}
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
strdata[n] = string(data)
}
}
}
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
// Found it. Now find data section.
if i := int(s.Sect) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
}
strlens[n] = int(strlen)
}
}
}
break
}
}
}
buildStrings()
}
return d, ints, floats, strs
}
if f, err := elf.Open(gccTmp()); err == nil {
defer f.Close()
d, err := f.DWARF()
if err != nil {
fatalf("cannot load DWARF output from %s: %v", gccTmp(), err)
}
bo := f.ByteOrder
symtab, err := f.Symbols()
if err == nil {
for i := range symtab {
s := &symtab[i]
switch {
case isDebugInts(s.Name):
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
}
}
}
}
case isDebugFloats(s.Name):
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
}
}
}
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
strdata[n] = string(data)
}
}
}
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
// Found it. Now find data section.
if i := int(s.Section); 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if sect.Addr <= s.Value && s.Value < sect.Addr+sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value-sect.Addr:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
}
strlens[n] = int(strlen)
}
}
}
break
}
}
}
buildStrings()
}
return d, ints, floats, strs
}
if f, err := pe.Open(gccTmp()); err == nil {
defer f.Close()
d, err := f.DWARF()
if err != nil {
fatalf("cannot load DWARF output from %s: %v", gccTmp(), err)
}
bo := binary.LittleEndian
for _, s := range f.Symbols {
switch {
case isDebugInts(s.Name):
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
}
}
}
}
case isDebugFloats(s.Name):
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
}
}
}
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
strdata[n] = string(data)
}
}
}
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
}
strlens[n] = int(strlen)
}
}
}
break
}
}
}
buildStrings()
return d, ints, floats, strs
}
if f, err := xcoff.Open(gccTmp()); err == nil {
defer f.Close()
d, err := f.DWARF()
if err != nil {
fatalf("cannot load DWARF output from %s: %v", gccTmp(), err)
}
bo := binary.BigEndian
for _, s := range f.Symbols {
switch {
case isDebugInts(s.Name):
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
ints = make([]int64, len(data)/8)
for i := range ints {
ints[i] = int64(bo.Uint64(data[i*8:]))
}
}
}
}
case isDebugFloats(s.Name):
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
floats = make([]float64, len(data)/8)
for i := range floats {
floats[i] = math.Float64frombits(bo.Uint64(data[i*8:]))
}
}
}
}
default:
if n := indexOfDebugStr(s.Name); n != -1 {
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
strdata[n] = string(data)
}
}
}
break
}
if n := indexOfDebugStrlen(s.Name); n != -1 {
if i := int(s.SectionNumber) - 1; 0 <= i && i < len(f.Sections) {
sect := f.Sections[i]
if s.Value < sect.Size {
if sdat, err := sect.Data(); err == nil {
data := sdat[s.Value:]
strlen := bo.Uint64(data[:8])
if strlen > (1<<(uint(p.IntSize*8)-1) - 1) { // greater than MaxInt?
fatalf("string literal too big")
}
strlens[n] = int(strlen)
}
}
}
break
}
}
}
buildStrings()
return d, ints, floats, strs
}
fatalf("cannot parse gcc output %s as ELF, Mach-O, PE, XCOFF object", gccTmp())
panic("not reached")
}
// gccDefines runs gcc -E -dM -xc - over the C program stdin
// and returns the corresponding standard output, which is the
// #defines that gcc encountered while processing the input
// and its included files.
func (p *Package) gccDefines(stdin []byte) string {
base := append(p.gccBaseCmd(), "-E", "-dM", "-xc")
base = append(base, p.gccMachine()...)
stdout, _ := runGcc(stdin, append(append(base, p.GccOptions...), "-"))
return stdout
}
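// Editor's note (illustrative sketch, not from the original source): with
// "-E -dM" the preprocessor emits every macro definition it saw instead of
// preprocessed source, so the returned stdout is a stream of lines such as
//
//	#define SIZE 42
//	#define __STDC__ 1
//
// which the caller can scan for names introduced with #define.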
// gccErrors runs gcc over the C program stdin and returns
// the errors that gcc prints. That is, this function expects
// gcc to fail.
func (p *Package) gccErrors(stdin []byte) string {
// TODO(rsc): require failure
args := p.gccCmd()
// Optimization options can confuse the error messages; remove them.
nargs := make([]string, 0, len(args))
for _, arg := range args {
if !strings.HasPrefix(arg, "-O") {
nargs = append(nargs, arg)
}
}
// Force -O0 optimization but keep the trailing "-" at the end.
nargs = append(nargs, "-O0")
nl := len(nargs)
nargs[nl-2], nargs[nl-1] = nargs[nl-1], nargs[nl-2]
if *debugGcc {
fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(nargs, " "))
os.Stderr.Write(stdin)
fmt.Fprint(os.Stderr, "EOF\n")
}
stdout, stderr, _ := run(stdin, nargs)
if *debugGcc {
os.Stderr.Write(stdout)
os.Stderr.Write(stderr)
}
return string(stderr)
}
// runGcc runs the gcc command line args with stdin on standard input.
// If the command exits with a non-zero exit status, runGcc prints
// details about what was run and exits.
// Otherwise runGcc returns the data written to standard output and standard error.
// Note that for some of the uses we expect useful data back
// on standard error, but for those uses gcc must still exit 0.
func runGcc(stdin []byte, args []string) (string, string) {
if *debugGcc {
fmt.Fprintf(os.Stderr, "$ %s <<EOF\n", strings.Join(args, " "))
os.Stderr.Write(stdin)
fmt.Fprint(os.Stderr, "EOF\n")
}
stdout, stderr, ok := run(stdin, args)
if *debugGcc {
os.Stderr.Write(stdout)
os.Stderr.Write(stderr)
}
if !ok {
os.Stderr.Write(stderr)
os.Exit(2)
}
return string(stdout), string(stderr)
}
// A typeConv is a translator from dwarf types to Go types
// with equivalent memory layout.
type typeConv struct {
// Cache of already-translated or in-progress types.
m map[string]*Type
// Map from types to incomplete pointers to those types.
ptrs map[string][]*Type
// Keys of ptrs in insertion order (deterministic worklist)
// ptrKeys contains exactly the keys in ptrs.
ptrKeys []dwarf.Type
// Type names X for which there exists an XGetTypeID function with type func() CFTypeID.
getTypeIDs map[string]bool
// Predeclared types.
bool ast.Expr
byte ast.Expr // denotes padding
int8, int16, int32, int64 ast.Expr
uint8, uint16, uint32, uint64, uintptr ast.Expr
float32, float64 ast.Expr
complex64, complex128 ast.Expr
void ast.Expr
string ast.Expr
goVoid ast.Expr // _Ctype_void, denotes C's void
goVoidPtr ast.Expr // unsafe.Pointer or *byte
ptrSize int64
intSize int64
}
var tagGen int
var typedef = make(map[string]*Type)
var goIdent = make(map[string]*ast.Ident)
// unionWithPointer is true for a Go type that represents a C union (or class)
// that may contain a pointer. This is used for cgo pointer checking.
var unionWithPointer = make(map[ast.Expr]bool)
func (c *typeConv) Init(ptrSize, intSize int64) {
c.ptrSize = ptrSize
c.intSize = intSize
c.m = make(map[string]*Type)
c.ptrs = make(map[string][]*Type)
c.getTypeIDs = make(map[string]bool)
c.bool = c.Ident("bool")
c.byte = c.Ident("byte")
c.int8 = c.Ident("int8")
c.int16 = c.Ident("int16")
c.int32 = c.Ident("int32")
c.int64 = c.Ident("int64")
c.uint8 = c.Ident("uint8")
c.uint16 = c.Ident("uint16")
c.uint32 = c.Ident("uint32")
c.uint64 = c.Ident("uint64")
c.uintptr = c.Ident("uintptr")
c.float32 = c.Ident("float32")
c.float64 = c.Ident("float64")
c.complex64 = c.Ident("complex64")
c.complex128 = c.Ident("complex128")
c.void = c.Ident("void")
c.string = c.Ident("string")
c.goVoid = c.Ident("_Ctype_void")
// Normally cgo translates void* to unsafe.Pointer,
// but for historical reasons -godefs uses *byte instead.
if *godefs {
c.goVoidPtr = &ast.StarExpr{X: c.byte}
} else {
c.goVoidPtr = c.Ident("unsafe.Pointer")
}
}
// base strips away qualifiers and typedefs to get the underlying type
func base(dt dwarf.Type) dwarf.Type {
for {
if d, ok := dt.(*dwarf.QualType); ok {
dt = d.Type
continue
}
if d, ok := dt.(*dwarf.TypedefType); ok {
dt = d.Type
continue
}
break
}
return dt
}
// unqual strips away qualifiers from a DWARF type.
// In general we don't care about top-level qualifiers.
func unqual(dt dwarf.Type) dwarf.Type {
for {
if d, ok := dt.(*dwarf.QualType); ok {
dt = d.Type
} else {
break
}
}
return dt
}
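// Editor's note (illustrative, not part of the original source): the difference
// between base and unqual shows up for a declaration like
//
//	typedef unsigned int myuint;
//	const myuint x;
//
// where the DWARF type of x is QualType(const) -> TypedefType(myuint) ->
// UintType. unqual stops at the typedef, while base keeps stripping and
// returns the underlying UintType.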
// Map from dwarf text names to aliases we use in package "C".
var dwarfToName = map[string]string{
"long int": "long",
"long unsigned int": "ulong",
"unsigned int": "uint",
"short unsigned int": "ushort",
"unsigned short": "ushort", // Used by Clang; issue 13129.
"short int": "short",
"long long int": "longlong",
"long long unsigned int": "ulonglong",
"signed char": "schar",
"unsigned char": "uchar",
}
const signedDelta = 64
// String returns the current type representation. Format arguments
// are assembled within this method so that any changes in mutable
// values are taken into account.
func (tr *TypeRepr) String() string {
if len(tr.Repr) == 0 {
return ""
}
if len(tr.FormatArgs) == 0 {
return tr.Repr
}
return fmt.Sprintf(tr.Repr, tr.FormatArgs...)
}
// Empty reports whether the result of String would be "".
func (tr *TypeRepr) Empty() bool {
return len(tr.Repr) == 0
}
// Set modifies the type representation.
// If fargs are provided, repr is used as a format for fmt.Sprintf.
// Otherwise, repr is used unprocessed as the type representation.
func (tr *TypeRepr) Set(repr string, fargs ...interface{}) {
tr.Repr = repr
tr.FormatArgs = fargs
}
// FinishType completes any outstanding type mapping work.
// In particular, it resolves incomplete pointer types.
func (c *typeConv) FinishType(pos token.Pos) {
// Completing one pointer type might produce more to complete.
// Keep looping until they're all done.
for len(c.ptrKeys) > 0 {
dtype := c.ptrKeys[0]
dtypeKey := dtype.String()
c.ptrKeys = c.ptrKeys[1:]
ptrs := c.ptrs[dtypeKey]
delete(c.ptrs, dtypeKey)
// Note Type might invalidate c.ptrs[dtypeKey].
t := c.Type(dtype, pos)
for _, ptr := range ptrs {
ptr.Go.(*ast.StarExpr).X = t.Go
ptr.C.Set("%s*", t.C)
}
}
}
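// Editor's note (illustrative, not from the original source): this deferred
// completion is what lets self-referential C types convert without infinite
// recursion. For
//
//	struct node { struct node *next; };
//
// the pointer field is first recorded as an empty &ast.StarExpr{} in c.ptrs,
// and FinishType later fills in its X once the struct itself has a Go
// representation.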
// Type returns a *Type with the same memory layout as
// dtype when used as the type of a variable or a struct field.
func (c *typeConv) Type(dtype dwarf.Type, pos token.Pos) *Type {
// Always recompute bad pointer typedefs, as the set of such
// typedefs changes as we see more types.
checkCache := true
if dtt, ok := dtype.(*dwarf.TypedefType); ok && c.badPointerTypedef(dtt) {
checkCache = false
}
key := dtype.String()
if checkCache {
if t, ok := c.m[key]; ok {
if t.Go == nil {
fatalf("%s: type conversion loop at %s", lineno(pos), dtype)
}
return t
}
}
t := new(Type)
t.Size = dtype.Size() // note: wrong for array of pointers, corrected below
t.Align = -1
t.C = &TypeRepr{Repr: dtype.Common().Name}
c.m[key] = t
switch dt := dtype.(type) {
default:
fatalf("%s: unexpected type: %s", lineno(pos), dtype)
case *dwarf.AddrType:
if t.Size != c.ptrSize {
fatalf("%s: unexpected: %d-byte address type - %s", lineno(pos), t.Size, dtype)
}
t.Go = c.uintptr
t.Align = t.Size
case *dwarf.ArrayType:
if dt.StrideBitSize > 0 {
// Cannot represent bit-sized elements in Go.
t.Go = c.Opaque(t.Size)
break
}
count := dt.Count
if count == -1 {
// Indicates flexible array member, which Go doesn't support.
// Translate to zero-length array instead.
count = 0
}
sub := c.Type(dt.Type, pos)
t.Align = sub.Align
t.Go = &ast.ArrayType{
Len: c.intExpr(count),
Elt: sub.Go,
}
// Recalculate t.Size now that we know sub.Size.
t.Size = count * sub.Size
t.C.Set("__typeof__(%s[%d])", sub.C, dt.Count)
case *dwarf.BoolType:
t.Go = c.bool
t.Align = 1
case *dwarf.CharType:
if t.Size != 1 {
fatalf("%s: unexpected: %d-byte char type - %s", lineno(pos), t.Size, dtype)
}
t.Go = c.int8
t.Align = 1
case *dwarf.EnumType:
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
t.C.Set("enum " + dt.EnumName)
signed := 0
t.EnumValues = make(map[string]int64)
for _, ev := range dt.Val {
t.EnumValues[ev.Name] = ev.Val
if ev.Val < 0 {
signed = signedDelta
}
}
switch t.Size + int64(signed) {
default:
fatalf("%s: unexpected: %d-byte enum type - %s", lineno(pos), t.Size, dtype)
case 1:
t.Go = c.uint8
case 2:
t.Go = c.uint16
case 4:
t.Go = c.uint32
case 8:
t.Go = c.uint64
case 1 + signedDelta:
t.Go = c.int8
case 2 + signedDelta:
t.Go = c.int16
case 4 + signedDelta:
t.Go = c.int32
case 8 + signedDelta:
t.Go = c.int64
}
case *dwarf.FloatType:
switch t.Size {
default:
fatalf("%s: unexpected: %d-byte float type - %s", lineno(pos), t.Size, dtype)
case 4:
t.Go = c.float32
case 8:
t.Go = c.float64
}
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
case *dwarf.ComplexType:
switch t.Size {
default:
fatalf("%s: unexpected: %d-byte complex type - %s", lineno(pos), t.Size, dtype)
case 8:
t.Go = c.complex64
case 16:
t.Go = c.complex128
}
if t.Align = t.Size / 2; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
case *dwarf.FuncType:
// No attempt at translation: would enable calls
// directly between worlds, but we need to moderate those.
t.Go = c.uintptr
t.Align = c.ptrSize
case *dwarf.IntType:
if dt.BitSize > 0 {
fatalf("%s: unexpected: %d-bit int type - %s", lineno(pos), dt.BitSize, dtype)
}
switch t.Size {
default:
fatalf("%s: unexpected: %d-byte int type - %s", lineno(pos), t.Size, dtype)
case 1:
t.Go = c.int8
case 2:
t.Go = c.int16
case 4:
t.Go = c.int32
case 8:
t.Go = c.int64
case 16:
t.Go = &ast.ArrayType{
Len: c.intExpr(t.Size),
Elt: c.uint8,
}
}
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
case *dwarf.PtrType:
// Clang doesn't emit DW_AT_byte_size for pointer types.
if t.Size != c.ptrSize && t.Size != -1 {
fatalf("%s: unexpected: %d-byte pointer type - %s", lineno(pos), t.Size, dtype)
}
t.Size = c.ptrSize
t.Align = c.ptrSize
if _, ok := base(dt.Type).(*dwarf.VoidType); ok {
t.Go = c.goVoidPtr
t.C.Set("void*")
dq := dt.Type
for {
if d, ok := dq.(*dwarf.QualType); ok {
t.C.Set(d.Qual + " " + t.C.String())
dq = d.Type
} else {
break
}
}
break
}
// Placeholder initialization; completed in FinishType.
t.Go = &ast.StarExpr{}
t.C.Set("<incomplete>*")
key := dt.Type.String()
if _, ok := c.ptrs[key]; !ok {
c.ptrKeys = append(c.ptrKeys, dt.Type)
}
c.ptrs[key] = append(c.ptrs[key], t)
case *dwarf.QualType:
t1 := c.Type(dt.Type, pos)
t.Size = t1.Size
t.Align = t1.Align
t.Go = t1.Go
if unionWithPointer[t1.Go] {
unionWithPointer[t.Go] = true
}
t.EnumValues = nil
t.Typedef = ""
t.C.Set("%s "+dt.Qual, t1.C)
return t
case *dwarf.StructType:
// Convert to Go struct, being careful about alignment.
// Have to give it a name to simulate C "struct foo" references.
tag := dt.StructName
if dt.ByteSize < 0 && tag == "" { // opaque unnamed struct - should not be possible
break
}
if tag == "" {
tag = "__" + strconv.Itoa(tagGen)
tagGen++
} else if t.C.Empty() {
t.C.Set(dt.Kind + " " + tag)
}
name := c.Ident("_Ctype_" + dt.Kind + "_" + tag)
t.Go = name // publish before recursive calls
goIdent[name.Name] = name
if dt.ByteSize < 0 {
// Size calculation in c.Struct/c.Opaque will die with size=-1 (unknown),
// so execute the basic things that the struct case would do
// other than try to determine a Go representation.
tt := *t
tt.C = &TypeRepr{"%s %s", []interface{}{dt.Kind, tag}}
tt.Go = c.Ident("struct{}")
typedef[name.Name] = &tt
break
}
switch dt.Kind {
case "class", "union":
t.Go = c.Opaque(t.Size)
if c.dwarfHasPointer(dt, pos) {
unionWithPointer[t.Go] = true
}
if t.C.Empty() {
t.C.Set("__typeof__(unsigned char[%d])", t.Size)
}
t.Align = 1 // TODO: should probably base this on field alignment.
typedef[name.Name] = t
case "struct":
g, csyntax, align := c.Struct(dt, pos)
if t.C.Empty() {
t.C.Set(csyntax)
}
t.Align = align
tt := *t
if tag != "" {
tt.C = &TypeRepr{"struct %s", []interface{}{tag}}
}
tt.Go = g
typedef[name.Name] = &tt
}
case *dwarf.TypedefType:
// Record typedef for printing.
if dt.Name == "_GoString_" {
// Special C name for Go string type.
// Knows string layout used by compilers: pointer plus length,
// which rounds up to 2 pointers after alignment.
t.Go = c.string
t.Size = c.ptrSize * 2
t.Align = c.ptrSize
break
}
if dt.Name == "_GoBytes_" {
// Special C name for Go []byte type.
// Knows slice layout used by compilers: pointer, length, cap.
t.Go = c.Ident("[]byte")
t.Size = c.ptrSize + 4 + 4
t.Align = c.ptrSize
break
}
name := c.Ident("_Ctype_" + dt.Name)
goIdent[name.Name] = name
sub := c.Type(dt.Type, pos)
if c.badPointerTypedef(dt) {
// Treat this typedef as a uintptr.
s := *sub
s.Go = c.uintptr
s.BadPointer = true
sub = &s
// Make sure we update any previously computed type.
if oldType := typedef[name.Name]; oldType != nil {
oldType.Go = sub.Go
oldType.BadPointer = true
}
}
t.Go = name
t.BadPointer = sub.BadPointer
if unionWithPointer[sub.Go] {
unionWithPointer[t.Go] = true
}
t.Size = sub.Size
t.Align = sub.Align
oldType := typedef[name.Name]
if oldType == nil {
tt := *t
tt.Go = sub.Go
tt.BadPointer = sub.BadPointer
typedef[name.Name] = &tt
}
// If sub.Go.Name is "_Ctype_struct_foo" or "_Ctype_union_foo" or "_Ctype_class_foo",
// use that as the Go form for this typedef too, so that the typedef will be interchangeable
// with the base type.
// In -godefs mode, do this for all typedefs.
if isStructUnionClass(sub.Go) || *godefs {
t.Go = sub.Go
if isStructUnionClass(sub.Go) {
// Use the typedef name for C code.
typedef[sub.Go.(*ast.Ident).Name].C = t.C
}
// If we've seen this typedef before, and it
// was an anonymous struct/union/class before
// too, use the old definition.
// TODO: it would be safer to only do this if
// we verify that the types are the same.
if oldType != nil && isStructUnionClass(oldType.Go) {
t.Go = oldType.Go
}
}
case *dwarf.UcharType:
if t.Size != 1 {
fatalf("%s: unexpected: %d-byte uchar type - %s", lineno(pos), t.Size, dtype)
}
t.Go = c.uint8
t.Align = 1
case *dwarf.UintType:
if dt.BitSize > 0 {
fatalf("%s: unexpected: %d-bit uint type - %s", lineno(pos), dt.BitSize, dtype)
}
switch t.Size {
default:
fatalf("%s: unexpected: %d-byte uint type - %s", lineno(pos), t.Size, dtype)
case 1:
t.Go = c.uint8
case 2:
t.Go = c.uint16
case 4:
t.Go = c.uint32
case 8:
t.Go = c.uint64
case 16:
t.Go = &ast.ArrayType{
Len: c.intExpr(t.Size),
Elt: c.uint8,
}
}
if t.Align = t.Size; t.Align >= c.ptrSize {
t.Align = c.ptrSize
}
case *dwarf.VoidType:
t.Go = c.goVoid
t.C.Set("void")
t.Align = 1
}
switch dtype.(type) {
case *dwarf.AddrType, *dwarf.BoolType, *dwarf.CharType, *dwarf.ComplexType, *dwarf.IntType, *dwarf.FloatType, *dwarf.UcharType, *dwarf.UintType:
s := dtype.Common().Name
if s != "" {
if ss, ok := dwarfToName[s]; ok {
s = ss
}
s = strings.Replace(s, " ", "", -1)
name := c.Ident("_Ctype_" + s)
tt := *t
typedef[name.Name] = &tt
if !*godefs {
t.Go = name
}
}
}
if t.Size < 0 {
// Unsized types are [0]byte, unless they're typedefs of other types
// or structs with tags.
// if so, use the name we've already defined.
t.Size = 0
switch dt := dtype.(type) {
case *dwarf.TypedefType:
// ok
case *dwarf.StructType:
if dt.StructName != "" {
break
}
t.Go = c.Opaque(0)
default:
t.Go = c.Opaque(0)
}
if t.C.Empty() {
t.C.Set("void")
}
}
if t.C.Empty() {
fatalf("%s: internal error: did not create C name for %s", lineno(pos), dtype)
}
return t
}
// isStructUnionClass reports whether the type described by the Go syntax x
// is a struct, union, or class with a tag.
func isStructUnionClass(x ast.Expr) bool {
id, ok := x.(*ast.Ident)
if !ok {
return false
}
name := id.Name
return strings.HasPrefix(name, "_Ctype_struct_") ||
strings.HasPrefix(name, "_Ctype_union_") ||
strings.HasPrefix(name, "_Ctype_class_")
}
// FuncArg returns a Go type with the same memory layout as
// dtype when used as the type of a C function argument.
func (c *typeConv) FuncArg(dtype dwarf.Type, pos token.Pos) *Type {
t := c.Type(unqual(dtype), pos)
switch dt := dtype.(type) {
case *dwarf.ArrayType:
// Arrays are passed implicitly as pointers in C.
// In Go, we must be explicit.
tr := &TypeRepr{}
tr.Set("%s*", t.C)
return &Type{
Size: c.ptrSize,
Align: c.ptrSize,
Go: &ast.StarExpr{X: t.Go},
C: tr,
}
case *dwarf.TypedefType:
// C has much more relaxed rules than Go for
// implicit type conversions. When the parameter
// is type T defined as *X, simulate a little of the
// laxness of C by making the argument *X instead of T.
if ptr, ok := base(dt.Type).(*dwarf.PtrType); ok {
// Unless the typedef happens to point to void* since
// Go has special rules around using unsafe.Pointer.
if _, void := base(ptr.Type).(*dwarf.VoidType); void {
break
}
// ...or the typedef is one in which we expect bad pointers.
// It will be a uintptr instead of *X.
if c.baseBadPointerTypedef(dt) {
break
}
t = c.Type(ptr, pos)
if t == nil {
return nil
}
// For a struct/union/class, remember the C spelling,
// in case it has __attribute__((unavailable)).
// See issue 2888.
if isStructUnionClass(t.Go) {
t.Typedef = dt.Name
}
}
}
return t
}
// FuncType returns the Go type analogous to dtype.
// There is no guarantee about matching memory layout.
func (c *typeConv) FuncType(dtype *dwarf.FuncType, pos token.Pos) *FuncType {
p := make([]*Type, len(dtype.ParamType))
gp := make([]*ast.Field, len(dtype.ParamType))
for i, f := range dtype.ParamType {
// gcc's DWARF generator outputs a single DotDotDotType parameter for
// function pointers that specify no parameters (e.g. void
// (*__cgo_0)()). Treat this special case as void. This case is
// invalid according to ISO C anyway (i.e. void (*__cgo_1)(...) is not
// legal).
if _, ok := f.(*dwarf.DotDotDotType); ok && i == 0 {
p, gp = nil, nil
break
}
p[i] = c.FuncArg(f, pos)
gp[i] = &ast.Field{Type: p[i].Go}
}
var r *Type
var gr []*ast.Field
if _, ok := base(dtype.ReturnType).(*dwarf.VoidType); ok {
gr = []*ast.Field{{Type: c.goVoid}}
} else if dtype.ReturnType != nil {
r = c.Type(unqual(dtype.ReturnType), pos)
gr = []*ast.Field{{Type: r.Go}}
}
return &FuncType{
Params: p,
Result: r,
Go: &ast.FuncType{
Params: &ast.FieldList{List: gp},
Results: &ast.FieldList{List: gr},
},
}
}
// Identifier
func (c *typeConv) Ident(s string) *ast.Ident {
return ast.NewIdent(s)
}
// Opaque type of n bytes.
func (c *typeConv) Opaque(n int64) ast.Expr {
return &ast.ArrayType{
Len: c.intExpr(n),
Elt: c.byte,
}
}
// Expr for integer n.
func (c *typeConv) intExpr(n int64) ast.Expr {
return &ast.BasicLit{
Kind: token.INT,
Value: strconv.FormatInt(n, 10),
}
}
// Add padding of given size to fld.
func (c *typeConv) pad(fld []*ast.Field, sizes []int64, size int64) ([]*ast.Field, []int64) {
n := len(fld)
fld = fld[0 : n+1]
fld[n] = &ast.Field{Names: []*ast.Ident{c.Ident("_")}, Type: c.Opaque(size)}
sizes = sizes[0 : n+1]
sizes[n] = size
return fld, sizes
}
// Struct conversion: return Go and (gc) C syntax for type.
func (c *typeConv) Struct(dt *dwarf.StructType, pos token.Pos) (expr *ast.StructType, csyntax string, align int64) {
// Minimum alignment for a struct is 1 byte.
align = 1
var buf bytes.Buffer
buf.WriteString("struct {")
fld := make([]*ast.Field, 0, 2*len(dt.Field)+1) // enough for padding around every field
sizes := make([]int64, 0, 2*len(dt.Field)+1)
off := int64(0)
// Rename struct fields that happen to be named Go keywords into
// _{keyword}. Create a map from C ident -> Go ident. The Go ident will
// be mangled. Any existing identifier that already has the same name on
// the C-side will cause the Go-mangled version to be prefixed with _.
// (e.g. in a struct with fields '_type' and 'type', the latter would be
// rendered as '__type' in Go).
ident := make(map[string]string)
used := make(map[string]bool)
for _, f := range dt.Field {
ident[f.Name] = f.Name
used[f.Name] = true
}
if !*godefs {
for cid, goid := range ident {
if token.Lookup(goid).IsKeyword() {
// Avoid keyword
goid = "_" + goid
// Also avoid existing fields
for _, exist := used[goid]; exist; _, exist = used[goid] {
goid = "_" + goid
}
used[goid] = true
ident[cid] = goid
}
}
}
anon := 0
for _, f := range dt.Field {
name := f.Name
ft := f.Type
// In godefs mode, if this field is a C11
// anonymous union then treat the first field in the
// union as the field in the struct. This handles
// cases like the glibc <sys/resource.h> file; see
// issue 6677.
if *godefs {
if st, ok := f.Type.(*dwarf.StructType); ok && name == "" && st.Kind == "union" && len(st.Field) > 0 && !used[st.Field[0].Name] {
name = st.Field[0].Name
ident[name] = name
ft = st.Field[0].Type
}
}
// TODO: Handle fields that are anonymous structs by
// promoting the fields of the inner struct.
t := c.Type(ft, pos)
tgo := t.Go
size := t.Size
talign := t.Align
if f.BitSize > 0 {
switch f.BitSize {
case 8, 16, 32, 64:
default:
continue
}
size = f.BitSize / 8
name := tgo.(*ast.Ident).String()
if strings.HasPrefix(name, "int") {
name = "int"
} else {
name = "uint"
}
tgo = ast.NewIdent(name + fmt.Sprint(f.BitSize))
talign = size
}
if talign > 0 && f.ByteOffset%talign != 0 {
// Drop misaligned fields, the same way we drop integer bit fields.
// The goal is to make available what can be made available.
// Otherwise one bad and unneeded field in an otherwise okay struct
// makes the whole program not compile. Much of the time these
// structs are in system headers that cannot be corrected.
continue
}
// Round off up to talign, assumed to be a power of 2.
off = (off + talign - 1) &^ (talign - 1)
if f.ByteOffset > off {
fld, sizes = c.pad(fld, sizes, f.ByteOffset-off)
off = f.ByteOffset
}
if f.ByteOffset < off {
// Drop a packed field that we can't represent.
continue
}
n := len(fld)
fld = fld[0 : n+1]
if name == "" {
name = fmt.Sprintf("anon%d", anon)
anon++
ident[name] = name
}
fld[n] = &ast.Field{Names: []*ast.Ident{c.Ident(ident[name])}, Type: tgo}
sizes = sizes[0 : n+1]
sizes[n] = size
off += size
buf.WriteString(t.C.String())
buf.WriteString(" ")
buf.WriteString(name)
buf.WriteString("; ")
if talign > align {
align = talign
}
}
if off < dt.ByteSize {
fld, sizes = c.pad(fld, sizes, dt.ByteSize-off)
off = dt.ByteSize
}
// If the last field in a non-zero-sized struct is zero-sized
// the compiler is going to pad it by one (see issue 9401).
// We can't permit that, because then the size of the Go
// struct will not be the same as the size of the C struct.
// Our only option in such a case is to remove the field,
// which means that it cannot be referenced from Go.
for off > 0 && sizes[len(sizes)-1] == 0 {
n := len(sizes)
fld = fld[0 : n-1]
sizes = sizes[0 : n-1]
}
if off != dt.ByteSize {
fatalf("%s: struct size calculation error off=%d bytesize=%d", lineno(pos), off, dt.ByteSize)
}
buf.WriteString("}")
csyntax = buf.String()
if *godefs {
godefsFields(fld)
}
expr = &ast.StructType{Fields: &ast.FieldList{List: fld}}
return
}
// dwarfHasPointer reports whether the DWARF type dt contains a pointer.
func (c *typeConv) dwarfHasPointer(dt dwarf.Type, pos token.Pos) bool {
switch dt := dt.(type) {
default:
fatalf("%s: unexpected type: %s", lineno(pos), dt)
return false
case *dwarf.AddrType, *dwarf.BoolType, *dwarf.CharType, *dwarf.EnumType,
*dwarf.FloatType, *dwarf.ComplexType, *dwarf.FuncType,
*dwarf.IntType, *dwarf.UcharType, *dwarf.UintType, *dwarf.VoidType:
return false
case *dwarf.ArrayType:
return c.dwarfHasPointer(dt.Type, pos)
case *dwarf.PtrType:
return true
case *dwarf.QualType:
return c.dwarfHasPointer(dt.Type, pos)
case *dwarf.StructType:
for _, f := range dt.Field {
if c.dwarfHasPointer(f.Type, pos) {
return true
}
}
return false
case *dwarf.TypedefType:
if dt.Name == "_GoString_" || dt.Name == "_GoBytes_" {
return true
}
return c.dwarfHasPointer(dt.Type, pos)
}
}
func upper(s string) string {
if s == "" {
return ""
}
r, size := utf8.DecodeRuneInString(s)
if r == '_' {
return "X" + s
}
return string(unicode.ToUpper(r)) + s[size:]
}
// godefsFields rewrites field names for use in Go or C definitions.
// It strips leading common prefixes (like tv_ in tv_sec, tv_usec),
// converts names to upper case, and rewrites _ into Pad_cgo_n,
// so that all fields are exported.
func godefsFields(fld []*ast.Field) {
prefix := fieldPrefix(fld)
npad := 0
for _, f := range fld {
for _, n := range f.Names {
if n.Name != prefix {
n.Name = strings.TrimPrefix(n.Name, prefix)
}
if n.Name == "_" {
// Use exported name instead.
n.Name = "Pad_cgo_" + strconv.Itoa(npad)
npad++
}
n.Name = upper(n.Name)
}
}
}
// fieldPrefix returns the prefix that should be removed from all the
// field names when generating the C or Go code. For generated
// C, we leave the names as is (tv_sec, tv_usec), since that's what
// people are used to seeing in C. For generated Go code, such as
// package syscall's data structures, we drop a common prefix
// (so sec, usec, which will get turned into Sec, Usec for exporting).
func fieldPrefix(fld []*ast.Field) string {
prefix := ""
for _, f := range fld {
for _, n := range f.Names {
// Ignore field names that don't have the prefix we're
// looking for. It is common in C headers to have fields
// named, say, _pad in an otherwise prefixed header.
// If the struct has 3 fields tv_sec, tv_usec, _pad1, then we
// still want to remove the tv_ prefix.
// The check for "orig_" here handles orig_eax in the
// x86 ptrace register sets, which otherwise have all fields
// with reg_ prefixes.
if strings.HasPrefix(n.Name, "orig_") || strings.HasPrefix(n.Name, "_") {
continue
}
i := strings.Index(n.Name, "_")
if i < 0 {
continue
}
if prefix == "" {
prefix = n.Name[:i+1]
} else if prefix != n.Name[:i+1] {
return ""
}
}
}
return prefix
}
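// Editor's note (illustrative, not from the original source): for a struct such
// as
//
//	struct timeval { long tv_sec; long tv_usec; };
//
// fieldPrefix returns "tv_", so in -godefs mode godefsFields renames the fields
// to Sec and Usec. If the prefixes disagreed (say tv_sec next to it_value),
// fieldPrefix would return "" and the names would be left untrimmed.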
// badPointerTypedef reports whether t is a C typedef that should not be considered a pointer in Go.
// A typedef is bad if C code sometimes stores non-pointers in this type.
// TODO: Currently our best solution is to find these manually and list them as
// they come up. A better solution is desired.
func (c *typeConv) badPointerTypedef(dt *dwarf.TypedefType) bool {
if c.badCFType(dt) {
return true
}
if c.badJNI(dt) {
return true
}
if c.badEGLDisplay(dt) {
return true
}
return false
}
// baseBadPointerTypedef reports whether the base of a chain of typedefs is a bad typedef
// as badPointerTypedef reports.
func (c *typeConv) baseBadPointerTypedef(dt *dwarf.TypedefType) bool {
for {
if t, ok := dt.Type.(*dwarf.TypedefType); ok {
dt = t
continue
}
break
}
return c.badPointerTypedef(dt)
}
func (c *typeConv) badCFType(dt *dwarf.TypedefType) bool {
// The real bad types are CFNumberRef and CFDateRef.
// Sometimes non-pointers are stored in these types.
// CFTypeRef is a supertype of those, so it can have bad pointers in it as well.
// We return true for the other *Ref types just so casting between them is easier.
// We identify the correct set of types as those ending in Ref and for which
// there exists a corresponding GetTypeID function.
// See comment below for details about the bad pointers.
if goos != "darwin" {
return false
}
s := dt.Name
if !strings.HasSuffix(s, "Ref") {
return false
}
s = s[:len(s)-3]
if s == "CFType" {
return true
}
if c.getTypeIDs[s] {
return true
}
if i := strings.Index(s, "Mutable"); i >= 0 && c.getTypeIDs[s[:i]+s[i+7:]] {
// Mutable and immutable variants share a type ID.
return true
}
return false
}
// Comment from Darwin's CFInternal.h
/*
// Tagged pointer support
// Low-bit set means tagged object, next 3 bits (currently)
// define the tagged object class, next 4 bits are for type
// information for the specific tagged object class. Thus,
// the low byte is for type info, and the rest of a pointer
// (32 or 64-bit) is for payload, whatever the tagged class.
//
// Note that the specific integers used to identify the
// specific tagged classes can and will change from release
// to release (that's why this stuff is in CF*Internal*.h),
// as can the definition of type info vs payload above.
//
#if __LP64__
#define CF_IS_TAGGED_OBJ(PTR) ((uintptr_t)(PTR) & 0x1)
#define CF_TAGGED_OBJ_TYPE(PTR) ((uintptr_t)(PTR) & 0xF)
#else
#define CF_IS_TAGGED_OBJ(PTR) 0
#define CF_TAGGED_OBJ_TYPE(PTR) 0
#endif
enum {
kCFTaggedObjectID_Invalid = 0,
kCFTaggedObjectID_Atom = (0 << 1) + 1,
kCFTaggedObjectID_Undefined3 = (1 << 1) + 1,
kCFTaggedObjectID_Undefined2 = (2 << 1) + 1,
kCFTaggedObjectID_Integer = (3 << 1) + 1,
kCFTaggedObjectID_DateTS = (4 << 1) + 1,
kCFTaggedObjectID_ManagedObjectID = (5 << 1) + 1, // Core Data
kCFTaggedObjectID_Date = (6 << 1) + 1,
kCFTaggedObjectID_Undefined7 = (7 << 1) + 1,
};
*/
func (c *typeConv) badJNI(dt *dwarf.TypedefType) bool {
// In Dalvik and ART, the jobject type in the JNI interface of the JVM has the
// property that it is sometimes (always?) a small integer instead of a real pointer.
// Note: although only the android JVMs are bad in this respect, we declare the JNI types
// bad regardless of platform, so the same Go code compiles on both android and non-android.
if parent, ok := jniTypes[dt.Name]; ok {
// Try to make sure we're talking about a JNI type, not just some random user's
// type that happens to use the same name.
// C doesn't have the notion of a package, so it's hard to be certain.
// Walk up to jobject, checking each typedef on the way.
w := dt
for parent != "" {
t, ok := w.Type.(*dwarf.TypedefType)
if !ok || t.Name != parent {
return false
}
w = t
parent, ok = jniTypes[w.Name]
if !ok {
return false
}
}
// Check that the typedef is either:
// 1:
// struct _jobject;
// typedef struct _jobject *jobject;
// 2: (in NDK16 in C++)
// class _jobject {};
// typedef _jobject* jobject;
// 3: (in NDK16 in C)
// typedef void* jobject;
if ptr, ok := w.Type.(*dwarf.PtrType); ok {
switch v := ptr.Type.(type) {
case *dwarf.VoidType:
return true
case *dwarf.StructType:
if v.StructName == "_jobject" && len(v.Field) == 0 {
switch v.Kind {
case "struct":
if v.Incomplete {
return true
}
case "class":
if !v.Incomplete {
return true
}
}
}
}
}
}
return false
}
func (c *typeConv) badEGLDisplay(dt *dwarf.TypedefType) bool {
if dt.Name != "EGLDisplay" {
return false
}
// Check that the typedef is "typedef void *EGLDisplay".
if ptr, ok := dt.Type.(*dwarf.PtrType); ok {
if _, ok := ptr.Type.(*dwarf.VoidType); ok {
return true
}
}
return false
}
// jniTypes maps from JNI types that we want to be uintptrs, to the underlying type to which
// they are mapped. The base "jobject" maps to the empty string.
var jniTypes = map[string]string{
"jobject": "",
"jclass": "jobject",
"jthrowable": "jobject",
"jstring": "jobject",
"jarray": "jobject",
"jbooleanArray": "jarray",
"jbyteArray": "jarray",
"jcharArray": "jarray",
"jshortArray": "jarray",
"jintArray": "jarray",
"jlongArray": "jarray",
"jfloatArray": "jarray",
"jdoubleArray": "jarray",
"jobjectArray": "jarray",
"jweak": "jobject",
}
| ["\"CC\"", "\"GCC\""] | [] | ["GCC", "CC"] | [] | ["GCC", "CC"] | go | 2 | 0 |
core/camel-core/src/test/java/org/apache/camel/component/properties/PropertiesComponentDefaultFunctionsTest.java
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.properties;
import java.util.Map;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
import org.junit.jupiter.api.Test;
public class PropertiesComponentDefaultFunctionsTest extends ContextTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
private static Map.Entry<String, String> anyNonEmptyEnvironmentVariable() {
for (Map.Entry<String, String> entry : System.getenv().entrySet()) {
if (entry.getValue() != null && !"".equals(entry.getValue())) {
return entry;
}
}
throw new IllegalStateException();
}
@Test
public void testFunction() throws Exception {
System.setProperty("FOO", "mock:foo");
Map.Entry<String, String> env = anyNonEmptyEnvironmentVariable();
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:start").to("{{sys:FOO}}").transform().constant("{{env:" + env.getKey() + "}}").to("mock:bar");
}
});
context.start();
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:bar").expectedBodiesReceived(env.getValue());
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
System.clearProperty("FOO");
}
@Test
public void testFunctionGetOrElse() throws Exception {
System.setProperty("FOO2", "mock:foo");
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:start").to("{{sys:FOO2}}").to("{{env:BAR2:mock:bar}}");
}
});
context.start();
getMockEndpoint("mock:foo").expectedMessageCount(1);
getMockEndpoint("mock:bar").expectedMessageCount(1);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
System.clearProperty("FOO2");
}
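    // Editor's note (illustrative, based on the test above): the
    // {{env:BAR2:mock:bar}} placeholder resolves the BAR2 environment variable
    // and falls back to the literal "mock:bar" endpoint when BAR2 is not set,
    // which is why this route needs no extra environment setup.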
}
| [] | [] | [] | [] | [] | java | 0 | 0 |
main.go
|
package main
import (
"os"
"github.com/TinyKitten/sugoibot/bot"
"github.com/TinyKitten/sugoibot/env"
)
func main() {
env.LoadEnv()
slackToken := os.Getenv("SLACK_USER_TOKEN")
slackbot := bot.NewBot(slackToken)
slackbot.StartListenMessage()
}
| ["\"SLACK_USER_TOKEN\""] | [] | ["SLACK_USER_TOKEN"] | [] | ["SLACK_USER_TOKEN"] | go | 1 | 0 |