filename (string, length 4-198) | content (string, length 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, length 2-3.9k) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
Tests/test_MSAProbs_tool.py | # Copyright 2013 by Christian Brueffer. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for MSAProbs tool."""
import os
import sys
import unittest
from Bio import AlignIO
from Bio import MissingExternalDependencyError
from Bio import SeqIO
from Bio.Align.Applications import MSAProbsCommandline
from Bio.Application import ApplicationError
from subprocess import getoutput
#################################################################
# Try to avoid problems when the OS is in another language
os.environ["LANG"] = "C"
msaprobs_exe = None
try:
output = getoutput("msaprobs -version")
if output.startswith("MSAPROBS version"):
msaprobs_exe = "msaprobs"
except FileNotFoundError:
pass
if not msaprobs_exe:
raise MissingExternalDependencyError(
"Install msaprobs if you want to use MSAProbs from Biopython."
)
class MSAProbsTestCase(unittest.TestCase):
def setUp(self):
self.files_to_clean = set()
def tearDown(self):
for filename in self.files_to_clean:
if os.path.isfile(filename):
os.remove(filename)
def standard_test_procedure(self, cline):
"""Shared testing procedure used by all tests."""
# Mark output files for later cleanup.
self.add_file_to_clean(cline.outfile)
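        # Parse the input up front so a malformed file fails fast, and check
        # that the command line survives a repr()/eval() round trip unchanged.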
input_records = SeqIO.to_dict(SeqIO.parse(cline.infile, "fasta"))
self.assertEqual(str(eval(repr(cline))), str(cline))
output, error = cline()
def add_file_to_clean(self, filename):
"""Add a file for deferred removal by the tearDown routine."""
self.files_to_clean.add(filename)
#################################################################
class MSAProbsTestErrorConditions(MSAProbsTestCase):
def test_empty_file(self):
"""Test an empty file."""
input_file = "does_not_exist.fasta"
self.assertFalse(os.path.isfile(input_file))
cline = MSAProbsCommandline(msaprobs_exe, infile=input_file)
try:
stdout, stderr = cline()
except ApplicationError as err:
self.assertTrue(
"Cannot open sequence file" in str(err)
or "Cannot open input file" in str(err)
or "Non-zero return code " in str(err),
str(err),
)
else:
self.fail(f"Should have failed, returned:\n{stdout}\n{stderr}")
def test_single_sequence(self):
"""Test an input file containing a single sequence."""
input_file = "Fasta/f001"
self.assertTrue(os.path.isfile(input_file))
self.assertEqual(len(list(SeqIO.parse(input_file, "fasta"))), 1)
cline = MSAProbsCommandline(msaprobs_exe, infile=input_file)
try:
stdout, stderr = cline()
except ApplicationError as err:
if sys.platform == "win32":
expected = 0xC0000005
elif sys.platform == "darwin":
expected = -11
else:
expected = 139 # TODO: Check return codes on various other platforms
self.assertEqual(expected, err.returncode)
else:
self.fail(f"Should have failed, returned:\n{stdout}\n{stderr}")
def test_invalid_format(self):
"""Test an input file in an invalid format."""
input_file = "Medline/pubmed_result1.txt"
self.assertTrue(os.path.isfile(input_file))
cline = MSAProbsCommandline(msaprobs_exe, infile=input_file)
try:
stdout, stderr = cline()
except ApplicationError as err:
self.assertEqual(err.returncode, 1)
else:
self.fail(f"Should have failed, returned:\n{stdout}\n{stderr}")
#################################################################
class MSAProbsTestNormalConditions(MSAProbsTestCase):
def test_simple_fasta(self):
"""Test a simple fasta file."""
input_file = "Registry/seqs.fasta"
output_file = "temp_test.aln"
cline = MSAProbsCommandline(
msaprobs_exe, infile=input_file, outfile=output_file, clustalw=True
)
self.standard_test_procedure(cline)
def test_properties(self):
"""Test setting options via properties."""
input_file = "Registry/seqs.fasta"
output_file = "temp_test.aln"
cline = MSAProbsCommandline(msaprobs_exe)
cline.infile = input_file
cline.outfile = output_file
cline.clustalw = True
self.standard_test_procedure(cline)
def test_input_filename_with_space(self):
"""Test an input filename containing a space."""
input_file = "Clustalw/temp horses.fasta"
with open(input_file, "w") as handle:
SeqIO.write(SeqIO.parse("Phylip/hennigian.phy", "phylip"), handle, "fasta")
output_file = "temp_test.aln"
cline = MSAProbsCommandline(
msaprobs_exe, infile=input_file, outfile=output_file, clustalw=True
)
self.add_file_to_clean(input_file)
self.standard_test_procedure(cline)
def test_output_filename_with_spaces(self):
"""Test an output filename containing spaces."""
input_file = "Registry/seqs.fasta"
output_file = "temp with spaces.aln"
cline = MSAProbsCommandline(
msaprobs_exe, infile=input_file, outfile=output_file, clustalw=True
)
self.standard_test_procedure(cline)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner)
| [] | [] | ["LANG"] | [] | ["LANG"] | python | 1 | 0 | |
configuration-service/restapi/configure_configuration_service.go | // This file is safe to edit. Once it exists it will not be overwritten
package restapi
import (
"crypto/tls"
"fmt"
keptncommon "github.com/keptn/go-utils/pkg/lib/keptn"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
keptnapi "github.com/keptn/go-utils/pkg/api/utils"
"github.com/keptn/keptn/configuration-service/restapi/operations/event"
"github.com/keptn/keptn/configuration-service/restapi/operations/remediation"
"github.com/keptn/keptn/configuration-service/restapi/operations/services"
errors "github.com/go-openapi/errors"
runtime "github.com/go-openapi/runtime"
handlers "github.com/keptn/keptn/configuration-service/handlers"
"github.com/keptn/keptn/configuration-service/restapi/operations"
"github.com/keptn/keptn/configuration-service/restapi/operations/project"
"github.com/keptn/keptn/configuration-service/restapi/operations/project_resource"
"github.com/keptn/keptn/configuration-service/restapi/operations/service"
"github.com/keptn/keptn/configuration-service/restapi/operations/service_default_resource"
"github.com/keptn/keptn/configuration-service/restapi/operations/service_resource"
"github.com/keptn/keptn/configuration-service/restapi/operations/stage"
"github.com/keptn/keptn/configuration-service/restapi/operations/stage_resource"
)
//go:generate swagger generate server --target ../../configuration-service --name ConfigurationService --spec ../swagger.yaml
func configureFlags(api *operations.ConfigurationServiceAPI) {
// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }
}
func configureAPI(api *operations.ConfigurationServiceAPI) http.Handler {
// configure the api here
api.ServeError = errors.ServeError
// Set your custom logger if needed. Default one is log.Printf
// Expected interface func(string, ...interface{})
//
// Example:
// api.Logger = log.Printf
api.JSONConsumer = runtime.JSONConsumer()
api.JSONProducer = runtime.JSONProducer()
api.ProjectDeleteProjectProjectNameHandler = project.DeleteProjectProjectNameHandlerFunc(handlers.DeleteProjectProjectNameHandlerFunc)
api.ProjectResourceDeleteProjectProjectNameResourceResourceURIHandler = project_resource.DeleteProjectProjectNameResourceResourceURIHandlerFunc(handlers.DeleteProjectProjectNameResourceResourceURIHandlerFunc)
api.ServiceDefaultResourceDeleteProjectProjectNameServiceServiceNameResourceResourceURIHandler = service_default_resource.DeleteProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.DeleteProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc)
api.StageDeleteProjectProjectNameStageStageNameHandler = stage.DeleteProjectProjectNameStageStageNameHandlerFunc(handlers.DeleteProjectProjectNameStageStageNameHandlerFunc)
api.StageResourceDeleteProjectProjectNameStageStageNameResourceResourceURIHandler = stage_resource.DeleteProjectProjectNameStageStageNameResourceResourceURIHandlerFunc(handlers.DeleteProjectProjectNameStageStageNameResourceResourceURIHandlerFunc)
api.ServiceDeleteProjectProjectNameStageStageNameServiceServiceNameHandler = service.DeleteProjectProjectNameStageStageNameServiceServiceNameHandlerFunc(handlers.DeleteProjectProjectNameStageStageNameServiceServiceNameHandlerFunc)
api.ServiceResourceDeleteProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandler = service_resource.DeleteProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.DeleteProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc)
api.ProjectGetProjectHandler = project.GetProjectHandlerFunc(handlers.GetProjectHandlerFunc)
api.ProjectGetProjectProjectNameHandler = project.GetProjectProjectNameHandlerFunc(handlers.GetProjectProjectNameHandlerFunc)
api.ProjectResourceGetProjectProjectNameResourceHandler = project_resource.GetProjectProjectNameResourceHandlerFunc(handlers.GetProjectProjectNameResourceHandlerFunc)
api.ProjectResourceGetProjectProjectNameResourceResourceURIHandler = project_resource.GetProjectProjectNameResourceResourceURIHandlerFunc(handlers.GetProjectProjectNameResourceResourceURIHandlerFunc)
api.ServiceDefaultResourceGetProjectProjectNameServiceServiceNameResourceHandler = service_default_resource.GetProjectProjectNameServiceServiceNameResourceHandlerFunc(handlers.GetProjectProjectNameServiceServiceNameResourceHandlerFunc)
api.ServiceDefaultResourceGetProjectProjectNameServiceServiceNameResourceResourceURIHandler = service_default_resource.GetProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.GetProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc)
api.StageGetProjectProjectNameStageHandler = stage.GetProjectProjectNameStageHandlerFunc(handlers.GetProjectProjectNameStageHandlerFunc)
api.StageGetProjectProjectNameStageStageNameHandler = stage.GetProjectProjectNameStageStageNameHandlerFunc(handlers.GetProjectProjectNameStageStageNameHandlerFunc)
api.StageResourceGetProjectProjectNameStageStageNameResourceHandler = stage_resource.GetProjectProjectNameStageStageNameResourceHandlerFunc(handlers.GetProjectProjectNameStageStageNameResourceHandlerFunc)
api.StageResourceGetProjectProjectNameStageStageNameResourceResourceURIHandler = stage_resource.GetProjectProjectNameStageStageNameResourceResourceURIHandlerFunc(handlers.GetProjectProjectNameStageStageNameResourceResourceURIHandlerFunc)
api.ServiceGetProjectProjectNameStageStageNameServiceHandler = service.GetProjectProjectNameStageStageNameServiceHandlerFunc(handlers.GetProjectProjectNameStageStageNameServiceHandlerFunc)
api.ServiceGetProjectProjectNameStageStageNameServiceServiceNameHandler = service.GetProjectProjectNameStageStageNameServiceServiceNameHandlerFunc(handlers.GetProjectProjectNameStageStageNameServiceServiceNameHandlerFunc)
api.ServiceResourceGetProjectProjectNameStageStageNameServiceServiceNameResourceHandler = service_resource.GetProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc(handlers.GetProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc)
api.ServiceResourceGetProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandler = service_resource.GetProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.GetProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc)
api.ProjectPostProjectHandler = project.PostProjectHandlerFunc(handlers.PostProjectHandlerFunc)
api.ProjectResourcePostProjectProjectNameResourceHandler = project_resource.PostProjectProjectNameResourceHandlerFunc(handlers.PostProjectProjectNameResourceHandlerFunc)
api.ServiceDefaultResourcePostProjectProjectNameServiceServiceNameResourceHandler = service_default_resource.PostProjectProjectNameServiceServiceNameResourceHandlerFunc(handlers.PostProjectProjectNameServiceServiceNameResourceHandlerFunc)
api.StagePostProjectProjectNameStageHandler = stage.PostProjectProjectNameStageHandlerFunc(handlers.PostProjectProjectNameStageHandlerFunc)
api.StageResourcePostProjectProjectNameStageStageNameResourceHandler = stage_resource.PostProjectProjectNameStageStageNameResourceHandlerFunc(handlers.PostProjectProjectNameStageStageNameResourceHandlerFunc)
api.ServicePostProjectProjectNameStageStageNameServiceHandler = service.PostProjectProjectNameStageStageNameServiceHandlerFunc(handlers.PostProjectProjectNameStageStageNameServiceHandlerFunc)
api.ServiceResourcePostProjectProjectNameStageStageNameServiceServiceNameResourceHandler = service_resource.PostProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc(handlers.PostProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc)
api.ProjectPutProjectProjectNameHandler = project.PutProjectProjectNameHandlerFunc(handlers.PutProjectProjectNameHandlerFunc)
api.ProjectResourcePutProjectProjectNameResourceHandler = project_resource.PutProjectProjectNameResourceHandlerFunc(handlers.PutProjectProjectNameResourceHandlerFunc)
api.ProjectResourcePutProjectProjectNameResourceResourceURIHandler = project_resource.PutProjectProjectNameResourceResourceURIHandlerFunc(handlers.PutProjectProjectNameResourceResourceURIHandlerFunc)
api.ServiceDefaultResourcePutProjectProjectNameServiceServiceNameResourceHandler = service_default_resource.PutProjectProjectNameServiceServiceNameResourceHandlerFunc(handlers.PutProjectProjectNameServiceServiceNameResourceHandlerFunc)
api.ServiceDefaultResourcePutProjectProjectNameServiceServiceNameResourceResourceURIHandler = service_default_resource.PutProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.PutProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc)
api.StagePutProjectProjectNameStageStageNameHandler = stage.PutProjectProjectNameStageStageNameHandlerFunc(handlers.PutProjectProjectNameStageStageNameHandlerFunc)
api.StageResourcePutProjectProjectNameStageStageNameResourceHandler = stage_resource.PutProjectProjectNameStageStageNameResourceHandlerFunc(handlers.PutProjectProjectNameStageStageNameResourceHandlerFunc)
api.StageResourcePutProjectProjectNameStageStageNameResourceResourceURIHandler = stage_resource.PutProjectProjectNameStageStageNameResourceResourceURIHandlerFunc(handlers.PutProjectProjectNameStageStageNameResourceResourceURIHandlerFunc)
api.ServicePutProjectProjectNameStageStageNameServiceServiceNameHandler = service.PutProjectProjectNameStageStageNameServiceServiceNameHandlerFunc(handlers.PutProjectProjectNameStageStageNameServiceServiceNameHandlerFunc)
api.ServiceResourcePutProjectProjectNameStageStageNameServiceServiceNameResourceHandler = service_resource.PutProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc(handlers.PutProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc)
api.ServiceResourcePutProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandler = service_resource.PutProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.PutProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc)
api.EventHandleEventHandler = event.HandleEventHandlerFunc(handlers.HandleEventHandlerFunc)
api.ServicesGetServicesHandler = services.GetServicesHandlerFunc(handlers.GetServices)
api.ServicesGetServiceHandler = services.GetServiceHandlerFunc(handlers.GetService)
api.RemediationCreateRemediationHandler = remediation.CreateRemediationHandlerFunc(handlers.CreateRemediation)
api.RemediationGetRemediationsHandler = remediation.GetRemediationsHandlerFunc(handlers.GetRemediations)
api.RemediationGetRemediationsForContextHandler = remediation.GetRemediationsForContextHandlerFunc(handlers.GetRemediationsForContext)
api.RemediationCloseRemediationsHandler = remediation.CloseRemediationsHandlerFunc(handlers.CloseRemediations)
api.ServerShutdown = func() {}
return setupGlobalMiddleware(api.Serve(setupMiddlewares))
}
// The TLS configuration before HTTPS server starts.
func configureTLS(tlsConfig *tls.Config) {
// Make all necessary changes to the TLS configuration here.
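	// A minimal hardening sketch, if desired (illustrative values only, not
	// part of the generated scaffold):
	//   tlsConfig.MinVersion = tls.VersionTLS12
	//   tlsConfig.PreferServerCipherSuites = true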
}
// As soon as server is initialized but not run yet, this function will be called.
// If you need to modify a config, store server instance to stop it individually later, this is the place.
// This function can be called multiple times, depending on the number of serving schemes.
// scheme value will be set accordingly: "http", "https" or "unix"
func configureServer(s *http.Server, scheme, addr string) {
logger := keptncommon.NewLogger("", "", "configuration-service")
if os.Getenv("env") == "production" {
///////// initialize git ////////////
logger.Debug("Configuring git user.email")
cmd := exec.Command("git", "config", "--global", "user.email", "[email protected]")
_, err := cmd.Output()
if err != nil {
logger.Error("Could not configure git user.email: " + err.Error())
}
logger.Debug("Configuring git user.name")
cmd = exec.Command("git", "config", "--global", "user.name", "keptn")
_, err = cmd.Output()
if err != nil {
logger.Error("Could not configure git user.name: " + err.Error())
}
////////////////////////////////////
}
}
// The middleware configuration is for the handler executors. These do not apply to the swagger.json document.
// The middleware executes after routing but before authentication, binding and validation
func setupMiddlewares(handler http.Handler) http.Handler {
return handler
}
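// A logging middleware is one thing that could be plugged in above; a minimal
// sketch assuming only the standard library (the name loggingMiddleware is
// hypothetical, not part of this service):
//
//	func loggingMiddleware(next http.Handler) http.Handler {
//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			start := time.Now()
//			next.ServeHTTP(w, r)
//			fmt.Println(r.Method, r.URL.Path, time.Since(start))
//		})
//	}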
// The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document.
// So this is a good place to plug in a panic handling middleware, logging and metrics
func setupGlobalMiddleware(handler http.Handler) http.Handler {
prefixPath := os.Getenv("PREFIX_PATH")
if len(prefixPath) > 0 {
// Set the prefix-path in the swagger.yaml
input, err := ioutil.ReadFile("swagger-ui/swagger.yaml")
if err == nil {
editedSwagger := strings.Replace(string(input), "basePath: /api/configuration-service/v1",
"basePath: "+prefixPath+"/api/configuration-service/v1", -1)
err = ioutil.WriteFile("swagger-ui/swagger.yaml", []byte(editedSwagger), 0644)
if err != nil {
fmt.Println("Failed to write edited swagger.yaml")
}
} else {
fmt.Println("Failed to set basePath in swagger.yaml")
}
}
go keptnapi.RunHealthEndpoint("10999")
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Serving ./swagger-ui/
		if strings.HasPrefix(r.URL.Path, "/swagger-ui/") {
http.StripPrefix("/swagger-ui/", http.FileServer(http.Dir("swagger-ui"))).ServeHTTP(w, r)
return
}
handler.ServeHTTP(w, r)
})
}
| ["\"env\"", "\"PREFIX_PATH\""] | [] | ["PREFIX_PATH", "env"] | [] | ["PREFIX_PATH", "env"] | go | 2 | 0 | |
handlers/doctor.go | package handlers
import (
"os"
"github.com/apex/log"
"github.com/metrue/fx/constants"
"github.com/metrue/fx/context"
"github.com/metrue/fx/doctor"
)
// Doctor command handle
func Doctor(ctx context.Contexter) error {
host := os.Getenv("DOCKER_REMOTE_HOST_ADDR")
user := os.Getenv("DOCKER_REMOTE_HOST_USER")
password := os.Getenv("DOCKER_REMOTE_HOST_PASSWORD")
if host == "" {
host = "localhost"
}
if err := doctor.New(host, user, password).Start(); err != nil {
log.Warnf("machine %s is in dirty state: %v", host, err)
} else {
log.Infof("machine %s is in healthy state: %s", host, constants.CheckedSymbol)
}
return nil
}
| ["\"DOCKER_REMOTE_HOST_ADDR\"", "\"DOCKER_REMOTE_HOST_USER\"", "\"DOCKER_REMOTE_HOST_PASSWORD\""] | [] | ["DOCKER_REMOTE_HOST_USER", "DOCKER_REMOTE_HOST_ADDR", "DOCKER_REMOTE_HOST_PASSWORD"] | [] | ["DOCKER_REMOTE_HOST_USER", "DOCKER_REMOTE_HOST_ADDR", "DOCKER_REMOTE_HOST_PASSWORD"] | go | 3 | 0 | |
traffic_ops/traffic_ops_golang/swaggerdocs/v13/asns.go | package v13
import "github.com/apache/trafficcontrol/lib/go-tc"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
// ASNs - ASNsResponse to get the "response" top level key
// swagger:response ASNs
// in: body
type ASNs struct {
// ASN Response Body
// in: body
ASNsResponse tc.ASNsResponse `json:"response"`
}
// ASN - ASNResponse to get the "response" top level key
// swagger:response ASN
// in: body
type ASN struct {
// ASN Response Body
// in: body
ASNResponse tc.ASNResponse
}
// ASNQueryParams
//
// swagger:parameters GetASNs
type ASNQueryParams struct {
// ASNsQueryParams
// Autonomous System Numbers per APNIC for identifying a service provider
//
asn string `json:"asn"`
// Related cachegroup name
//
Cachegroup string `json:"cachegroup"`
// Related cachegroup id
//
CachegroupID string `json:"cachegroupId"`
// Unique identifier for the CDN
//
ID string `json:"id"`
//
//
Orderby string `json:"orderby"`
}
// swagger:parameters PostASN
type ASNPostParam struct {
// ASN Request Body
//
// in: body
// required: true
ASN tc.ASN
}
// swagger:parameters GetASNById DeleteASN
type ASNPathParams struct {
// Id associated to the ASN
// in: path
ID int `json:"id"`
}
// PostASN swagger:route POST /asns ASN PostASN
//
// Create a ASN
//
// An Autonomous System Number
//
// Responses:
// 200: Alerts
func PostASN(entity ASNPostParam) (ASN, Alerts) {
return ASN{}, Alerts{}
}
// GetASNs swagger:route GET /asns ASN GetASNs
//
// Retrieve a list of ASNs
//
// A list of ASNs
//
// Responses:
// 200: ASNs
// 400: Alerts
func GetASNs() (ASNs, Alerts) {
return ASNs{}, Alerts{}
}
// swagger:parameters PutASN
type ASNPutParam struct {
// ID
// in: path
ID int `json:"id"`
// ASN Request Body
//
// in: body
// required: true
ASN tc.ASN
}
// PutASN swagger:route PUT /asns/{id} ASN PutASN
//
// Update an ASN by Id
//
// Update an ASN
//
// Responses:
// 200: ASN
func PutASN(entity ASNPutParam) (ASN, Alerts) {
return ASN{}, Alerts{}
}
// GetASNById swagger:route GET /asns/{id} ASN GetASNById
//
// Retrieve a specific ASN by Id
//
// Retrieve an ASN
//
// Responses:
// 200: ASNs
// 400: Alerts
func GetASNById() (ASNs, Alerts) {
return ASNs{}, Alerts{}
}
// DeleteASN swagger:route DELETE /asns/{id} ASN DeleteASN
//
// Delete an ASN by Id
//
// Delete an ASN
//
// Responses:
// 200: Alerts
func DeleteASN(entityId int) Alerts {
return Alerts{}
}
| [] | [] | [] | [] | [] | go | null | null | null |
main.go | package main
import (
"acad-be/router"
"fmt"
"log"
"net/http"
"os"
)
func determineListenAddress() (string, error) {
port := os.Getenv("PORT")
if port == "" {
return "", fmt.Errorf("$PORT not set")
}
return ":" + port, nil
}
func main() {
r := router.Router()
// credentials := handlers.AllowCredentials()
// methods := handlers.AllowedMethods([]string{"POST", "GET"})
// origins := handlers.AllowedOrigins([]string{"*"})
addr, err := determineListenAddress()
if err != nil {
log.Fatal(err)
}
fmt.Println("Starting server on the port " + addr)
log.Fatal(http.ListenAndServe(addr, r))
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
pkg/kmm/cmd/etcdCerts.go | package cmd
import (
"fmt"
"os"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/UKHomeOffice/keto-k8/pkg/etcd"
"github.com/spf13/cobra"
)
// EtcdCertsCmd represents the command for generating etcd certs
var EtcdCertsCmd = &cobra.Command{
Use: EtcdCertsCmdName,
Short: "Will generate etcd certs",
Long: "Will generate etcd server, peer and client certs from a specified ca",
Run: func(c *cobra.Command, args []string) {
cfg, err := getConfig(c)
if err == nil {
err = etcd.GenCerts(cfg)
}
if err != nil {
log.Fatal(err)
os.Exit(-1)
}
},
}
func init() {
EtcdCertsCmd.Flags().String(
"etcd-server-cert",
getDefaultFromEnvs([]string{"KMM_ETCD_SERVER_CERT", "ETCD_CERT_FILE"}, ""),
"ETCD server cert file (defaults: KMM_ETCD_SERVER_CERT / ETCD_CERT_FILE)")
EtcdCertsCmd.Flags().String(
"etcd-server-key",
getDefaultFromEnvs([]string{"KMM_ETCD_SERVER_KEY", "ETCD_KEY_FILE"}, ""),
"ETCD server key file (defaults: KMM_ETCD_SERVER_KEY, ETCD_KEY_FILE)")
EtcdCertsCmd.Flags().String(
"etcd-peer-cert",
getDefaultFromEnvs([]string{"KMM_ETCD_PEER_CERT", "ETCD_PEER_CERT_FILE"}, ""),
"ETCD peer cert file (defaults: KMM_ETCD_PEER_CERT, ETCD_PEER_CERT_FILE)")
EtcdCertsCmd.Flags().String(
"etcd-peer-key",
getDefaultFromEnvs([]string{"KMM_ETCD_PEER_KEY", "ETCD_PEER_KEY_FILE"}, ""),
"ETCD peer key file (defaults: KMM_ETCD_PEER_KEY, ETCD_PEER_KEY_FILE)")
EtcdCertsCmd.Flags().String(
"etcd-local-hostnames",
getDefaultFromEnvs([]string{"KMM_ETCD_LOCAL_HOSTNAMES"}, ""),
"ETCD hostnames (defaults: KMM_ETCD_LOCAL_HOSTNAMES or parsed from ETCD_ADVERTISE_CLIENT_URLS)")
RootCmd.AddCommand(EtcdCertsCmd)
}
// GetEtcdHostNames gets the host names from command flags and environment variables, plus a minimal set of defaults
func GetEtcdHostNames(cmd *cobra.Command, minimalDefaultHosts []string) ([]string, error) {
var err error
	etcdClusterHostnames := strings.Split(cmd.Flag("etcd-cluster-hostnames").Value.String(), ",")
	// Mirror the local-hostnames check below: an empty flag splits to [""].
	if len(etcdClusterHostnames[0]) == 0 {
var etcdClusterUrls string
if etcdClusterUrls, err = GetUrlsFromInitialClusterString(os.Getenv("ETCD_INITIAL_CLUSTER")); err != nil {
return []string{}, err
}
if etcdClusterHostnames, err = GetHostNamesFromUrls(etcdClusterUrls, minimalDefaultHosts); err != nil {
return []string{}, err
}
}
return etcdClusterHostnames, nil
}
// getConfig validates the flags and returns a valid configuration.
func getConfig(cmd *cobra.Command) (etcd.ServerConfig, error) {
var err error
var cfg etcd.ServerConfig
minimalDefaultHosts := []string{"localhost", "127.0.0.1"}
etcdLocalHostnames := strings.Split(cmd.Flag("etcd-local-hostnames").Value.String(), ",")
if len(etcdLocalHostnames[0]) == 0 {
if etcdLocalHostnames, err = GetHostNamesFromEnvUrls("ETCD_ADVERTISE_CLIENT_URLS", minimalDefaultHosts); err != nil {
return cfg, err
}
}
var etcdClusterHostnames []string
if etcdClusterHostnames, err = GetEtcdHostNames(cmd, minimalDefaultHosts); err != nil {
return cfg, err
}
clientCfg, err := getEtcdClientConfig(cmd)
if err != nil {
return cfg, err
}
cfg = etcd.ServerConfig{
CaKeyFileName: cmd.Flag("etcd-ca-key").Value.String(),
ServerCertFileName: cmd.Flag("etcd-server-cert").Value.String(),
ServerKeyFileName: cmd.Flag("etcd-server-key").Value.String(),
PeerCertFileName: cmd.Flag("etcd-peer-cert").Value.String(),
PeerKeyFileName: cmd.Flag("etcd-peer-key").Value.String(),
LocalHostNames: etcdLocalHostnames,
ClusterHostNames: etcdClusterHostnames,
ClientConfig: clientCfg,
}
if len(cfg.CaKeyFileName) == 0 {
return cfg, fmt.Errorf("Missing ETCD CA key, required for generating certs")
}
if len(cfg.ClientConfig.CaFileName) == 0 {
return cfg, fmt.Errorf("Missing ETCD CA cert, required for generating certs")
}
if len(cfg.ServerCertFileName) == 0 {
return cfg, fmt.Errorf("Missing ETCD Server cert file name")
}
if len(cfg.PeerCertFileName) == 0 {
return cfg, fmt.Errorf("Missing ETCD Peer cert file name")
}
if len(cfg.PeerKeyFileName) == 0 {
return cfg, fmt.Errorf("Missing ETCD Peer key file name")
}
if len(cfg.LocalHostNames) == 0 {
return cfg, fmt.Errorf("Missing --etcd-local-hostnames option or ETCD_ADVERTISE_CLIENT_URLS")
}
if len(cfg.ClusterHostNames) == 0 {
return cfg, fmt.Errorf("Missing --etcd-cluster-hostnames option or ETCD_INITIAL_CLUSTER")
}
return cfg, nil
}
| ["\"ETCD_INITIAL_CLUSTER\""] | [] | ["ETCD_INITIAL_CLUSTER"] | [] | ["ETCD_INITIAL_CLUSTER"] | go | 1 | 0 | |
desktop/core/ext-py/pysaml2-4.4.0/src/saml2/mdbcache.py | #!/usr/bin/env python
import logging
from pymongo.mongo_client import MongoClient
__author__ = 'rolandh'
#import cjson
import time
from datetime import datetime
from saml2 import time_util
from saml2.cache import ToOld
from saml2.time_util import TIME_FORMAT
logger = logging.getLogger(__name__)
class Cache(object):
def __init__(self, server=None, debug=0, db=None):
if server:
connection = MongoClient(server)
else:
connection = MongoClient()
if db:
self._db = connection[db]
else:
self._db = connection.pysaml2
self._cache = self._db.collection
self.debug = debug
def delete(self, subject_id):
self._cache.remove({"subject_id": subject_id})
def get_identity(self, subject_id, entities=None,
check_not_on_or_after=True):
""" Get all the identity information that has been received and
are still valid about the subject.
:param subject_id: The identifier of the subject
:param entities: The identifiers of the entities whoes assertions are
interesting. If the list is empty all entities are interesting.
:return: A 2-tuple consisting of the identity information (a
dictionary of attributes and values) and the list of entities
whoes information has timed out.
"""
res = {}
oldees = []
if not entities:
for item in self._cache.find({"subject_id": subject_id}):
try:
info = self._get_info(item, check_not_on_or_after)
except ToOld:
oldees.append(item["entity_id"])
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
else:
for entity_id in entities:
try:
info = self.get(subject_id, entity_id,
check_not_on_or_after)
except ToOld:
oldees.append(entity_id)
continue
for key, vals in info["ava"].items():
try:
tmp = set(res[key]).union(set(vals))
res[key] = list(tmp)
except KeyError:
res[key] = vals
return res, oldees
def _get_info(self, item, check_not_on_or_after=True):
""" Get session information about a subject gotten from a
specified IdP/AA.
:param item: Information stored
:return: The session information as a dictionary
"""
timestamp = item["timestamp"]
if check_not_on_or_after and not time_util.not_on_or_after(timestamp):
raise ToOld()
try:
return item["info"]
except KeyError:
return None
def get(self, subject_id, entity_id, check_not_on_or_after=True):
res = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
if not res:
return {}
else:
return self._get_info(res, check_not_on_or_after)
def set(self, subject_id, entity_id, info, timestamp=0):
""" Stores session information in the cache. Assumes that the subject_id
is unique within the context of the Service Provider.
:param subject_id: The subject identifier
:param entity_id: The identifier of the entity_id/receiver of an
assertion
:param info: The session info, the assertion is part of this
:param timestamp: A time after which the assertion is not valid.
"""
if isinstance(timestamp, datetime) or isinstance(timestamp,
time.struct_time):
timestamp = time.strftime(TIME_FORMAT, timestamp)
doc = {"subject_id": subject_id,
"entity_id": entity_id,
"info": info,
"timestamp": timestamp}
_ = self._cache.insert(doc)
def reset(self, subject_id, entity_id):
""" Scrap the assertions received from a IdP or an AA about a special
subject.
:param subject_id: The subjects identifier
:param entity_id: The identifier of the entity_id of the assertion
:return:
"""
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"info": {}, "timestamp": 0}})
def entities(self, subject_id):
""" Returns all the entities of assertions for a subject, disregarding
whether the assertion still is valid or not.
:param subject_id: The identifier of the subject
:return: A possibly empty list of entity identifiers
"""
try:
return [i["entity_id"] for i in self._cache.find({"subject_id":
subject_id})]
except ValueError:
return []
def receivers(self, subject_id):
""" Another name for entities() just to make it more logic in the IdP
scenario """
return self.entities(subject_id)
def active(self, subject_id, entity_id):
""" Returns the status of assertions from a specific entity_id.
:param subject_id: The ID of the subject
:param entity_id: The entity ID of the entity_id of the assertion
:return: True or False depending on if the assertion is still
valid or not.
"""
item = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
try:
return time_util.not_on_or_after(item["timestamp"])
except ToOld:
return False
def subjects(self):
""" Return identifiers for all the subjects that are in the cache.
:return: list of subject identifiers
"""
subj = [i["subject_id"] for i in self._cache.find()]
return list(set(subj))
def update(self, subject_id, entity_id, ava):
""" """
item = self._cache.find_one({"subject_id": subject_id,
"entity_id": entity_id})
info = item["info"]
info["ava"].update(ava)
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"info": info}})
def valid_to(self, subject_id, entity_id, newtime):
""" """
self._cache.update({"subject_id": subject_id, "entity_id": entity_id},
{"$set": {"timestamp": newtime}})
def clear(self):
        self._cache.remove()
| [] | [] | [] | [] | [] | python | null | null | null |
code/cnn.py | '''
TU/e BME Project Imaging 2021
Convolutional neural network for PCAM
Author: Suzanne Wetstein
'''
# disable overly verbose tensorflow logging
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import tensorflow as tf
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.layers import Conv2D, MaxPool2D
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
# unused for now, to be used for ROC analysis
from sklearn.metrics import roc_curve, auc
# the size of the images in the PCAM dataset
IMAGE_SIZE = 96
def get_pcam_generators(base_dir, train_batch_size=32, val_batch_size=32):
# dataset parameters
train_path = os.path.join(base_dir, 'train+val', 'train')
valid_path = os.path.join(base_dir, 'train+val', 'valid')
RESCALING_FACTOR = 1./255
# instantiate data generators
datagen = ImageDataGenerator(rescale=RESCALING_FACTOR)
train_gen = datagen.flow_from_directory(train_path,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=train_batch_size,
class_mode='binary')
val_gen = datagen.flow_from_directory(valid_path,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=val_batch_size,
class_mode='binary')
return train_gen, val_gen
def get_model(kernel_size=(3,3), pool_size=(4,4), first_filters=32, second_filters=64):
# build the model
model = Sequential()
model.add(Conv2D(first_filters, kernel_size, activation = 'relu', padding = 'same', input_shape = (IMAGE_SIZE, IMAGE_SIZE, 3)))
model.add(MaxPool2D(pool_size = pool_size))
model.add(Conv2D(second_filters, kernel_size, activation = 'relu', padding = 'same'))
model.add(MaxPool2D(pool_size = pool_size))
model.add(Flatten())
model.add(Dense(64, activation = 'relu'))
model.add(Dense(1, activation = 'sigmoid'))
# compile the model
model.compile(SGD(lr=0.01, momentum=0.95), loss = 'binary_crossentropy', metrics=['accuracy'])
return model
# get the model
model = get_model()
# get the data generators
train_gen, val_gen = get_pcam_generators('/change/me/to/dataset/path')
# save the model and weights
model_name = 'my_first_cnn_model'
model_filepath = model_name + '.json'
weights_filepath = model_name + '_weights.hdf5'
model_json = model.to_json() # serialize model to JSON
with open(model_filepath, 'w') as json_file:
json_file.write(model_json)
# define the model checkpoint and Tensorboard callbacks
checkpoint = ModelCheckpoint(weights_filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
tensorboard = TensorBoard(os.path.join('logs', model_name))
callbacks_list = [checkpoint, tensorboard]
# train the model
train_steps = train_gen.n//train_gen.batch_size
val_steps = val_gen.n//val_gen.batch_size
history = model.fit_generator(train_gen, steps_per_epoch=train_steps,
validation_data=val_gen,
validation_steps=val_steps,
epochs=3,
callbacks=callbacks_list)
# ROC analysis
# TODO Perform ROC analysis on the validation set
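# A minimal sketch of what this could look like (an assumption, not part of
# the original assignment): the validation generator would need shuffle=False
# so that val_gen.classes lines up with the prediction order.
#
#     val_pred = model.predict_generator(val_gen, steps=val_steps)
#     fpr, tpr, _ = roc_curve(val_gen.classes[:len(val_pred)], val_pred)
#     print('validation AUC:', auc(fpr, tpr))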
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
tests/test_pmr.py | import json
import pytest
from app import app
@pytest.fixture
def client():
app.config['TESTING'] = True
return app.test_client()
def test_pmr_latest_exposure_no_post(client):
r = client.get('/pmr_latest_exposure')
assert r.status_code == 405
def test_pmr_latest_exposure_empty_post(client):
r = client.post("/pmr_latest_exposure", json={})
assert r.status_code == 400
def test_pmr_latest_exposure_workspace_with_latest_exposure(client):
r = client.post("/pmr_latest_exposure", json={"workspace_url": "https://models.physiomeproject.org/workspace/486"})
assert r.status_code == 200
data = r.get_json()
assert data["url"] == "https://models.physiomeproject.org/e/611"
def test_pmr_latest_exposure_workspace_without_latest_exposure(client):
r = client.post("/pmr_latest_exposure", json={"workspace_url": "https://models.physiomeproject.org/workspace/698"})
assert r.status_code == 200
data = r.get_json()
assert data["url"] == ""
def test_pmr_latest_exposure_workspace_with_invalid_workspace_url(client):
r = client.post("/pmr_latest_exposure", json={"workspace_url": "https://some.url.com/"})
print(r.get_json())
assert r.status_code == 400
| [] | [] | [] | [] | [] | python | null | null | null |
notification_ms/notification_app.go | package main
import (
"github.com/ant0ine/go-json-rest/rest"
"log"
"net/http"
"app/notification_resource"
"os"
)
func main() {
api := rest.NewApi()
api.Use(rest.DefaultDevStack...)
router, err := rest.MakeRouter(
rest.Post("/notification/messages/:file_id", notification_resource.PostSendNotificationResource),
rest.Post("/notification/confirmation",notification_resource.PostSendConfirmationResource),
rest.Post("/notification/recover",notification_resource.PostSendRecoverResource),
)
if err != nil {
log.Fatal(err)
}
api.SetApp(router)
log.Fatal(http.ListenAndServe(":"+os.Getenv("HOST_PORT"), api.MakeHandler()))
}
| ["\"HOST_PORT\""] | [] | ["HOST_PORT"] | [] | ["HOST_PORT"] | go | 1 | 0 | |
api/internal/gen_jwt_token/gen_jwt_token.go | package jwttoken
import (
"github.com/golang-jwt/jwt"
"os"
"time"
)
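// GenerateTokenPair builds a 15-minute HS256 access token and a 24-hour
// refresh token for the given user, signing both with the secret taken from
// the JWT_SECRET environment variable.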
func GenerateTokenPair(user_id uint, username string) (map[string]string, error) {
token := jwt.New(jwt.SigningMethodHS256)
claims := token.Claims.(jwt.MapClaims)
claims["exp"] = time.Now().Add(time.Minute * 15).Unix()
claims["sub"] = user_id
claims["username"] = username
secret := os.Getenv("JWT_SECRET")
// generate encoded token and send it as response.
t, err := token.SignedString([]byte(secret))
if err != nil {
return nil, err
}
refreshToken := jwt.New(jwt.SigningMethodHS256)
rtClaims := refreshToken.Claims.(jwt.MapClaims)
rtClaims["sub"] = user_id
rtClaims["username"] = username
rtClaims["exp"] = time.Now().Add(time.Hour * 24).Unix()
rt, err := refreshToken.SignedString([]byte(secret))
if err != nil {
return nil, err
}
return map[string]string{
"access_token": t,
"refresh_token": rt,
}, nil
}
| ["\"JWT_SECRET\""] | [] | ["JWT_SECRET"] | [] | ["JWT_SECRET"] | go | 1 | 0 | |
hello/core.py | import os
from flask import Flask
app = Flask("hello")
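# Fall back to an in-memory SQLite database when DB_URI is unset.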
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get("DB_URI") or 'sqlite://'
app.config["DEBUG"] = True
app.config["SECRET_KEY"] = 'development key'
| [] | [] | ["DB_URI"] | [] | ["DB_URI"] | python | 1 | 0 | |
samples/sqs/consumer/main.go | package main
import (
"fmt"
"os"
"os/signal"
"time"
"github.com/aws/aws-sdk-go-v2/aws/external"
"github.com/aws/aws-sdk-go-v2/service/sqs"
"github.com/awslabs/k8s-cloudwatch-adapter/pkg/aws"
)
func main() {
// Using the SDK's default configuration, loading additional config
// and credentials values from the environment variables, shared
// credentials, and shared configuration files
cfg, err := external.LoadDefaultAWSConfig()
if err != nil {
panic("unable to load SDK config, " + err.Error())
}
if cfg.Region == "" {
cfg.Region = aws.GetLocalRegion()
}
fmt.Println("using AWS Region:", cfg.Region)
svc := sqs.New(cfg)
// Initialize and create a SQS Queue named helloworld if it doesn't exist
queueName := os.Getenv("QUEUE")
if queueName == "" {
queueName = "helloworld"
}
fmt.Println("listening to queue:", queueName)
q, err := svc.GetQueueUrlRequest(&sqs.GetQueueUrlInput{
QueueName: &queueName,
}).Send()
	if err != nil {
		// q is nil on failure, so exit instead of panicking below.
		fmt.Println("cannot get queue:", err)
		os.Exit(1)
	}
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, os.Interrupt, os.Kill)
go func() {
<-signalChan
os.Exit(1)
}()
timeout := int64(20)
for {
msg, err := svc.ReceiveMessageRequest(&sqs.ReceiveMessageInput{
QueueUrl: q.QueueUrl,
WaitTimeSeconds: &timeout,
}).Send()
	if err != nil {
		fmt.Println("error receiving message from queue:", err)
	} else if len(msg.Messages) > 0 {
		fmt.Println("message:", msg)
		// Only delete once a message was actually received; long polling
		// can return an empty batch, so guard before indexing Messages[0].
		_, err = svc.DeleteMessageRequest(&sqs.DeleteMessageInput{
			QueueUrl:      q.QueueUrl,
			ReceiptHandle: msg.Messages[0].ReceiptHandle,
		}).Send()
		if err != nil {
			fmt.Println("error deleting message from queue:", err)
		}
	}
// Implement some delay here to simulate processing time
time.Sleep(time.Duration(1000) * time.Millisecond)
}
}
| ["\"QUEUE\""] | [] | ["QUEUE"] | [] | ["QUEUE"] | go | 1 | 0 | |
gateway/main.go | package main
import (
context "context"
"log"
"net/http"
"os"
"github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/infiniteprimes/grpc-gateway-template/example"
grpc "google.golang.org/grpc"
)
func run() error {
ctx := context.Background()
ctx, cancel := context.WithCancel(ctx)
defer cancel()
gwmux := runtime.NewServeMux()
opts := []grpc.DialOption{grpc.WithInsecure()}
if err := example.RegisterYourServiceHandlerFromEndpoint(ctx, gwmux, os.Getenv("GATEWAY_BINDING_ADDR"), opts); err != nil {
return err
}
log.Println("http server started, listen on 8080")
return http.ListenAndServe(":8080", gwmux)
}
func main() {
if err := run(); err != nil {
log.Fatal(err)
}
}
| ["\"GATEWAY_BINDING_ADDR\""] | [] | ["GATEWAY_BINDING_ADDR"] | [] | ["GATEWAY_BINDING_ADDR"] | go | 1 | 0 | |
src/app/utils/config.py | import os
DB_USER = os.getenv('DB_USER', '')
DB_PASSWORD = os.getenv('DB_PASSWORD', '')
DB_HOST = os.getenv('DB_HOST', '')
DB_NAME = os.getenv('DB_NAME', '')
DB_USER_COLLECTION_NAME = os.getenv('DB_USER_COLLECTION_NAME', 'users')
DB_CATEGORY_COLLECTION_NAME = os.getenv('DB_CATEGORY_COLLECTION_NAME', 'categories')
DB_EXPENSES_COLLECTION_NAME = os.getenv('DB_EXPENSES_COLLECTION_NAME', 'expenses')
WEBHOOK_HOST = os.getenv('WEBHOOK_HOST', '')
WEBHOOK_PATH = os.getenv('WEBHOOK_PATH', '')
HOST = os.getenv('HOST', '0.0.0.0')
PORT = os.getenv('PORT', '3001')
DEVELOP = os.getenv('DEVELOP', 'True')
| []
| []
| [
"DB_EXPENSES_COLLECTION_NAME",
"HOST",
"PORT",
"DB_PASSWORD",
"WEBHOOK_HOST",
"DB_HOST",
"WEBHOOK_PATH",
"DB_NAME",
"DB_USER_COLLECTION_NAME",
"DEVELOP",
"DB_CATEGORY_COLLECTION_NAME",
"DB_USER"
]
| [] | ["DB_EXPENSES_COLLECTION_NAME", "HOST", "PORT", "DB_PASSWORD", "WEBHOOK_HOST", "DB_HOST", "WEBHOOK_PATH", "DB_NAME", "DB_USER_COLLECTION_NAME", "DEVELOP", "DB_CATEGORY_COLLECTION_NAME", "DB_USER"] | python | 12 | 0 | |
cmd/mattermost/commands/server.go | // Copyright (c) 2016-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package commands
import (
"fmt"
"net"
"net/url"
"os"
"os/signal"
"syscall"
"time"
"github.com/mattermost/mattermost-server/api4"
"github.com/mattermost/mattermost-server/app"
"github.com/mattermost/mattermost-server/manualtesting"
"github.com/mattermost/mattermost-server/mlog"
"github.com/mattermost/mattermost-server/model"
"github.com/mattermost/mattermost-server/utils"
"github.com/mattermost/mattermost-server/web"
"github.com/mattermost/mattermost-server/wsapi"
"github.com/spf13/cobra"
)
const (
SESSIONS_CLEANUP_BATCH_SIZE = 1000
)
var MaxNotificationsPerChannelDefault int64 = 1000000
var serverCmd = &cobra.Command{
Use: "server",
Short: "Run the Mattermost server",
RunE: serverCmdF,
SilenceUsage: true,
}
func init() {
RootCmd.AddCommand(serverCmd)
RootCmd.RunE = serverCmdF
}
func serverCmdF(command *cobra.Command, args []string) error {
config, err := command.Flags().GetString("config")
if err != nil {
return err
}
disableConfigWatch, _ := command.Flags().GetBool("disableconfigwatch")
usedPlatform, _ := command.Flags().GetBool("platform")
interruptChan := make(chan os.Signal, 1)
return runServer(config, disableConfigWatch, usedPlatform, interruptChan)
}
func runServer(configFileLocation string, disableConfigWatch bool, usedPlatform bool, interruptChan chan os.Signal) error {
options := []app.Option{app.ConfigFile(configFileLocation)}
if disableConfigWatch {
options = append(options, app.DisableConfigWatch)
}
a, err := app.New(options...)
if err != nil {
mlog.Critical(err.Error())
return err
}
defer a.Shutdown()
utils.TestConnection(a.Config())
pwd, _ := os.Getwd()
if usedPlatform {
mlog.Error("The platform binary has been deprecated, please switch to using the mattermost binary.")
}
if _, err := url.ParseRequestURI(*a.Config().ServiceSettings.SiteURL); err != nil {
mlog.Error("SiteURL must be set. Some features will operate incorrectly if the SiteURL is not set. See documentation for details: http://about.mattermost.com/default-site-url")
}
mlog.Info(fmt.Sprintf("Current version is %v (%v/%v/%v/%v)", model.CurrentVersion, model.BuildNumber, model.BuildDate, model.BuildHash, model.BuildHashEnterprise))
mlog.Info(fmt.Sprintf("Enterprise Enabled: %v", model.BuildEnterpriseReady))
mlog.Info(fmt.Sprintf("Current working directory is %v", pwd))
mlog.Info(fmt.Sprintf("Loaded config file from %v", utils.FindConfigFile(configFileLocation)))
backend, appErr := a.FileBackend()
if appErr == nil {
appErr = backend.TestConnection()
}
if appErr != nil {
mlog.Error("Problem with file storage settings: " + appErr.Error())
}
if model.BuildEnterpriseReady == "true" {
a.LoadLicense()
}
a.DoAdvancedPermissionsMigration()
a.DoEmojisPermissionsMigration()
a.InitPlugins(*a.Config().PluginSettings.Directory, *a.Config().PluginSettings.ClientDirectory)
a.AddConfigListener(func(prevCfg, cfg *model.Config) {
if *cfg.PluginSettings.Enable {
a.InitPlugins(*cfg.PluginSettings.Directory, *a.Config().PluginSettings.ClientDirectory)
} else {
a.ShutDownPlugins()
}
})
serverErr := a.StartServer()
if serverErr != nil {
mlog.Critical(serverErr.Error())
return serverErr
}
api := api4.Init(a, a.Srv.Router)
wsapi.Init(a, a.Srv.WebSocketRouter)
web.NewWeb(a, a.Srv.Router)
license := a.License()
if license == nil && len(a.Config().SqlSettings.DataSourceReplicas) > 1 {
mlog.Warn("More than 1 read replica functionality disabled by current license. Please contact your system administrator about upgrading your enterprise license.")
a.UpdateConfig(func(cfg *model.Config) {
cfg.SqlSettings.DataSourceReplicas = cfg.SqlSettings.DataSourceReplicas[:1]
})
}
if license == nil {
a.UpdateConfig(func(cfg *model.Config) {
cfg.TeamSettings.MaxNotificationsPerChannel = &MaxNotificationsPerChannelDefault
})
}
a.ReloadConfig()
// Enable developer settings if this is a "dev" build
if model.BuildNumber == "dev" {
a.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.EnableDeveloper = true })
}
resetStatuses(a)
// If we allow testing then listen for manual testing URL hits
if a.Config().ServiceSettings.EnableTesting {
manualtesting.Init(api)
}
a.Go(func() {
runSecurityJob(a)
})
a.Go(func() {
runDiagnosticsJob(a)
})
a.Go(func() {
runSessionCleanupJob(a)
})
a.Go(func() {
runTokenCleanupJob(a)
})
a.Go(func() {
runCommandWebhookCleanupJob(a)
})
if complianceI := a.Compliance; complianceI != nil {
complianceI.StartComplianceDailyJob()
}
if a.Cluster != nil {
a.RegisterAllClusterMessageHandlers()
a.Cluster.StartInterNodeCommunication()
}
if a.Metrics != nil {
a.Metrics.StartServer()
}
if a.Elasticsearch != nil {
a.StartElasticsearch()
}
if *a.Config().JobSettings.RunJobs {
a.Jobs.StartWorkers()
}
if *a.Config().JobSettings.RunScheduler {
a.Jobs.StartSchedulers()
}
notifyReady()
// wait for kill signal before attempting to gracefully shutdown
// the running service
signal.Notify(interruptChan, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
<-interruptChan
if a.Cluster != nil {
a.Cluster.StopInterNodeCommunication()
}
if a.Metrics != nil {
a.Metrics.StopServer()
}
a.Jobs.StopSchedulers()
a.Jobs.StopWorkers()
return nil
}
func runSecurityJob(a *app.App) {
doSecurity(a)
model.CreateRecurringTask("Security", func() {
doSecurity(a)
}, time.Hour*4)
}
func runDiagnosticsJob(a *app.App) {
doDiagnostics(a)
model.CreateRecurringTask("Diagnostics", func() {
doDiagnostics(a)
}, time.Hour*24)
}
func runTokenCleanupJob(a *app.App) {
doTokenCleanup(a)
model.CreateRecurringTask("Token Cleanup", func() {
doTokenCleanup(a)
}, time.Hour*1)
}
func runCommandWebhookCleanupJob(a *app.App) {
doCommandWebhookCleanup(a)
model.CreateRecurringTask("Command Hook Cleanup", func() {
doCommandWebhookCleanup(a)
}, time.Hour*1)
}
func runSessionCleanupJob(a *app.App) {
doSessionCleanup(a)
model.CreateRecurringTask("Session Cleanup", func() {
doSessionCleanup(a)
}, time.Hour*24)
}
func resetStatuses(a *app.App) {
if result := <-a.Srv.Store.Status().ResetAll(); result.Err != nil {
mlog.Error(fmt.Sprint("mattermost.reset_status.error FIXME: NOT FOUND IN TRANSLATIONS FILE", result.Err.Error()))
}
}
func doSecurity(a *app.App) {
a.DoSecurityUpdateCheck()
}
func doDiagnostics(a *app.App) {
if *a.Config().LogSettings.EnableDiagnostics {
a.SendDailyDiagnostics()
}
}
func notifyReady() {
// If the environment vars provide a systemd notification socket,
// notify systemd that the server is ready.
systemdSocket := os.Getenv("NOTIFY_SOCKET")
if systemdSocket != "" {
mlog.Info("Sending systemd READY notification.")
err := sendSystemdReadyNotification(systemdSocket)
if err != nil {
mlog.Error(err.Error())
}
}
}
func sendSystemdReadyNotification(socketPath string) error {
msg := "READY=1"
addr := &net.UnixAddr{
Name: socketPath,
Net: "unixgram",
}
conn, err := net.DialUnix(addr.Net, nil, addr)
if err != nil {
return err
}
defer conn.Close()
_, err = conn.Write([]byte(msg))
return err
}
func doTokenCleanup(a *app.App) {
a.Srv.Store.Token().Cleanup()
}
func doCommandWebhookCleanup(a *app.App) {
a.Srv.Store.CommandWebhook().Cleanup()
}
func doSessionCleanup(a *app.App) {
a.Srv.Store.Session().Cleanup(model.GetMillis(), SESSIONS_CLEANUP_BATCH_SIZE)
}
| ["\"NOTIFY_SOCKET\""] | [] | ["NOTIFY_SOCKET"] | [] | ["NOTIFY_SOCKET"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lcy_myblog.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
examples/contacts/create-corporate/main.go | package main
import (
"github.com/cloudmode/go-primetrust"
"github.com/cloudmode/go-primetrust/models"
"log"
"os"
)
func main() {
primetrust.Init(true, os.Getenv("PRIMETRUST_LOGIN"), os.Getenv("PRIMETRUST_PASSWORD"))
accountId := os.Getenv("PRIMETRUST_ACCOUNT_ID")
contact := models.NewCompanyContact(accountId)
contact.Data.Attributes.RegionOfFormation = "TX"
contact.Data.Attributes.Email = "[email protected]"
contact.Data.Attributes.Name = "ACME LLC"
contact.Data.Attributes.Label = "Label"
contact.Data.Attributes.TaxIDNumber = "888777666"
contact.Data.Attributes.TaxCountry = "US"
contact.Data.Attributes.TaxState = "TX"
contact.Data.Attributes.PrimaryAddress = models.Address{
Type: models.AddressTypeOffice,
City: "Houston",
Country: "US",
PostalCode: "60000",
Region: "TX",
Street1: "1 Main str.",
}
contact.Data.Attributes.PrimaryPhoneNumber = models.PhoneNumber{
Number: "+1 800 555 11 22",
}
contact.Data.Attributes.RelatedContacts = []models.RelatedContactData{
models.RelatedContactData{
Type: models.ContactTypeNaturalPerson,
DateOfBirth: "1981-10-31",
Email: "[email protected]",
Sex: "male",
Name: "Sample Person",
TaxCountry: "US",
Label: "CEO",
TaxIDNumber: "123123123",
PrimaryAddress: models.Address{
Type: models.AddressTypeHome,
City: "Beverly Hills",
Country: "US",
PostalCode: "90210",
Region: "CA",
Street1: "1 Sunset Blvd.",
},
PrimaryPhoneNumber: models.PhoneNumber{
Number: "+15555555577",
},
},
}
if newContact, err := primetrust.CreateNewContact(contact); err != nil {
log.Println("Error creating new contact:", err)
log.Printf("%+v", contact)
} else {
log.Println("Contact created OK")
log.Printf("%+v", newContact)
}
log.Println("Done")
}
| ["\"PRIMETRUST_LOGIN\"", "\"PRIMETRUST_PASSWORD\"", "\"PRIMETRUST_ACCOUNT_ID\""] | [] | ["PRIMETRUST_LOGIN", "PRIMETRUST_PASSWORD", "PRIMETRUST_ACCOUNT_ID"] | [] | ["PRIMETRUST_LOGIN", "PRIMETRUST_PASSWORD", "PRIMETRUST_ACCOUNT_ID"] | go | 3 | 0 | |
train.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from six.moves import xrange
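# Must be set before tensorflow is imported; "1" hides INFO-level C++ logs.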
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import numpy as np
import tensorflow as tf
import utils
from model import Config, BiRNN
tf.flags.DEFINE_string("source_train_path", "",
"Path to the file containing the source sentences to "
"train the model.")
tf.flags.DEFINE_string("target_train_path", "",
"Path to the file containing the target sentences to "
"train the model.")
tf.flags.DEFINE_string("source_valid_path", "",
"Path to the file containing the source sentences to "
"evaluate the model.")
tf.flags.DEFINE_string("target_valid_path", "",
"Path to the file containing the target sentences to "
"evaluate the model.")
tf.flags.DEFINE_string("checkpoint_dir", "./tflogs",
"Directory to save checkpoints and summaries of the model.")
tf.flags.DEFINE_integer("source_vocab_size", 100000,
"Number of the most frequent words to keep in the source "
"vocabulary.")
tf.flags.DEFINE_integer("target_vocab_size", 100000,
"Number of the most frequent words to keep in target "
"vocabulary.")
tf.flags.DEFINE_float("learning_rate", 2e-4,
"Learning rate.")
tf.flags.DEFINE_float("max_gradient_norm", 5.0,
"Clip gradient to this norm.")
tf.flags.DEFINE_float("decision_threshold", 0.99,
"Decision threshold to predict a positive label.")
tf.flags.DEFINE_integer("embedding_size", 300,
"Size of each word embedding.")
tf.flags.DEFINE_integer("state_size", 300,
"Size of the recurrent state in the BiRNN encoder.")
tf.flags.DEFINE_integer("hidden_size", 128,
"Size of the hidden layer in the feed-forward neural "
"network.")
tf.flags.DEFINE_integer("num_layers", 1,
"Number of layers in the BiRNN encoder.")
tf.flags.DEFINE_string("source_embeddings_path", None,
"Pretrained embeddings to initialize the source embeddings "
"matrix.")
tf.flags.DEFINE_string("target_embeddings_path", None,
"Pretrained embeddings to initialize the target embeddings "
"matrix.")
tf.flags.DEFINE_boolean("fix_pretrained", False,
"If true fix pretrained embeddings.")
tf.flags.DEFINE_boolean("use_lstm", False,
"If true use LSTM cells. Otherwise use GRU cells.")
tf.flags.DEFINE_boolean("use_mean_pooling", False,
"If true use mean pooling for final sentence representation.")
tf.flags.DEFINE_boolean("use_max_pooling", False,
"If true use max pooling for final sentence representation.")
tf.flags.DEFINE_integer("batch_size", 128,
"Batch size to use during training.")
tf.flags.DEFINE_integer("num_epochs", 15,
"Number of epochs to train the model.")
tf.flags.DEFINE_integer("num_negative", 5,
"Number of negative examples to sample per pair of "
"parallel sentences in training dataset.")
tf.flags.DEFINE_float("keep_prob_input", 0.8,
"Keep probability for dropout applied at the embedding layer.")
tf.flags.DEFINE_float("keep_prob_output", 0.7,
"Keep probability for dropout applied at the prediction layer.")
tf.flags.DEFINE_integer("steps_per_checkpoint", 200,
"Number of steps to save a model checkpoint.")
FLAGS = tf.flags.FLAGS
def eval_epoch(sess, model, data_iterator, summary_writer):
"""Evaluate model for one epoch."""
sess.run(tf.local_variables_initializer())
num_iter = int(np.ceil(data_iterator.size / FLAGS.batch_size))
epoch_loss = 0
for step in xrange(num_iter):
source, target, label = data_iterator.next_batch(FLAGS.batch_size)
source_len = utils.sequence_length(source)
target_len = utils.sequence_length(target)
feed_dict = {model.x_source: source,
model.x_target: target,
model.labels: label,
model.source_seq_length: source_len,
model.target_seq_length: target_len,
model.decision_threshold: FLAGS.decision_threshold}
loss_value, epoch_accuracy,\
epoch_precision, epoch_recall = sess.run([model.mean_loss,
model.accuracy[1],
model.precision[1],
model.recall[1]],
feed_dict=feed_dict)
epoch_loss += loss_value
if step % FLAGS.steps_per_checkpoint == 0:
summary = sess.run(model.summaries, feed_dict=feed_dict)
summary_writer.add_summary(summary, global_step=data_iterator.global_step)
    epoch_loss /= num_iter
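    # utils.f1_score is assumed to compute the harmonic mean of precision and
    # recall, i.e. 2 * P * R / (P + R).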
epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)
print(" Testing: Loss = {:.6f}, Accuracy = {:.4f}, "
"Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}"
.format(epoch_loss, epoch_accuracy,
epoch_precision, epoch_recall, epoch_f1))
def main(_):
assert FLAGS.source_train_path, ("--source_train_path is required.")
assert FLAGS.target_train_path, ("--target_train_path is required.")
assert FLAGS.source_valid_path, ("--source_valid_path is required.")
assert FLAGS.target_valid_path, ("--target_valid_path is required.")
# Create vocabularies.
source_vocab_path = os.path.join(os.path.dirname(FLAGS.source_train_path),
"vocabulary.source")
    target_vocab_path = os.path.join(os.path.dirname(FLAGS.target_train_path),
                                     "vocabulary.target")
utils.create_vocabulary(source_vocab_path, FLAGS.source_train_path, FLAGS.source_vocab_size)
utils.create_vocabulary(target_vocab_path, FLAGS.target_train_path, FLAGS.target_vocab_size)
# Read vocabularies.
source_vocab, rev_source_vocab = utils.initialize_vocabulary(source_vocab_path)
#print("source_vocab", source_vocab)
#print("rev_source_vocab", rev_source_vocab)
target_vocab, rev_target_vocab = utils.initialize_vocabulary(target_vocab_path)
#print("target_vocab", target_vocab)
# Read parallel sentences.
parallel_data = utils.read_data(FLAGS.source_train_path, FLAGS.target_train_path,
source_vocab, target_vocab)
print("parallel_data", type(parallel_data), len(parallel_data))
print("parallel_data[0]", type(parallel_data[0]), len(parallel_data[0]), parallel_data[0])
# Read validation data set.
if FLAGS.source_valid_path and FLAGS.target_valid_path:
valid_data = utils.read_data(FLAGS.source_valid_path, FLAGS.target_valid_path,
source_vocab, target_vocab)
# Initialize BiRNN.
config = Config(len(source_vocab),
len(target_vocab),
FLAGS.embedding_size,
FLAGS.state_size,
FLAGS.hidden_size,
FLAGS.num_layers,
FLAGS.learning_rate,
FLAGS.max_gradient_norm,
FLAGS.use_lstm,
FLAGS.use_mean_pooling,
FLAGS.use_max_pooling,
FLAGS.source_embeddings_path,
FLAGS.target_embeddings_path,
FLAGS.fix_pretrained)
model = BiRNN(config)
# Build graph.
model.build_graph()
# Train model.
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
train_iterator = utils.TrainingIterator(parallel_data, FLAGS.num_negative)
train_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, "train"), sess.graph)
if FLAGS.source_valid_path and FLAGS.target_valid_path:
valid_iterator = utils.EvalIterator(valid_data)
valid_summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.checkpoint_dir, "valid"), sess.graph)
epoch_loss = 0
epoch_completed = 0
batch_completed = 0
num_iter = int(np.ceil(train_iterator.size / FLAGS.batch_size * FLAGS.num_epochs))
start_time = time.time()
print("Training model on {} sentence pairs per epoch:".
format(train_iterator.size, valid_iterator.size))
for step in xrange(num_iter):
source, target, label = train_iterator.next_batch(FLAGS.batch_size)
source_len = utils.sequence_length(source)
target_len = utils.sequence_length(target)
feed_dict = {model.x_source: source,
model.x_target: target,
model.labels: label,
model.source_seq_length: source_len,
model.target_seq_length: target_len,
model.input_dropout: FLAGS.keep_prob_input,
model.output_dropout: FLAGS.keep_prob_output,
model.decision_threshold: FLAGS.decision_threshold}
_, loss_value, epoch_accuracy,\
epoch_precision, epoch_recall = sess.run([model.train_op,
model.mean_loss,
model.accuracy[1],
model.precision[1],
model.recall[1]],
feed_dict=feed_dict)
epoch_loss += loss_value
batch_completed += 1
# Write the model's training summaries.
if step % FLAGS.steps_per_checkpoint == 0:
summary = sess.run(model.summaries, feed_dict=feed_dict)
train_summary_writer.add_summary(summary, global_step=step)
# End of current epoch.
if train_iterator.epoch_completed > epoch_completed:
epoch_time = time.time() - start_time
epoch_loss /= batch_completed
epoch_f1 = utils.f1_score(epoch_precision, epoch_recall)
epoch_completed += 1
print("Epoch {} in {:.0f} sec\n"
" Training: Loss = {:.6f}, Accuracy = {:.4f}, "
"Precision = {:.4f}, Recall = {:.4f}, F1 = {:.4f}"
.format(epoch_completed, epoch_time,
epoch_loss, epoch_accuracy,
epoch_precision, epoch_recall, epoch_f1))
# Save a model checkpoint.
checkpoint_path = os.path.join(FLAGS.checkpoint_dir, "model.ckpt")
model.saver.save(sess, checkpoint_path, global_step=step)
# Evaluate model on the validation set.
if FLAGS.source_valid_path and FLAGS.target_valid_path:
eval_epoch(sess, model, valid_iterator, valid_summary_writer)
# Initialize local variables for new epoch.
batch_completed = 0
epoch_loss = 0
sess.run(tf.local_variables_initializer())
start_time = time.time()
print("Training done with {} steps.".format(num_iter))
train_summary_writer.close()
valid_summary_writer.close()
if __name__ == "__main__":
tf.app.run()

Courses/Udacity/CS101/Lesson_2_Problem_Set_(Optional_2)/03-10_Row_Abacus/supplied/studentMain.py
#########################################################################
# 10-row School abacus
# by
# Michael H
#########################################################################
# Description partially extracted from Wikipedia
#
# Around the world, abaci have been used in pre-schools and elementary
# schools as an aid in teaching the numeral system and arithmetic.
#
# In Western countries, a bead frame similar to the Russian abacus but
# with straight wires and a vertical frame has been common (see image).
#
# |00000***** | row factor 1000000000
# |00000***** | row factor 100000000
# |00000***** | row factor 10000000
# |00000***** | row factor 1000000
# |00000***** | row factor 100000
# |00000***** | row factor 10000
# |00000***** | row factor 1000
# |00000**** *| row factor 100 * 1
# |00000*** **| row factor 10 * 2
# |00000** ***| row factor 1 * 3
# -----------
# Sum 123
#
# Each row represents a different row factor, starting with x1 at the
# bottom, ascending up to x1000000000 at the top row.
######################################################################
# TASK:
# Define a procedure print_abacus(integer) that takes a positive integer
# and prints a visual representation (image) of an abacus setup for a
# given positive integer value.
#
# Ranking
# 1 STAR: solved the problem!
# 2 STARS: 6 < lines <= 9
# 3 STARS: 3 < lines <= 6
# 4 STARS: 0 < lines <= 3
def print_abacus(value):
    # One possible solution (a minimal sketch, not the only way): print ten
    # rows, one per decimal digit of the zero-padded value, splitting the
    # bead string "00000*****" so the right-hand group holds the digit.
    for digit in str(value).zfill(10):
        beads = 10 - int(digit)
        print "|" + "00000*****"[:beads] + " " + "00000*****"[beads:] + "|"
### TEST CASES
print "Abacus showing 0:"
print_abacus(0)
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
print "Abacus showing 12345678:"
print_abacus(12345678)
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000**** *|
#>>>|00000*** **|
#>>>|00000** ***|
#>>>|00000* ****|
#>>>|00000 *****|
#>>>|0000 0*****|
#>>>|000 00*****|
#>>>|00 000*****|
print "Abacus showing 1337:"
print_abacus(1337)
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000***** |
#>>>|00000**** *|
#>>>|00000** ***|
#>>>|00000** ***|
#>>>|000 00*****|

tests/fruit/manage.py
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)

sdk/go/common/resource/plugin/provider_plugin.go
// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"strings"
"github.com/blang/semver"
pbempty "github.com/golang/protobuf/ptypes/empty"
_struct "github.com/golang/protobuf/ptypes/struct"
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
multierror "github.com/hashicorp/go-multierror"
"github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"google.golang.org/grpc/codes"
"github.com/pulumi/pulumi/sdk/v3/go/common/resource"
"github.com/pulumi/pulumi/sdk/v3/go/common/tokens"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/cmdutil"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/contract"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/logging"
"github.com/pulumi/pulumi/sdk/v3/go/common/util/rpcutil/rpcerror"
"github.com/pulumi/pulumi/sdk/v3/go/common/workspace"
pulumirpc "github.com/pulumi/pulumi/sdk/v3/proto/go"
)
// The `Type()` for the NodeJS dynamic provider. Logically, this is the same as calling
// providers.MakeProviderType(tokens.Package("pulumi-nodejs")), but does not depend on the providers package
// (a direct dependency would cause a cyclic import issue).
//
// This is needed because we have to handle some buggy behavior that previous versions of this provider implemented.
const nodejsDynamicProviderType = "pulumi:providers:pulumi-nodejs"
// The `Type()` for the Kubernetes provider. Logically, this is the same as calling
// providers.MakeProviderType(tokens.Package("kubernetes")), but does not depend on the providers package
// (a direct dependency would cause a cyclic import issue).
//
// This is needed because we have to handle some buggy behavior that previous versions of this provider implemented.
const kubernetesProviderType = "pulumi:providers:kubernetes"
// provider reflects a resource plugin, loaded dynamically for a single package.
type provider struct {
ctx *Context // a plugin context for caching, etc.
pkg tokens.Package // the Pulumi package containing this provider's resources.
plug *plugin // the actual plugin process wrapper.
clientRaw pulumirpc.ResourceProviderClient // the raw provider client; usually unsafe to use directly.
cfgerr error // non-nil if a configure call fails.
cfgknown bool // true if all configuration values are known.
cfgdone chan bool // closed when configuration has completed.
acceptSecrets bool // true if this plugin accepts strongly-typed secrets.
acceptResources bool // true if this plugin accepts strongly-typed resource refs.
acceptOutputs bool // true if this plugin accepts output values.
supportsPreview bool // true if this plugin supports previews for Create and Update.
disableProviderPreview bool // true if previews for Create and Update are disabled.
legacyPreview bool // enables legacy behavior for unconfigured provider previews.
}
// NewProvider attempts to bind to a given package's resource plugin and then creates a gRPC connection to it. If the
// plugin could not be found, or an error occurs while creating the child process, an error is returned.
func NewProvider(host Host, ctx *Context, pkg tokens.Package, version *semver.Version,
options map[string]interface{}, disableProviderPreview bool) (Provider, error) {
// Load the plugin's path by using the standard workspace logic.
_, path, err := workspace.GetPluginPath(
workspace.ResourcePlugin, strings.Replace(string(pkg), tokens.QNameDelimiter, "_", -1), version)
if err != nil {
return nil, err
}
contract.Assert(path != "")
// Runtime options are passed as environment variables to the provider.
env := os.Environ()
for k, v := range options {
env = append(env, fmt.Sprintf("PULUMI_RUNTIME_%s=%v", strings.ToUpper(k), v))
}
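    // For example, an options entry {"typescript": false} becomes the environment
    // variable PULUMI_RUNTIME_TYPESCRIPT=false in the plugin's process (the key
    // name here is purely illustrative).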
plug, err := newPlugin(ctx, ctx.Pwd, path, fmt.Sprintf("%v (resource)", pkg),
[]string{host.ServerAddr()}, env, otgrpc.SpanDecorator(decorateProviderSpans))
if err != nil {
return nil, err
}
contract.Assertf(plug != nil, "unexpected nil resource plugin for %s", pkg)
legacyPreview := cmdutil.IsTruthy(os.Getenv("PULUMI_LEGACY_PROVIDER_PREVIEW"))
return &provider{
ctx: ctx,
pkg: pkg,
plug: plug,
clientRaw: pulumirpc.NewResourceProviderClient(plug.Conn),
cfgdone: make(chan bool),
disableProviderPreview: disableProviderPreview,
legacyPreview: legacyPreview,
}, nil
}
func NewProviderWithClient(ctx *Context, pkg tokens.Package, client pulumirpc.ResourceProviderClient,
disableProviderPreview bool) Provider {
return &provider{
ctx: ctx,
pkg: pkg,
clientRaw: client,
cfgdone: make(chan bool),
disableProviderPreview: disableProviderPreview,
}
}
func (p *provider) Pkg() tokens.Package { return p.pkg }
// label returns a base label for tracing functions.
func (p *provider) label() string {
return fmt.Sprintf("Provider[%s, %p]", p.pkg, p)
}
func (p *provider) requestContext() context.Context {
if p.ctx == nil {
return context.Background()
}
return p.ctx.Request()
}
// isDiffCheckConfigLogicallyUnimplemented returns true when an rpcerror.Error should be treated as if it was an error
// due to an rpc being unimplemented. Due to past mistakes, different providers returned "Unimplemented" in a variety
// of different ways that don't always result in an Unimplemented error code.
func isDiffCheckConfigLogicallyUnimplemented(err *rpcerror.Error, providerType tokens.Type) bool {
switch string(providerType) {
// The NodeJS dynamic provider implementation incorrectly returned an empty message instead of properly implementing
    // Diff/CheckConfig. This gets turned into an error with type: "Internal".
case nodejsDynamicProviderType:
if err.Code() == codes.Internal {
logging.V(8).Infof("treating error %s as unimplemented error", err)
return true
}
    // The Kubernetes provider returned an "Unimplemented" message, but it did so by returning a status from a
    // different package than was expected. That caused the error to be wrapped with an "Unknown" error.
case kubernetesProviderType:
if err.Code() == codes.Unknown && strings.Contains(err.Message(), "Unimplemented") {
logging.V(8).Infof("treating error %s as unimplemented error", err)
return true
}
}
return false
}
// GetSchema fetches the schema for this resource provider, if any.
func (p *provider) GetSchema(version int) ([]byte, error) {
resp, err := p.clientRaw.GetSchema(p.requestContext(), &pulumirpc.GetSchemaRequest{
Version: int32(version),
})
if err != nil {
return nil, err
}
return []byte(resp.GetSchema()), nil
}
// CheckConfig validates the configuration for this resource provider.
func (p *provider) CheckConfig(urn resource.URN, olds,
news resource.PropertyMap, allowUnknowns bool) (resource.PropertyMap, []CheckFailure, error) {
label := fmt.Sprintf("%s.CheckConfig(%s)", p.label(), urn)
logging.V(7).Infof("%s executing (#olds=%d,#news=%d)", label, len(olds), len(news))
molds, err := MarshalProperties(olds, MarshalOptions{
Label: fmt.Sprintf("%s.olds", label),
KeepUnknowns: allowUnknowns,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return nil, nil, err
}
mnews, err := MarshalProperties(news, MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: allowUnknowns,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return nil, nil, err
}
resp, err := p.clientRaw.CheckConfig(p.requestContext(), &pulumirpc.CheckRequest{
Urn: string(urn),
Olds: molds,
News: mnews,
})
if err != nil {
rpcError := rpcerror.Convert(err)
code := rpcError.Code()
if code == codes.Unimplemented || isDiffCheckConfigLogicallyUnimplemented(rpcError, urn.Type()) {
// For backwards compatibility, just return the news as if the provider was okay with them.
logging.V(7).Infof("%s unimplemented rpc: returning news as is", label)
return news, nil, nil
}
logging.V(8).Infof("%s provider received rpc error `%s`: `%s`", label, rpcError.Code(),
rpcError.Message())
return nil, nil, err
}
// Unmarshal the provider inputs.
var inputs resource.PropertyMap
if ins := resp.GetInputs(); ins != nil {
inputs, err = UnmarshalProperties(ins, MarshalOptions{
Label: fmt.Sprintf("%s.inputs", label),
KeepUnknowns: allowUnknowns,
RejectUnknowns: !allowUnknowns,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return nil, nil, err
}
}
// And now any properties that failed verification.
var failures []CheckFailure
for _, failure := range resp.GetFailures() {
failures = append(failures, CheckFailure{resource.PropertyKey(failure.Property), failure.Reason})
}
// Copy over any secret annotations, since we could not pass any to the provider, and return.
annotateSecrets(inputs, news)
logging.V(7).Infof("%s success: inputs=#%d failures=#%d", label, len(inputs), len(failures))
return inputs, failures, nil
}
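// decodeDetailedDiff converts the detailed diff carried on a gRPC DiffResponse into a map from
// property paths to PropertyDiffs, returning nil if the provider produced no detailed diff.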
func decodeDetailedDiff(resp *pulumirpc.DiffResponse) map[string]PropertyDiff {
if !resp.GetHasDetailedDiff() {
return nil
}
detailedDiff := make(map[string]PropertyDiff)
for k, v := range resp.GetDetailedDiff() {
var d DiffKind
switch v.GetKind() {
case pulumirpc.PropertyDiff_ADD:
d = DiffAdd
case pulumirpc.PropertyDiff_ADD_REPLACE:
d = DiffAddReplace
case pulumirpc.PropertyDiff_DELETE:
d = DiffDelete
case pulumirpc.PropertyDiff_DELETE_REPLACE:
d = DiffDeleteReplace
case pulumirpc.PropertyDiff_UPDATE:
d = DiffUpdate
case pulumirpc.PropertyDiff_UPDATE_REPLACE:
d = DiffUpdateReplace
default:
// Consider unknown diff kinds to be simple updates.
d = DiffUpdate
}
detailedDiff[k] = PropertyDiff{
Kind: d,
InputDiff: v.GetInputDiff(),
}
}
return detailedDiff
}
// DiffConfig checks what impacts a hypothetical change to this provider's configuration will have on the provider.
func (p *provider) DiffConfig(urn resource.URN, olds, news resource.PropertyMap,
allowUnknowns bool, ignoreChanges []string) (DiffResult, error) {
label := fmt.Sprintf("%s.DiffConfig(%s)", p.label(), urn)
logging.V(7).Infof("%s executing (#olds=%d,#news=%d)", label, len(olds), len(news))
molds, err := MarshalProperties(olds, MarshalOptions{
Label: fmt.Sprintf("%s.olds", label),
KeepUnknowns: true,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return DiffResult{}, err
}
mnews, err := MarshalProperties(news, MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: true,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return DiffResult{}, err
}
resp, err := p.clientRaw.DiffConfig(p.requestContext(), &pulumirpc.DiffRequest{
Urn: string(urn),
Olds: molds,
News: mnews,
IgnoreChanges: ignoreChanges,
})
if err != nil {
rpcError := rpcerror.Convert(err)
code := rpcError.Code()
if code == codes.Unimplemented || isDiffCheckConfigLogicallyUnimplemented(rpcError, urn.Type()) {
logging.V(7).Infof("%s unimplemented rpc: returning DiffUnknown with no replaces", label)
// In this case, the provider plugin did not implement this and we have to provide some answer:
//
// There are two interesting scenarios with the present gRPC interface:
// 1. Configuration differences in which all properties are known
// 2. Configuration differences in which some new property is unknown.
//
// In both cases, we return a diff result that indicates that the provider _should not_ be replaced.
// Although this decision is not conservative--indeed, the conservative decision would be to always require
// replacement of a provider if any input has changed--we believe that it results in the best possible user
// experience for providers that do not implement DiffConfig functionality. If we took the conservative
// route here, any change to a provider's configuration (no matter how inconsequential) would cause all of
// its resources to be replaced. This is clearly a bad experience, and differs from how things worked prior
// to first-class providers.
return DiffResult{Changes: DiffUnknown, ReplaceKeys: nil}, nil
}
logging.V(8).Infof("%s provider received rpc error `%s`: `%s`", label, rpcError.Code(),
rpcError.Message())
        return DiffResult{}, err
}
var replaces []resource.PropertyKey
for _, replace := range resp.GetReplaces() {
replaces = append(replaces, resource.PropertyKey(replace))
}
var stables []resource.PropertyKey
for _, stable := range resp.GetStables() {
stables = append(stables, resource.PropertyKey(stable))
}
var diffs []resource.PropertyKey
for _, diff := range resp.GetDiffs() {
diffs = append(diffs, resource.PropertyKey(diff))
}
changes := resp.GetChanges()
deleteBeforeReplace := resp.GetDeleteBeforeReplace()
logging.V(7).Infof("%s success: changes=%d #replaces=%v #stables=%v delbefrepl=%v, diffs=#%v",
label, changes, replaces, stables, deleteBeforeReplace, diffs)
return DiffResult{
Changes: DiffChanges(changes),
ReplaceKeys: replaces,
StableKeys: stables,
ChangedKeys: diffs,
DetailedDiff: decodeDetailedDiff(resp),
DeleteBeforeReplace: deleteBeforeReplace,
}, nil
}
// getClient returns the client, and ensures that the target provider has been configured. This just makes it safer
// to use without forgetting to call ensureConfigured manually.
func (p *provider) getClient() (pulumirpc.ResourceProviderClient, error) {
if err := p.ensureConfigured(); err != nil {
return nil, err
}
return p.clientRaw, nil
}
// ensureConfigured blocks waiting for the plugin to be configured. To improve parallelism, all Configure RPCs
// occur in parallel, and we await the completion of them at the last possible moment. This does mean, however, that
// we might discover failures later than we would have otherwise, but the caller of ensureConfigured will get them.
func (p *provider) ensureConfigured() error {
<-p.cfgdone
return p.cfgerr
}
// annotateSecrets copies the "secretness" from the ins to the outs. If there are values with the same keys for the
// outs and the ins, if they are both objects, they are transformed recursively. Otherwise, if the value in the ins
// contains a secret, the entire out value is marked as a secret. This is very close to how we project secrets
// in the programming model, with one small difference, which is how we treat the case where both are objects. In the
// programming model, we would say the entire output object is a secret. Here, we actually recur in. We do this because
// we don't want a single secret value in a rich structure to taint the entire object. Doing so would mean things like
// the entire value in the deployment would be encrypted instead of a small chunk. It also means the entire property
// would be displayed as `[secret]` in the CLI instead of a small part.
//
// NOTE: This means that for an array, if any value in the input version is a secret, the entire output array is
// marked as a secret. This is actually a very nice result, because often arrays are treated like sets by providers
// and the order may not be preserved across an operation. This means we do end up encrypting the entire array
// but that's better than accidentally leaking a value which just moved to a different location.
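//
// For example, given ins = {pwd: secret("hunter2"), cfg: {token: secret("t")}} and
// outs = {pwd: "hunter2", cfg: {token: "t", region: "us-west-2"}} (values illustrative),
// outs["pwd"] and outs["cfg"]["token"] become secrets while outs["cfg"]["region"] stays plain.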
func annotateSecrets(outs, ins resource.PropertyMap) {
if outs == nil || ins == nil {
return
}
for key, inValue := range ins {
outValue, has := outs[key]
if !has {
continue
}
if outValue.IsObject() && inValue.IsObject() {
annotateSecrets(outValue.ObjectValue(), inValue.ObjectValue())
} else if !outValue.IsSecret() && inValue.ContainsSecrets() {
outs[key] = resource.MakeSecret(outValue)
}
}
}
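// removeSecrets converts a resource.PropertyValue into the corresponding plain Go value,
// recursively unwrapping any secrets along the way so that the result can be JSON-marshaled
// for the legacy config map assembled in Configure below.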
func removeSecrets(v resource.PropertyValue) interface{} {
switch {
case v.IsNull():
return nil
case v.IsBool():
return v.BoolValue()
case v.IsNumber():
return v.NumberValue()
case v.IsString():
return v.StringValue()
case v.IsArray():
arr := []interface{}{}
for _, v := range v.ArrayValue() {
arr = append(arr, removeSecrets(v))
}
return arr
case v.IsAsset():
return v.AssetValue()
case v.IsArchive():
return v.ArchiveValue()
case v.IsComputed():
return v.Input()
case v.IsOutput():
return v.OutputValue()
case v.IsSecret():
return removeSecrets(v.SecretValue().Element)
default:
        contract.Assertf(v.IsObject(), "v is not an Object, got '%v' instead", v.TypeString())
obj := map[string]interface{}{}
for k, v := range v.ObjectValue() {
obj[string(k)] = removeSecrets(v)
}
return obj
}
}
// Configure configures the resource provider with "globals" that control its behavior.
func (p *provider) Configure(inputs resource.PropertyMap) error {
label := fmt.Sprintf("%s.Configure()", p.label())
logging.V(7).Infof("%s executing (#vars=%d)", label, len(inputs))
// Convert the inputs to a config map. If any are unknown, do not configure the underlying plugin: instead, leave
// the cfgknown bit unset and carry on.
config := make(map[string]string)
for k, v := range inputs {
if k == "version" {
continue
}
if v.ContainsUnknowns() {
p.cfgknown, p.acceptSecrets, p.acceptResources = false, false, false
close(p.cfgdone)
return nil
}
mapped := removeSecrets(v)
if _, isString := mapped.(string); !isString {
marshalled, err := json.Marshal(mapped)
if err != nil {
p.cfgerr = errors.Wrapf(err, "marshaling configuration property '%v'", k)
close(p.cfgdone)
return p.cfgerr
}
mapped = string(marshalled)
}
// Pass the older spelling of a configuration key across the RPC interface, for now, to support
// providers which are on the older plan.
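        // For example, a provider package "aws" with input key "region" would be
        // sent across the wire as "aws:config:region" (package and key here are
        // purely illustrative).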
config[string(p.Pkg())+":config:"+string(k)] = mapped.(string)
}
minputs, err := MarshalProperties(inputs, MarshalOptions{
Label: fmt.Sprintf("%s.inputs", label),
KeepUnknowns: true,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
p.cfgerr = errors.Wrapf(err, "marshaling provider inputs")
close(p.cfgdone)
return p.cfgerr
}
// Spawn the configure to happen in parallel. This ensures that we remain responsive elsewhere that might
// want to make forward progress, even as the configure call is happening.
go func() {
resp, err := p.clientRaw.Configure(p.requestContext(), &pulumirpc.ConfigureRequest{
AcceptSecrets: true,
AcceptResources: true,
Variables: config,
Args: minputs,
})
if err != nil {
rpcError := rpcerror.Convert(err)
logging.V(7).Infof("%s failed: err=%v", label, rpcError.Message())
err = createConfigureError(rpcError)
}
        // Publish the results and notify any waiters. Note that the generated
        // protobuf getters are nil-safe, so reading from resp is safe even if
        // the RPC failed and resp is nil.
p.acceptSecrets = resp.GetAcceptSecrets()
p.acceptResources = resp.GetAcceptResources()
p.supportsPreview = resp.GetSupportsPreview()
p.acceptOutputs = resp.GetAcceptOutputs()
p.cfgknown, p.cfgerr = true, err
close(p.cfgdone)
}()
return nil
}
// Check validates that the given property bag is valid for a resource of the given type.
func (p *provider) Check(urn resource.URN,
olds, news resource.PropertyMap,
allowUnknowns bool, sequenceNumber int) (resource.PropertyMap, []CheckFailure, error) {
label := fmt.Sprintf("%s.Check(%s)", p.label(), urn)
logging.V(7).Infof("%s executing (#olds=%d,#news=%d", label, len(olds), len(news))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return nil, nil, err
}
// If the configuration for this provider was not fully known--e.g. if we are doing a preview and some input
// property was sourced from another resource's output properties--don't call into the underlying provider.
if !p.cfgknown {
return news, nil, nil
}
molds, err := MarshalProperties(olds, MarshalOptions{
Label: fmt.Sprintf("%s.olds", label),
KeepUnknowns: allowUnknowns,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return nil, nil, err
}
mnews, err := MarshalProperties(news, MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: allowUnknowns,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return nil, nil, err
}
resp, err := client.Check(p.requestContext(), &pulumirpc.CheckRequest{
Urn: string(urn),
Olds: molds,
News: mnews,
SequenceNumber: int32(sequenceNumber),
})
if err != nil {
rpcError := rpcerror.Convert(err)
logging.V(7).Infof("%s failed: err=%v", label, rpcError.Message())
return nil, nil, rpcError
}
// Unmarshal the provider inputs.
var inputs resource.PropertyMap
if ins := resp.GetInputs(); ins != nil {
inputs, err = UnmarshalProperties(ins, MarshalOptions{
Label: fmt.Sprintf("%s.inputs", label),
KeepUnknowns: allowUnknowns,
RejectUnknowns: !allowUnknowns,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return nil, nil, err
}
}
// If we could not pass secrets to the provider, retain the secret bit on any property with the same name. This
// allows us to retain metadata about secrets in many cases, even for providers that do not understand secrets
// natively.
if !p.acceptSecrets {
annotateSecrets(inputs, news)
}
// And now any properties that failed verification.
var failures []CheckFailure
for _, failure := range resp.GetFailures() {
failures = append(failures, CheckFailure{resource.PropertyKey(failure.Property), failure.Reason})
}
logging.V(7).Infof("%s success: inputs=#%d failures=#%d", label, len(inputs), len(failures))
return inputs, failures, nil
}
// Diff checks what impacts a hypothetical update will have on the resource's properties.
func (p *provider) Diff(urn resource.URN, id resource.ID,
olds resource.PropertyMap, news resource.PropertyMap, allowUnknowns bool,
ignoreChanges []string) (DiffResult, error) {
contract.Assert(urn != "")
contract.Assert(id != "")
contract.Assert(news != nil)
contract.Assert(olds != nil)
label := fmt.Sprintf("%s.Diff(%s,%s)", p.label(), urn, id)
logging.V(7).Infof("%s: executing (#olds=%d,#news=%d)", label, len(olds), len(news))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return DiffResult{}, err
}
// If the configuration for this provider was not fully known--e.g. if we are doing a preview and some input
// property was sourced from another resource's output properties--don't call into the underlying provider.
// Instead, indicate that the diff is unavailable and write a message
if !p.cfgknown {
logging.V(7).Infof("%s: cannot diff due to unknown config", label)
const message = "The provider for this resource has inputs that are not known during preview.\n" +
"This preview may not correctly represent the changes that will be applied during an update."
return DiffResult{}, DiffUnavailable(message)
}
molds, err := MarshalProperties(olds, MarshalOptions{
Label: fmt.Sprintf("%s.olds", label),
ElideAssetContents: true,
KeepUnknowns: allowUnknowns,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return DiffResult{}, err
}
mnews, err := MarshalProperties(news, MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: allowUnknowns,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return DiffResult{}, err
}
resp, err := client.Diff(p.requestContext(), &pulumirpc.DiffRequest{
Id: string(id),
Urn: string(urn),
Olds: molds,
News: mnews,
IgnoreChanges: ignoreChanges,
})
if err != nil {
rpcError := rpcerror.Convert(err)
logging.V(7).Infof("%s failed: %v", label, rpcError.Message())
return DiffResult{}, rpcError
}
var replaces []resource.PropertyKey
for _, replace := range resp.GetReplaces() {
replaces = append(replaces, resource.PropertyKey(replace))
}
var stables []resource.PropertyKey
for _, stable := range resp.GetStables() {
stables = append(stables, resource.PropertyKey(stable))
}
var diffs []resource.PropertyKey
for _, diff := range resp.GetDiffs() {
diffs = append(diffs, resource.PropertyKey(diff))
}
changes := resp.GetChanges()
deleteBeforeReplace := resp.GetDeleteBeforeReplace()
logging.V(7).Infof("%s success: changes=%d #replaces=%v #stables=%v delbefrepl=%v, diffs=#%v, detaileddiff=%v",
label, changes, replaces, stables, deleteBeforeReplace, diffs, resp.GetDetailedDiff())
return DiffResult{
Changes: DiffChanges(changes),
ReplaceKeys: replaces,
StableKeys: stables,
ChangedKeys: diffs,
DetailedDiff: decodeDetailedDiff(resp),
DeleteBeforeReplace: deleteBeforeReplace,
}, nil
}
// Create allocates a new instance of the provided resource and assigns its unique resource.ID and outputs afterwards.
func (p *provider) Create(urn resource.URN, props resource.PropertyMap, timeout float64, preview bool) (resource.ID,
resource.PropertyMap, resource.Status, error) {
contract.Assert(urn != "")
contract.Assert(props != nil)
label := fmt.Sprintf("%s.Create(%s)", p.label(), urn)
logging.V(7).Infof("%s executing (#props=%v)", label, len(props))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return "", nil, resource.StatusOK, err
}
// If this is a preview and the plugin does not support provider previews, or if the configuration for the provider
    // is not fully known, hand back an empty property map. This will force the language SDK to treat all properties
// as unknown, which is conservatively correct.
//
// If the provider does not support previews, return the inputs as the state. Note that this can cause problems for
// the language SDKs if there are input and state properties that share a name but expect differently-shaped values.
if preview {
// TODO: it would be great to swap the order of these if statements. This would prevent a behavioral change for
// providers that do not support provider previews, which will always return the inputs as state regardless of
// whether or not the config is known. Unfortunately, we can't, since the `supportsPreview` bit depends on the
// result of `Configure`, which we won't call if the `cfgknown` is false. It may be worth fixing this catch-22
// by extending the provider gRPC interface with a `SupportsFeature` API similar to the language monitor.
if !p.cfgknown {
if p.legacyPreview {
return "", props, resource.StatusOK, nil
}
return "", resource.PropertyMap{}, resource.StatusOK, nil
}
if !p.supportsPreview || p.disableProviderPreview {
return "", props, resource.StatusOK, nil
}
}
// We should only be calling {Create,Update,Delete} if the provider is fully configured.
contract.Assert(p.cfgknown)
mprops, err := MarshalProperties(props, MarshalOptions{
Label: fmt.Sprintf("%s.inputs", label),
KeepUnknowns: preview,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return "", nil, resource.StatusOK, err
}
var id resource.ID
var liveObject *_struct.Struct
var resourceError error
var resourceStatus = resource.StatusOK
resp, err := client.Create(p.requestContext(), &pulumirpc.CreateRequest{
Urn: string(urn),
Properties: mprops,
Timeout: timeout,
Preview: preview,
})
if err != nil {
resourceStatus, id, liveObject, _, resourceError = parseError(err)
logging.V(7).Infof("%s failed: %v", label, resourceError)
if resourceStatus != resource.StatusPartialFailure {
return "", nil, resourceStatus, resourceError
}
// Else it's a `StatusPartialFailure`.
} else {
id = resource.ID(resp.GetId())
liveObject = resp.GetProperties()
}
if id == "" && !preview {
return "", nil, resource.StatusUnknown,
errors.Errorf("plugin for package '%v' returned empty resource.ID from create '%v'", p.pkg, urn)
}
outs, err := UnmarshalProperties(liveObject, MarshalOptions{
Label: fmt.Sprintf("%s.outputs", label),
RejectUnknowns: !preview,
KeepUnknowns: preview,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return "", nil, resourceStatus, err
}
// If we could not pass secrets to the provider, retain the secret bit on any property with the same name. This
// allows us to retain metadata about secrets in many cases, even for providers that do not understand secrets
// natively.
if !p.acceptSecrets {
annotateSecrets(outs, props)
}
logging.V(7).Infof("%s success: id=%s; #outs=%d", label, id, len(outs))
if resourceError == nil {
return id, outs, resourceStatus, nil
}
return id, outs, resourceStatus, resourceError
}
// Read returns the current live state associated with a resource. Enough state must be included in
// the inputs to uniquely identify the resource; this is typically just the resource ID, but it may
// also include some properties.
func (p *provider) Read(urn resource.URN, id resource.ID,
inputs, state resource.PropertyMap) (ReadResult, resource.Status, error) {
contract.Assertf(urn != "", "Read URN was empty")
contract.Assertf(id != "", "Read ID was empty")
label := fmt.Sprintf("%s.Read(%s,%s)", p.label(), id, urn)
logging.V(7).Infof("%s executing (#inputs=%v, #state=%v)", label, len(inputs), len(state))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return ReadResult{}, resource.StatusUnknown, err
}
// If the provider is not fully configured, return an empty bag.
if !p.cfgknown {
return ReadResult{
Outputs: resource.PropertyMap{},
Inputs: resource.PropertyMap{},
}, resource.StatusUnknown, nil
}
// Marshal the resource inputs and state so we can perform the RPC.
var minputs *_struct.Struct
if inputs != nil {
m, err := MarshalProperties(inputs, MarshalOptions{
Label: label,
ElideAssetContents: true,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return ReadResult{}, resource.StatusUnknown, err
}
minputs = m
}
mstate, err := MarshalProperties(state, MarshalOptions{
Label: label,
ElideAssetContents: true,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return ReadResult{}, resource.StatusUnknown, err
}
// Now issue the read request over RPC, blocking until it finished.
var readID resource.ID
var liveObject *_struct.Struct
var liveInputs *_struct.Struct
var resourceError error
var resourceStatus = resource.StatusOK
resp, err := client.Read(p.requestContext(), &pulumirpc.ReadRequest{
Id: string(id),
Urn: string(urn),
Properties: mstate,
Inputs: minputs,
})
if err != nil {
resourceStatus, readID, liveObject, liveInputs, resourceError = parseError(err)
logging.V(7).Infof("%s failed: %v", label, err)
if resourceStatus != resource.StatusPartialFailure {
return ReadResult{}, resourceStatus, resourceError
}
// Else it's a `StatusPartialFailure`.
} else {
readID = resource.ID(resp.GetId())
liveObject = resp.GetProperties()
liveInputs = resp.GetInputs()
}
// If the resource was missing, simply return a nil property map.
if string(readID) == "" {
return ReadResult{}, resourceStatus, nil
}
// Finally, unmarshal the resulting state properties and return them.
newState, err := UnmarshalProperties(liveObject, MarshalOptions{
Label: fmt.Sprintf("%s.outputs", label),
RejectUnknowns: true,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return ReadResult{}, resourceStatus, err
}
var newInputs resource.PropertyMap
if liveInputs != nil {
newInputs, err = UnmarshalProperties(liveInputs, MarshalOptions{
Label: label + ".inputs",
RejectUnknowns: true,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return ReadResult{}, resourceStatus, err
}
}
// If we could not pass secrets to the provider, retain the secret bit on any property with the same name. This
// allows us to retain metadata about secrets in many cases, even for providers that do not understand secrets
// natively.
if !p.acceptSecrets {
annotateSecrets(newInputs, inputs)
annotateSecrets(newState, state)
}
logging.V(7).Infof("%s success; #outs=%d, #inputs=%d", label, len(newState), len(newInputs))
return ReadResult{
ID: readID,
Outputs: newState,
Inputs: newInputs,
}, resourceStatus, resourceError
}
// Update updates an existing resource with new values.
func (p *provider) Update(urn resource.URN, id resource.ID,
olds resource.PropertyMap, news resource.PropertyMap, timeout float64,
ignoreChanges []string, preview bool) (resource.PropertyMap, resource.Status, error) {
contract.Assert(urn != "")
contract.Assert(id != "")
contract.Assert(news != nil)
contract.Assert(olds != nil)
label := fmt.Sprintf("%s.Update(%s,%s)", p.label(), id, urn)
logging.V(7).Infof("%s executing (#olds=%v,#news=%v)", label, len(olds), len(news))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return news, resource.StatusOK, err
}
// If this is a preview and the plugin does not support provider previews, or if the configuration for the provider
// is not fully known, hand back an empty property map. This will force the language SDK to treat all properties
// as unknown, which is conservatively correct.
//
// If the provider does not support previews, return the inputs as the state. Note that this can cause problems for
// the language SDKs if there are input and state properties that share a name but expect differently-shaped values.
if preview {
// TODO: it would be great to swap the order of these if statements. This would prevent a behavioral change for
// providers that do not support provider previews, which will always return the inputs as state regardless of
// whether or not the config is known. Unfortunately, we can't, since the `supportsPreview` bit depends on the
// result of `Configure`, which we won't call if the `cfgknown` is false. It may be worth fixing this catch-22
// by extending the provider gRPC interface with a `SupportsFeature` API similar to the language monitor.
if !p.cfgknown {
if p.legacyPreview {
return news, resource.StatusOK, nil
}
return resource.PropertyMap{}, resource.StatusOK, nil
}
if !p.supportsPreview || p.disableProviderPreview {
return news, resource.StatusOK, nil
}
}
// We should only be calling {Create,Update,Delete} if the provider is fully configured.
contract.Assert(p.cfgknown)
molds, err := MarshalProperties(olds, MarshalOptions{
Label: fmt.Sprintf("%s.olds", label),
ElideAssetContents: true,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return nil, resource.StatusOK, err
}
mnews, err := MarshalProperties(news, MarshalOptions{
Label: fmt.Sprintf("%s.news", label),
KeepUnknowns: preview,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return nil, resource.StatusOK, err
}
var liveObject *_struct.Struct
var resourceError error
var resourceStatus = resource.StatusOK
resp, err := client.Update(p.requestContext(), &pulumirpc.UpdateRequest{
Id: string(id),
Urn: string(urn),
Olds: molds,
News: mnews,
Timeout: timeout,
IgnoreChanges: ignoreChanges,
Preview: preview,
})
if err != nil {
resourceStatus, _, liveObject, _, resourceError = parseError(err)
logging.V(7).Infof("%s failed: %v", label, resourceError)
if resourceStatus != resource.StatusPartialFailure {
return nil, resourceStatus, resourceError
}
// Else it's a `StatusPartialFailure`.
} else {
liveObject = resp.GetProperties()
}
outs, err := UnmarshalProperties(liveObject, MarshalOptions{
Label: fmt.Sprintf("%s.outputs", label),
RejectUnknowns: !preview,
KeepUnknowns: preview,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return nil, resourceStatus, err
}
// If we could not pass secrets to the provider, retain the secret bit on any property with the same name. This
// allows us to retain metadata about secrets in many cases, even for providers that do not understand secrets
// natively.
if !p.acceptSecrets {
annotateSecrets(outs, news)
}
logging.V(7).Infof("%s success; #outs=%d", label, len(outs))
if resourceError == nil {
return outs, resourceStatus, nil
}
return outs, resourceStatus, resourceError
}
// Delete tears down an existing resource.
func (p *provider) Delete(urn resource.URN, id resource.ID, props resource.PropertyMap,
timeout float64) (resource.Status, error) {
contract.Assert(urn != "")
contract.Assert(id != "")
label := fmt.Sprintf("%s.Delete(%s,%s)", p.label(), urn, id)
logging.V(7).Infof("%s executing (#props=%d)", label, len(props))
mprops, err := MarshalProperties(props, MarshalOptions{
Label: label,
ElideAssetContents: true,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return resource.StatusOK, err
}
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return resource.StatusOK, err
}
// We should only be calling {Create,Update,Delete} if the provider is fully configured.
contract.Assert(p.cfgknown)
if _, err := client.Delete(p.requestContext(), &pulumirpc.DeleteRequest{
Id: string(id),
Urn: string(urn),
Properties: mprops,
Timeout: timeout,
}); err != nil {
resourceStatus, rpcErr := resourceStateAndError(err)
logging.V(7).Infof("%s failed: %v", label, rpcErr)
return resourceStatus, rpcErr
}
logging.V(7).Infof("%s success", label)
return resource.StatusOK, nil
}
// Construct creates a new component resource from the given type, name, parent, options, and inputs, and returns
// its URN and outputs.
func (p *provider) Construct(info ConstructInfo, typ tokens.Type, name tokens.QName, parent resource.URN,
inputs resource.PropertyMap, options ConstructOptions) (ConstructResult, error) {
contract.Assert(typ != "")
contract.Assert(name != "")
contract.Assert(inputs != nil)
label := fmt.Sprintf("%s.Construct(%s, %s, %s)", p.label(), typ, name, parent)
logging.V(7).Infof("%s executing (#inputs=%v)", label, len(inputs))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return ConstructResult{}, err
}
// We should only be calling Construct if the provider is fully configured.
contract.Assert(p.cfgknown)
if !p.acceptSecrets {
return ConstructResult{}, fmt.Errorf("plugins that can construct components must support secrets")
}
// Marshal the input properties.
minputs, err := MarshalProperties(inputs, MarshalOptions{
Label: fmt.Sprintf("%s.inputs", label),
KeepUnknowns: true,
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
// To initially scope the use of this new feature, we only keep output values for
// Construct and Call (when the client accepts them).
KeepOutputValues: p.acceptOutputs,
})
if err != nil {
return ConstructResult{}, err
}
// Marshal the aliases.
aliases := make([]string, len(options.Aliases))
for i, alias := range options.Aliases {
aliases[i] = string(alias)
}
// Marshal the dependencies.
dependencies := make([]string, len(options.Dependencies))
for i, dep := range options.Dependencies {
dependencies[i] = string(dep)
}
// Marshal the property dependencies.
inputDependencies := map[string]*pulumirpc.ConstructRequest_PropertyDependencies{}
for name, dependencies := range options.PropertyDependencies {
urns := make([]string, len(dependencies))
for i, urn := range dependencies {
urns[i] = string(urn)
}
inputDependencies[string(name)] = &pulumirpc.ConstructRequest_PropertyDependencies{Urns: urns}
}
// Marshal the config.
config := map[string]string{}
for k, v := range info.Config {
config[k.String()] = v
}
configSecretKeys := []string{}
for _, k := range info.ConfigSecretKeys {
configSecretKeys = append(configSecretKeys, k.String())
}
resp, err := client.Construct(p.requestContext(), &pulumirpc.ConstructRequest{
Project: info.Project,
Stack: info.Stack,
Config: config,
ConfigSecretKeys: configSecretKeys,
DryRun: info.DryRun,
Parallel: int32(info.Parallel),
MonitorEndpoint: info.MonitorAddress,
Type: string(typ),
Name: string(name),
Parent: string(parent),
Inputs: minputs,
Protect: options.Protect,
Providers: options.Providers,
InputDependencies: inputDependencies,
Aliases: aliases,
Dependencies: dependencies,
})
if err != nil {
return ConstructResult{}, err
}
outputs, err := UnmarshalProperties(resp.GetState(), MarshalOptions{
Label: fmt.Sprintf("%s.outputs", label),
KeepUnknowns: info.DryRun,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return ConstructResult{}, err
}
outputDependencies := map[resource.PropertyKey][]resource.URN{}
for k, rpcDeps := range resp.GetStateDependencies() {
urns := make([]resource.URN, len(rpcDeps.Urns))
for i, d := range rpcDeps.Urns {
urns[i] = resource.URN(d)
}
outputDependencies[resource.PropertyKey(k)] = urns
}
logging.V(7).Infof("%s success: #outputs=%d", label, len(outputs))
return ConstructResult{
URN: resource.URN(resp.GetUrn()),
Outputs: outputs,
OutputDependencies: outputDependencies,
}, nil
}
// Invoke dynamically executes a built-in function in the provider.
func (p *provider) Invoke(tok tokens.ModuleMember, args resource.PropertyMap) (resource.PropertyMap,
[]CheckFailure, error) {
contract.Assert(tok != "")
label := fmt.Sprintf("%s.Invoke(%s)", p.label(), tok)
logging.V(7).Infof("%s executing (#args=%d)", label, len(args))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return nil, nil, err
}
// If the provider is not fully configured, return an empty property map.
if !p.cfgknown {
return resource.PropertyMap{}, nil, nil
}
margs, err := MarshalProperties(args, MarshalOptions{
Label: fmt.Sprintf("%s.args", label),
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return nil, nil, err
}
resp, err := client.Invoke(p.requestContext(), &pulumirpc.InvokeRequest{
Tok: string(tok),
Args: margs,
AcceptResources: p.acceptResources,
})
if err != nil {
rpcError := rpcerror.Convert(err)
logging.V(7).Infof("%s failed: %v", label, rpcError.Message())
return nil, nil, rpcError
}
// Unmarshal any return values.
ret, err := UnmarshalProperties(resp.GetReturn(), MarshalOptions{
Label: fmt.Sprintf("%s.returns", label),
RejectUnknowns: true,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return nil, nil, err
}
// And now any properties that failed verification.
var failures []CheckFailure
for _, failure := range resp.GetFailures() {
failures = append(failures, CheckFailure{resource.PropertyKey(failure.Property), failure.Reason})
}
logging.V(7).Infof("%s success (#ret=%d,#failures=%d) success", label, len(ret), len(failures))
return ret, failures, nil
}
// StreamInvoke dynamically executes a built-in function in the provider, which returns a stream of
// responses.
func (p *provider) StreamInvoke(
tok tokens.ModuleMember,
args resource.PropertyMap,
onNext func(resource.PropertyMap) error) ([]CheckFailure, error) {
contract.Assert(tok != "")
label := fmt.Sprintf("%s.StreamInvoke(%s)", p.label(), tok)
logging.V(7).Infof("%s executing (#args=%d)", label, len(args))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return nil, err
}
// If the provider is not fully configured, return an empty property map.
if !p.cfgknown {
return nil, onNext(resource.PropertyMap{})
}
margs, err := MarshalProperties(args, MarshalOptions{
Label: fmt.Sprintf("%s.args", label),
KeepSecrets: p.acceptSecrets,
KeepResources: p.acceptResources,
})
if err != nil {
return nil, err
}
streamClient, err := client.StreamInvoke(
p.requestContext(), &pulumirpc.InvokeRequest{
Tok: string(tok),
Args: margs,
AcceptResources: p.acceptResources,
})
if err != nil {
rpcError := rpcerror.Convert(err)
logging.V(7).Infof("%s failed: %v", label, rpcError.Message())
return nil, rpcError
}
for {
in, err := streamClient.Recv()
if err == io.EOF {
return nil, nil
}
if err != nil {
return nil, err
}
// Unmarshal response.
ret, err := UnmarshalProperties(in.GetReturn(), MarshalOptions{
Label: fmt.Sprintf("%s.returns", label),
RejectUnknowns: true,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return nil, err
}
// Check properties that failed verification.
var failures []CheckFailure
for _, failure := range in.GetFailures() {
failures = append(failures, CheckFailure{resource.PropertyKey(failure.Property), failure.Reason})
}
if len(failures) > 0 {
return failures, nil
}
// Send stream message back to whoever is consuming the stream.
if err := onNext(ret); err != nil {
return nil, err
}
}
}
// Call dynamically executes a method in the provider associated with a component resource.
func (p *provider) Call(tok tokens.ModuleMember, args resource.PropertyMap, info CallInfo,
options CallOptions) (CallResult, error) {
contract.Assert(tok != "")
label := fmt.Sprintf("%s.Call(%s)", p.label(), tok)
logging.V(7).Infof("%s executing (#args=%d)", label, len(args))
// Get the RPC client and ensure it's configured.
client, err := p.getClient()
if err != nil {
return CallResult{}, err
}
// If the provider is not fully configured, return an empty property map.
if !p.cfgknown {
return CallResult{}, nil
}
margs, err := MarshalProperties(args, MarshalOptions{
Label: fmt.Sprintf("%s.args", label),
KeepUnknowns: true,
KeepSecrets: true,
KeepResources: true,
// To initially scope the use of this new feature, we only keep output values for
// Construct and Call (when the client accepts them).
KeepOutputValues: p.acceptOutputs,
})
if err != nil {
return CallResult{}, err
}
// Marshal the arg dependencies.
argDependencies := map[string]*pulumirpc.CallRequest_ArgumentDependencies{}
for name, dependencies := range options.ArgDependencies {
urns := make([]string, len(dependencies))
for i, urn := range dependencies {
urns[i] = string(urn)
}
argDependencies[string(name)] = &pulumirpc.CallRequest_ArgumentDependencies{Urns: urns}
}
// Marshal the config.
config := map[string]string{}
for k, v := range info.Config {
config[k.String()] = v
}
resp, err := client.Call(p.requestContext(), &pulumirpc.CallRequest{
Tok: string(tok),
Args: margs,
ArgDependencies: argDependencies,
Project: info.Project,
Stack: info.Stack,
Config: config,
DryRun: info.DryRun,
Parallel: int32(info.Parallel),
MonitorEndpoint: info.MonitorAddress,
})
if err != nil {
rpcError := rpcerror.Convert(err)
logging.V(7).Infof("%s failed: %v", label, rpcError.Message())
return CallResult{}, rpcError
}
// Unmarshal any return values.
ret, err := UnmarshalProperties(resp.GetReturn(), MarshalOptions{
Label: fmt.Sprintf("%s.returns", label),
KeepUnknowns: info.DryRun,
KeepSecrets: true,
KeepResources: true,
})
if err != nil {
return CallResult{}, err
}
returnDependencies := map[resource.PropertyKey][]resource.URN{}
for k, rpcDeps := range resp.GetReturnDependencies() {
urns := make([]resource.URN, len(rpcDeps.Urns))
for i, d := range rpcDeps.Urns {
urns[i] = resource.URN(d)
}
returnDependencies[resource.PropertyKey(k)] = urns
}
// And now any properties that failed verification.
var failures []CheckFailure
for _, failure := range resp.GetFailures() {
failures = append(failures, CheckFailure{resource.PropertyKey(failure.Property), failure.Reason})
}
logging.V(7).Infof("%s success (#ret=%d,#failures=%d) success", label, len(ret), len(failures))
return CallResult{Return: ret, ReturnDependencies: returnDependencies, Failures: failures}, nil
}
// GetPluginInfo returns this plugin's information.
func (p *provider) GetPluginInfo() (workspace.PluginInfo, error) {
label := fmt.Sprintf("%s.GetPluginInfo()", p.label())
logging.V(7).Infof("%s executing", label)
// Calling GetPluginInfo happens immediately after loading, and does not require configuration to proceed.
// Thus, we access the clientRaw property, rather than calling getClient.
resp, err := p.clientRaw.GetPluginInfo(p.requestContext(), &pbempty.Empty{})
if err != nil {
rpcError := rpcerror.Convert(err)
logging.V(7).Infof("%s failed: err=%v", label, rpcError.Message())
return workspace.PluginInfo{}, rpcError
}
var version *semver.Version
if v := resp.Version; v != "" {
sv, err := semver.ParseTolerant(v)
if err != nil {
return workspace.PluginInfo{}, err
}
version = &sv
}
path := ""
if p.plug != nil {
path = p.plug.Bin
}
return workspace.PluginInfo{
Name: string(p.pkg),
Path: path,
Kind: workspace.ResourcePlugin,
Version: version,
}, nil
}
func (p *provider) SignalCancellation() error {
_, err := p.clientRaw.Cancel(p.requestContext(), &pbempty.Empty{})
if err != nil {
rpcError := rpcerror.Convert(err)
logging.V(8).Infof("provider received rpc error `%s`: `%s`", rpcError.Code(),
rpcError.Message())
switch rpcError.Code() {
case codes.Unimplemented:
// For backwards compatibility, do nothing if it's not implemented.
return nil
}
}
return err
}
// Close tears down the underlying plugin RPC connection and process.
func (p *provider) Close() error {
if p.plug == nil {
return nil
}
return p.plug.Close()
}
// createConfigureError creates a nice error message from an RPC error that
// originated from `Configure`.
//
// If we requested that a resource configure itself but omitted required configuration
// variables, resource providers will respond with a list of missing variables and their descriptions.
// If that is what occurred, we'll use that information here to construct a nice error message.
func createConfigureError(rpcerr *rpcerror.Error) error {
var err error
for _, detail := range rpcerr.Details() {
if missingKeys, ok := detail.(*pulumirpc.ConfigureErrorMissingKeys); ok {
for _, missingKey := range missingKeys.MissingKeys {
singleError := fmt.Errorf("missing required configuration key \"%s\": %s\n"+
"Set a value using the command `pulumi config set %s <value>`.",
missingKey.Name, missingKey.Description, missingKey.Name)
err = multierror.Append(err, singleError)
}
}
}
if err != nil {
return err
}
return rpcerr
}
// resourceStateAndError interprets an error obtained from a gRPC endpoint.
//
// gRPC gives us a `status.Status` structure as an `error` whenever our
// gRPC servers serve up an error. Each `status.Status` contains a code
// and a message. Based on the error code given to us, we can understand
// the state of our system and if our resource status is truly unknown.
//
// In general, our resource state is only really unknown if the server
// had an internal error, in which case it will serve one of `codes.Internal`,
// `codes.DataLoss`, or `codes.Unknown` to us.
func resourceStateAndError(err error) (resource.Status, *rpcerror.Error) {
rpcError := rpcerror.Convert(err)
logging.V(8).Infof("provider received rpc error `%s`: `%s`", rpcError.Code(), rpcError.Message())
switch rpcError.Code() {
case codes.Internal, codes.DataLoss, codes.Unknown:
logging.V(8).Infof("rpc error kind `%s` may not be recoverable", rpcError.Code())
return resource.StatusUnknown, rpcError
}
logging.V(8).Infof("rpc error kind `%s` is well-understood and recoverable", rpcError.Code())
return resource.StatusOK, rpcError
}
// parseError parses a gRPC error into a set of values that represent the state of a resource. They
// are: (1) the `resourceStatus`, indicating the last known state (e.g., `StatusOK`, representing
// success, `StatusUnknown`, representing internal failure); (2) the `*rpcerror.Error`, our internal
// representation for RPC errors; and optionally (3) `liveObject`, containing the last known live
// version of the object that was successfully created but failed to initialize (e.g., because the
// object was created, but app code is continually crashing and the resource never achieves
// liveness).
func parseError(err error) (
resourceStatus resource.Status, id resource.ID, liveInputs, liveObject *_struct.Struct, resourceErr error,
) {
var responseErr *rpcerror.Error
resourceStatus, responseErr = resourceStateAndError(err)
contract.Assert(responseErr != nil)
// If resource was successfully created but failed to initialize, the error will be packed
// with the live properties of the object.
resourceErr = responseErr
for _, detail := range responseErr.Details() {
if initErr, ok := detail.(*pulumirpc.ErrorResourceInitFailed); ok {
id = resource.ID(initErr.GetId())
liveObject = initErr.GetProperties()
liveInputs = initErr.GetInputs()
resourceStatus = resource.StatusPartialFailure
resourceErr = &InitError{Reasons: initErr.Reasons}
break
}
}
	return resourceStatus, id, liveInputs, liveObject, resourceErr
}
// InitError represents a failure to initialize a resource, i.e., the resource has been successfully
// created, but it has failed to initialize.
type InitError struct {
Reasons []string
}
var _ error = (*InitError)(nil)
func (ie *InitError) Error() string {
var err error
for _, reason := range ie.Reasons {
err = multierror.Append(err, errors.New(reason))
}
return err.Error()
}
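// decorateSpanWithType tags the tracing span with the resource type parsed
// from the given URN, if the URN is valid.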
func decorateSpanWithType(span opentracing.Span, urn string) {
if urn := resource.URN(urn); urn.IsValid() {
span.SetTag("pulumi-decorator", urn.Type())
}
}
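// decorateProviderSpans annotates provider gRPC tracing spans with the
// resource type (or function token) taken from the request payload.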
func decorateProviderSpans(span opentracing.Span, method string, req, resp interface{}, grpcError error) {
if req == nil {
return
}
switch method {
case "/pulumirpc.ResourceProvider/Check", "/pulumirpc.ResourceProvider/CheckConfig":
decorateSpanWithType(span, req.(*pulumirpc.CheckRequest).Urn)
case "/pulumirpc.ResourceProvider/Diff", "/pulumirpc.ResourceProvider/DiffConfig":
decorateSpanWithType(span, req.(*pulumirpc.DiffRequest).Urn)
case "/pulumirpc.ResourceProvider/Create":
decorateSpanWithType(span, req.(*pulumirpc.CreateRequest).Urn)
case "/pulumirpc.ResourceProvider/Update":
decorateSpanWithType(span, req.(*pulumirpc.UpdateRequest).Urn)
case "/pulumirpc.ResourceProvider/Delete":
decorateSpanWithType(span, req.(*pulumirpc.DeleteRequest).Urn)
case "/pulumirpc.ResourceProvider/Invoke":
span.SetTag("pulumi-decorator", req.(*pulumirpc.InvokeRequest).Tok)
}
}
| [
"\"PULUMI_LEGACY_PROVIDER_PREVIEW\""
]
| []
| [
"PULUMI_LEGACY_PROVIDER_PREVIEW"
]
| [] | ["PULUMI_LEGACY_PROVIDER_PREVIEW"] | go | 1 | 0 | |
Container-Root/src/python/pm/technique/technique.py | ######################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# SPDX-License-Identifier: MIT-0 #
######################################################################
import argparse
import lib.db.graph.dba as dba
import lib.db.graph.data as graphdata
import lib.util.list_util as list_util
import lib.util.dict_util as dict_util
import json
import os
from pathlib import Path
graph = dba.get_graph()
def list_techniques(tags, verbose=True):
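    """Lists techniques whose tags include all of the given tags; returns parallel lists of matching ids and their tags."""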
technique = graphdata.get_vertices('technique',graph)
    technique_id_list = []
    technique_tag_list = []
c = technique.all()
while (not c.empty()):
t = c.next()
technique_id = t['technique_id']
technique_name = t['technique_name']
technique_tags=t['technique_tags']
technique_description=t['technique_description']
check = all(item in technique_tags for item in tags)
if check:
if verbose:
print('%s %s %s %s'%(technique_id,technique_name,technique_tags,technique_description))
technique_id_list.append(technique_id)
technique_tag_list.append(technique_tags)
return technique_id_list,technique_tag_list
def describe_technique(technique_id):
technique = graphdata.get_vertex('technique', technique_id, graph)
print(json.dumps(technique,indent=2))
return technique
def export_task_template(technique_id, verbose=True):
technique = graphdata.get_vertex('technique', technique_id, graph)
task_template = technique['task_template']
if verbose:
print(json.dumps(task_template, indent=2))
return task_template
def search_export_task_template(tags):
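    """Resolves the tags to a single technique and prints its task template with keywords substituted, or prompts for disambiguation."""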
technique_id_list,technique_tag_list = list_techniques(tags,verbose=False)
tag_str = ' '.join(tags)
if len(technique_id_list) < 1:
output = 'Sorry, do not know how to go %s'%tag_str
elif len(technique_id_list) > 1:
full_list = list_util.flatten_list_of_lists(technique_tag_list)
union = list_util.unique_list(full_list)
diff = list_util.diff_lists(union,tags)
output = 'Please qualify further: %s'%diff
else:
task_template = export_task_template(technique_id_list[0], verbose=False)
task_template_updated = dict_util.replace_keywords(task_template)
output=json.dumps(task_template_updated, indent=2, ensure_ascii=True)
print(output)
def delete_technique(technique_name):
techniques = graph.vertex_collection('technique')
techniques.delete(technique_name)
def get_task_template_filepath(technique_name):
technique = describe_technique(technique_name)
task_template_filepath = technique["task_template_filepath"]
return task_template_filepath
def get_task_template_file(technique_name):
task_template_filepath = get_task_template_filepath(technique_name)
template={}
with open(task_template_filepath, 'r') as fp:
template = json.load(fp)
fp.close()
return template
def get_task_executor_filepath(technique_name):
technique = describe_technique(technique_name)
task_executor_filepath = technique["task_executor_filepath"]
return task_executor_filepath
def register_technique(technique_config):
technique_properties={}
with open(technique_config, 'r') as fp:
technique_properties = json.load(fp)
fp.close()
technique_id = register_technique_json(technique_properties)
return technique_id
def register_technique_json(technique_properties):
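    """Registers a technique from a properties dict, writing metadata.json under ${PM_ROOT_PATH}/technique/<technique_id>, and returns the id."""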
# Construct technique folder: ${PM_ROOT_PATH}/technique/<technique_id>
cur_technique_id = technique_properties['technique_id']
pm_root_path = os.getenv('PM_ROOT_PATH', default='wd')
technique_output_json_filepath_full = '%s/technique/%s/metadata.json' % (pm_root_path, cur_technique_id)
out_file_path = Path(technique_output_json_filepath_full)
out_file_path.parent.mkdir(exist_ok=True, parents=True)
# Write technique json to folder
with open(technique_output_json_filepath_full, 'w') as fp:
json.dump(technique_properties,fp)
return cur_technique_id
if __name__ == '__main__':
#import debugpy; debugpy.listen(('0.0.0.0',5678)); debugpy.wait_for_client(); breakpoint()
parser = argparse.ArgumentParser()
parser.add_argument('--action', help='Technique action: ls | describe | template | search | register | delete', required=True)
parser.add_argument('--tags', help='Technique selection tags, a space separated string, can be combined with --action ls or --action search', nargs='+', default=[], required=False)
parser.add_argument('--technique_id', help='Technique id', required=False)
parser.add_argument('--config', help='Technique configuration, template with provided values', required=False)
args = parser.parse_args()
action = args.action
if (action == 'ls'):
list_techniques(args.tags)
elif (action == 'search'):
search_export_task_template(args.tags)
elif (action == 'describe'):
if (args.technique_id is None):
print("Please specify technique id using the --technique_id argument")
else:
describe_technique(args.technique_id)
elif (action == 'template'):
if (args.technique_id is None):
print("Please specify technique id using the --technique_id argument")
else:
export_task_template(args.technique_id)
elif (action == 'delete'):
        if (args.technique_id is None):
print("Please specify technique id using the --technique_id argument")
else:
delete_technique(args.technique_id)
elif (action == 'register'):
if (args.config is None):
print("Please specify config path using the --config argument")
else:
register_technique(args.config)
else:
print('Action %s not recognized'%action)
| []
| []
| [
"PM_ROOT_PATH"
]
| [] | ["PM_ROOT_PATH"] | python | 1 | 0 | |
Discipline_test.go | package toornamentClient
import (
"encoding/json"
"fmt"
"os"
"testing"
)
func TestDiscipline(t *testing.T) {
var client ToornamentClient
client.ApiKey = os.Getenv("KEY")
	disciplines := GetDisciplines(&client, DisciplineScope().VIEWER, NewDisciplineRange(0, 7))
	str, err := json.Marshal(disciplines)
	if err != nil {
		t.Errorf("Couldn't find anything: %v", err)
	} else {
fmt.Println(string(str))
}
}
func TestSingleDiscipline(t *testing.T) {
var client ToornamentClient
client.ApiKey = os.Getenv("KEY")
	discipline := GetDiscipline(&client, DisciplineScope().ORGANIZER, "hearthstone")
	str, err := json.Marshal(discipline)
	if err != nil {
		t.Errorf("Couldn't find anything: %v", err)
	} else {
fmt.Println(string(str))
}
} | [
"\"KEY\"",
"\"KEY\""
]
| []
| [
"KEY"
]
| [] | ["KEY"] | go | 1 | 0 | |
db_test.go | // Copyright 2019 Toshiki kawai
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Note:
// The bookshelf command was forked from the below.
// https://cloud.google.com/go/getting-started/tutorial-app.
//
// Google's bookshelf program has
// * demonstrating several Google Cloud APIs
// * including App Engine
// * using Firestore
// * using Cloud Storage
//
// Change:
// * Does not use Google Cloud APIs
// * Does not including App Engine
// * use Mysql instead of Firestore
// * Does not use Cloud storage
package main
import (
"fmt"
"os"
"testing"
"time"
"github.com/jinzhu/gorm"
)
func testDB(t *testing.T, db BookDatabase) {
t.Helper()
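	// Exercise the full add/update/get/delete round-trip against the given database implementation.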
b := &Book{
Author: "testy mc testface",
Title: fmt.Sprintf("t-%d", time.Now().Unix()),
PublishedDate: fmt.Sprintf("%d", time.Now().Unix()),
Description: "desc",
}
id, err := db.AddBook(b)
if err != nil {
t.Fatal(err)
}
b.ID = id
b.Description = "newdesc"
if err := db.UpdateBook(b); err != nil {
t.Error(err)
}
gotBook, err := db.GetBook(id)
if err != nil {
t.Error(err)
}
if got, want := gotBook.Description, b.Description; got != want {
t.Errorf("Update description: got %q, want %q", got, want)
}
if err := db.DeleteBook(id); err != nil {
t.Error(err)
}
if _, err := db.GetBook(id); err == nil {
t.Error("want non-nil err")
}
}
func TestMemoryDB(t *testing.T) {
testDB(t, newMemoryDB())
}
func TestMysqlDB(t *testing.T) {
DBHost := os.Getenv("DB_HOST")
if DBHost == "" {
DBHost = "localhost"
}
DBPort := os.Getenv("DB_PORT")
if DBPort == "" {
DBPort = "3306"
}
client, err := gorm.Open(
"mysql",
"user:password@("+DBHost+":"+DBPort+")/default?charset=utf8mb4&parseTime=True&loc=Local")
if err != nil {
t.Fatalf("gorm.open: %v", err)
}
defer client.Close()
db, err := newDB(client)
if err != nil {
t.Fatalf("newDB: %v", err)
}
testDB(t, db)
}
| [
"\"DB_HOST\"",
"\"DB_PORT\""
]
| []
| [
"DB_PORT",
"DB_HOST"
]
| [] | ["DB_PORT", "DB_HOST"] | go | 2 | 0 | |
src/main/java/me/lorinlee/ui/command/OpenCommand.java | package me.lorinlee.ui.command;
import me.lorinlee.ui.command.builder.CommandBuilder;
import me.lorinlee.ui.command.builder.UserCommandBuilder;
import me.lorinlee.ui.command.exception.ExceptionHandler;
import java.io.IOException;
import java.util.List;
/**
* Created by lorin on 17-5-24.
*/
public class OpenCommand extends Command {
private String host;
private int port;
public OpenCommand(String host, int port) {
this.host = host;
this.port = port;
}
public OpenCommand(String host) {
this.host = host;
this.port = 21;
}
public OpenCommand() {
}
public String getHost() {
return host;
}
public void setHost(String host) {
this.host = host;
}
public int getPort() {
return port;
}
public void setPort(int port) {
this.port = port;
}
protected void before() {
requestSocket.setHost(host);
requestSocket.setPort(port);
}
@Override
protected void execute() {
try {
requestSocket.connect();
List<String> lines = requestSocket.readLines();
lines.forEach(System.out::println);
} catch (IOException e) {
ExceptionHandler.handle(e);
try {
requestSocket.close();
System.out.println("ftp: " + e.toString());
} catch (IOException e1) {
ExceptionHandler.handle(e1);
}
}
}
protected void after() {
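        // On a successful connection, prompt for a user name (defaulting to the local $USER) and log in with a USER command.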
if (requestSocket.isConnected()) {
String localUser = System.getenv("USER");
System.out.print("Name (" + requestSocket.getHost() + ":" + localUser + "):" );
String user = commandManager.getCommandLine();
if ("".equals(user)) {
user = localUser;
}
CommandBuilder commandBuilder = new UserCommandBuilder();
commandBuilder.setCmdAndParam(new String[]{"user", user});
Command command = commandBuilder.build();
command.run();
}
}
}
| [
"\"USER\""
]
| []
| [
"USER"
]
| [] | ["USER"] | java | 1 | 0 | |
scripts/llnl_scripts/llnl_lc_build_tools.py | #!/usr/local/bin/python
# Copyright (c) 2017-2020, Lawrence Livermore National Security, LLC and
# other Axom Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (BSD-3-Clause)
"""
file: llnl_lc_build_tools.py
description:
helpers for installing axom tpls on llnl lc systems.
"""
import os
import socket
import sys
import subprocess
import datetime
import glob
import json
import getpass
import shutil
import time
from os.path import join as pjoin
def sexe(cmd,
ret_output=False,
output_file = None,
echo = False,
error_prefix = "ERROR:"):
""" Helper for executing shell commands. """
if echo:
print "[exe: %s]" % cmd
if ret_output:
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
res =p.communicate()[0]
return p.returncode,res
elif output_file != None:
ofile = open(output_file,"w")
p = subprocess.Popen(cmd,
shell=True,
stdout= ofile,
stderr=subprocess.STDOUT)
res =p.communicate()[0]
return p.returncode
else:
rcode = subprocess.call(cmd,shell=True)
if rcode != 0:
print "[{0} [return code: {1}] from command: {2}]".format(error_prefix, rcode,cmd)
return rcode
def get_timestamp(t=None,sep="_"):
""" Creates a timestamp that can easily be included in a filename. """
if t is None:
t = datetime.datetime.now()
sargs = (t.year,t.month,t.day,t.hour,t.minute,t.second)
sbase = "".join(["%04d",sep,"%02d",sep,"%02d",sep,"%02d",sep,"%02d",sep,"%02d"])
return sbase % sargs
def build_info(job_name):
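    """Gathers build metadata: user, git branch, commit sha1, job name, and platform."""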
res = {}
res["built_by"] = os.environ["USER"]
res["built_from_branch"] = "unknown"
res["built_from_sha1"] = "unknown"
res["job_name"] = job_name
res["platform"] = get_platform()
rc, out = sexe('git branch -a | grep \"*\"',ret_output=True,error_prefix="WARNING:")
out = out.strip()
if rc == 0 and out != "":
res["built_from_branch"] = out.split()[1]
rc,out = sexe('git rev-parse --verify HEAD',ret_output=True,error_prefix="WARNING:")
out = out.strip()
if rc == 0 and out != "":
res["built_from_sha1"] = out
return res
def write_build_info(ofile, job_name):
print "[build info]"
binfo_str = json.dumps(build_info(job_name),indent=2)
print binfo_str
open(ofile,"w").write(binfo_str)
def log_success(prefix, msg, timestamp=""):
"""
Called at the end of the process to signal success.
"""
info = {}
info["prefix"] = prefix
info["platform"] = get_platform()
info["status"] = "success"
info["message"] = msg
if timestamp == "":
info["timestamp"] = get_timestamp()
else:
info["timestamp"] = timestamp
json.dump(info,open(pjoin(prefix,"success.json"),"w"),indent=2)
def log_failure(prefix, msg, timestamp=""):
"""
Called when the process failed.
"""
info = {}
info["prefix"] = prefix
info["platform"] = get_platform()
info["status"] = "failed"
info["message"] = msg
if timestamp == "":
info["timestamp"] = get_timestamp()
else:
info["timestamp"] = timestamp
json.dump(info,open(pjoin(prefix,"failed.json"),"w"),indent=2)
def copy_if_exists(src, dst, verbose=True):
if os.path.exists(src):
shutil.copy2(src, dst)
if verbose:
if os.path.exists(src):
print "[File copied]"
else:
print "[File not copied because source did not exist]"
print "[ Source: {0}]".format(src)
print "[ Destination: {0}]".format(dst)
def normalize_job_name(job_name):
return job_name.replace(' ', '_').replace(',', '')
def copy_build_dir_files(build_dir, archive_spec_dir):
copy_if_exists(pjoin(build_dir, "info.json"), archive_spec_dir)
copy_if_exists(pjoin(build_dir, "failed.json"), archive_spec_dir)
copy_if_exists(pjoin(build_dir, "success.json"), archive_spec_dir)
copy_if_exists(pjoin(build_dir, "output.log.make.txt"), archive_spec_dir)
copy_if_exists(pjoin(build_dir, "output.log.make.test.txt"), archive_spec_dir)
copy_if_exists(pjoin(build_dir, "output.log.make.install.txt"), archive_spec_dir)
copy_if_exists(pjoin(build_dir, "output.log.make.docs.txt"), archive_spec_dir)
copy_if_exists(pjoin(build_dir, "output.log.install_example.cmake.txt"), archive_spec_dir)
copy_if_exists(pjoin(build_dir, "output.log.install_example.blt.txt"), archive_spec_dir)
# Note: There should only be one of these per spec
last_test_logs = glob.glob(pjoin(build_dir, "Testing", "Temporary", "LastTest*.log"))
if len(last_test_logs) > 0:
copy_if_exists(last_test_logs[0], archive_spec_dir)
# Note: There should only be one of these per spec
test_xmls = glob.glob(pjoin(build_dir, "Testing", "*", "Test.xml"))
if len(test_xmls) > 0:
copy_if_exists(test_xmls[0], archive_spec_dir)
def archive_src_logs(prefix, job_name, timestamp):
archive_dir = pjoin(get_archive_base_dir(), get_system_type())
archive_dir = pjoin(archive_dir, normalize_job_name(job_name), timestamp)
print "[Starting Archiving]"
print "[ Archive Dir: %s]" % archive_dir
print "[ Prefix: %s]" % prefix
if not os.path.exists(archive_dir):
os.makedirs(archive_dir)
copy_if_exists(pjoin(prefix, "info.json"), archive_dir)
copy_if_exists(pjoin(prefix, "failed.json"), archive_dir)
copy_if_exists(pjoin(prefix, "success.json"), archive_dir)
build_and_test_root = get_build_and_test_root(prefix, timestamp)
build_dirs = glob.glob(pjoin(build_and_test_root, "build-*"))
for build_dir in build_dirs:
spec = get_spec_from_build_dir(build_dir)
archive_spec_dir = pjoin(archive_dir, spec)
print "[ Spec Dir: %s]" % archive_spec_dir
if not os.path.exists(archive_spec_dir):
os.makedirs(archive_spec_dir)
# Note: There should only be one of these per spec
config_spec_logs = glob.glob(pjoin(build_and_test_root, "output.log.*-" + spec + ".configure.txt"))
if len(config_spec_logs) > 0:
copy_if_exists(config_spec_logs[0], pjoin(archive_spec_dir, "output.log.config-build.txt"))
# Note: There should only be one of these per spec
print "[ Build Dir: %s]" % build_dir
copy_build_dir_files(build_dir, archive_spec_dir)
set_axom_group_and_perms(archive_dir)
def archive_tpl_logs(prefix, job_name, timestamp):
archive_dir = pjoin(get_archive_base_dir(), get_system_type())
archive_dir = pjoin(archive_dir, normalize_job_name(job_name), timestamp)
print "[Starting Archiving]"
print "[ Archive Dir: %s]" % archive_dir
print "[ Prefix: %s]" % prefix
if not os.path.exists(archive_dir):
os.makedirs(archive_dir)
tpl_build_dir = pjoin(prefix, get_system_type())
tpl_build_dir = pjoin(tpl_build_dir, timestamp)
copy_if_exists(pjoin(tpl_build_dir, "info.json"), archive_dir)
build_and_test_root = get_build_and_test_root(tpl_build_dir, timestamp)
print "[Build/Test Dir: %s]" % build_and_test_root
tpl_logs = glob.glob(pjoin(tpl_build_dir, "output.log.spack.tpl.build.*"))
for tpl_log in tpl_logs:
spec = get_spec_from_tpl_log(tpl_log)
archive_spec_dir = pjoin(archive_dir, spec)
print "[ Spec Dir: %s]" % archive_spec_dir
if not os.path.exists(archive_spec_dir):
os.makedirs(archive_spec_dir)
copy_if_exists(tpl_log, pjoin(archive_spec_dir, "output.log.spack.txt"))
# Note: There should only be one of these per spec
config_spec_logs = glob.glob(pjoin(build_and_test_root, "output.log.*-" + spec + ".configure.txt"))
if len(config_spec_logs) > 0:
copy_if_exists(config_spec_logs[0], pjoin(archive_spec_dir, "output.log.config-build.txt"))
else:
print "[Error: No config-build logs found in Spec Dir.]"
# Find build dir for spec
# Note: only compiler name/version is used in build directory not full spack spec
compiler = get_compiler_from_spec(spec)
build_dir_glob = pjoin(build_and_test_root, "build-*-%s" % (compiler))
build_dirs = glob.glob(build_dir_glob)
if len(build_dirs) > 0:
build_dir = build_dirs[0]
print "[ Build Dir: %s]" % build_dir
copy_build_dir_files(build_dir, archive_spec_dir)
else:
print "[Error: No build dirs found in Build/Test root.]"
set_axom_group_and_perms(archive_dir)
def uberenv_create_mirror(prefix, project_file, mirror_path):
"""
Calls uberenv to create a spack mirror.
"""
cmd = "python scripts/uberenv/uberenv.py --create-mirror"
cmd += " --prefix=\"{0}\" --mirror=\"{1}\"".format(prefix, mirror_path)
cmd += " --project-json=\"{0}\" ".format(project_file)
print "[~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~]"
print "[ It is expected for 'spack --create-mirror' to throw warnings. ]"
print "[~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~]"
res = sexe(cmd, echo=True, error_prefix="WARNING:")
print "[~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~]"
print "[ End of expected warnings from 'spack --create-mirror' ]"
print "[~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~]"
set_axom_group_and_perms(mirror_path)
return res
def uberenv_build(prefix, spec, project_file, config_dir, mirror_path):
"""
Calls uberenv to install tpls for a given spec to given prefix.
"""
cmd = "python scripts/uberenv/uberenv.py "
cmd += "--prefix=\"{0}\" --spec=\"{1}\" ".format(prefix, spec)
cmd += "--project-json=\"{0}\" ".format(project_file)
cmd += "--mirror=\"{0}\" ".format(mirror_path)
cmd += "--spack-config-dir=\"{0}\" ".format(config_dir)
spack_tpl_build_log = pjoin(prefix,"output.log.spack.tpl.build.%s.txt" % spec.replace(" ", "_"))
print "[starting tpl install of spec %s]" % spec
print "[log file: %s]" % spack_tpl_build_log
res = sexe(cmd,
echo=True,
output_file = spack_tpl_build_log)
# Move files generated by spack in source directory to TPL install directory
print "[Moving spack generated files to TPL build directory]"
repo_dir = get_repo_dir()
for file in ["spack-build-env.txt", "spack-build-out.txt", "spack-configure-args.txt"]:
src = pjoin(repo_dir, file)
dst = pjoin(prefix, "{0}-{1}".format(spec.replace(" ", "_"),file))
if os.path.exists(src) and not os.path.exists(dst):
shutil.move(src, dst)
if res != 0:
log_failure(prefix,"[ERROR: uberenv/spack build of spec: %s failed]" % spec)
return res
############################################################
# helpers for testing a set of host configs
############################################################
def build_and_test_host_config(test_root,host_config):
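    """Configures, builds, tests, generates docs for, and installs a single host-config; returns 0 on success."""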
host_config_root = get_host_config_root(host_config)
# setup build and install dirs
build_dir = pjoin(test_root,"build-%s" % host_config_root)
install_dir = pjoin(test_root,"install-%s" % host_config_root)
print "[Testing build, test, and install of host config file: %s]" % host_config
print "[ build dir: %s]" % build_dir
print "[ install dir: %s]" % install_dir
# configure
cfg_output_file = pjoin(test_root,"output.log.%s.configure.txt" % host_config_root)
print "[starting configure of %s]" % host_config
print "[log file: %s]" % cfg_output_file
res = sexe("python config-build.py -bp %s -ip %s -hc %s" % (build_dir,install_dir,host_config),
output_file = cfg_output_file,
echo=True)
if res != 0:
print "[ERROR: Configure for host-config: %s failed]\n" % host_config
return res
####
# build, test, and install
####
# build the code
bld_output_file = pjoin(build_dir,"output.log.make.txt")
print "[starting build]"
print "[log file: %s]" % bld_output_file
res = sexe("cd %s && make -j 16 VERBOSE=1 " % build_dir,
output_file = bld_output_file,
echo=True)
if res != 0:
print "[ERROR: Build for host-config: %s failed]\n" % host_config
return res
# test the code
tst_output_file = pjoin(build_dir,"output.log.make.test.txt")
print "[starting unit tests]"
print "[log file: %s]" % tst_output_file
tst_cmd = "cd %s && make CTEST_OUTPUT_ON_FAILURE=1 test ARGS=\"-T Test -VV -j16\"" % build_dir
res = sexe(tst_cmd,
output_file = tst_output_file,
echo=True)
if res != 0:
print "[ERROR: Tests for host-config: %s failed]\n" % host_config
return res
# build the docs
docs_output_file = pjoin(build_dir,"output.log.make.docs.txt")
print "[starting docs generation]"
print "[log file: %s]" % docs_output_file
res = sexe("cd %s && make -j16 docs " % build_dir,
output_file = docs_output_file,
echo=True)
if res != 0:
print "[ERROR: Docs generation for host-config: %s failed]\n\n" % host_config
return res
# install the code
inst_output_file = pjoin(build_dir,"output.log.make.install.txt")
print "[starting install]"
print "[log file: %s]" % inst_output_file
res = sexe("cd %s && make -j16 install " % build_dir,
output_file = inst_output_file,
echo=True)
if res != 0:
print "[ERROR: Install for host-config: %s failed]\n\n" % host_config
return res
# simple sanity check for make install
print "[checking install dir %s]" % install_dir
sexe("ls %s/include" % install_dir, echo=True, error_prefix="WARNING:")
sexe("ls %s/lib" % install_dir, echo=True, error_prefix="WARNING:")
sexe("ls %s/bin" % install_dir, echo=True, error_prefix="WARNING:")
# test the installation using installed cmake examples
# TODO: enable tests for installed examples in device configurations
# TODO: enable tests for installed makefile-based example
is_device_build = "nvcc" in host_config
should_test_installed_cmake_example = not is_device_build
should_test_installed_blt_example = not is_device_build
should_test_installed_make_example = False
if should_test_installed_cmake_example:
install_example_dir = pjoin(install_dir, "examples", "axom", "using-with-cmake")
install_example_output_file = pjoin(build_dir,"output.log.install_example.cmake.txt")
print "[testing installed 'using-with-cmake' example]"
print "[log file: %s]" % install_example_output_file
example_commands = [
"cd {0}".format(install_example_dir),
"rm -rf build",
"mkdir build",
"cd build",
"""echo "[Configuring '{}' example]" """.format("using-with-cmake"),
"cmake -C ../host-config.cmake ..",
"""echo "[Building '{}' example]" """.format("using-with-cmake"),
"make ",
"""echo "[Running '{}' example]" """.format("using-with-cmake"),
"./example",
"""echo "[Done]" """
]
res = sexe(" && ".join(example_commands),
output_file = install_example_output_file,
echo=True)
if res != 0:
print "[ERROR: Installed 'using-with-cmake' example for host-config: %s failed]\n\n" % host_config
return res
if should_test_installed_blt_example:
install_example_dir = pjoin(install_dir, "examples", "axom", "using-with-blt")
install_example_output_file = pjoin(build_dir,"output.log.install_example.blt.txt")
print "[testing installed 'using-with-blt' example]"
print "[log file: %s]" % install_example_output_file
example_commands = [
"cd {0}".format(install_example_dir),
"rm -rf build",
"mkdir build",
"cd build",
"""echo "[Configuring '{}' example]" """.format("using-with-blt"),
"cmake -C ../host-config.cmake ..",
"""echo "[Building '{}' example]" """.format("using-with-blt"),
"make ",
"""echo "[Running '{}' example]" """.format("using-with-blt"),
"./bin/example",
"""echo "[Done]" """
]
res = sexe(" && ".join(example_commands),
output_file = install_example_output_file,
echo=True)
if res != 0:
print "[ERROR: Installed 'using-with-blt' example for host-config: %s failed]\n\n" % host_config
return res
print "[SUCCESS: Build, test, and install for host-config: %s complete]\n" % host_config
set_axom_group_and_perms(build_dir)
set_axom_group_and_perms(install_dir)
return 0
def build_and_test_host_configs(prefix, job_name, timestamp, use_generated_host_configs):
host_configs = get_host_configs_for_current_machine(prefix, use_generated_host_configs)
if len(host_configs) == 0:
log_failure(prefix,"[ERROR: No host configs found at %s]" % prefix)
return 1
print "Found Host-configs:"
for host_config in host_configs:
print " " + host_config
print "\n"
test_root = get_build_and_test_root(prefix, timestamp)
os.mkdir(test_root)
write_build_info(pjoin(test_root,"info.json"), job_name)
ok = []
bad = []
for host_config in host_configs:
build_dir = get_build_dir(test_root, host_config)
start_time = time.time()
if build_and_test_host_config(test_root,host_config) == 0:
ok.append(host_config)
log_success(build_dir, job_name, timestamp)
else:
bad.append(host_config)
log_failure(build_dir, job_name, timestamp)
end_time = time.time()
print "[build time: {0}]\n".format(convertSecondsToReadableTime(end_time - start_time))
# Log overall job success/failure
if len(bad) != 0:
log_failure(test_root, job_name, timestamp)
else:
log_success(test_root, job_name, timestamp)
# Output summary of failure/succesful builds
if len(ok) > 0:
print "Succeeded:"
for host_config in ok:
print " " + host_config
if len(bad) > 0:
print "Failed:"
for host_config in bad:
print " " + host_config
print "\n"
return 1
print "\n"
return 0
def set_axom_group_and_perms(directory):
"""
Sets the proper group and access permissions of given input
directory.
"""
print "[changing group and access perms of: %s]" % directory
# change group to axomdev
print "[changing group to axomdev]"
sexe("chgrp -f -R axomdev %s" % (directory),echo=True,error_prefix="WARNING:")
# change group perms to rwX
print "[changing perms for axomdev members to rwX]"
sexe("chmod -f -R g+rwX %s" % (directory),echo=True,error_prefix="WARNING:")
# change perms for all to rX
print "[changing perms for all users to rX]"
sexe("chmod -f -R a+rX %s" % (directory),echo=True,error_prefix="WARNING:")
print "[done setting perms for: %s]" % directory
return 0
def full_build_and_test_of_tpls(builds_dir, job_name, timestamp):
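    """Builds TPLs with uberenv for every spec on this machine, then builds and tests axom against the results."""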
project_file = "scripts/uberenv/project.json"
config_dir = "scripts/uberenv/spack_configs/{0}".format(get_system_type())
specs = get_specs_for_current_machine()
print "[Building and testing tpls for specs: "
for spec in specs:
print "{0}".format(spec)
print "]\n"
# Use shared network mirror location otherwise create local one
mirror_dir = get_shared_mirror_dir()
if not os.path.exists(mirror_dir):
mirror_dir = pjoin(builds_dir,"mirror")
print "[using mirror location: %s]" % mirror_dir
# unique install location
prefix = pjoin(builds_dir, get_system_type())
if not os.path.exists(prefix):
os.mkdir(prefix)
prefix = pjoin(prefix, timestamp)
# create a mirror
uberenv_create_mirror(prefix, project_file, mirror_dir)
# write info about this build
write_build_info(pjoin(prefix, "info.json"), job_name)
repo_dir = get_repo_dir()
# Clean previously generated host-configs into TPL install directory
print "[Cleaning previously generated host-configs if they exist]"
host_configs = get_host_configs_for_current_machine(repo_dir, True)
for host_config in host_configs:
os.remove(host_config)
# use uberenv to install for all specs
tpl_build_failed = False
for spec in specs:
start_time = time.time()
fullspec = "{0}".format(spec)
res = uberenv_build(prefix, fullspec, project_file, config_dir, mirror_dir)
end_time = time.time()
print "[build time: {0}]".format(convertSecondsToReadableTime(end_time - start_time))
if res != 0:
print "[ERROR: Failed build of tpls for spec %s]\n" % spec
tpl_build_failed = True
break
else:
print "[SUCCESS: Finished build tpls for spec %s]\n" % spec
# Copy generated host-configs into TPL install directory
print "[Copying spack generated host-configs to TPL build directory]"
host_configs = get_host_configs_for_current_machine(repo_dir, True)
for host_config in host_configs:
dst = pjoin(prefix, os.path.basename(host_config))
if os.path.exists(host_config) and not os.path.exists(dst):
shutil.copy2(host_config, dst)
if not tpl_build_failed:
# build the axom against the new tpls
res = build_and_test_host_configs(prefix, job_name, timestamp, True)
if res != 0:
print "[ERROR: build and test of axom vs tpls test failed.]\n"
else:
print "[SUCCESS: build and test of axom vs tpls test passed.]\n"
# set proper perms for installed tpls
set_axom_group_and_perms(prefix)
# set proper perms for the mirror files
set_axom_group_and_perms(mirror_dir)
return res
def build_devtools(builds_dir, job_name, timestamp):
sys_type = get_system_type()
config_dir = "scripts/uberenv/spack_configs/{0}/devtools".format(sys_type)
project_file = "scripts/uberenv/devtools.json"
if "toss_3" in sys_type:
compiler_spec = "%[email protected]"
compiler_dir = "gcc-8.1.0"
elif "blueos" in sys_type:
compiler_spec = "%[email protected]"
compiler_dir = "gcc-8.3.1"
print "[Building devtools using compiler spec: {0}]".format(compiler_spec)
# unique install location
prefix = pjoin(builds_dir, sys_type)
if not os.path.exists(prefix):
os.mkdir(prefix)
prefix = pjoin(prefix, timestamp)
if not os.path.exists(prefix):
os.makedirs(prefix)
# Use shared mirror
mirror_dir = get_shared_mirror_dir()
print "[Using mirror location: {0}]".format(mirror_dir)
uberenv_create_mirror(prefix, project_file, mirror_dir)
# write info about this build
write_build_info(pjoin(prefix,"info.json"), job_name)
# use uberenv to install devtools
start_time = time.time()
res = uberenv_build(prefix, compiler_spec, project_file, config_dir, mirror_dir)
end_time = time.time()
print "[Build time: {0}]".format(convertSecondsToReadableTime(end_time - start_time))
if res != 0:
print "[ERROR: Failed build of devtools for spec %s]\n" % compiler_spec
else:
# Only update the latest symlink if successful
link_path = pjoin(builds_dir, sys_type)
link_path = pjoin(link_path, "latest")
install_dir = pjoin(prefix, compiler_dir)
print "[Creating symlink to latest devtools build:\n{0}\n->\n{1}]".format(link_path, install_dir)
if os.path.exists(link_path) or os.path.islink(link_path):
if not os.path.islink(link_path):
print "[ERROR: Latest devtools link path exists and is not a link: {0}".format(link_path)
return 1
os.unlink(link_path)
os.symlink(install_dir, link_path)
print "[SUCCESS: Finished build devtools for spec %s]\n" % compiler_spec
# set proper perms for installed devtools
set_axom_group_and_perms(prefix)
return res
def get_host_configs_for_current_machine(src_dir, use_generated_host_configs):
host_configs = []
# Generated host-configs will be at the base of the source repository
host_configs_dir = src_dir
if not use_generated_host_configs:
host_configs_dir = pjoin(src_dir, "host-configs")
hostname_base = get_machine_name()
host_configs = glob.glob(pjoin(host_configs_dir, hostname_base + "*.cmake"))
return host_configs
def get_host_config_root(host_config):
return os.path.splitext(os.path.basename(host_config))[0]
def get_build_dir(prefix, host_config):
host_config_root = get_host_config_root(host_config)
return pjoin(prefix, "build-" + host_config_root)
def get_repo_dir():
script_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.abspath(pjoin(script_dir, "../.."))
def get_build_and_test_root(prefix, timestamp):
return pjoin(prefix,"_axom_build_and_test_%s" % timestamp)
def get_machine_name():
return socket.gethostname().rstrip('1234567890')
def get_system_type():
return os.environ["SYS_TYPE"]
def get_platform():
return get_system_type() if "SYS_TYPE" in os.environ else get_machine_name()
def get_username():
return getpass.getuser()
def get_archive_base_dir():
return "/usr/WS2/axomdev/archive"
def get_shared_base_dir():
return "/usr/WS1/axom"
def get_shared_mirror_dir():
return pjoin(get_shared_base_dir(), "mirror")
def get_shared_libs_dir():
return pjoin(get_shared_base_dir(), "libs")
def get_shared_devtool_dir():
return pjoin(get_shared_base_dir(), "devtools")
def get_specs_for_current_machine():
repo_dir = get_repo_dir()
specs_json_path = pjoin(repo_dir, "scripts/uberenv/specs.json")
with open(specs_json_path, 'r') as f:
specs_json = json.load(f)
sys_type = get_system_type()
machine_name = get_machine_name()
specs = []
if machine_name in specs_json.keys():
specs = specs_json[machine_name]
else:
specs = specs_json[sys_type]
specs = ['%' + spec for spec in specs]
return specs
def get_spec_from_build_dir(build_dir):
base = "build-%s-%s-" % (get_machine_name(), get_system_type())
return os.path.basename(build_dir)[len(base):]
def get_spec_from_tpl_log(tpl_log):
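    """Extracts the compiler spec from a TPL build log filename, dropping any dependency (^) qualifiers."""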
basename = os.path.basename(tpl_log)
basename = basename[len("output.log.spack.tpl.build.%"):-4]
# Remove anything that isn't part of the compiler spec
index = basename.find("^")
if index > -1:
basename = basename[:index-1]
return basename
def on_rz():
machine_name = get_machine_name()
if machine_name.startswith("rz"):
return True
return False
def get_compiler_from_spec(spec):
compiler = spec
for c in ['~', '+']:
index = compiler.find(c)
if index != -1:
compiler = compiler[:index]
return compiler
def convertSecondsToReadableTime(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
return "%d:%02d:%02d" % (h, m, s)
| []
| []
| [
"USER",
"SYS_TYPE"
]
| [] | ["USER", "SYS_TYPE"] | python | 2 | 0 | |
beater/java_attacher/java_attacher_test.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package javaattacher
import (
"context"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/elastic/apm-server/beater/config"
)
func TestNew(t *testing.T) {
cfg := config.JavaAttacherConfig{JavaBin: ""}
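	// With JavaBin left empty, New is expected to fall back to $JAVA_HOME/bin/java.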
jh := os.Getenv("JAVA_HOME")
os.Setenv("JAVA_HOME", "/usr/local")
f, err := os.Create(javaAttacher)
require.NoError(t, err)
defer func() {
// reset JAVA_HOME
os.Setenv("JAVA_HOME", jh)
os.Remove(f.Name())
}()
attacher, err := New(cfg)
require.NoError(t, err)
javapath := filepath.FromSlash("/usr/local/bin/java")
assert.Equal(t, javapath, attacher.cfg.JavaBin)
cfg.JavaBin = "/home/user/bin/java"
attacher, err = New(cfg)
require.NoError(t, err)
javapath = filepath.FromSlash("/home/user/bin/java")
assert.Equal(t, javapath, attacher.cfg.JavaBin)
}
func TestBuild(t *testing.T) {
args := []map[string]string{
{"exclude-user": "root"},
{"include-main": "MyApplication"},
{"include-main": "my-application.jar"},
{"include-vmarg": "elastic.apm.agent.attach=true"},
}
cfg := config.JavaAttacherConfig{
Enabled: true,
DiscoveryRules: args,
Config: map[string]string{
"server_url": "http://localhost:8200",
},
JavaBin: "/usr/bin/java",
DownloadAgentVersion: "1.25.0",
}
f, err := os.Create(javaAttacher)
require.NoError(t, err)
defer os.Remove(f.Name())
attacher, err := New(cfg)
require.NoError(t, err)
cmd := attacher.build(context.Background())
want := filepath.FromSlash("/usr/bin/java -jar ./java-attacher.jar") +
" --continuous --log-level debug --download-agent-version 1.25.0 --exclude-user root --include-main MyApplication " +
"--include-main my-application.jar --include-vmarg elastic.apm.agent.attach=true " +
"--config server_url=http://localhost:8200"
cmdArgs := strings.Join(cmd.Args, " ")
assert.Equal(t, want, cmdArgs)
cfg.Config["service_name"] = "my-cool-service"
attacher, err = New(cfg)
require.NoError(t, err)
cmd = attacher.build(context.Background())
cmdArgs = strings.Join(cmd.Args, " ")
assert.Contains(t, cmdArgs, "--config server_url=http://localhost:8200")
assert.Contains(t, cmdArgs, "--config service_name=my-cool-service")
}
| [
"\"JAVA_HOME\""
]
| []
| [
"JAVA_HOME"
]
| [] | ["JAVA_HOME"] | go | 1 | 0 | |
examples/discoveryv1/discovery_v1.go | package main
import (
"fmt"
"os"
"github.com/IBM/go-sdk-core/core"
discovery "github.com/watson-developer-cloud/go-sdk/discoveryv1"
)
func main() {
// Instantiate the Watson Discovery service
authenticator := &core.IamAuthenticator{
ApiKey: os.Getenv("YOUR IAM API KEY"),
}
service, serviceErr := discovery.NewDiscoveryV1(&discovery.DiscoveryV1Options{
URL: "YOUR SERVICE URL",
Version: "2018-03-05",
Authenticator: authenticator,
})
// Check successful instantiation
if serviceErr != nil {
fmt.Println(serviceErr)
return
}
/* LIST ENVIRONMENTS */
// Create a new ListEnvironmentsOptions and set optional parameter Name
listEnvironmentsOptions := service.NewListEnvironmentsOptions()
// Call the discovery ListEnvironments method
listEnvironmentResult, response, responseErr := service.ListEnvironments(listEnvironmentsOptions)
// Check successful call
if responseErr != nil {
fmt.Println(responseErr)
return
}
fmt.Println(response)
// Check successful casting
if listEnvironmentResult != nil {
fmt.Println(listEnvironmentResult.Environments[0])
}
/* ADD DOCUMENT */
environmentID := "<YOUR ENVIRONEMNT ID>"
collectionID := "<YOUR COLLECTION ID>"
pwd, _ := os.Getwd()
file, fileErr := os.Open(pwd + "/../../resources/example.html")
if fileErr != nil {
panic(fileErr)
}
addDocumentOptions := service.NewAddDocumentOptions(environmentID,
collectionID).
SetFile(file).
SetMetadata("{\"Creator\": \"Johnny Appleseed\", \"Subject\": \"Apples\" }")
_, response, responseErr = service.AddDocument(addDocumentOptions)
if responseErr != nil {
panic(responseErr)
}
defer file.Close()
core.PrettyPrint(response.GetResult(), "Add document: ")
/* QUERY */
queryOptions := service.NewQueryOptions(environmentID, collectionID).
SetFilter("extracted_metadata.sha1::9181d244*").
SetReturn("extracted_metadata.sha1")
_, response, responseErr = service.Query(queryOptions)
if responseErr != nil {
panic(responseErr)
}
fmt.Println(response)
}
| [
"\"YOUR IAM API KEY\""
]
| []
| [
"YOUR IAM API KEY"
]
| [] | ["YOUR IAM API KEY"] | go | 1 | 0 | |
core/mode/numeric_mode_test.go | package mode
import (
"fmt"
"image"
"image/color"
"image/draw"
"image/png"
"os"
"reflect"
"strconv"
"testing"
)
var home = os.Getenv("HOME")
var desktop = home + "/Desktop/gqrcode/"
func TestImageToImg(t *testing.T) {
	img1 := image.NewRGBA(image.Rect(0, 0, 10, 10))
	for i := 0; i < 10; i++ {
		for j := 0; j < 10; j++ {
			img1.Set(i, j, image.Black)
		}
	}
	file1, _ := os.Create(desktop + "img.png")
	defer file1.Close()
	png.Encode(file1, img1)
	img2 := image.NewRGBA(image.Rect(0, 0, 100, 100))
	draw.Draw(img2, img2.Bounds(), &image.Uniform{C: image.White}, image.ZP, draw.Src)
	// (0,0)-(10,10) blue
	draw.Draw(img2,
		image.Rectangle{Min: image.ZP, Max: image.Point{X: 10, Y: 10}},
		&image.Uniform{C: color.RGBA{R: 0, G: 0, B: 255, A: 255}}, image.ZP, draw.Src)
	// (10,10)-(20,20) black
	r := image.Rectangle{Min: image.Point{X: 10, Y: 10}, Max: image.Point{X: 20, Y: 20}}
	draw.Draw(img2, r, img1, image.Pt(0, 0), draw.Src)
	file2, _ := os.Create(desktop + "img2.png")
	defer file2.Close()
	png.Encode(file2, img2)
}
// See qrcode_test.go
func TestNumericNewQRCode(t *testing.T) {
}
func TestBoolDefaultVal(t *testing.T) {
	var def bool
	init := false
	fmt.Println("default:" + strconv.FormatBool(reflect.ValueOf(def).IsValid()))
	fmt.Printf("default:%v\n", reflect.ValueOf(def).Kind())
	fmt.Printf("default:%v\n", reflect.ValueOf(def).IsZero())
	fmt.Println("init:" + strconv.FormatBool(reflect.ValueOf(init).IsValid()))
	fmt.Printf("init:%v\n", reflect.ValueOf(init).Kind())
	fmt.Printf("init:%v\n", reflect.ValueOf(init).IsZero())
} | [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
cmd/services.go | package cmd
import (
"fmt"
"os"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/spf13/cobra"
)
func servicesCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "services",
Aliases: []string{"svc"},
Short: "commands to generate systemd unit files for local use",
}
cmd.AddCommand(
gaiaServiceCmd(),
faucetService(),
rlyService(),
)
return cmd
}
func gaiaServiceCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "gaia [user] [home]",
Short: "gaia returns a sample gaiad service file",
Args: cobra.ExactArgs(2),
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf(`[Unit]
Description=gaiad
After=network.target
[Service]
Type=simple
User=%s
WorkingDirectory=%s
ExecStart=%s/go/bin/gaiad start --pruning=nothing
Restart=on-failure
RestartSec=3
LimitNOFILE=4096
[Install]
WantedBy=multi-user.target
`, args[0], args[1], args[1])
},
}
return cmd
}
func faucetService() *cobra.Command {
cmd := &cobra.Command{
Use: "faucet [user] [home] [chain-id] [key-name] [amount]",
Short: "faucet returns a sample faucet service file",
Args: cobra.ExactArgs(5),
RunE: func(cmd *cobra.Command, args []string) error {
chain, err := config.Chains.Get(args[2])
if err != nil {
return err
}
_, err = chain.Keybase.Get(args[3])
if err != nil {
return err
}
_, err = sdk.ParseCoin(args[4])
if err != nil {
return err
}
fmt.Printf(`[Unit]
Description=faucet
After=network.target
[Service]
Type=simple
User=%s
WorkingDirectory=%s
ExecStart=%s/go/bin/rly testnets faucet %s %s %s
Restart=on-failure
RestartSec=3
LimitNOFILE=4096
[Install]
WantedBy=multi-user.target
`, args[0], args[1], args[1], args[2], args[3], args[4])
return nil
},
}
return cmd
}
func rlyService() *cobra.Command {
cmd := &cobra.Command{
Use: "relayer [path-name]",
Aliases: []string{"rly"},
Short: "relayer returns a service file for the relayer to relay over an individual path",
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
user, home := os.Getenv("USER"), os.Getenv("HOME")
if user == "" || home == "" {
return fmt.Errorf("$USER(%s) or $HOME(%s) not set", user, home)
}
// ensure that path is configured
path, err := config.Paths.Get(args[0])
if err != nil {
return err
}
// ensure that chains are configured
src, dst := path.Src.ChainID, path.Dst.ChainID
chains, err := config.Chains.Gets(src, dst)
if err != nil {
return err
}
// set paths on chains
if err = chains[src].SetPath(path.Src); err != nil {
return err
}
if err = chains[dst].SetPath(path.Dst); err != nil {
return err
}
// ensure that keys exist
if _, err = chains[src].GetAddress(); err != nil {
return err
}
if _, err = chains[src].GetAddress(); err != nil {
return err
}
// ensure that balances aren't == nil
var srcBal, dstBal sdk.Coins
if srcBal, err = chains[src].QueryBalance(chains[src].Key); err != nil {
return err
} else if srcBal.AmountOf(chains[src].DefaultDenom).IsZero() {
return fmt.Errorf("no balance on %s, ensure %s has a balance before continuing setup", src, chains[src].MustGetAddress())
}
if dstBal, err = chains[dst].QueryBalance(chains[dst].Key); err != nil {
return err
} else if dstBal.AmountOf(chains[dst].DefaultDenom).IsZero() {
return fmt.Errorf("no balance on %s, ensure %s has a balance before continuing setup", dst, chains[dst].MustGetAddress())
}
// ensure lite clients are initialized
if _, err = chains[src].GetLatestLiteHeight(); err != nil {
return fmt.Errorf("no lite client on %s, ensure it is initalized before continuing: %w", src, err)
}
if _, err = chains[dst].GetLatestLiteHeight(); err != nil {
return fmt.Errorf("no lite client on %s, ensure it is initalized before continuing: %w", dst, err)
}
fmt.Printf(`[Unit]
Description=%s
After=network.target
[Service]
Type=simple
User=%s
WorkingDirectory=%s
ExecStart=%s/go/bin/rly start %s %s %s -d
Restart=on-failure
RestartSec=3
LimitNOFILE=4096
[Install]
WantedBy=multi-user.target
`, args[0], user, home, home, src, dst, args[0])
return nil
},
}
return cmd
}
| [
"\"USER\"",
"\"HOME\""
]
| []
| [
"USER",
"HOME"
]
| [] | ["USER", "HOME"] | go | 2 | 0 | |
cmd/kots/cli/install.go | package cli
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"time"
cursor "github.com/ahmetalpbalkan/go-cursor"
"github.com/manifoldco/promptui"
"github.com/pkg/errors"
kotsv1beta1 "github.com/replicatedhq/kots/kotskinds/apis/kots/v1beta1"
"github.com/replicatedhq/kots/pkg/auth"
"github.com/replicatedhq/kots/pkg/identity"
"github.com/replicatedhq/kots/pkg/k8sutil"
"github.com/replicatedhq/kots/pkg/kotsadm"
"github.com/replicatedhq/kots/pkg/kotsadm/types"
kotsadmtypes "github.com/replicatedhq/kots/pkg/kotsadm/types"
"github.com/replicatedhq/kots/pkg/kotsutil"
"github.com/replicatedhq/kots/pkg/logger"
"github.com/replicatedhq/kots/pkg/metrics"
"github.com/replicatedhq/kots/pkg/pull"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
authorizationv1 "k8s.io/api/authorization/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func InstallCmd() *cobra.Command {
cmd := &cobra.Command{
Use: "install [upstream uri]",
Short: "Install an application to a cluster",
Long: `Pull Kubernetes manifests from the remote upstream, deploy them to the specified cluster, then setup port forwarding to make the kotsadm admin console accessible.`,
SilenceUsage: true,
SilenceErrors: false,
PreRun: func(cmd *cobra.Command, args []string) {
viper.BindPFlags(cmd.Flags())
},
RunE: func(cmd *cobra.Command, args []string) (finalError error) {
v := viper.GetViper()
if len(args) == 0 {
cmd.Help()
os.Exit(1)
}
fmt.Print(cursor.Hide())
defer fmt.Print(cursor.Show())
log := logger.NewCLILogger()
signalChan := make(chan os.Signal, 1)
finalMessage := ""
go func() {
signal.Notify(signalChan, os.Interrupt)
<-signalChan
log.ActionWithoutSpinner("")
log.ActionWithoutSpinner("Cleaning up")
if finalMessage != "" {
log.ActionWithoutSpinner("")
log.ActionWithoutSpinner(finalMessage)
log.ActionWithoutSpinner("")
}
fmt.Print(cursor.Show())
os.Exit(0)
}()
if !v.GetBool("skip-rbac-check") && v.GetBool("ensure-rbac") {
err := CheckRBAC()
if err != nil {
log.Errorf("Current user has insufficient privileges to install Admin Console.\nFor more information, please visit https://kots.io/vendor/packaging/rbac\nTo bypass this check, use the --skip-rbac-check flag")
return errors.New("insufficient privileges")
}
}
license, err := getLicense(v)
if err != nil {
return errors.Wrap(err, "failed to get license")
}
registryConfig, err := getRegistryConfig(v)
if err != nil {
return errors.Wrap(err, "failed to get registry config")
}
isAirgap := false
if v.GetString("airgap-bundle") != "" || v.GetBool("airgap") {
isAirgap = true
}
disableOutboundConnections := registryConfig.OverrideRegistry != "" || isAirgap
m := metrics.InitInstallMetrics(license, disableOutboundConnections)
m.ReportInstallStart()
// only handle reporting install failures in a defer statement.
// install finish is reported at the end of the function since the function might not exist because of port forwarding.
defer func() {
if finalError != nil {
cause := strings.Split(finalError.Error(), ":")[0]
m.ReportInstallFail(cause)
}
}()
upstream := pull.RewriteUpstream(args[0])
namespace := v.GetString("namespace")
if namespace == "" {
enteredNamespace, err := promptForNamespace(upstream)
if err != nil {
return errors.Wrap(err, "failed to prompt for namespace")
}
namespace = enteredNamespace
}
if err := validateNamespace(namespace); err != nil {
return err
}
var applicationMetadata []byte
if airgapBundle := v.GetString("airgap-bundle"); airgapBundle != "" {
applicationMetadata, err = pull.GetAppMetadataFromAirgap(airgapBundle)
if err != nil {
return errors.Wrapf(err, "failed to get metadata from %s", airgapBundle)
}
} else if !v.GetBool("airgap") {
applicationMetadata, err = pull.PullApplicationMetadata(upstream)
if err != nil {
log.Info("Unable to pull application metadata. This can be ignored, but custom branding will not be available in the Admin Console until a license is installed.")
}
}
var configValues *kotsv1beta1.ConfigValues
if filepath := v.GetString("config-values"); filepath != "" {
parsedConfigValues, err := pull.ParseConfigValuesFromFile(ExpandDir(filepath))
if err != nil {
return errors.Wrap(err, "failed to parse config values")
}
configValues = parsedConfigValues
}
// alpha enablement here
// if deploy minio is set and there's no storage base uri, set it
// this is likely not going to be the final state of how this is configured
if v.GetBool("with-dockerdistribution") {
if v.GetString("storage-base-uri") == "" {
v.Set("storage-base-uri", "docker://kotsadm-storage-registry:5000")
v.Set("storage-base-uri-plainhttp", true)
}
}
isKurl, err := kotsadm.IsKurl()
if err != nil {
return errors.Wrap(err, "failed to check kURL")
}
sharedPassword := v.GetString("shared-password")
ingressConfig, err := getIngressConfig(v)
if err != nil {
return errors.Wrap(err, "failed to get ingress spec")
}
identityConfig, err := getIdentityConfig(v)
if err != nil {
return errors.Wrap(err, "failed to get identity spec")
}
if identityConfig.Spec.Enabled {
if err := identity.ValidateConfig(cmd.Context(), namespace, *identityConfig, *ingressConfig); err != nil {
return errors.Wrap(err, "failed to validate identity config")
}
}
simultaneousUploads, _ := strconv.Atoi(v.GetString("airgap-upload-parallelism"))
deployOptions := kotsadmtypes.DeployOptions{
Namespace: namespace,
Context: v.GetString("context"),
SharedPassword: sharedPassword,
ApplicationMetadata: applicationMetadata,
UpstreamURI: upstream,
License: license,
ConfigValues: configValues,
Airgap: isAirgap,
ProgressWriter: os.Stdout,
StorageBaseURI: v.GetString("storage-base-uri"),
StorageBaseURIPlainHTTP: v.GetBool("storage-base-uri-plainhttp"),
IncludeDockerDistribution: v.GetBool("with-dockerdistribution"),
Timeout: time.Minute * 2,
HTTPProxyEnvValue: v.GetString("http-proxy"),
HTTPSProxyEnvValue: v.GetString("https-proxy"),
NoProxyEnvValue: v.GetString("no-proxy"),
SkipPreflights: v.GetBool("skip-preflights"),
EnsureRBAC: v.GetBool("ensure-rbac"),
InstallID: m.InstallID,
SimultaneousUploads: simultaneousUploads,
DisableImagePush: v.GetBool("disable-image-push"),
AirgapBundle: v.GetString("airgap-bundle"),
IncludeMinio: v.GetBool("with-minio"),
KotsadmOptions: *registryConfig,
IdentityConfig: *identityConfig,
IngressConfig: *ingressConfig,
}
clientset, err := k8sutil.GetClientset()
if err != nil {
return errors.Wrap(err, "failed to get clientset")
}
deployOptions.IsOpenShift = k8sutil.IsOpenShift(clientset)
timeout, err := time.ParseDuration(v.GetString("wait-duration"))
if err != nil {
return errors.Wrap(err, "failed to parse timeout value")
}
deployOptions.Timeout = timeout
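			// Optionally inherit proxy settings from the local environment, preferring the uppercase variants.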
if v.GetBool("copy-proxy-env") {
deployOptions.HTTPProxyEnvValue = os.Getenv("HTTP_PROXY")
if deployOptions.HTTPProxyEnvValue == "" {
deployOptions.HTTPProxyEnvValue = os.Getenv("http_proxy")
}
deployOptions.HTTPSProxyEnvValue = os.Getenv("HTTPS_PROXY")
if deployOptions.HTTPSProxyEnvValue == "" {
deployOptions.HTTPSProxyEnvValue = os.Getenv("https_proxy")
}
deployOptions.NoProxyEnvValue = os.Getenv("NO_PROXY")
if deployOptions.NoProxyEnvValue == "" {
deployOptions.NoProxyEnvValue = os.Getenv("no_proxy")
}
}
if isKurl && deployOptions.Namespace == metav1.NamespaceDefault {
deployOptions.ExcludeAdminConsole = true
deployOptions.EnsureKotsadmConfig = true
}
if airgapArchive := v.GetString("airgap-bundle"); airgapArchive != "" {
if deployOptions.License == nil {
return errors.New("license is required when airgap bundle is specified")
}
log.ActionWithoutSpinner("Extracting airgap bundle")
airgapRootDir, err := ioutil.TempDir("", "kotsadm-airgap")
if err != nil {
return errors.Wrap(err, "failed to create temp dir")
}
defer os.RemoveAll(airgapRootDir)
err = kotsadm.ExtractAppAirgapArchive(airgapArchive, airgapRootDir, v.GetBool("disable-image-push"), deployOptions.ProgressWriter)
if err != nil {
return errors.Wrap(err, "failed to extract images")
}
deployOptions.AirgapRootDir = airgapRootDir
}
log.ActionWithoutSpinner("Deploying Admin Console")
if err := kotsadm.Deploy(deployOptions); err != nil {
if _, ok := errors.Cause(err).(*types.ErrorTimeout); ok {
return errors.Errorf("Failed to deploy: %s. Use the --wait-duration flag to increase timeout.", err)
}
return errors.Wrap(err, "failed to deploy")
}
if deployOptions.ExcludeAdminConsole && sharedPassword != "" {
if err := setKotsadmPassword(sharedPassword, namespace); err != nil {
return errors.Wrap(err, "failed to set new password")
}
}
// port forward
podName, err := k8sutil.WaitForKotsadm(clientset, namespace, timeout)
if err != nil {
if _, ok := errors.Cause(err).(*types.ErrorTimeout); ok {
return errors.Errorf("kotsadm failed to start: %s. Use the --wait-duration flag to increase timeout.", err)
}
return errors.Wrap(err, "failed to wait for web")
}
stopCh := make(chan struct{})
defer close(stopCh)
adminConsolePort, errChan, err := k8sutil.PortForward(8800, 3000, namespace, podName, true, stopCh, log)
if err != nil {
return errors.Wrap(err, "failed to forward port")
}
if deployOptions.AirgapRootDir != "" {
log.ActionWithoutSpinner("Uploading app archive")
var tryAgain bool
var err error
apiEndpoint := fmt.Sprintf("http://localhost:%d/api/v1", adminConsolePort)
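// retry the upload up to 5 times, sleeping 10s between attempts; a retry is
// only attempted when the server responded with a non-200 status (tryAgain).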
for i := 0; i < 5; i++ {
tryAgain, err = uploadAirgapArchive(deployOptions, clientset, apiEndpoint, filepath.Join(deployOptions.AirgapRootDir, "app.tar.gz"))
if err == nil {
break
}
if tryAgain {
time.Sleep(10 * time.Second)
log.ActionWithoutSpinner("Retrying upload...")
continue
}
if err != nil {
return errors.Wrap(err, "failed to upload app.tar.gz")
}
}
if tryAgain {
return errors.Wrap(err, "giving up uploading app.tar.gz")
}
// remove here in case CLI is killed and defer doesn't run
_ = os.RemoveAll(deployOptions.AirgapRootDir)
}
go func() {
select {
case err := <-errChan:
if err != nil {
log.Error(err)
os.Exit(-1)
}
case <-stopCh:
}
}()
m.ReportInstallFinish()
if v.GetBool("port-forward") && !deployOptions.ExcludeAdminConsole {
log.ActionWithoutSpinner("")
if adminConsolePort != 8800 {
log.ActionWithoutSpinner("Port 8800 is not available. The Admin Console is running on port %d", adminConsolePort)
log.ActionWithoutSpinner("")
}
log.ActionWithoutSpinner("Press Ctrl+C to exit")
log.ActionWithoutSpinner("Go to http://localhost:%d to access the Admin Console", adminConsolePort)
log.ActionWithoutSpinner("")
finalMessage = fmt.Sprintf("To access the Admin Console again, run kubectl kots admin-console --namespace %s", namespace)
// pause indefinitely and let Ctrl+C handle termination
<-make(chan struct{})
} else if !deployOptions.ExcludeAdminConsole {
log.ActionWithoutSpinner("")
log.ActionWithoutSpinner("To access the Admin Console, run kubectl kots admin-console --namespace %s", namespace)
log.ActionWithoutSpinner("")
} else {
log.ActionWithoutSpinner("")
log.ActionWithoutSpinner("Done")
log.ActionWithoutSpinner("")
}
return nil
},
}
cmd.Flags().String("shared-password", "", "shared password to apply")
cmd.Flags().String("name", "", "name of the application to use in the Admin Console")
cmd.Flags().String("local-path", "", "specify a local-path to test the behavior of rendering a replicated app locally (only supported on replicated app types currently)")
cmd.Flags().String("license-file", "", "path to a license file to use when download a replicated app")
cmd.Flags().String("config-values", "", "path to a manifest containing config values (must be apiVersion: kots.io/v1beta1, kind: ConfigValues)")
cmd.Flags().Bool("port-forward", true, "set to false to disable automatic port forward")
cmd.Flags().String("wait-duration", "2m", "timeout out to be used while waiting for individual components to be ready. must be in Go duration format (eg: 10s, 2m)")
cmd.Flags().String("http-proxy", "", "sets HTTP_PROXY environment variable in all KOTS Admin Console components")
cmd.Flags().String("https-proxy", "", "sets HTTPS_PROXY environment variable in all KOTS Admin Console components")
cmd.Flags().String("no-proxy", "", "sets NO_PROXY environment variable in all KOTS Admin Console components")
cmd.Flags().Bool("copy-proxy-env", false, "copy proxy environment variables from current environment into all KOTS Admin Console components")
cmd.Flags().String("airgap-bundle", "", "path to the application airgap bundle where application metadata will be loaded from")
cmd.Flags().Bool("airgap", false, "set to true to run install in airgapped mode. setting --airgap-bundle implies --airgap=true.")
cmd.Flags().Bool("skip-preflights", false, "set to true to skip preflight checks")
cmd.Flags().Bool("disable-image-push", false, "set to true to disable images from being pushed to private registry")
cmd.Flags().String("repo", "", "repo uri to use when installing a helm chart")
cmd.Flags().StringSlice("set", []string{}, "values to pass to helm when running helm template")
registryFlags(cmd.Flags())
// the following group of flags is experimental and can be used to pull and push images during install time
cmd.Flags().Bool("rewrite-images", false, "set to true to force all container images to be rewritten and pushed to a local registry")
cmd.Flags().String("image-namespace", "", "the namespace/org in the docker registry to push images to (required when --rewrite-images is set)")
// set this to http://127.0.0.1:30000/api/v1 in dev environment
cmd.Flags().String("registry-endpoint", "", "the endpoint of the local docker registry to use when pushing images (required when --rewrite-images is set)")
cmd.Flags().MarkHidden("rewrite-images")
cmd.Flags().MarkHidden("image-namespace")
cmd.Flags().MarkHidden("registry-endpoint")
// options for the alpha feature of using a reg instead of s3 for storage
cmd.Flags().String("storage-base-uri", "", "an s3 or oci-registry uri to use for kots persistent storage in the cluster")
cmd.Flags().Bool("with-minio", true, "when set, kots will deploy a local minio instance for storage")
cmd.Flags().Bool("with-dockerdistribution", false, "when set, kots install will deploy a local instance of docker distribution for storage")
cmd.Flags().Bool("storage-base-uri-plainhttp", false, "when set, use plain http (not https) connecting to the local oci storage")
cmd.Flags().MarkHidden("storage-base-uri")
cmd.Flags().MarkHidden("with-dockerdistribution")
cmd.Flags().MarkHidden("storage-base-uri-plainhttp")
cmd.Flags().Bool("ensure-rbac", true, "when set, kots will create the roles and rolebindings necessary to manage applications")
cmd.Flags().MarkHidden("ensure-rbac")
cmd.Flags().String("airgap-upload-parallelism", "", "the number of chunks to upload in parallel when installing or updating in airgap mode")
cmd.Flags().MarkHidden("airgap-upload-parallelism")
cmd.Flags().Bool("enable-identity-service", false, "when set, the KOTS identity service will be enabled")
cmd.Flags().MarkHidden("enable-identity-service")
cmd.Flags().String("identity-config", "", "path to a manifest containing the KOTS identity service configuration (must be apiVersion: kots.io/v1beta1, kind: IdentityConfig)")
cmd.Flags().MarkHidden("identity-config")
cmd.Flags().Bool("enable-ingress", false, "when set, ingress will be enabled for the KOTS Admin Console")
cmd.Flags().MarkHidden("enable-ingress")
cmd.Flags().String("ingress-config", "", "path to a kots.Ingress resource file")
cmd.Flags().MarkHidden("ingress-config")
// option to check if the user has cluster-wide privileges to install the application
cmd.Flags().Bool("skip-rbac-check", false, "set to true to bypass rbac check")
return cmd
}
func promptForNamespace(upstreamURI string) (string, error) {
u, err := url.ParseRequestURI(upstreamURI)
if err != nil {
return "", errors.Wrap(err, "failed to parse uri")
}
templates := &promptui.PromptTemplates{
Prompt: "{{ . | bold }} ",
Valid: "{{ . | green }} ",
Invalid: "{{ . | red }} ",
Success: "{{ . | bold }} ",
}
prompt := promptui.Prompt{
Label: "Enter the namespace to deploy to:",
Templates: templates,
Default: u.Hostname(),
Validate: validateNamespace,
AllowEdit: true,
}
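// re-prompt until a valid namespace is entered; Ctrl+C exits immediately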
for {
result, err := prompt.Run()
if err != nil {
if err == promptui.ErrInterrupt {
os.Exit(-1)
}
continue
}
return result, nil
}
}
func uploadAirgapArchive(deployOptions kotsadmtypes.DeployOptions, clientset *kubernetes.Clientset, apiEndpoint string, filename string) (bool, error) {
body := &bytes.Buffer{}
bodyWriter := multipart.NewWriter(body)
metadataPart, err := bodyWriter.CreateFormField("appSlug")
if err != nil {
return false, errors.Wrap(err, "failed to add metadata")
}
if _, err := io.Copy(metadataPart, bytes.NewReader([]byte(deployOptions.License.Spec.AppSlug))); err != nil {
return false, errors.Wrap(err, "failed to copy metadata")
}
fileWriter, err := bodyWriter.CreateFormFile("appArchive", filepath.Base(filename))
if err != nil {
return false, errors.Wrap(err, "failed to create form from file")
}
fileReader, err := os.Open(filename)
if err != nil {
return false, errors.Wrap(err, "failed to open app archive")
}
defer fileReader.Close()
_, err = io.Copy(fileWriter, fileReader)
if err != nil {
return false, errors.Wrap(err, "failed to copy app archive")
}
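// capture the multipart content type (it includes the boundary) before Close,
// which writes the terminating boundary to the body buffer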
contentType := bodyWriter.FormDataContentType()
bodyWriter.Close()
authSlug, err := auth.GetOrCreateAuthSlug(clientset, deployOptions.Namespace)
if err != nil {
return false, errors.Wrap(err, "failed to get kotsadm auth slug")
}
url := fmt.Sprintf("%s/airgap/install", apiEndpoint)
newRequest, err := http.NewRequest("POST", url, body)
if err != nil {
return false, errors.Wrap(err, "failed to create upload request")
}
newRequest.Header.Add("Authorization", authSlug)
newRequest.Header.Add("Content-Type", contentType)
resp, err := http.DefaultClient.Do(newRequest)
if err != nil {
return false, errors.Wrap(err, "failed to get from kotsadm")
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return true, errors.Errorf("unexpected response status: %v", resp.StatusCode)
}
return false, nil
}
func getIngressConfig(v *viper.Viper) (*kotsv1beta1.IngressConfig, error) {
ingressConfigPath := v.GetString("ingress-config")
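// supplying --ingress-config implies --enable-ingress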
enableIngress := v.GetBool("enable-ingress") || ingressConfigPath != ""
if !enableIngress {
return &kotsv1beta1.IngressConfig{}, nil
}
ingressConfig := kotsv1beta1.IngressConfig{}
if ingressConfigPath != "" {
content, err := ioutil.ReadFile(ingressConfigPath)
if err != nil {
return nil, errors.Wrap(err, "failed to read ingress service config file")
}
s, err := kotsutil.LoadIngressConfigFromContents(content)
if err != nil {
return nil, errors.Wrap(err, "failed to decoce ingress service config")
}
ingressConfig = *s
}
ingressConfig.Spec.Enabled = true
return &ingressConfig, nil
}
func getIdentityConfig(v *viper.Viper) (*kotsv1beta1.IdentityConfig, error) {
identityConfigPath := v.GetString("identity-config")
enableIdentityService := v.GetBool("enable-identity-service") || identityConfigPath != ""
if !enableIdentityService {
return &kotsv1beta1.IdentityConfig{}, nil
}
identityConfig := kotsv1beta1.IdentityConfig{}
if identityConfigPath != "" {
content, err := ioutil.ReadFile(identityConfigPath)
if err != nil {
return nil, errors.Wrap(err, "failed to read identity service config file")
}
s, err := kotsutil.LoadIdentityConfigFromContents(content)
if err != nil {
return nil, errors.Wrap(err, "failed to decoce identity service config")
}
identityConfig = *s
}
identityConfig.Spec.Enabled = true
return &identityConfig, nil
}
func registryFlags(flagset *pflag.FlagSet) {
flagset.String("kotsadm-registry", "", "set to override the registry of kotsadm images. used for airgapped installations.")
flagset.String("registry-username", "", "username to use to authenticate with the application registry. used for airgapped installations.")
flagset.String("registry-password", "", "password to use to authenticate with the application registry. used for airgapped installations.")
// the following group of flags are useful for testing, but we don't want to pollute the help screen with them
flagset.String("kotsadm-tag", "", "set to override the tag of kotsadm. this may create an incompatible deployment because the version of kots and kotsadm are designed to work together")
flagset.String("kotsadm-namespace", "", "set to override the namespace of kotsadm images. used for airgapped installations.")
flagset.MarkHidden("kotsadm-tag")
}
func getRegistryConfig(v *viper.Viper) (*kotsadmtypes.KotsadmOptions, error) {
registryEndpoint := v.GetString("kotsadm-registry")
registryNamespace := v.GetString("kotsadm-namespace")
registryUsername := v.GetString("registry-username")
registryPassword := v.GetString("registry-password")
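// when --kotsadm-namespace is not set, split a registry endpoint of the form
// host/namespace into its host and namespace parts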
if registryNamespace == "" {
parts := strings.Split(registryEndpoint, "/")
if len(parts) > 1 {
registryEndpoint = parts[0]
registryNamespace = strings.Join(parts[1:], "/")
}
}
isKurl, err := kotsadm.IsKurl()
if err != nil {
return nil, errors.Wrap(err, "failed to check kURL")
}
isAirgap := false
if v.GetString("airgap-bundle") != "" || v.GetBool("airgap") {
isAirgap = true
}
if registryEndpoint == "" && isKurl && isAirgap {
license, err := getLicense(v)
if err != nil {
return nil, errors.Wrap(err, "failed to get license")
}
registryEndpoint, registryUsername, registryPassword, err = kotsutil.GetKurlRegistryCreds()
if err != nil {
return nil, errors.Wrap(err, "failed to get kURL registry info")
}
if registryNamespace == "" && license != nil {
registryNamespace = license.Spec.AppSlug
}
if registryNamespace == "" {
return nil, errors.New("--kotsadm-namespace is required")
}
}
return &kotsadmtypes.KotsadmOptions{
OverrideVersion: v.GetString("kotsadm-tag"),
OverrideRegistry: registryEndpoint,
OverrideNamespace: registryNamespace,
Username: registryUsername,
Password: registryPassword,
}, nil
}
func getLicense(v *viper.Viper) (*kotsv1beta1.License, error) {
if v.GetString("license-file") == "" {
return nil, nil
}
license, err := pull.ParseLicenseFromFile(ExpandDir(v.GetString("license-file")))
if err != nil {
return nil, errors.Wrap(err, "failed to parse license file")
}
return license, nil
}
func getHttpProxyEnv(v *viper.Viper) map[string]string {
env := make(map[string]string)
if v.GetBool("copy-proxy-env") {
env["HTTP_PROXY"] = os.Getenv("HTTP_PROXY")
env["http_proxy"] = os.Getenv("http_proxy")
env["HTTPS_PROXY"] = os.Getenv("HTTPS_PROXY")
env["https_proxy"] = os.Getenv("https_proxy")
env["NO_PROXY"] = os.Getenv("NO_PROXY")
env["no_proxy"] = os.Getenv("no_proxy")
return env
}
env["HTTP_PROXY"] = v.GetString("http-proxy")
env["HTTPS_PROXY"] = v.GetString("https-proxy")
env["NO_PROXY"] = v.GetString("no-proxy")
return env
}
func CheckRBAC() error {
clientConfig, err := k8sutil.GetClusterConfig()
if err != nil {
return errors.Wrap(err, "failed to get cluster config")
}
clientset, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return errors.Wrap(err, "failed to create clientset")
}
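// ask the API server whether the current user can perform any verb on any
// resource in any namespace, i.e. effectively holds cluster-admin permissions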
sar := &authorizationv1.SelfSubjectAccessReview{
Spec: authorizationv1.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authorizationv1.ResourceAttributes{
Namespace: "",
Verb: "*",
Group: "*",
Version: "*",
Resource: "*",
Subresource: "",
Name: "",
},
NonResourceAttributes: nil,
},
}
resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), sar, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "failed to run subject review")
}
if !resp.Status.Allowed {
return errors.New("attempting to grant RBAC permissions not currently held")
}
return nil
}
| [
"\"HTTP_PROXY\"",
"\"http_proxy\"",
"\"HTTPS_PROXY\"",
"\"https_proxy\"",
"\"NO_PROXY\"",
"\"no_proxy\"",
"\"HTTP_PROXY\"",
"\"http_proxy\"",
"\"HTTPS_PROXY\"",
"\"https_proxy\"",
"\"NO_PROXY\"",
"\"no_proxy\""
]
| []
| [
"NO_PROXY",
"https_proxy",
"HTTP_PROXY",
"HTTPS_PROXY",
"http_proxy",
"no_proxy"
]
| [] | ["NO_PROXY", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "no_proxy"] | go | 6 | 0 | |
cmd/cloudFoundryCreateSpace_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type cloudFoundryCreateSpaceOptions struct {
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
}
// CloudFoundryCreateSpaceCommand Creates a user defined space in Cloud Foundry
func CloudFoundryCreateSpaceCommand() *cobra.Command {
const STEP_NAME = "cloudFoundryCreateSpace"
metadata := cloudFoundryCreateSpaceMetadata()
var stepConfig cloudFoundryCreateSpaceOptions
var startTime time.Time
var logCollector *log.CollectorHook
var createCloudFoundryCreateSpaceCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Creates a user defined space in Cloud Foundry",
Long: `Creates a cf space in Cloud Foundry
Mandatory:
* Cloud Foundry API endpoint, Organization, name of the Cf space to be created`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
cloudFoundryCreateSpace(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addCloudFoundryCreateSpaceFlags(createCloudFoundryCreateSpaceCmd, &stepConfig)
return createCloudFoundryCreateSpaceCmd
}
func addCloudFoundryCreateSpaceFlags(cmd *cobra.Command, stepConfig *cloudFoundryCreateSpaceOptions) {
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", `https://api.cf.eu10.hana.ondemand.com`, "Cloud Foundry API endpoint")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User or E-Mail for CF")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for Cloud Foundry User")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "Cloud Foundry org")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "The name of the Cloud Foundry Space to be created")
cmd.MarkFlagRequired("cfApiEndpoint")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("cfOrg")
cmd.MarkFlagRequired("cfSpace")
}
// retrieve step metadata
func cloudFoundryCreateSpaceMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "cloudFoundryCreateSpace",
Aliases: []config.Alias{},
Description: "Creates a user defined space in Cloud Foundry",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Secrets: []config.StepSecrets{
{Name: "cfCredentialsId", Description: "Jenkins credentials ID containing user and password to authenticate to the Cloud Foundry API", Type: "jenkins", Aliases: []config.Alias{{Name: "cloudFoundry/credentialsId", Deprecated: false}}},
},
Resources: []config.StepResources{
{Name: "deployDescriptor", Type: "stash"},
},
Parameters: []config.StepParameters{
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
Default: `https://api.cf.eu10.hana.ondemand.com`,
},
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "cfCredentialsId",
Param: "username",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_username"),
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "cfCredentialsId",
Param: "password",
Type: "secret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
Default: os.Getenv("PIPER_password"),
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
Default: os.Getenv("PIPER_cfOrg"),
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
Default: os.Getenv("PIPER_cfSpace"),
},
},
},
Containers: []config.Container{
{Name: "cf", Image: "ppiper/cf-cli:6"},
},
},
}
return theMetaData
}
| [
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\""
]
| []
| [
"PIPER_cfOrg",
"PIPER_password",
"PIPER_username",
"PIPER_cfSpace"
]
| [] | ["PIPER_cfOrg", "PIPER_password", "PIPER_username", "PIPER_cfSpace"] | go | 4 | 0 | |
src/clusto/drivers/locations/racks/basicrack.py |
from clusto.drivers.base import Location, Device, Driver
class BasicRack(Location):
"""
Basic rack driver.
"""
_clusto_type = "rack"
_driver_name = "basicrack"
_properties = {'minu':1,
'maxu':45}
def _ensure_rack_u(self, rackU):
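"""Normalize rackU to a sorted list of unique, adjacent, in-range integers."""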
if not isinstance(rackU, int) and not isinstance(rackU, (list, tuple)):
raise TypeError("a rackU must be an Integer or list/tuple of Integers.")
if isinstance(rackU, list):
for U in rackU:
if not isinstance(U, int):
raise TypeError("a rackU must be an Integer or List of Integers.")
if isinstance(rackU, int):
rackU = [rackU]
else:
rackU = list(rackU)
# do U checks
for U in rackU:
if U > self.maxu:
raise TypeError("the rackU must be less than %d." % self.maxu)
if U < self.minu:
raise TypeError("RackUs may not be negative.")
rackU.sort()
last = rackU[0]
for i in rackU[1:]:
if i == last:
raise TypeError("you can't list the same U twice.")
if (i-1) != (last):
raise TypeError("a device can only occupy multiple Us if they're adjacent.")
last = i
return rackU
def _ensure_compatible_device(self, device):
if not isinstance(device, Device):
raise TypeError("You can only add Devices to a rack. %s is a"
" %s" % (device.name, str(device.__class__)))
def insert(self, device, rackU):
"""Insert a given device into the given rackU."""
self._ensure_compatible_device(device)
rackU = self._ensure_rack_u(rackU)
rau = self.get_rack_and_u(device)
if rau != None:
raise Exception("%s is already in rack %s"
% (device.name, rau['rack'].name))
if hasattr(device, 'rack_units') and (len(rackU) != device.rack_units):
raise TypeError("%s is a %dU device, cannot insert it in %dU"
% (device.name, device.rack_units, len(rackU)))
for U in rackU:
dev = self.get_device_in(U)
if dev:
raise TypeError("%s is already in RU %d" % (dev.name, U))
for U in rackU:
self.add_attr("_contains", device, number=U, subkey='ru')
def get_device_in(self, rackU):
if not isinstance(rackU, int):
raise TypeError("RackU must be a single integer. Got: %s" % str(rackU))
rackU = self._ensure_rack_u(rackU)[0]
owners = self.contents(number=rackU, subkey='ru')
if len(owners) > 1:
raise Exception('Somehow there is more than one thing in ru%d. '
'Only one of these should be in this space in the '
'rack: %s' % (rackU,
','.join([x.name for x in owners])))
if owners:
return owners[0]
return None
@classmethod
def get_rack_and_u(cls, device):
"""
Get the rack and rackU for a given device.
returns a tuple of (rack, u-number)
"""
rack = set(device.parents(clusto_types=[cls]))
if len(rack) > 1:
raise Exception("%s is somehow in more than one rack, this will "
"likely need to be rectified manually. It currently "
"appears to be in racks %s"
% (device.name, str(rack)))
if rack:
rack = rack.pop()
return {'rack':Driver(rack.entity),
'RU':[x.number for x in rack.content_attrs(value=device,
subkey='ru')]}
else:
return None
| []
| []
| []
| [] | [] | python | null | null | null |
agent-c4/src/test/java/com/datastax/oss/cdc/agent/PulsarSingleNodeC4Tests.java | /**
* Copyright DataStax, Inc 2021.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datastax.oss.cdc.agent;
import com.datastax.oss.cdc.AgentTestUtil;
import com.datastax.oss.cdc.PulsarSingleNodeTests;
import com.datastax.testcontainers.cassandra.CassandraContainer;
import lombok.extern.slf4j.Slf4j;
import org.testcontainers.containers.Network;
import org.testcontainers.utility.DockerImageName;
import java.util.Optional;
@Slf4j
public class PulsarSingleNodeC4Tests extends PulsarSingleNodeTests {
public static final DockerImageName CASSANDRA_IMAGE = DockerImageName.parse(
Optional.ofNullable(System.getenv("CASSANDRA_IMAGE"))
.orElse("cassandra:" + System.getProperty("cassandraVersion"))
).asCompatibleSubstituteFor("cassandra");
public PulsarSingleNodeC4Tests() {
super(AgentTestUtil.Version.C4);
}
@Override
public CassandraContainer<?> createCassandraContainer(int nodeIndex, String pulsarServiceUrl, Network testNetwork) {
return CassandraContainer.createCassandraContainerWithAgent(
CASSANDRA_IMAGE, testNetwork, nodeIndex, "c4", pulsarServiceUrl);
}
@Override
public int getSegmentSize() {
return 1024 * 1024;
}
}
| [
"\"CASSANDRA_IMAGE\""
]
| []
| [
"CASSANDRA_IMAGE"
]
| [] | ["CASSANDRA_IMAGE"] | java | 1 | 0 | |
google/gmailsend/send.go | package main
import (
"bufio"
"bytes"
"encoding/base64"
"flag"
"fmt"
"io"
"net/smtp"
"os"
"regexp"
"strings"
"github.com/mattermost/rsc/google"
)
func enc(s string) string {
// TODO: RFC 2047 encoded-word (=?charset?encoding?text?=) encoding for non-ASCII headers
return s
}
type Addr struct {
Name string
Email string
}
func (a Addr) enc() string {
if a.Name == "" {
return "<" + a.Email + ">"
}
if a.Email == "" {
return enc(a.Name) + ":;"
}
return enc(a.Name) + " <" + a.Email + ">"
}
type Addrs []Addr
func (a *Addrs) String() string {
return "[addrlist]"
}
func (a Addrs) has(s string) bool {
for _, aa := range a {
if aa.Email == s {
return true
}
}
return false
}
func (a *Addrs) Set(s string) bool {
s = strings.TrimSpace(s)
if strings.HasSuffix(s, ">") {
j := strings.LastIndex(s, "<")
if j >= 0 {
*a = append(*a, Addr{strings.TrimSpace(s[:j]), s[j+1 : len(s)-1]})
return true
}
}
if strings.Contains(s, " ") {
fmt.Fprintf(os.Stderr, "invalid address: %s", s)
os.Exit(2)
}
*a = append(*a, Addr{"", s})
return true
}
func (a *Addrs) parseLine(s string) {
for _, f := range strings.Split(s, ",") {
f = strings.TrimSpace(f)
if f != "" {
a.Set(f)
}
}
}
func (a Addrs) fixDomain() {
i := strings.Index(acct.Email, "@")
if i < 0 {
return
}
dom := acct.Email[i:]
for i := range a {
if a[i].Email != "" && !strings.Contains(a[i].Email, "@") {
a[i].Email += dom
}
}
}
var from, to, cc, bcc, replyTo Addrs
var inReplyTo, subject string
var appendFile = flag.String("append", "", "file to append to end of body")
var acct google.Account
var acctName = flag.String("a", "", "account to use")
var inputHeader = flag.Bool("i", false, "read additional header lines from stdin")
func holdmode() {
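// emit the 9term-specific escape sequence that enables hold mode
// (plan9port's "9term: hold mode back door"), so input can be composed before sending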
if os.Getenv("TERM") == "9term" {
// forgive me
os.Stdout.WriteString("\x1B];*9term-hold+\x07")
}
}
func match(line, prefix string, arg *string) bool {
if len(line) < len(prefix) || !strings.EqualFold(line[:len(prefix)], prefix) {
return false
}
*arg = strings.TrimSpace(line[len(prefix):])
return true
}
func main() {
flag.StringVar(&inReplyTo, "in-reply-to", "", "In-Reply-To")
flag.StringVar(&subject, "s", "", "Subject")
flag.Var(&from, "from", "From (can repeat)")
flag.Var(&to, "to", "To (can repeat)")
flag.Var(&cc, "cc", "CC (can repeat)")
flag.Var(&bcc, "bcc", "BCC (can repeat)")
flag.Var(&replyTo, "replyTo", "Reply-To (can repeat)")
flag.Parse()
if flag.NArg() != 0 && !*inputHeader {
flag.Usage()
}
var body bytes.Buffer
input := bufio.NewReader(os.Stdin)
if *inputHeader {
holdmode()
Loop:
for {
s, err := input.ReadString('\n')
if err != nil {
if err == io.EOF {
break Loop
}
fmt.Fprintf(os.Stderr, "reading stdin: %s\n", err)
os.Exit(2)
}
var arg string
switch {
default:
if ok, _ := regexp.MatchString(`^\S+:`, s); ok {
fmt.Fprintf(os.Stderr, "unknown header line: %s", s)
os.Exit(2)
}
body.WriteString(s)
break Loop
case match(s, "from:", &arg):
from.parseLine(arg)
case match(s, "to:", &arg):
to.parseLine(arg)
case match(s, "cc:", &arg):
cc.parseLine(arg)
case match(s, "bcc:", &arg):
bcc.parseLine(arg)
case match(s, "reply-to:", &arg):
replyTo.parseLine(arg)
case match(s, "subject:", &arg):
subject = arg
case match(s, "in-reply-to:", &arg):
inReplyTo = arg
}
}
}
acct = google.Acct(*acctName)
from.fixDomain()
to.fixDomain()
cc.fixDomain()
bcc.fixDomain()
replyTo.fixDomain()
smtpTo := append(append(to, cc...), bcc...)
if len(from) == 0 {
// TODO: Much better
name := ""
email := acct.Email
if email == "[email protected]" || email == "[email protected]" {
name = "Russ Cox"
}
if email == "[email protected]" && (smtpTo.has("[email protected]") || smtpTo.has("[email protected]") || smtpTo.has("[email protected]")) {
from = append(from, Addr{name, "[email protected]"})
} else {
from = append(from, Addr{name, email})
}
}
if len(from) > 1 {
fmt.Fprintf(os.Stderr, "missing -from\n")
os.Exit(2)
}
if len(to)+len(cc)+len(bcc) == 0 {
fmt.Fprintf(os.Stderr, "missing destinations\n")
os.Exit(2)
}
if !*inputHeader {
holdmode()
}
_, err := io.Copy(&body, input)
if err != nil {
fmt.Fprintf(os.Stderr, "reading stdin: %s\n", err)
os.Exit(2)
}
if *appendFile != "" {
f, err := os.Open(*appendFile)
if err != nil {
fmt.Fprintf(os.Stderr, "append: %s\n", err)
os.Exit(2)
}
_, err = io.Copy(&body, f)
f.Close()
if err != nil {
fmt.Fprintf(os.Stderr, "append: %s\n", err)
os.Exit(2)
}
}
var msg bytes.Buffer
fmt.Fprintf(&msg, "MIME-Version: 1.0\n")
if len(from) > 0 {
fmt.Fprintf(&msg, "From: ")
for i, a := range from {
if i > 0 {
fmt.Fprintf(&msg, ", ")
}
fmt.Fprintf(&msg, "%s", a.enc())
}
fmt.Fprintf(&msg, "\n")
}
if len(to) > 0 {
fmt.Fprintf(&msg, "To: ")
for i, a := range to {
if i > 0 {
fmt.Fprintf(&msg, ", ")
}
fmt.Fprintf(&msg, "%s", a.enc())
}
fmt.Fprintf(&msg, "\n")
}
if len(cc) > 0 {
fmt.Fprintf(&msg, "CC: ")
for i, a := range cc {
if i > 0 {
fmt.Fprintf(&msg, ", ")
}
fmt.Fprintf(&msg, "%s", a.enc())
}
fmt.Fprintf(&msg, "\n")
}
if len(replyTo) > 0 {
fmt.Fprintf(&msg, "Reply-To: ")
for i, a := range replyTo {
if i > 0 {
fmt.Fprintf(&msg, ", ")
}
fmt.Fprintf(&msg, "%s", a.enc())
}
fmt.Fprintf(&msg, "\n")
}
if inReplyTo != "" {
fmt.Fprintf(&msg, "In-Reply-To: %s\n", inReplyTo)
}
if subject != "" {
fmt.Fprintf(&msg, "Subject: %s\n", enc(subject))
}
fmt.Fprintf(&msg, "Date: xxx\n")
fmt.Fprintf(&msg, "Content-Type: text/plain; charset=\"utf-8\"\n")
fmt.Fprintf(&msg, "Content-Transfer-Encoding: base64\n")
fmt.Fprintf(&msg, "\n")
enc64 := base64.StdEncoding.EncodeToString(body.Bytes())
for len(enc64) > 72 {
fmt.Fprintf(&msg, "%s\n", enc64[:72])
enc64 = enc64[72:]
}
fmt.Fprintf(&msg, "%s\n\n", enc64)
auth := smtp.PlainAuth(
"",
acct.Email,
acct.Password,
"smtp.gmail.com",
)
var smtpToEmail []string
for _, a := range smtpTo {
if a.Email != "" {
smtpToEmail = append(smtpToEmail, a.Email)
}
}
if err := sendMail("smtp.gmail.com:587", auth, from[0].Email, smtpToEmail, msg.Bytes()); err != nil {
fmt.Fprintf(os.Stderr, "sending mail: %s\n", err)
os.Exit(2)
}
}
/*
MIME-Version: 1.0
Subject: commit/plan9port: rsc: 9term: hold mode back door
From: Bitbucket <[email protected]>
To: [email protected]
Date: Tue, 11 Oct 2011 13:34:30 -0000
Message-ID: <[email protected]>
Reply-To: [email protected]
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: quoted-printable
1 new changeset in plan9port:
http://bitbucket.org/rsc/plan9port/changeset/8735d7708a1b/
changeset: 8735d7708a1b
user: rsc
date: 2011-10-11 15:34:25
summary: 9term: hold mode back door
R=3Drsc
http://codereview.appspot.com/5248056
affected #: 2 files (-1 bytes)
Repository URL: https://bitbucket.org/rsc/plan9port/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
*/
func sendMail(addr string, a smtp.Auth, from string, to []string, msg []byte) error {
c, err := smtp.Dial(addr)
if err != nil {
return err
}
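// Note: older Go releases accepted a nil tls.Config here; modern crypto/tls
// requires ServerName or InsecureSkipVerify to be set, so a
// &tls.Config{ServerName: "smtp.gmail.com"} may be needed on current toolchains.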
if err = c.StartTLS(nil); err != nil {
return err
}
if err = c.Auth(a); err != nil {
return err
}
if err = c.Mail(from); err != nil {
return err
}
for _, addr := range to {
if err = c.Rcpt(addr); err != nil {
return err
}
}
w, err := c.Data()
if err != nil {
return err
}
_, err = w.Write(msg)
if err != nil {
return err
}
err = w.Close()
if err != nil {
return err
}
return c.Quit()
}
| [
"\"TERM\""
]
| []
| [
"TERM"
]
| [] | ["TERM"] | go | 1 | 0 | |
pkg/publish/cmd.go | // Copyright Istio Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package publish
import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"github.com/spf13/cobra"
"istio.io/pkg/log"
"istio.io/release-builder/pkg"
"istio.io/release-builder/pkg/model"
"istio.io/release-builder/pkg/util"
)
var (
flags = struct {
release string
dockerhub string
dockertags []string
gcsbucket string
gcsaliases []string
github string
githubtoken string
grafanatoken string
}{}
publishCmd = &cobra.Command{
Use: "publish",
Short: "Publish a release of Istio",
SilenceUsage: true,
Args: cobra.ExactArgs(0),
RunE: func(c *cobra.Command, _ []string) error {
if err := validateFlags(); err != nil {
return fmt.Errorf("invalid flags: %v", err)
}
log.Infof("Publishing Istio release from: %v", flags.release)
manifest, err := pkg.ReadManifest(path.Join(flags.release, "manifest.yaml"))
if err != nil {
return fmt.Errorf("failed to read manifest from release: %v", err)
}
manifest.Directory = path.Join(flags.release)
util.YamlLog("Manifest", manifest)
return Publish(manifest)
},
}
)
func init() {
publishCmd.PersistentFlags().StringVar(&flags.release, "release", flags.release,
"The directory with the Istio release binary.")
publishCmd.PersistentFlags().StringVar(&flags.dockerhub, "dockerhub", flags.dockerhub,
"The docker hub to push images to. Example: docker.io/istio.")
publishCmd.PersistentFlags().StringSliceVar(&flags.dockertags, "dockertags", flags.dockertags,
"The tags to apply to docker images. Example: latest")
publishCmd.PersistentFlags().StringVar(&flags.gcsbucket, "gcsbucket", flags.gcsbucket,
"The gcs bucket to publish binaries to. Example: gs://istio-release.")
publishCmd.PersistentFlags().StringSliceVar(&flags.gcsaliases, "gcsaliases", flags.gcsaliases,
"Alias to publish to gcs. Example: latest")
publishCmd.PersistentFlags().StringVar(&flags.github, "github", flags.github,
"The Github org to trigger a release, and tag, for. Example: istio.")
publishCmd.PersistentFlags().StringVar(&flags.githubtoken, "githubtoken", flags.githubtoken,
"The file containing a github token.")
publishCmd.PersistentFlags().StringVar(&flags.grafanatoken, "grafanatoken", flags.grafanatoken,
"The file containing a grafana.com API token.")
}
func GetPublishCommand() *cobra.Command {
return publishCmd
}
func validateFlags() error {
if flags.release == "" {
return fmt.Errorf("--release required")
}
return nil
}
func Publish(manifest model.Manifest) error {
if flags.dockerhub != "" {
if err := Docker(manifest, flags.dockerhub, flags.dockertags); err != nil {
return fmt.Errorf("failed to publish to docker: %v", err)
}
}
if flags.gcsbucket != "" {
if err := GcsArchive(manifest, flags.gcsbucket, flags.gcsaliases); err != nil {
return fmt.Errorf("failed to publish to gcs: %v", err)
}
}
if flags.github != "" {
token, err := getGithubToken(flags.githubtoken)
if err != nil {
return err
}
if err := Github(manifest, flags.github, token); err != nil {
return fmt.Errorf("failed to publish to github: %v", err)
}
}
if flags.grafanatoken != "" {
token, err := getGrafanaToken(flags.grafanatoken)
if err != nil {
return err
}
if err := Grafana(manifest, token); err != nil {
return fmt.Errorf("failed to publish to github: %v", err)
}
}
return nil
}
func getGrafanaToken(file string) (string, error) {
if file != "" {
b, err := ioutil.ReadFile(file)
if err != nil {
return "", fmt.Errorf("failed to read grafana token: %v", file)
}
return strings.TrimSpace(string(b)), nil
}
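// fall back to the GRAFANA_TOKEN environment variable when no token file is given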
return os.Getenv("GRAFANA_TOKEN"), nil
}
func getGithubToken(file string) (string, error) {
if file != "" {
b, err := ioutil.ReadFile(file)
if err != nil {
return "", fmt.Errorf("failed to read github token: %v", file)
}
return strings.TrimSpace(string(b)), nil
}
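// fall back to the GITHUB_TOKEN environment variable when no token file is given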
return os.Getenv("GITHUB_TOKEN"), nil
}
| [
"\"GRAFANA_TOKEN\"",
"\"GITHUB_TOKEN\""
]
| []
| [
"GRAFANA_TOKEN",
"GITHUB_TOKEN"
]
| [] | ["GRAFANA_TOKEN", "GITHUB_TOKEN"] | go | 2 | 0 | |
tensorflow/contrib/learn/python/learn/experiment_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TaskRunner and Experiment class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import tempfile
import threading
import tensorflow as tf
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.python.util import compat
from tensorflow.python.util.all_util import reveal_undocumented
patch = tf.test.mock.patch
class SheepCounter(object):
"""To be patched in for time.sleep, in order to capture how long was slept."""
def __init__(self):
self._total_time = 0
self._sleeptimes = []
def __call__(self, t):
self._total_time += t
self._sleeptimes += [t]
@property
def total_time(self):
return self._total_time
@property
def sleep_times(self):
return self._sleeptimes
class TestEstimator(tf.contrib.learn.Evaluable, tf.contrib.learn.Trainable):
def __init__(self, config=None, max_evals=5):
self.eval_count = 0
self.fit_count = 0
self._max_evals = max_evals
self.export_count = 0
self.monitors = []
self._config = config or run_config.RunConfig()
self._model_dir = tempfile.mkdtemp()
@property
def model_dir(self):
return self._model_dir
@property
def config(self):
return self._config
def evaluate(self, **kwargs):
tf.logging.info('evaluate called with args: %s' % kwargs)
self.eval_count += 1
if self.eval_count > self._max_evals:
tf.logging.info('Ran %d evals. Done.' % self.eval_count)
raise StopIteration()
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def fake_checkpoint(self):
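"""Write a minimal checkpoint so evaluation has something to restore."""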
save_path = os.path.join(self.model_dir, 'model.ckpt')
with tf.Session() as sess:
var = tf.Variable(1.0, name='var0')
save = tf.train.Saver({var.op.name: var})
var.initializer.run()
save.save(sess, save_path, global_step=0)
def fit(self, **kwargs):
self.fake_checkpoint()
tf.logging.info('fit called with args: %s' % kwargs)
self.fit_count += 1
if 'monitors' in kwargs:
self.monitors = kwargs['monitors']
return [(key, kwargs[key]) for key in sorted(kwargs.keys())]
def export_savedmodel(self, export_dir_base, export_input_fn, **kwargs):
tf.logging.info('export_savedmodel called with args: %s, %s, %s'
% (export_dir_base, export_input_fn, kwargs))
self.export_count += 1
return os.path.join(compat.as_bytes(export_dir_base),
compat.as_bytes('bogus_timestamp'))
class ExperimentTest(tf.test.TestCase):
def setUp(self):
# The official name is tf.train, so tf.training was obliterated.
reveal_undocumented('tensorflow.python.training')
def _cluster_spec(self):
return {
tf.contrib.learn.TaskType.PS: ['host1:2222', 'host2:2222'],
tf.contrib.learn.TaskType.WORKER:
['host3:2222', 'host4:2222', 'host5:2222']
}
def test_train(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
train_steps='train_steps',
eval_input_fn='eval_input',
eval_metrics='eval_metrics')
fit_args = ex.train(delay_secs=0)
self.assertEquals(1, est.fit_count)
self.assertIn(('max_steps', 'train_steps'), fit_args)
self.assertEquals(0, est.eval_count)
def test_train_delay(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_train_default_delay(self):
for task_id in [0, 1, 3]:
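# by default, Experiment delays training by 5 seconds per task index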
tf_config = {'task': {'index': task_id}}
with patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
with patch('time.sleep', SheepCounter()) as sheep:
ex.train()
self.assertAlmostEqual(task_id * 5, sheep.total_time, delta=0.1)
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_starts_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'environment': tf.contrib.learn.Environment.CLOUD,
'task': {
'type': tf.contrib.learn.TaskType.WORKER,
'index': 1
}
}
with patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(
master='host4:2222', num_cores=15, gpu_memory_fraction=0.314)
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
# We want to make sure we discount the time it takes to start the server
# in our accounting of the delay, so we set a small delay here.
with patch('time.sleep', SheepCounter()) as sheep:
ex.train(delay_secs=1)
# Ensure that the delay takes into account the time to start the server.
self.assertAlmostEqual(1, sheep.total_time, delta=0.1)
# Assert.
expected_config_proto = tf.ConfigProto()
expected_config_proto.inter_op_parallelism_threads = 15
expected_config_proto.intra_op_parallelism_threads = 15
expected_config_proto.gpu_options.per_process_gpu_memory_fraction = 0.314
mock_server.assert_called_with(
config.cluster_spec,
job_name=tf.contrib.learn.TaskType.WORKER,
task_index=1,
config=expected_config_proto,
start=False)
mock_server.assert_has_calls([tf.test.mock.call().start()])
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_server_does_not_start_without_cluster_spec(self, mock_server):
config = tf.contrib.learn.RunConfig(master='host4:2222')
ex = tf.contrib.learn.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because there was no ClusterSpec.
self.assertFalse(mock_server.called)
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_train_server_does_not_start_with_empty_master(self, mock_server):
tf_config = {'cluster': self._cluster_spec()}
with patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(master='')
ex = tf.contrib.learn.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
# The server should not have started because master was the empty string.
self.assertFalse(mock_server.called)
def test_train_raises_if_job_name_is_missing(self):
tf_config = {
'cluster': self._cluster_spec(),
'environment': tf.contrib.learn.Environment.CLOUD,
'task': {
'index': 1
}
}
with patch.dict(
'os.environ',
{'TF_CONFIG': json.dumps(tf_config)}), self.assertRaises(ValueError):
config = tf.contrib.learn.RunConfig(
master='host3:2222' # Normally selected by task type.
)
ex = tf.contrib.learn.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.train()
def test_evaluate(self):
est = TestEstimator()
est.fake_checkpoint()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_steps='steps',
eval_delay_secs=0)
ex.evaluate()
self.assertEquals(1, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_evaluate_delay(self):
est = TestEstimator()
est.fake_checkpoint()
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
for delay in [0, 1, 3]:
with patch('time.sleep', SheepCounter()) as sheep:
ex.evaluate(delay_secs=delay)
self.assertAlmostEqual(delay, sheep.total_time, delta=0.1)
def test_continuous_eval(self):
est = TestEstimator()
est.fake_checkpoint()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
eval_delay_secs=0,
continuous_eval_throttle_secs=0)
self.assertRaises(StopIteration, ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertEquals(6, est.eval_count)
self.assertEquals(0, est.fit_count)
def test_continuous_eval_throttle_delay(self):
for delay in [0, 1, 2]:
est = TestEstimator()
est.fake_checkpoint()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
continuous_eval_throttle_secs=delay,
eval_delay_secs=0)
with patch('time.sleep', SheepCounter()) as sheep:
self.assertRaises(StopIteration, ex.continuous_eval,
evaluate_checkpoint_only_once=False)
self.assertAlmostEqual(5 * delay, sheep.total_time, delta=0.1)
def test_run_local(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100,
local_eval_frequency=10)
ex.local_run()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, len(est.monitors))
self.assertTrue(
isinstance(est.monitors[0],
tf.contrib.learn.monitors.ValidationMonitor))
def test_train_and_evaluate(self):
est = TestEstimator()
export_strategy = saved_model_export_utils.make_export_strategy(
est, 'export_input')
ex = tf.contrib.learn.Experiment(
est,
train_input_fn='train_input',
eval_input_fn='eval_input',
eval_metrics='eval_metrics',
train_steps=100,
eval_steps=100,
export_strategies=export_strategy)
ex.train_and_evaluate()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
self.assertEquals(1, est.export_count)
self.assertEquals(1, len(est.monitors))
self.assertTrue(
isinstance(est.monitors[0],
tf.contrib.learn.monitors.ValidationMonitor))
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_run_std_server(self, mock_server):
# Arrange.
tf_config = {
'cluster': self._cluster_spec(),
'task': {
'type': tf.contrib.learn.TaskType.PS,
'index': 1
}
}
with patch.dict('os.environ', {'TF_CONFIG': json.dumps(tf_config)}):
config = tf.contrib.learn.RunConfig(
master='host2:2222',
num_cores=15,
gpu_memory_fraction=0.314,)
est = TestEstimator(config)
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
# Act.
ex.run_std_server()
# Assert.
mock_server.assert_has_calls(
[tf.test.mock.call().start(), tf.test.mock.call().join()])
@tf.test.mock.patch('tensorflow.python.training.server_lib.Server') # pylint: disable=line-too-long
def test_run_std_server_raises_without_cluster_spec(self, mock_server):
config = tf.contrib.learn.RunConfig(master='host4:2222')
with self.assertRaises(ValueError):
ex = tf.contrib.learn.Experiment(
TestEstimator(config),
train_input_fn='train_input',
eval_input_fn='eval_input')
ex.run_std_server()
def test_test(self):
est = TestEstimator()
ex = tf.contrib.learn.Experiment(
est, train_input_fn='train_input', eval_input_fn='eval_input')
ex.test()
self.assertEquals(1, est.fit_count)
self.assertEquals(1, est.eval_count)
def test_continuous_eval_evaluates_checkpoint_once(self):
# Temporarily disabled until we figure out the threading story on Jenkins.
return
# pylint: disable=unreachable
# The TestEstimator will raise StopIteration the second time evaluate is
# called.
ex = tf.contrib.learn.Experiment(
TestEstimator(max_evals=1),
train_input_fn='train_input',
eval_input_fn='eval_input')
# This should not happen if the logic restricting evaluation of the same
# checkpoint works. We do need some checkpoint though, otherwise Experiment
# will never evaluate.
ex.estimator.fake_checkpoint()
# Start a separate thread with continuous eval
thread = threading.Thread(
target=lambda: ex.continuous_eval(delay_secs=0, throttle_delay_secs=0))
thread.start()
# The thread will die if it evaluates twice, and we should never evaluate
# twice since we don't write another checkpoint. Since we did not enable
# throttling, if it hasn't died after two seconds, we're good.
thread.join(2)
self.assertTrue(thread.is_alive())
# But we should have evaluated once.
count = ex.estimator.eval_count
self.assertEquals(1, count)
if __name__ == '__main__':
tf.test.main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/plugin/plugin.go | /*
Copyright 2021 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package plugins provides support for creating extensible CLIs
package plugin
import (
"bytes"
"context"
"encoding/json"
"errors"
"io/ioutil"
"os"
"os/exec"
"path"
"strings"
"time"
)
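// A plugin is any executable on PATH whose name carries the fission- prefix;
// it is expected to print its JSON metadata to stdout when invoked with --plugin.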
const (
cmdTimeout = 5 * time.Second
cmdMetadataArgs = "--plugin"
)
var (
ErrPluginNotFound = errors.New("plugin not found")
ErrPluginInvalid = errors.New("invalid plugin")
Prefix = "fission-"
)
// ObjectMeta contains the metadata of a plugin.
// The only metadata that is guaranteed to be non-empty is the path and Name. All other fields are considered optional.
type Metadata struct {
Name string `json:"name,omitempty"`
Version string `json:"version,omitempty"`
Aliases []string `json:"aliases,omitempty"`
Usage string `json:"usage,omitempty"`
Path string `json:"path,omitempty"`
}
func (md *Metadata) AddAlias(alias string) {
if alias != md.Name && !md.HasAlias(alias) {
md.Aliases = append(md.Aliases, alias)
}
}
func (md *Metadata) HasAlias(needle string) bool {
for _, alias := range md.Aliases {
if alias == needle {
return true
}
}
return false
}
// Find searches the machine for the given plugin, returning the metadata of the plugin.
// The only metadata that is guaranteed to be non-empty is the path and Name. All other fields are considered optional.
// If found it returns the plugin, otherwise it returns ErrPluginNotFound if the plugin was not found.
func Find(pluginName string) (*Metadata, error) {
// Search PATH for plugin as command-name
// To check if plugin is actually there still.
pluginPath, err := findPluginOnPath(pluginName)
if err != nil {
// Fallback: Search for alias in each command
mds := FindAll()
for _, md := range mds {
if md.HasAlias(pluginName) {
return md, nil
}
}
return nil, ErrPluginNotFound
}
md, err := fetchPluginMetadata(pluginPath)
if err != nil {
return nil, err
}
return md, nil
}
// Exec executes the plugin using the provided args.
// All input and output is redirected to stdin, stdout, and stderr.
func Exec(md *Metadata, args []string) error {
cmd := exec.Command(md.Path, args...)
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
// FindAll searches the machine for all plugins currently present.
func FindAll() map[string]*Metadata {
plugins := map[string]*Metadata{}
dirs := strings.Split(os.Getenv("PATH"), ":")
for _, dir := range dirs {
fs, err := ioutil.ReadDir(dir)
if err != nil {
continue
}
for _, f := range fs {
if !strings.HasPrefix(f.Name(), Prefix) {
continue
}
fp := path.Join(dir, f.Name())
md, err := fetchPluginMetadata(fp)
if err != nil {
continue
}
if existing, ok := plugins[md.Name]; ok {
for _, alias := range existing.Aliases {
md.AddAlias(alias)
}
}
plugins[md.Name] = md
}
}
return plugins
}
func findPluginOnPath(pluginName string) (path string, err error) {
binaryName := Prefix + pluginName
path, err = exec.LookPath(binaryName)
if err != nil || len(path) == 0 {
return "", ErrPluginNotFound
}
return path, nil
}
// fetchPluginMetadata attempts to fetch the plugin metadata given the plugin path.
func fetchPluginMetadata(pluginPath string) (*Metadata, error) {
d, err := os.Stat(pluginPath)
if err != nil {
return nil, ErrPluginNotFound
}
if m := d.Mode(); m.IsDir() || m&0111 == 0 {
return nil, ErrPluginInvalid
}
// Fetch the metadata from the plugin itself.
buf := bytes.NewBuffer(nil)
ctx, cancel := context.WithTimeout(context.Background(), cmdTimeout)
defer cancel()
cmd := exec.CommandContext(ctx, pluginPath, cmdMetadataArgs) // Note: issue can occur with signal propagation
cmd.Stdout = buf
err = cmd.Run()
if err != nil {
return nil, err
}
// Parse metadata if possible
pluginName := strings.TrimPrefix(path.Base(pluginPath), Prefix)
md := &Metadata{}
err = json.Unmarshal(buf.Bytes(), md)
// If metadata could not be retrieved, or if no name was provided, use the filename of the binary
if err != nil || len(md.Name) == 0 {
md.Name = pluginName
}
md.Path = pluginPath
md.AddAlias(pluginName)
return md, nil
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "groves.settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == "__main__":
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
hello/hello.go | package main
import "fmt"
func main() {
fmt.Println("Hey, you.")
}
| []
| []
| []
| [] | [] | go | null | null | null |
LAB03/01-DDB/backend/cloudalbum/__init__.py | """
cloudalbum/__init__.py
~~~~~~~~~~~~~~~~~~~~~~~
Environment configuration for running the application.
:description: CloudAlbum is a fully featured sample application for the 'Moving to AWS serverless' training course
:copyright: © 2019 written by Dayoungle Jun, Sungshik Jou.
:license: MIT, see LICENSE for more details.
"""
import os
import logging
import sys
import json
import datetime
from bson.objectid import ObjectId
from flask import Flask
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from flask_bcrypt import Bcrypt
from werkzeug.exceptions import Conflict
from cloudalbum.database import create_table
class JSONEncoder(json.JSONEncoder):
""" extend json-encoder class """
def default(self, o):
if isinstance(o, ObjectId):
return str(o)
if isinstance(o, set):
return list(o)
if isinstance(o, datetime.datetime):
return str(o)
return json.JSONEncoder.default(self, o)
def create_app(script_info=None):
# instantiate the application
app = Flask(__name__)
# initialize config values for JWT authentication
app.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY', 'my_jwt')
app.config['JWT_ACCESS_TOKEN_EXPIRES'] = datetime.timedelta(days=1)
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access']
flask_bcrypt = Bcrypt(app)
jwt = JWTManager(app)
app.json_encoder = JSONEncoder
# enable CORS
CORS(app, resources={r'/*': {'origins': '*'}})
# set config
app_settings = os.getenv('APP_SETTINGS')
app.config.from_object(app_settings)
# set logger to STDOUT
app.logger.addHandler(logging.StreamHandler(sys.stdout))
app.logger.setLevel(logging.DEBUG)
# Create the database table if it does not exist
with app.app_context():
create_table()
# register blueprints
from cloudalbum.api.users import users_blueprint
app.register_blueprint(users_blueprint, url_prefix='/users')
from cloudalbum.api.photos import photos_blueprint
app.register_blueprint(photos_blueprint, url_prefix='/photos')
from cloudalbum.api.admin import admin_blueprint
app.register_blueprint(admin_blueprint, url_prefix='/admin')
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist_DB(decrypted_token):
from cloudalbum.util.jwt_helper import is_blacklisted_token_set
try:
return is_blacklisted_token_set(decrypted_token)
except Exception as e:
app.logger.error(e)
raise Conflict('Session already expired: {0}'.format(e))
# shell context for flask cli
@app.shell_context_processor
def ctx():
return {'application': app}
return app
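# Usage sketch (illustrative, not part of the original module): the factory is
# typically driven through the Flask CLI, e.g.
#
#   export APP_SETTINGS="cloudalbum.config.ProductionConfig"  # hypothetical config object
#   export JWT_SECRET_KEY="change-me"
#   export FLASK_APP="cloudalbum:create_app"
#   flask run
#
# APP_SETTINGS must name an object importable by app.config.from_object().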
| []
| []
| [
"APP_SETTINGS",
"JWT_SECRET_KEY"
]
| [] | ["APP_SETTINGS", "JWT_SECRET_KEY"] | python | 2 | 0 | |
codegen/charts/service_mesh_hub/generate_service_mesh_hub.go | package main
import (
"io/ioutil"
"log"
"os"
"github.com/solo-io/service-mesh-hub/cli/pkg/cliconstants"
"github.com/solo-io/skv2/codegen"
"github.com/solo-io/skv2/codegen/model"
"github.com/solo-io/solo-kit/pkg/code-generator/sk_anyvendor"
"k8s.io/apimachinery/pkg/runtime/schema"
)
//go:generate go run generate_service_mesh_hub.go
func main() {
log.Println("starting generate SMH")
var renderTypes bool
if os.Getenv("REGENERATE_TYPES") == "" {
log.Println("REGENERATE_TYPES is not set, skipping autopilot client gen")
} else {
renderTypes = true
}
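// Usage sketch (illustrative, not part of the original file): types are only
// regenerated when the variable is set, e.g.
//
//	REGENERATE_TYPES=1 go generate ./codegen/charts/service_mesh_hub/
//
// Leaving it unset still renders the manifests, controllers and clients below.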
// load custom client template
customClientTemplateBytes, err := ioutil.ReadFile("../custom_client.gotmpl")
if err != nil {
log.Fatal(err)
}
customClientTemplate := string(customClientTemplateBytes)
// load custom client providers template
customClientProvidersBytes, err := ioutil.ReadFile("../custom_client_providers.gotmpl")
if err != nil {
log.Fatal(err)
}
customClientProviders := string(customClientProvidersBytes)
apImports := sk_anyvendor.CreateDefaultMatchOptions([]string{
"api/**/*.proto",
})
skv2Cmd := codegen.Command{
AppName: "service-mesh-hub",
Groups: []model.Group{
{
GroupVersion: schema.GroupVersion{
Group: "core." + cliconstants.ServiceMeshHubApiGroupSuffix,
Version: "v1alpha1",
},
Module: "github.com/solo-io/service-mesh-hub",
Resources: []model.Resource{
{
Kind: "Settings",
Spec: model.Field{
Type: model.Type{
Name: "SettingsSpec",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/core.zephyr.solo.io/v1alpha1/types",
},
},
Status: &model.Field{Type: model.Type{
Name: "SettingsStatus",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/core.zephyr.solo.io/v1alpha1/types",
}},
},
},
ApiRoot: "pkg/api/",
RenderManifests: true,
RenderTypes: renderTypes,
RenderController: true,
RenderProtos: true,
RenderClients: true,
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"client_providers.go": customClientProviders,
},
},
},
{
GroupVersion: schema.GroupVersion{
Group: "networking." + cliconstants.ServiceMeshHubApiGroupSuffix,
Version: "v1alpha1",
},
Module: "github.com/solo-io/service-mesh-hub",
Resources: []model.Resource{
{
Kind: "TrafficPolicy",
Spec: model.Field{
Type: model.Type{
Name: "TrafficPolicySpec",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/networking.zephyr.solo.io/v1alpha1/types",
},
},
Status: &model.Field{Type: model.Type{
Name: "TrafficPolicyStatus",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/networking.zephyr.solo.io/v1alpha1/types",
}},
},
{
Kind: "AccessControlPolicy",
Spec: model.Field{
Type: model.Type{
Name: "AccessControlPolicySpec",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/networking.zephyr.solo.io/v1alpha1/types",
},
},
Status: &model.Field{Type: model.Type{
Name: "AccessControlPolicyStatus",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/networking.zephyr.solo.io/v1alpha1/types",
}},
},
{
Kind: "VirtualMesh",
Spec: model.Field{
Type: model.Type{
Name: "VirtualMeshSpec",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/networking.zephyr.solo.io/v1alpha1/types",
},
},
Status: &model.Field{Type: model.Type{
Name: "VirtualMeshStatus",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/networking.zephyr.solo.io/v1alpha1/types",
}},
},
},
RenderManifests: true,
RenderTypes: renderTypes,
RenderController: true,
RenderProtos: true,
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"clients.go": customClientTemplate,
"client_providers.go": customClientProviders,
},
},
ApiRoot: "pkg/api",
},
{
GroupVersion: schema.GroupVersion{
Group: "discovery." + cliconstants.ServiceMeshHubApiGroupSuffix,
Version: "v1alpha1",
},
Module: "github.com/solo-io/service-mesh-hub",
Resources: []model.Resource{
{
Kind: "KubernetesCluster",
Spec: model.Field{
Type: model.Type{
Name: "KubernetesClusterSpec",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/discovery.zephyr.solo.io/v1alpha1/types",
},
},
},
{
Kind: "MeshService",
Spec: model.Field{
Type: model.Type{
Name: "MeshServiceSpec",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/discovery.zephyr.solo.io/v1alpha1/types",
},
},
Status: &model.Field{Type: model.Type{
Name: "MeshServiceStatus",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/discovery.zephyr.solo.io/v1alpha1/types",
}},
},
{
Kind: "MeshWorkload",
Spec: model.Field{
Type: model.Type{
Name: "MeshWorkloadSpec",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/discovery.zephyr.solo.io/v1alpha1/types",
},
},
Status: &model.Field{Type: model.Type{
Name: "MeshWorkloadStatus",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/discovery.zephyr.solo.io/v1alpha1/types",
}},
},
{
Kind: "Mesh",
Spec: model.Field{
Type: model.Type{
Name: "MeshSpec",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/discovery.zephyr.solo.io/v1alpha1/types",
},
},
Status: &model.Field{Type: model.Type{
Name: "MeshStatus",
GoPackage: "github.com/solo-io/service-mesh-hub/pkg/api/discovery.zephyr.solo.io/v1alpha1/types",
}},
},
},
RenderManifests: true,
RenderTypes: renderTypes,
RenderController: true,
RenderProtos: true,
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"clients.go": customClientTemplate,
"client_providers.go": customClientProviders,
},
},
ApiRoot: "pkg/api",
},
{
GroupVersion: schema.GroupVersion{
Group: "core",
Version: "v1",
},
Module: "k8s.io/api",
Resources: []model.Resource{
{
Kind: "Secret",
},
{
Kind: "ServiceAccount",
},
{
Kind: "ConfigMap",
},
{
Kind: "Service",
},
{
Kind: "Pod",
},
{
Kind: "Namespace",
},
{
Kind: "Node",
},
},
RenderController: true,
RenderClients: true,
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"client_providers.go": customClientProviders,
},
},
CustomTypesImportPath: "k8s.io/api/core/v1",
ApiRoot: "pkg/api/kubernetes",
},
{
GroupVersion: schema.GroupVersion{
Group: "apps",
Version: "v1",
},
Module: "k8s.io/api",
Resources: []model.Resource{
{
Kind: "Deployment",
},
{
Kind: "ReplicaSet",
},
},
RenderController: true,
RenderClients: true,
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"client_providers.go": customClientProviders,
},
},
CustomTypesImportPath: "k8s.io/api/apps/v1",
ApiRoot: "pkg/api/kubernetes",
},
{
GroupVersion: schema.GroupVersion{
Group: "apiextensions.k8s.io",
Version: "v1beta1",
},
Module: "k8s.io/apiextensions-apiserver",
Resources: []model.Resource{
{
Kind: "CustomResourceDefinition",
},
},
RenderClients: true,
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"client_providers.go": customClientProviders,
},
},
CustomTypesImportPath: "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1",
ApiRoot: "pkg/api/kubernetes",
},
{
GroupVersion: schema.GroupVersion{
Group: "networking",
Version: "v1alpha3",
},
Module: "istio.io/client-go/pkg/apis",
Resources: []model.Resource{
{
Kind: "DestinationRule",
},
{
Kind: "EnvoyFilter",
},
{
Kind: "Gateway",
},
{
Kind: "ServiceEntry",
},
{
Kind: "VirtualService",
},
},
CustomTypesImportPath: "istio.io/client-go/pkg/apis/networking/v1alpha3",
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"clients.go": customClientTemplate,
"client_providers.go": customClientProviders,
},
},
ApiRoot: "pkg/api/istio",
},
{
GroupVersion: schema.GroupVersion{
Group: "security",
Version: "v1beta1",
},
Module: "istio.io/client-go/pkg/apis",
Resources: []model.Resource{
{
Kind: "AuthorizationPolicy",
},
},
CustomTypesImportPath: "istio.io/client-go/pkg/apis/security/v1beta1",
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"clients.go": customClientTemplate,
"client_providers.go": customClientProviders,
},
},
ApiRoot: "pkg/api/istio",
},
{
GroupVersion: schema.GroupVersion{
Group: "",
Version: "v1alpha2",
},
Module: "github.com/linkerd/linkerd2",
Resources: []model.Resource{
{
Kind: "ServiceProfile",
},
},
CustomTypesImportPath: "github.com/linkerd/linkerd2/controller/gen/apis/serviceprofile/v1alpha2",
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"clients.go": customClientTemplate,
"client_providers.go": customClientProviders,
},
},
ApiRoot: "pkg/api/linkerd",
},
{
GroupVersion: schema.GroupVersion{
Group: "split",
Version: "v1alpha1",
},
Module: "github.com/servicemeshinterface/smi-sdk-go",
Resources: []model.Resource{
{
Kind: "TrafficSplit",
},
},
CustomTypesImportPath: "github.com/servicemeshinterface/smi-sdk-go/pkg/apis/split/v1alpha1",
CustomTemplates: model.CustomTemplates{
Templates: map[string]string{
"clients.go": customClientTemplate,
"client_providers.go": customClientProviders,
},
},
ApiRoot: "pkg/api/smi",
},
},
AnyVendorConfig: apImports,
ManifestRoot: "install/helm/charts/custom-resource-definitions",
}
if err := skv2Cmd.Execute(); err != nil {
log.Fatal(err)
}
log.Printf("Finished generating Service Mesh Hub parent chart code\n")
}
| [
"\"REGENERATE_TYPES\""
]
| []
| [
"REGENERATE_TYPES"
]
| [] | ["REGENERATE_TYPES"] | go | 1 | 0 | |
ats/atsMachines/FutureMachines/flux_direct.py | #ATS:flux SELF FluxDirect 800000
"""A flux machine for ATS
"""
from __future__ import print_function
from ats import machines, debug, atsut
from ats import log, terminal
from ats import configuration
from ats.atsut import RUNNING, TIMEDOUT, PASSED, FAILED, BATCHED, CREATED, SKIPPED, HALTED, EXPECTED, statuses, AtsError
from ats import schedulers
import sys, os, time, json
import thread
import errno
import flux
import flux.kvs as kvs
import flux.jsc as jsc
import lcMachines
class FluxScheduler (schedulers.StandardScheduler):
pass
def run_broker(flux_handle):
flux_handle.reactor_run(flux_handle.get_reactor(), 0)
def update_test_status(json_response, arg, errnum):
#print >>sys.stderr, "RECEIVED RESPONSE"
#print >>sys.stderr, "JSON", json_response
response = json.loads(json_response)
#print >>sys.stderr, response, jsc.job_num2state(response['state-pair']['nstate'])
test_to_update = arg.submitted.get(response['jobid'], None)
if test_to_update is None:
print("GOT UNEXPECTED RESPONSE %s" % response)
return
new_state = response['state-pair']['nstate']
if new_state >= jsc.lib.J_NULL and new_state < jsc.lib.J_RUNNING:
test_to_update.fluxStatus = 'submitted'
test_to_update.set(RUNNING, "Submitted, pending allocation") #not really true... but as close as they come
elif new_state == jsc.lib.J_RUNNING:
if test_to_update.fluxStatus != 'running':
arg.running.append(test_to_update)
with kvs.get_dir(arg.fh, test_to_update.kvs_path) as d:
test_to_update.startTime = float(d['running-time'])
test_to_update.fluxStatus = 'running'
test_to_update.set(RUNNING, test_to_update.commandLine)
elif new_state > jsc.lib.J_RUNNING and new_state != jsc.lib.J_COMPLETING:
if test_to_update.fluxStatus != 'done':
arg.running.remove(test_to_update)
arg.submitted.pop(test_to_update.job_id, None)
test_to_update.fluxStatus = 'done'
status = HALTED # test is done, pre-set to a did-not-run state # TODO: see if this is the right code for this
if new_state == jsc.lib.J_COMPLETE:
# flux says it ran ok, check return code
with kvs.get_dir(arg.fh, test_to_update.kvs_path) as d:
try:
exit_status = d['exit_status']
except Exception:
exit_status = {'min': 5, 'max': 5}  # non-zero fallback dict so the comparison below marks the test FAILED
for k in d:
print("LWJ KVS DEBUG %s=%s" % (k, d[k]))
test_to_update.endTime = float(d['complete-time'])
if exit_status['min'] == exit_status['max'] and exit_status['min'] == 0:
status = PASSED
else:
status = FAILED
else:
# it didn't run ok, don't check anything else
if configuration.options.oneFailure:
raise AtsError("Test failed in oneFailure mode.")
print("UPDATING TEST STATUS TO %s" % status, file=sys.stderr)
test_to_update.set(status, test_to_update.elapsedTime())
arg.noteEnd(test_to_update)
# class FluxDirect (machines.Machine):
class FluxDirect (lcMachines.LCMachineCore):
def __init__(self, name, npMaxH):
self.submitted = dict()
self.fh = flux.Flux()
jsc.notify_status(self.fh, update_test_status, self)
# self.broker_thread = thread.start_new_thread(run_broker, (self.fh,))
self.cores = 0
max_cores = 0
self.numNodes = 0
self.numberCoresInUse = 0
with kvs.get_dir(self.fh, 'resource.hwloc.by_rank') as d:
for name, rankdir in d.items():
max_cores = max(max_cores, rankdir['Core'])
self.cores += rankdir['Core']
self.numNodes += 1
self.npMax = max_cores
# initialize the upper versions with the real core count
super(FluxDirect, self).__init__(name, self.cores)
# self.numberTestsRunningMax = 1 # TODO: REMOVE THIS DEBUG VALUE self.cores * 2 # for flux, this is number in the scheduling queue
self.numberTestsRunningMax = 1000 # for flux, this is number in the scheduling queue
self.scheduler = FluxScheduler()
self.timer = self.fh.timer_watcher_create(
after=self.naptime,
repeat=self.naptime,
callback=lambda fh, y, z, w:
fh.reactor_stop(fh.get_reactor()))
self.timer.start()
def addOptions(self, parser):
"Add options needed on this machine."
parser.add_option("--partition", action="store", type="string", dest='partition',
default = 'pdebug',
help = "Partition in which to run jobs with np > 0")
parser.add_option("--numNodes", action="store", type="int", dest='numNodes',
default = 2,
help="Number of nodes to use")
parser.add_option("--distribution", action="store", type="string", dest='distribution',
default = 'unset',
help="srun distribution of mpi processes across nodes")
def getNumberOfProcessors(self):
return self.cores
def label(self):
return "FluxDirect: %d nodes, %d processors per node." % (
self.numNodes, self.npMax)
def calculateCommandList(self, test):
"""Prepare for run of executable using a suitable command. First we get the plain command
line that would be executed on a vanilla serial machine, then we modify it if necessary
for use on this machines.
"""
np = max(test.np, 1)
test.cpus_per_task = 1
commandList = self.calculateBasicCommandList(test)
timeNow = time.strftime('%H%M%S',time.localtime())
test.jobname = "t%d_%d%s%s" % (np, test.serialNumber, test.namebase[0:50], timeNow)
minNodes = np / self.npMax + (np % self.npMax != 0 )
num_nodes = test.options.get('nn', -1)
#NOTE: this only works with a sub-instance per job, but it's the closest thing we have
clist = ["flux", "wreckrun", "-n %i" % np , ]
if num_nodes > 0:
test.numNodesToUse = num_nodes
clist.append("-N %i" % (num_nodes))
clist.extend(commandList)
return clist
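# Illustrative sketch (not part of the original source): for a test with
# np=4 and the per-test option nn=2, the list built above comes out roughly as
#   ['flux', 'wreckrun', '-n 4', '-N 2', <basic command list...>]
# where the trailing entries come from calculateBasicCommandList().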
def canRun(self, test):
"""Is this machine able to run the test interactively when resources become available?
If so return ''. Otherwise return the reason it cannot be run here.
"""
np = max(test.np, 1)
if np > self.cores:
return "Too many processors needed (%d)" % np
return ''
def startRun(self, test):
"""For interactive test object, launch the test object.
Return True if able to start the test.
"""
self.runOrder += 1
test.runOrder = self.runOrder
# TODO: consider incorporating helper into flux for this
if test.commandList == ['not run']:
test.commandList = self.calculateBasicCommandList(test)
jobspec = {
'nnodes': 0, # TODO: this should be 0, or something to say "I don't care" but that's causing issues
'ntasks': max(test.np, 1),
'ncores': max(test.np, 1),
'cmdline': test.commandList,
'environ': dict(os.environ), # TODO: add environment updating stuff
'cwd': test.directory,
'walltime': test.timelimit.value,
'output': {
'files': {
'stdout': test.outname,
'stderror': test.errname if not (hasattr(test, 'combineOutput') and test.combineOutput) else test.outname,
}
},
'opts': {
'ntasks' : max(test.np, 1),
'cores-per-task' : 1,
},
}
new_ld_library_path = "/opt/ibm/spectrum_mpi/lib/pami_port:/opt/ibm/spectrum_mpi/lib:/opt/ibm/spectrum_mpi/lib:/opt/mellanox/hcoll/lib"
if os.environ['LD_LIBRARY_PATH']:
new_ld_library_path += ":{}".format(os.environ['LD_LIBRARY_PATH'])
jobspec['environ'] = {k: v for k, v in jobspec['environ'].items() if k.split('_')[0] not in ('JSM', 'OMPI', 'PMIX', 'ENVIRONMENT')}
jobspec['environ'].update({"OMPI_MCA_osc": "pt2pt",
"OMPI_MCA_pml": "yalla",
"OMPI_MCA_btl": "self",
"OPAL_LIBDIR": "/usr/tce/packages/spectrum-mpi/ibm/spectrum-mpi-rolling-release/lib",
"LD_LIBRARY_PATH": new_ld_library_path,
"OMPI_MCA_coll_hcoll_enable": "0",
"OMPI_MCA_orte_tmpdir_base": test.directory,
#"LD_PRELOAD":"/opt/ibm/spectrum_mpi/lib/libpami_cudahook.so",
})
jobspec['environ'].pop('PMIX_SERVER_URI', None)
jobspec['environ'].pop('PMIX_SERVER_URI2', None)
# print jobspec
job_response = self.fh.rpc_send('job.submit', jobspec)
print(job_response)
if job_response is None:
raise RuntimeError("RPC response invalid")
if job_response.get('errnum', None) is not None:
raise RuntimeError("Job creation failed with error code {}".format(
job_response['errnum']))
test.job_id = job_response['jobid']
test.kvs_path = job_response['kvs_path']
test.status = RUNNING # was BATCHED, not true, but made prototyping easier, re-investigate this later
self.submitted[test.job_id] = test
self.noteLaunch(test)
return True
def noteLaunch(self, test):
self.numberTestsRunning += 1 # max(test.np, 1)
self.numberCoresInUse += max(test.np, 1)
def noteEnd(self, test):
self.numberTestsRunning -= 1 # max(test.np, 1)
self.numberCoresInUse -= max(test.np, 1)
def periodicReport(self):
"Make the machine-specific part of periodic report to the terminal."
terminal(len(self.running), "tests running on", self.numberTestsRunning,
"of", self.cores, "cores.")
def checkRunning(self):
try:
self.fh.reactor_run(self.fh.get_reactor(), self.fh.REACTOR_ONCE)
except EnvironmentError as e:
if e.errno == errno.EAGAIN:
pass
else:
raise e
# super(FluxDirect, self).checkRunning()
def getStatus (self, test):
raise RuntimeError("Should not run")
| []
| []
| [
"LD_LIBRARY_PATH"
]
| [] | ["LD_LIBRARY_PATH"] | python | 1 | 0 | |
vunit/test/runner.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2014-2021, Lars Asplund [email protected]
"""
Provided functionality to run a suite of test in a robust way
"""
import os
from pathlib import Path
import traceback
import threading
import sys
import time
import logging
import string
from contextlib import contextmanager
from .. import ostools
from ..hashing import hash_string
from .report import PASSED, FAILED, SKIPPED
LOGGER = logging.getLogger(__name__)
class TestRunner(object): # pylint: disable=too-many-instance-attributes
"""
Administer the execution of a list of test suites
"""
VERBOSITY_QUIET = 0
VERBOSITY_NORMAL = 1
VERBOSITY_VERBOSE = 2
def __init__( # pylint: disable=too-many-arguments
self,
report,
output_path,
verbosity=VERBOSITY_NORMAL,
num_threads=1,
fail_fast=False,
dont_catch_exceptions=False,
no_color=False,
):
self._lock = threading.Lock()
self._fail_fast = fail_fast
self._abort = False
self._local = threading.local()
self._report = report
self._output_path = output_path
assert verbosity in (
self.VERBOSITY_QUIET,
self.VERBOSITY_NORMAL,
self.VERBOSITY_VERBOSE,
)
self._verbosity = verbosity
self._num_threads = num_threads
self._stdout = sys.stdout
self._stdout_ansi = wrap(self._stdout, use_color=not no_color)
self._stderr = sys.stderr
self._dont_catch_exceptions = dont_catch_exceptions
self._no_color = no_color
ostools.PROGRAM_STATUS.reset()
@property
def _is_verbose(self):
return self._verbosity == self.VERBOSITY_VERBOSE
@property
def _is_quiet(self):
return self._verbosity == self.VERBOSITY_QUIET
def run(self, test_suites):
"""
Run a list of test suites
"""
if not Path(self._output_path).exists():
os.makedirs(self._output_path)
self._create_test_mapping_file(test_suites)
num_tests = 0
for test_suite in test_suites:
for test_name in test_suite.test_names:
num_tests += 1
if self._is_verbose:
print("Running test: " + test_name)
if self._is_verbose:
print(f"Running {num_tests:d} tests")
print()
self._report.set_expected_num_tests(num_tests)
scheduler = TestScheduler(test_suites)
threads = []
# Disable continuous output in parallel mode
write_stdout = self._is_verbose and self._num_threads == 1
try:
sys.stdout = ThreadLocalOutput(self._local, self._stdout)
sys.stderr = ThreadLocalOutput(self._local, self._stdout)
# Start P-1 worker threads
for _ in range(self._num_threads - 1):
new_thread = threading.Thread(
target=self._run_thread,
args=(write_stdout, scheduler, num_tests, False),
)
threads.append(new_thread)
new_thread.start()
# Run one worker in main thread such that P=1 is not multithreaded
self._run_thread(write_stdout, scheduler, num_tests, True)
scheduler.wait_for_finish()
except KeyboardInterrupt:
LOGGER.debug("TestRunner: Caught Ctrl-C shutting down")
ostools.PROGRAM_STATUS.shutdown()
raise
finally:
for thread in threads:
thread.join()
sys.stdout = self._stdout
sys.stderr = self._stderr
LOGGER.debug("TestRunner: Leaving")
def _run_thread(self, write_stdout, scheduler, num_tests, is_main):
"""
Run worker thread
"""
self._local.output = self._stdout
while True:
test_suite = None
try:
test_suite = scheduler.next()
output_path = self._get_output_path(test_suite.name)
output_file_name = str(Path(output_path) / "output.txt")
with self._stdout_lock():
for test_name in test_suite.test_names:
print(f"Starting {test_name!s}")
print(f"Output file: {output_file_name!s}")
self._run_test_suite(test_suite, write_stdout, num_tests, output_path, output_file_name)
except StopIteration:
return
except KeyboardInterrupt:
# Only main thread should handle KeyboardInterrupt
if is_main:
LOGGER.debug("MainWorkerThread: Caught Ctrl-C shutting down")
raise
return
finally:
if test_suite is not None:
scheduler.test_done()
def _get_output_path(self, test_suite_name):
"""
Construct the full output path of a test case.
Ensure no bad characters and no long path names.
"""
output_path = str(Path(self._output_path).resolve())
safe_name = "".join(char if _is_legal(char) else "_" for char in test_suite_name) + "_"
hash_name = hash_string(test_suite_name)
if "VUNIT_SHORT_TEST_OUTPUT_PATHS" in os.environ:
full_name = hash_name
elif sys.platform == "win32":
max_path = 260
margin = int(os.environ.get("VUNIT_TEST_OUTPUT_PATH_MARGIN", "100"))
prefix_len = len(output_path)
full_name = safe_name[: min(max_path - margin - prefix_len - len(hash_name), len(safe_name))] + hash_name
else:
full_name = safe_name + hash_name
return str(Path(output_path) / full_name)
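# Illustrative sketch (not part of the original file): on Linux with
# VUNIT_SHORT_TEST_OUTPUT_PATHS unset, a suite named "lib.tb_ex.all" maps to
#   <output_path>/lib.tb_ex.all_<hash>
# where <hash> is hash_string("lib.tb_ex.all"); setting the variable keeps
# only the hash part of the name.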
def _add_skipped_tests(self, test_suite, results, start_time, num_tests, output_file_name):
"""
Add skipped tests
"""
for name in test_suite.test_names:
results[name] = SKIPPED
self._add_results(test_suite, results, start_time, num_tests, output_file_name)
def _run_test_suite( # pylint: disable=too-many-locals
self, test_suite, write_stdout, num_tests, output_path, output_file_name
):
"""
Run the actual test suite
"""
color_output_file_name = str(Path(output_path) / "output_with_color.txt")
output_file = None
color_output_file = None
start_time = ostools.get_time()
results = self._fail_suite(test_suite)
try:
self._prepare_test_suite_output_path(output_path)
output_file = wrap(
Path(output_file_name).open("a+", encoding="utf-8"), # pylint: disable=consider-using-with
use_color=False,
)
output_file.seek(0)
output_file.truncate()
if write_stdout:
output_from = self._stdout_ansi
else:
color_output_file = Path(color_output_file_name).open( # pylint: disable=consider-using-with
"w", encoding="utf-8"
)
output_from = color_output_file
self._local.output = Tee([output_from, output_file])
def read_output():
"""
Called to read the contents of the output file on demand
"""
output_file.flush()
prev = output_file.tell()
output_file.seek(0)
contents = output_file.read()
output_file.seek(prev)
return contents
results = test_suite.run(output_path=output_path, read_output=read_output)
except KeyboardInterrupt as exk:
self._add_skipped_tests(test_suite, results, start_time, num_tests, output_file_name)
raise KeyboardInterrupt from exk
except: # pylint: disable=bare-except
if self._dont_catch_exceptions:
raise
with self._stdout_lock():
traceback.print_exc()
finally:
self._local.output = self._stdout
for fptr in (ptr for ptr in [output_file, color_output_file] if ptr is not None):
fptr.flush()
fptr.close()
any_not_passed = any(value != PASSED for value in results.values())
with self._stdout_lock():
if (color_output_file is not None) and (any_not_passed or self._is_verbose) and not self._is_quiet:
self._print_output(color_output_file_name)
self._add_results(test_suite, results, start_time, num_tests, output_file_name)
if self._fail_fast and any_not_passed:
self._abort = True
@staticmethod
def _prepare_test_suite_output_path(output_path):
"""
Make sure the directory exists and is empty before running test.
"""
ostools.renew_path(output_path)
def _create_test_mapping_file(self, test_suites):
"""
Create a file mapping test name to test output folder.
This is to allow the user to find the test output folder when it is hashed
"""
mapping_file_name = Path(self._output_path) / "test_name_to_path_mapping.txt"
# Load old mapping to remember non-deleted test folders as well
# even when re-running only a single test case
if mapping_file_name.exists():
with mapping_file_name.open("r", encoding="utf-8") as fptr:
mapping = set(fptr.read().splitlines())
else:
mapping = set()
for test_suite in test_suites:
test_output = self._get_output_path(test_suite.name)
mapping.add(f"{Path(test_output).name!s} {test_suite.name!s}")
# Sort by everything except hash
mapping = sorted(mapping, key=lambda value: value[value.index(" ") :])
with mapping_file_name.open("w", encoding="utf-8") as fptr:
for value in mapping:
fptr.write(value + "\n")
def _print_output(self, output_file_name):
"""
Print contents of output file if it exists
"""
with Path(output_file_name).open("r") as fread:
for line in fread.readlines():
self._stdout_ansi.write(line)
def _add_results(self, test_suite, results, start_time, num_tests, output_file_name):
"""
Add results to test report
"""
runtime = ostools.get_time() - start_time
time_per_test = runtime / len(results)
for test_name in test_suite.test_names:
status = results[test_name]
self._report.add_result(test_name, status, time_per_test, output_file_name)
self._report.print_latest_status(total_tests=num_tests)
print()
@staticmethod
def _fail_suite(test_suite):
"""Return failure for all tests in suite"""
results = {}
for test_name in test_suite.test_names:
results[test_name] = FAILED
return results
@contextmanager
def _stdout_lock(self):
"""
Enter this lock when printing to stdout
Ensures no additional output is printed during abort
"""
with self._lock: # pylint: disable=not-context-manager
if self._abort:
raise KeyboardInterrupt
yield
class Tee(object):
"""
Provide a write method which writes to multiple files
like the unix 'tee' command.
"""
def __init__(self, files):
self._files = files
def write(self, txt):
for ofile in self._files:
ofile.write(txt)
def flush(self):
for ofile in self._files:
ofile.flush()
class ThreadLocalOutput(object):
"""
Replacement for stdout/stderr that redirects
output to a thread-local file interface
"""
def __init__(self, local, stdout):
self._local = local
self._stdout = stdout
def write(self, txt):
"""
Write to file object
"""
if hasattr(self._local, "output"):
self._local.output.write(txt)
else:
self._stdout.write(txt)
def flush(self):
"""
Flush file object
"""
if hasattr(self._local, "output"):
self._local.output.flush()
else:
self._stdout.flush()
class TestScheduler(object):
"""
Schedule tests onto different threads
"""
def __init__(self, tests):
self._lock = threading.Lock()
self._tests = tests
self._idx = 0
self._num_done = 0
def next(self):
"""
Return the next test
"""
ostools.PROGRAM_STATUS.check_for_shutdown()
with self._lock: # pylint: disable=not-context-manager
if self._idx < len(self._tests):
idx = self._idx
self._idx += 1
return self._tests[idx]
raise StopIteration
def test_done(self):
"""
Signal that a test has been done
"""
with self._lock: # pylint: disable=not-context-manager
self._num_done += 1
def is_finished(self):
with self._lock: # pylint: disable=not-context-manager
return self._num_done >= len(self._tests)
def wait_for_finish(self):
"""
Block until all tests have been done
"""
while not self.is_finished():
time.sleep(0.05)
LEGAL_CHARS = string.printable
ILLEGAL_CHARS = ' <>"|:*%?\\/#&;()'
def _is_legal(char):
"""
Return true if the character is legal to have in a file name
"""
return (char in LEGAL_CHARS) and (char not in ILLEGAL_CHARS)
def wrap(file_obj, use_color=True):
"""
Wrap file_obj in another stream which handles ANSI color codes using colorama
NOTE:
imports colorama here to avoid dependency from setup.py importing VUnit before colorama is installed
"""
from colorama import ( # type: ignore # pylint: disable=import-outside-toplevel
AnsiToWin32,
)
if use_color:
return AnsiToWin32(file_obj).stream
return AnsiToWin32(file_obj, strip=True, convert=False).stream
| []
| []
| [
"VUNIT_TEST_OUTPUT_PATH_MARGIN"
]
| [] | ["VUNIT_TEST_OUTPUT_PATH_MARGIN"] | python | 1 | 0 | |
init.go | package main
import (
"bufio"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/fatih/color"
"gopkg.in/yaml.v2"
)
var (
execPath string
execDir string
)
var testEnv = os.Getenv("INFIBOMBTEST") == "1"
var argsNotExist = len(os.Args) < 2
func init() {
var err error
// Korzystanie tylko z jednego rdzenia procesora
runtime.GOMAXPROCS(1)
color.New(color.FgHiMagenta).Print("Infinite")
print(" Bomber ")
color.New(color.FgRed).Println(version)
ex, err := os.Executable()
errCheck(err)
execPath, err = filepath.Abs(ex)
errCheck(err)
execDir = filepath.Dir(execPath)
cyanPr := color.New(color.FgCyan).Print
scanner := bufio.NewScanner(os.Stdin)
argsLen := len(os.Args)
have2args := argsLen >= 2
for {
if have2args {
num = os.Args[1]
} else {
cyanPr("Wprowadź numer telefonu (np 79112345678):")
scanner.Scan()
errCheck(scanner.Err())
num = scanner.Text()
}
ok := true
for _, v := range num {
if v < '0' || v > '9' {
println("Numer może zawierać tylko cyfry!")
ok = false
break
}
}
if len(num) < 10 {
println("Numer musi zawierać co najmniej 10 cyfr!")
ok = false
}
if ok {
break
} else if !argsNotExist {
shutdown(true)
}
}
var ans string
have3args := argsLen >= 3
for {
if have3args {
ans = os.Args[2]
} else {
cyanPr(`Please choose the attack mode (1 - SMS only, 2 - calls only, 3 - SMS and calls):`)
scanner.Scan()
errCheck(scanner.Err())
ans = scanner.Text()
}
if ans == "1" {
floodMode = 1
break
} else if ans == "2" {
floodMode = 2
break
} else if ans == "3" {
floodMode = 3
break
} else if have3args {
println("Drugi parametr musi wynosić 1, 2 lub 3!")
shutdown(true)
} else {
println("Wpisz 1, 2 lub 3!")
}
}
have4args := argsLen >= 4
for {
if have4args {
ans = os.Args[3]
} else {
cyanPr(`Please choose the logging mode (0 - off, 1 - OK only, 2 - errors only, 3 - OK and errors): `)
scanner.Scan()
errCheck(scanner.Err())
ans = scanner.Text()
}
if ans == "0" {
logging = 0
okLog = func(typParam) {}
grPrntln = nil
errLog = func(typParam) {}
redPrntln = nil
break
} else if ans == "1" {
logging = 1
errLog = func(typParam) {}
redPrntln = nil
break
} else if ans == "2" {
logging = 2
okLog = func(typParam) {}
grPrntln = nil
break
} else if ans == "3" {
logging = 3
break
} else if have4args {
println("Trzeci parametr musi wynosić 0, 1, 2 lub 3!")
shutdown(true)
} else {
println("Wpisz 0, 1, 2 lub 3!")
}
}
have5args := argsLen >= 5
for {
if have5args {
ans = os.Args[4]
} else {
cyanPr(`Enter the attack duration in seconds (0 - infinite): `)
scanner.Scan()
errCheck(scanner.Err())
ans = scanner.Text()
}
floodTime, err = strconv.Atoi(ans)
if err != nil || floodTime > 294967296 {
if have5args {
println("Czwarty parametr nie powinien być większy niż 294967296!")
shutdown(true)
} else {
println("Ten parametr musi być 0 lub dodatnią liczbą całkowitą mniejszą niż 294967296!")
continue
}
}
if floodTime < 0 {
if have5args {
println("Czwarty parametr musi być 0 lub dodatnią liczbą całkowitą!")
shutdown(true)
} else {
println("Wprowadź dodatnią liczbę całkowitą lub 0!")
continue
}
}
break
}
// Initialize the services
println("Initializing services...")
fb, err := ioutil.ReadFile(filepath.Join(execDir, "services.yaml"))
errCheck(err)
text := strings.NewReplacer(
"<num>", num, // 79112345678
"<num2>", num[1:], // 9112345678
"<num3>", num[0:1]+" ("+num[1:4]+") "+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7 (911) 234-56-78
"<num4>", num[0:1]+" ("+num[1:4]+") "+num[4:7]+" "+num[7:9]+" "+num[9:], // 7 (911) 234 56 78
"<num5>", num[0:1]+" ("+num[1:4]+")"+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7 (911)234-56-78
"<num6>", num[0:1]+" ("+num[1:4]+")"+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7(911)234 56 78
"<num7>", num[0:1]+" "+num[1:4]+" "+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7 911 234-56-78
"<num8>", num[0:1]+" "+num[1:4]+" "+num[4:7]+" "+num[7:9]+" "+num[9:], // 7 911 234 56 78
"<num9>", num[0:1]+" ("+num[1:4]+")"+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7(911)234-56-78
"<num10>", num[0:1]+" ("+num[1:4]+")"+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7(911)2345678
"<num11>", num[0:1]+"%20("+num[1:4]+")%20"+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7%20(911)%20234-56-78
"<num12>", num[0:1]+"%20("+num[1:4]+")%20"+num[4:7]+"%20"+num[7:9]+"%20"+num[9:], // 7%20(911)%20234%2056%2078
"<num13>", num[0:1]+"%20("+num[1:4]+")"+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7%20(911)234-56-78
"<num14>", num[0:1]+"%20("+num[1:4]+")"+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7(911)234%2056%2078
"<num15>", num[0:1]+"%20"+num[1:4]+"%20"+num[4:7]+"-"+num[7:9]+"-"+num[9:], // 7%20911%20234-56-78
"<num16>", num[0:1]+"%20"+num[1:4]+"%20"+num[4:7]+"%20"+num[7:9]+"%20"+num[9:], // 7%20911%20234%2056%2078
).Replace(string(fb))
if testEnv {
println(text)
}
err = yaml.UnmarshalStrict([]byte(text), services)
errCheck(err)
// Initialize Tor
attachTor()
}
| [
"\"INFIBOMBTEST\""
]
| []
| [
"INFIBOMBTEST"
]
| [] | ["INFIBOMBTEST"] | go | 1 | 0 | |
AzureFunctions.Python/process_batches/__init__.py | """
Time-triggered Azure Function that picks up batches of files from Azure Blob
Storage and queues them for validation.
"""
import datetime
import logging
import os
from typing import List
import azure.functions as func
from azure.storage.blob import BlobServiceClient
from ..common import BlobStorageClient, Batch, Status, TYPES
def main(mytimer: func.TimerRequest, myqueue: func.Out[List[str]]) -> None:
"""
Entry point for this Azure Function.
"""
utc_timestamp = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
if mytimer.past_due:
logging.info('The timer is past due!')
logging.info('Python timer trigger function ran at %s', utc_timestamp)
try:
# Get all batches ready to be validated from the storage
blob_service_client = BlobServiceClient.from_connection_string(os.getenv('DataStorage'))
container_client = blob_service_client.get_container_client(os.getenv('DataContainer'))
blob_client: BlobStorageClient = BlobStorageClient(container_client)
batches = blob_client.get_batches(os.getenv('DataSubpath'))
if len(batches) == 0:
logging.warning('No new batches to validate')
return
# Send batches to the validation queue
for batch in batches:
batch.status = Status.RUNNING
blob_client.save_batch_status(batch)
logging.info('Sending batch %s > %s to the validation queue',
batch.customer, batch.timestamp)
myqueue.set([batch.serialize() for batch in batches])  # pass a concrete list, not a lazy map object
logging.info('%s new batches sent to the validation queue', len(batches))
except Exception as ex:
logging.exception('EXCEPTION while getting batches', exc_info=ex)
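# Configuration sketch (illustrative, not part of the original function): the
# three settings read above normally come from the Function App configuration,
# e.g. in local.settings.json:
#   "DataStorage":   "<storage account connection string>",
#   "DataContainer": "data",        # hypothetical container name
#   "DataSubpath":   "incoming"     # hypothetical batch prefix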
| []
| []
| [
"DataContainer",
"DataStorage",
"DataSubpath"
]
| [] | ["DataContainer", "DataStorage", "DataSubpath"] | python | 3 | 0 | |
test/test_util.py | import os
import unittest
from lissandra import lissandra
def test_setup():
lissandra.apply_settings(lissandra.get_default_config())
lissandra.set_riot_api_key(os.environ.get("RIOT_API_KEY"))
lissandra.apply_settings({"global": {"default_region": "EUW"}})
class BaseTest(unittest.TestCase):
def setUp(self):
test_setup()
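# Note (illustrative, not part of the original tests): a valid key must be
# present in the environment before the tests run, e.g.
#   RIOT_API_KEY=<your-key> python -m pytest test/test_util.py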
| []
| []
| [
"RIOT_API_KEY"
]
| [] | ["RIOT_API_KEY"] | python | 1 | 0 | |
python/sync.py | #!/usr/bin/env python
import sys
import os
import argparse
import datetime
import time
import select
import paramiko
from contextlib import closing
from paramiko.client import AutoAddPolicy
from paramiko.client import SSHClient
from subprocess import call
SOURCE_DIR="{home}/projects/".format(home=os.environ.get("HOME"))
"""I got really sick of uploading code to linux boxes to test it out. Vagrant helps, but sometimes you just need a dev box in aws.
So, develop locally and every time you save a file, this tool will rsync it up to your remote.
This assumes a few things.
* I assume you stick your code in $HOME/projects/
* I assume you want $HOME/projects/foo to end up in you@remote:/home/you/foo/
* I assume you want to ignore a .venv dir at the project root.
* I assume you have ssh keys or this is going to suck.
* I assume you use inotify or fswatch or some other simlar tool to detect filesystem change.
* You may add a post-sync command if needed.
Example use:
fswatch .| ~/bin/sync.py --remote-host 54.160.179.162 --post-sync /usr/local/bin/restart-services.sh
"""
class DirSync(object):
"""Synchronize a project to the home directory of a remote server"""
def __init__(self, remote_addr, files):
'''Sync a directory to a given host'''
self.remote_addr = remote_addr
self.files = files
def sync_dir(self, dir_name):
"""Synch the directory.
Please note: I'm ignoring dir_name/.venv because that's
usually a mac locally and linux remotely in my case.
"""
cmd = ['rsync', '-av', '--delete', '--exclude', '.venv', "{root}/{target_dir}".format(root=SOURCE_DIR,
target_dir=dir_name),
"{address}:".format(address=self.remote_addr)]
rv = call(cmd)
if 0 != rv:
print "Error returned: {rv}".format(rv=rv)
def main(self):
if len(self.files) > 0:
changed_files = []
for line in self.files:
# Ignore changes in .git. only update when live files change.
if -1 != line.rfind("/.git/"):
continue
# This subdir is bullshit.
if -1 != line.rfind("/.venv/"):
continue
filename = line.split(SOURCE_DIR)[1]
changed_root = filename.split("/")[0]
changed_files.append(filename)
if len(changed_files):
#print "Need to sync {root} due to {files}".format(root=changed_root,
# files=changed_files)
self.sync_dir(changed_root)
COLOR_CODE = {
"green": 32,
"red": 31,
"yellow": 33,
}
def _color(string, color):
if color:
return '\x1b[%sm%s\x1b[0m' % (COLOR_CODE[color], string)
else:
return string
def dump_channel(chan):
"""
Keyword Arguments:
chan -- ssh channel
"""
temp_buffer = ''
finished = False
while not finished:
if chan.recv_ready():
temp_buffer = "%s%s" % (temp_buffer, chan.recv(1024))
if temp_buffer.find("\n") > -1:
tmp = temp_buffer[0:temp_buffer.find("\n")]
print(_color("\t%s" % tmp, 'yellow'))
temp_buffer = temp_buffer[temp_buffer.find("\n"):len(temp_buffer) + 1]
finished = chan.exit_status_ready()
# Last sweep for straggler data that showed up during the race to complete
# NB: in theory more than 1k of data could be buffered. Drain it all.
while len(temp_buffer) > 0:
if temp_buffer.find("\n") > -1:
tmp = temp_buffer[0:temp_buffer.find("\n")]
print(_color("\t%s" % tmp.rstrip(), 'yellow'))
temp_buffer = temp_buffer[temp_buffer.find("\n"):len(temp_buffer) + 1]
before = len(temp_buffer)
temp_buffer = "%s%s" % (temp_buffer, chan.recv(1024))
if len(temp_buffer) == before:
if temp_buffer != "\n":
print(_color("\t%s" % temp_buffer.rstrip(), 'yellow'))
temp_buffer = ''
if "__main__" == __name__:
parser = argparse.ArgumentParser(description='Sync directory to remote host')
parser.add_argument('--remote-host', help='host to sync to')
parser.add_argument("--post-sync", help="Command to execute on remote host post-sync")
args = parser.parse_args()
# Queue date math. Only synch at some reasonable interval.
# roll up updates over say 5 seconds so git checkouts don't dogpile
files = []
t0 = datetime.datetime.now()
while True:
while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
files.append(line)
size = len(files)
if size:
now = datetime.datetime.now()
delta = now - t0
if 5.0 < delta.total_seconds():
runner = DirSync(args.remote_host, files)
runner.main()
if args.post_sync:
print "Running ({cmd})".format(cmd=args.post_sync)
with closing(SSHClient()) as client:
client.load_system_host_keys()
client.set_missing_host_key_policy(AutoAddPolicy())
print "Attempting to connect"
client.connect(args.remote_host, allow_agent=True, username=os.getenv('USER'))
print "Connected"
with closing(client.get_transport().open_session()) as chan:
chan.set_combine_stderr(True)
chan.exec_command(args.post_sync)
dump_channel(chan)
rv = chan.recv_exit_status()
print "Remote command exited with {rv}".format(rv=rv)
print "Done executing command"
files = []
t0 = datetime.datetime.now()
# yield a little or we just chew CPU in a tight loop and kill battery.
time.sleep(0.25)
sys.exit(0)
| []
| []
| [
"USER",
"HOME"
]
| [] | ["USER", "HOME"] | python | 2 | 0 | |
src/test/java/com/oktay/testAutomation/driver/DriverFactory.java | package com.oktay.testAutomation.driver;
import io.github.bonigarcia.wdm.ChromeDriverManager;
import io.github.bonigarcia.wdm.DriverManagerType;
import io.github.bonigarcia.wdm.FirefoxDriverManager;
import io.github.bonigarcia.wdm.InternetExplorerDriverManager;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.chrome.ChromeDriver;
import org.openqa.selenium.firefox.FirefoxDriver;
import org.openqa.selenium.ie.InternetExplorerDriver;
import org.openqa.selenium.chrome.ChromeOptions;
/*
* Created by oktayuyar on 2019-08-23
*/
public class DriverFactory {
// Get a new WebDriver instance.
// There are various implementations for this depending on the browser. The required browser can be set as an environment variable.
// Refer to http://getgauge.io/documentation/user/current/managing_environments/README.html
public static WebDriver getDriver() {
String browser = System.getenv("BROWSER");
browser = (browser == null) ? "CHROME": browser;
switch (browser) {
case "IE":
InternetExplorerDriverManager.getInstance(DriverManagerType.IEXPLORER).setup();
return new InternetExplorerDriver();
case "FIREFOX":
FirefoxDriverManager.getInstance(DriverManagerType.FIREFOX).setup();
return new FirefoxDriver();
case "CHROME":
default:
ChromeDriverManager.getInstance(DriverManagerType.CHROME).setup();
ChromeOptions options = new ChromeOptions();
if ("Y".equalsIgnoreCase(System.getenv("HEADLESS"))) {
options.addArguments("--headless");
options.addArguments("--disable-gpu");
options.addArguments("disable-popup-blocking");
options.addArguments("ignore-certificate-errors");
options.addArguments("disable-translate");
options.addArguments("--disable-notifications");
}
return new ChromeDriver(options);
}
}
}
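// Usage sketch (illustrative, not part of the original class): the browser and
// headless mode are selected via environment variables, e.g.
//   BROWSER=FIREFOX mvn test        // run the suite against Firefox
//   HEADLESS=Y mvn test             // headless Chrome (the default browser)
// and tests then obtain a driver with:
//   WebDriver driver = DriverFactory.getDriver();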
| [
"\"BROWSER\"",
"\"HEADLESS\""
]
| []
| [
"HEADLESS",
"BROWSER"
]
| [] | ["HEADLESS", "BROWSER"] | java | 2 | 0 | |
Ch19/s19_04/textedit.py | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtGui import QPainter, QFont, QColor
from PyQt5.QtCore import Qt, QPointF
class CTextEdit(QTextEdit):
def __init__(self, parent=None) :
super(CTextEdit, self).__init__(parent)
def paintEvent(self, evt):
painter = QPainter()
painter.begin(self.viewport())
painter.setRenderHint(QPainter.Antialiasing, True)
painter.setPen(Qt.blue)
painter.fillRect(evt.rect(), QColor(0, 255, 255, 100))
ft = QFont("宋体", 18)
painter.setFont(ft)
painter.drawText(QPointF(100,100), "file read ok!")
painter.end()
QTextEdit.paintEvent(self, evt)
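# Minimal usage sketch (illustrative, not part of the original file):
#
#   import sys
#   from PyQt5.QtWidgets import QApplication
#   app = QApplication(sys.argv)
#   edit = CTextEdit()
#   edit.show()
#   sys.exit(app.exec_())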
| []
| []
| []
| [] | [] | python | null | null | null |
cmd/latency-hist/main.go | package main
import (
"fmt"
"os"
"path/filepath"
)
func main() {
debug := false
if os.Getenv("DEBUG") != "" {
debug = true
}
if debug {
fmt.Printf("Running %v\n", filepath.Base(os.Args[0]))
}
}
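// Usage sketch (illustrative): any non-empty DEBUG value enables the banner, e.g.
//
//	DEBUG=1 ./latency-hist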
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
examples/flask-kitchensink/app.py | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import errno
import json
import os
import sys
import tempfile
from argparse import ArgumentParser
from flask import Flask, request, abort, send_from_directory
from werkzeug.middleware.proxy_fix import ProxyFix
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
LineBotApiError, InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
SourceUser, SourceGroup, SourceRoom,
TemplateSendMessage, ConfirmTemplate, MessageAction,
ButtonsTemplate, ImageCarouselTemplate, ImageCarouselColumn, URIAction,
PostbackAction, DatetimePickerAction,
CameraAction, CameraRollAction, LocationAction,
CarouselTemplate, CarouselColumn, PostbackEvent,
StickerMessage, StickerSendMessage, LocationMessage, LocationSendMessage,
ImageMessage, VideoMessage, AudioMessage, FileMessage,
UnfollowEvent, FollowEvent, JoinEvent, LeaveEvent, BeaconEvent,
MemberJoinedEvent, MemberLeftEvent,
FlexSendMessage, BubbleContainer, ImageComponent, BoxComponent,
TextComponent, IconComponent, ButtonComponent,
SeparatorComponent, QuickReply, QuickReplyButton,
ImageSendMessage)
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app, x_for=1, x_host=1, x_proto=1)
# get channel_secret and channel_access_token from your environment variables
channel_secret = os.getenv('LINE_CHANNEL_SECRET', None)
channel_access_token = os.getenv('LINE_CHANNEL_ACCESS_TOKEN', None)
if channel_secret is None or channel_access_token is None:
print('Specify LINE_CHANNEL_SECRET and LINE_CHANNEL_ACCESS_TOKEN as environment variables.')
sys.exit(1)
line_bot_api = LineBotApi(channel_access_token)
handler = WebhookHandler(channel_secret)
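# Run sketch (illustrative, not part of the original example): with both
# variables exported the bot can be started locally, e.g.
#   export LINE_CHANNEL_SECRET=<channel secret>
#   export LINE_CHANNEL_ACCESS_TOKEN=<channel access token>
#   python app.py
# The __main__ block (outside this excerpt) is assumed to start the Flask app.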
static_tmp_path = os.path.join(os.path.dirname(__file__), 'static', 'tmp')
# function for create tmp dir for download content
def make_static_tmp_dir():
try:
os.makedirs(static_tmp_path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(static_tmp_path):
pass
else:
raise
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except LineBotApiError as e:
print("Got exception from LINE Messaging API: %s\n" % e.message)
for m in e.error.details:
print(" %s: %s" % (m.property, m.message))
print("\n")
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_text_message(event):
text = event.message.text
if text == 'profile':
if isinstance(event.source, SourceUser):
profile = line_bot_api.get_profile(event.source.user_id)
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='Display name: ' + profile.display_name),
TextSendMessage(text='Status message: ' + str(profile.status_message))
]
)
else:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="Bot can't use profile API without user ID"))
elif text == 'emojis':
emojis = [
{
"index": 0,
"productId": "5ac1bfd5040ab15980c9b435",
"emojiId": "001"
},
{
"index": 13,
"productId": "5ac1bfd5040ab15980c9b435",
"emojiId": "002"
}
]
text_message = TextSendMessage(text='$ LINE emoji $', emojis=emojis)
line_bot_api.reply_message(
event.reply_token, [
text_message
]
)
elif text == 'quota':
quota = line_bot_api.get_message_quota()
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='type: ' + quota.type),
TextSendMessage(text='value: ' + str(quota.value))
]
)
elif text == 'quota_consumption':
quota_consumption = line_bot_api.get_message_quota_consumption()
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='total usage: ' + str(quota_consumption.total_usage)),
]
)
elif text == 'push':
line_bot_api.push_message(
event.source.user_id, [
TextSendMessage(text='PUSH!'),
]
)
elif text == 'multicast':
line_bot_api.multicast(
[event.source.user_id], [
TextSendMessage(text='THIS IS A MULTICAST MESSAGE'),
]
)
elif text == 'broadcast':
line_bot_api.broadcast(
[
TextSendMessage(text='THIS IS A BROADCAST MESSAGE'),
]
)
elif text.startswith('broadcast '): # broadcast 20190505
date = text.split(' ')[1]
print("Getting broadcast result: " + date)
result = line_bot_api.get_message_delivery_broadcast(date)
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='Number of sent broadcast messages: ' + date),
TextSendMessage(text='status: ' + str(result.status)),
TextSendMessage(text='success: ' + str(result.success)),
]
)
elif text == 'bye':
if isinstance(event.source, SourceGroup):
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text='Leaving group'))
line_bot_api.leave_group(event.source.group_id)
elif isinstance(event.source, SourceRoom):
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text='Leaving group'))
line_bot_api.leave_room(event.source.room_id)
else:
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text="Bot can't leave from 1:1 chat"))
elif text == 'image':
url = request.url_root + '/static/logo.png'
app.logger.info("url=" + url)
line_bot_api.reply_message(
event.reply_token,
ImageSendMessage(url, url)
)
elif text == 'confirm':
confirm_template = ConfirmTemplate(text='Do it?', actions=[
MessageAction(label='Yes', text='Yes!'),
MessageAction(label='No', text='No!'),
])
template_message = TemplateSendMessage(
alt_text='Confirm alt text', template=confirm_template)
line_bot_api.reply_message(event.reply_token, template_message)
elif text == 'buttons':
buttons_template = ButtonsTemplate(
title='My buttons sample', text='Hello, my buttons', actions=[
URIAction(label='Go to line.me', uri='https://line.me'),
PostbackAction(label='ping', data='ping'),
PostbackAction(label='ping with text', data='ping', text='ping'),
MessageAction(label='Translate Rice', text='米')
])
template_message = TemplateSendMessage(
alt_text='Buttons alt text', template=buttons_template)
line_bot_api.reply_message(event.reply_token, template_message)
elif text == 'carousel':
carousel_template = CarouselTemplate(columns=[
CarouselColumn(text='hoge1', title='fuga1', actions=[
URIAction(label='Go to line.me', uri='https://line.me'),
PostbackAction(label='ping', data='ping')
]),
CarouselColumn(text='hoge2', title='fuga2', actions=[
PostbackAction(label='ping with text', data='ping', text='ping'),
MessageAction(label='Translate Rice', text='米')
]),
])
template_message = TemplateSendMessage(
alt_text='Carousel alt text', template=carousel_template)
line_bot_api.reply_message(event.reply_token, template_message)
elif text == 'image_carousel':
image_carousel_template = ImageCarouselTemplate(columns=[
ImageCarouselColumn(image_url='https://via.placeholder.com/1024x1024',
action=DatetimePickerAction(label='datetime',
data='datetime_postback',
mode='datetime')),
ImageCarouselColumn(image_url='https://via.placeholder.com/1024x1024',
action=DatetimePickerAction(label='date',
data='date_postback',
mode='date'))
])
template_message = TemplateSendMessage(
alt_text='ImageCarousel alt text', template=image_carousel_template)
line_bot_api.reply_message(event.reply_token, template_message)
elif text == 'imagemap':
pass
elif text == 'flex':
bubble = BubbleContainer(
direction='ltr',
hero=ImageComponent(
url='https://example.com/cafe.jpg',
size='full',
aspect_ratio='20:13',
aspect_mode='cover',
action=URIAction(uri='http://example.com', label='label')
),
body=BoxComponent(
layout='vertical',
contents=[
# title
TextComponent(text='Brown Cafe', weight='bold', size='xl'),
# review
BoxComponent(
layout='baseline',
margin='md',
contents=[
IconComponent(size='sm', url='https://example.com/gold_star.png'),
IconComponent(size='sm', url='https://example.com/grey_star.png'),
IconComponent(size='sm', url='https://example.com/gold_star.png'),
IconComponent(size='sm', url='https://example.com/gold_star.png'),
IconComponent(size='sm', url='https://example.com/grey_star.png'),
TextComponent(text='4.0', size='sm', color='#999999', margin='md',
flex=0)
]
),
# info
BoxComponent(
layout='vertical',
margin='lg',
spacing='sm',
contents=[
BoxComponent(
layout='baseline',
spacing='sm',
contents=[
TextComponent(
text='Place',
color='#aaaaaa',
size='sm',
flex=1
),
TextComponent(
text='Shinjuku, Tokyo',
wrap=True,
color='#666666',
size='sm',
flex=5
)
],
),
BoxComponent(
layout='baseline',
spacing='sm',
contents=[
TextComponent(
text='Time',
color='#aaaaaa',
size='sm',
flex=1
),
TextComponent(
text="10:00 - 23:00",
wrap=True,
color='#666666',
size='sm',
flex=5,
),
],
),
],
)
],
),
footer=BoxComponent(
layout='vertical',
spacing='sm',
contents=[
# callAction
ButtonComponent(
style='link',
height='sm',
action=URIAction(label='CALL', uri='tel:000000'),
),
# separator
SeparatorComponent(),
# websiteAction
ButtonComponent(
style='link',
height='sm',
action=URIAction(label='WEBSITE', uri="https://example.com")
)
]
),
)
message = FlexSendMessage(alt_text="hello", contents=bubble)
line_bot_api.reply_message(
event.reply_token,
message
)
elif text == 'flex_update_1':
bubble_string = """
{
"type": "bubble",
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "image",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/flexsnapshot/clip/clip3.jpg",
"position": "relative",
"size": "full",
"aspectMode": "cover",
"aspectRatio": "1:1",
"gravity": "center"
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "Brown Hotel",
"weight": "bold",
"size": "xl",
"color": "#ffffff"
},
{
"type": "box",
"layout": "baseline",
"margin": "md",
"contents": [
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
},
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
},
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
},
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gold_star_28.png"
},
{
"type": "icon",
"size": "sm",
"url": "https://scdn.line-apps.com/n/channel_devcenter/img/fx/review_gray_star_28.png"
},
{
"type": "text",
"text": "4.0",
"size": "sm",
"color": "#d6d6d6",
"margin": "md",
"flex": 0
}
]
}
]
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "¥62,000",
"color": "#a9a9a9",
"decoration": "line-through",
"align": "end"
},
{
"type": "text",
"text": "¥42,000",
"color": "#ebebeb",
"size": "xl",
"align": "end"
}
]
}
],
"position": "absolute",
"offsetBottom": "0px",
"offsetStart": "0px",
"offsetEnd": "0px",
"backgroundColor": "#00000099",
"paddingAll": "20px"
},
{
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "SALE",
"color": "#ffffff"
}
],
"position": "absolute",
"backgroundColor": "#ff2600",
"cornerRadius": "20px",
"paddingAll": "5px",
"offsetTop": "10px",
"offsetEnd": "10px",
"paddingStart": "10px",
"paddingEnd": "10px"
}
],
"paddingAll": "0px"
}
}
"""
message = FlexSendMessage(alt_text="hello", contents=json.loads(bubble_string))
line_bot_api.reply_message(
event.reply_token,
message
)
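    # Quick replies attach tap-to-send buttons to a message; LINE allows up to
    # 13 QuickReplyButton items per message (this sample attaches six).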
elif text == 'quick_reply':
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text='Quick reply',
quick_reply=QuickReply(
items=[
QuickReplyButton(
action=PostbackAction(label="label1", data="data1")
),
QuickReplyButton(
action=MessageAction(label="label2", text="text2")
),
QuickReplyButton(
action=DatetimePickerAction(label="label3",
data="data3",
mode="date")
),
QuickReplyButton(
action=CameraAction(label="label4")
),
QuickReplyButton(
action=CameraRollAction(label="label5")
),
QuickReplyButton(
action=LocationAction(label="label6")
),
])))
elif text == 'link_token' and isinstance(event.source, SourceUser):
link_token_response = line_bot_api.issue_link_token(event.source.user_id)
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='link_token: ' + link_token_response.link_token)
]
)
elif text == 'insight_message_delivery':
today = datetime.date.today().strftime("%Y%m%d")
response = line_bot_api.get_insight_message_delivery(today)
if response.status == 'ready':
messages = [
TextSendMessage(text='broadcast: ' + str(response.broadcast)),
TextSendMessage(text='targeting: ' + str(response.targeting)),
]
else:
messages = [TextSendMessage(text='status: ' + response.status)]
line_bot_api.reply_message(event.reply_token, messages)
elif text == 'insight_followers':
today = datetime.date.today().strftime("%Y%m%d")
response = line_bot_api.get_insight_followers(today)
if response.status == 'ready':
messages = [
TextSendMessage(text='followers: ' + str(response.followers)),
TextSendMessage(text='targetedReaches: ' + str(response.targeted_reaches)),
TextSendMessage(text='blocks: ' + str(response.blocks)),
]
else:
messages = [TextSendMessage(text='status: ' + response.status)]
line_bot_api.reply_message(event.reply_token, messages)
elif text == 'insight_demographic':
response = line_bot_api.get_insight_demographic()
if response.available:
messages = ["{gender}: {percentage}".format(gender=it.gender, percentage=it.percentage)
for it in response.genders]
else:
messages = [TextSendMessage(text='available: false')]
line_bot_api.reply_message(event.reply_token, messages)
else:
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=event.message.text))
@handler.add(MessageEvent, message=LocationMessage)
def handle_location_message(event):
line_bot_api.reply_message(
event.reply_token,
LocationSendMessage(
title='Location', address=event.message.address,
latitude=event.message.latitude, longitude=event.message.longitude
)
)
@handler.add(MessageEvent, message=StickerMessage)
def handle_sticker_message(event):
line_bot_api.reply_message(
event.reply_token,
StickerSendMessage(
package_id=event.message.package_id,
sticker_id=event.message.sticker_id)
)
# Other Message Type
@handler.add(MessageEvent, message=(ImageMessage, VideoMessage, AudioMessage))
def handle_content_message(event):
if isinstance(event.message, ImageMessage):
ext = 'jpg'
elif isinstance(event.message, VideoMessage):
ext = 'mp4'
elif isinstance(event.message, AudioMessage):
ext = 'm4a'
else:
return
message_content = line_bot_api.get_message_content(event.message.id)
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix=ext + '-', delete=False) as tf:
for chunk in message_content.iter_content():
tf.write(chunk)
tempfile_path = tf.name
dist_path = tempfile_path + '.' + ext
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='Save content.'),
TextSendMessage(text=request.host_url + os.path.join('static', 'tmp', dist_name))
])
@handler.add(MessageEvent, message=FileMessage)
def handle_file_message(event):
message_content = line_bot_api.get_message_content(event.message.id)
with tempfile.NamedTemporaryFile(dir=static_tmp_path, prefix='file-', delete=False) as tf:
for chunk in message_content.iter_content():
tf.write(chunk)
tempfile_path = tf.name
dist_path = tempfile_path + '-' + event.message.file_name
dist_name = os.path.basename(dist_path)
os.rename(tempfile_path, dist_path)
line_bot_api.reply_message(
event.reply_token, [
TextSendMessage(text='Save file.'),
TextSendMessage(text=request.host_url + os.path.join('static', 'tmp', dist_name))
])
@handler.add(FollowEvent)
def handle_follow(event):
app.logger.info("Got Follow event:" + event.source.user_id)
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text='Got follow event'))
@handler.add(UnfollowEvent)
def handle_unfollow(event):
app.logger.info("Got Unfollow event:" + event.source.user_id)
@handler.add(JoinEvent)
def handle_join(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(text='Joined this ' + event.source.type))
@handler.add(LeaveEvent)
def handle_leave():
app.logger.info("Got leave event")
@handler.add(PostbackEvent)
def handle_postback(event):
if event.postback.data == 'ping':
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text='pong'))
elif event.postback.data == 'datetime_postback':
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=event.postback.params['datetime']))
elif event.postback.data == 'date_postback':
line_bot_api.reply_message(
event.reply_token, TextSendMessage(text=event.postback.params['date']))
@handler.add(BeaconEvent)
def handle_beacon(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text='Got beacon event. hwid={}, device_message(hex string)={}'.format(
event.beacon.hwid, event.beacon.dm)))
@handler.add(MemberJoinedEvent)
def handle_member_joined(event):
line_bot_api.reply_message(
event.reply_token,
TextSendMessage(
text='Got memberJoined event. event={}'.format(
event)))
@handler.add(MemberLeftEvent)
def handle_member_left(event):
app.logger.info("Got memberLeft event")
@app.route('/static/<path:path>')
def send_static_content(path):
return send_from_directory('static', path)
if __name__ == "__main__":
arg_parser = ArgumentParser(
usage='Usage: python ' + __file__ + ' [--port <port>] [--help]'
)
arg_parser.add_argument('-p', '--port', type=int, default=8000, help='port')
    arg_parser.add_argument('-d', '--debug', action='store_true', default=False, help='debug')
options = arg_parser.parse_args()
    # create tmp dir for downloaded content
make_static_tmp_dir()
app.run(debug=options.debug, port=options.port)
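# Illustrative local-run sketch (values and the script name are placeholders,
# not part of the sample):
#   export LINE_CHANNEL_SECRET=<channel secret>
#   export LINE_CHANNEL_ACCESS_TOKEN=<channel access token>
#   python app.py --port 8000 --debug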
| []
| []
| [
"LINE_CHANNEL_SECRET",
"LINE_CHANNEL_ACCESS_TOKEN"
]
| [] | ["LINE_CHANNEL_SECRET", "LINE_CHANNEL_ACCESS_TOKEN"] | python | 2 | 0 | |
tunnel/config.go | package tunnel
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/kevinburke/ssh_config"
log "github.com/sirupsen/logrus"
)
// SSHConfigFile finds specific attributes of an SSH server configured in an
// ssh config file.
type SSHConfigFile struct {
sshConfig *ssh_config.Config
}
// NewSSHConfigFile creates a new instance of SSHConfigFile based on the
// ssh config file from $HOME/.ssh/config.
func NewSSHConfigFile() (*SSHConfigFile, error) {
configPath := filepath.Join(os.Getenv("HOME"), ".ssh", "config")
f, err := os.Open(filepath.Clean(configPath))
if err != nil {
return nil, err
}
cfg, err := ssh_config.Decode(f)
if err != nil {
return nil, err
}
log.Debugf("using ssh config file from: %s", configPath)
return &SSHConfigFile{sshConfig: cfg}, nil
}
// Get consults an ssh config file to extract some SSH server attributes
// from it, returning a SSHHost. Any attribute whose value is an empty
// string is one that could not be found in the ssh config file.
func (r SSHConfigFile) Get(host string) *SSHHost {
hostname := r.getHostname(host)
port, err := r.sshConfig.Get(host, "Port")
if err != nil {
port = ""
}
user, err := r.sshConfig.Get(host, "User")
if err != nil {
user = ""
}
localForward, err := r.getLocalForward(host)
if err != nil {
localForward = &LocalForward{Local: "", Remote: ""}
log.Warningf("error reading LocalForward configuration from ssh config file. This option will not be used: %v", err)
}
key := r.getKey(host)
return &SSHHost{
Hostname: hostname,
Port: port,
User: user,
Key: key,
LocalForward: localForward,
}
}
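
// exampleGet is an illustrative, uncalled sketch of how SSHConfigFile is
// meant to be used; the host alias "myserver" is a made-up example that
// would need to exist in $HOME/.ssh/config for the lookup to return
// non-empty attributes.
func exampleGet() {
	cfg, err := NewSSHConfigFile()
	if err != nil {
		log.Warningf("could not load ssh config: %v", err)
		return
	}
	fmt.Printf("resolved ssh host: %s\n", cfg.Get("myserver"))
}

// getHostname returns the Hostname option for a host, or "" if it is unset.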
func (r SSHConfigFile) getHostname(host string) string {
hostname, err := r.sshConfig.Get(host, "Hostname")
if err != nil {
return ""
}
return hostname
}
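
// getLocalForward parses the LocalForward option for a host into local and
// remote addresses. For example (illustrative values), an entry
// "LocalForward 8080 db.internal:5432" yields Local "127.0.0.1:8080" and
// Remote "db.internal:5432".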
func (r SSHConfigFile) getLocalForward(host string) (*LocalForward, error) {
var local, remote string
c, err := r.sshConfig.Get(host, "LocalForward")
if err != nil {
return nil, err
}
if c == "" {
return &LocalForward{Local: "", Remote: ""}, nil
}
l := strings.Fields(c)
if len(l) < 2 {
return nil, fmt.Errorf("bad forwarding specification on ssh config file: %s", l)
}
local = l[0]
remote = l[1]
if strings.HasPrefix(local, ":") {
local = fmt.Sprintf("127.0.0.1%s", local)
}
if local != "" && !strings.Contains(local, ":") {
local = fmt.Sprintf("127.0.0.1:%s", local)
}
return &LocalForward{Local: local, Remote: remote}, nil
}
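
// getKey resolves the IdentityFile option for a host, expanding a leading
// "~" to $HOME (e.g. "~/.ssh/id_rsa" becomes "<home dir>/.ssh/id_rsa").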
func (r SSHConfigFile) getKey(host string) string {
id, err := r.sshConfig.Get(host, "IdentityFile")
if err != nil {
return ""
}
if id != "" {
if strings.HasPrefix(id, "~") {
return filepath.Join(os.Getenv("HOME"), id[1:])
}
return id
}
return ""
}
// SSHHost represents a host configuration extracted from a ssh config file.
type SSHHost struct {
Hostname string
Port string
User string
Key string
LocalForward *LocalForward
}
// String returns a string representation of a SSHHost.
func (h SSHHost) String() string {
return fmt.Sprintf("[hostname=%s, port=%s, user=%s, key=%s, local_forward=%s]", h.Hostname, h.Port, h.User, h.Key, h.LocalForward)
}
// LocalForward represents a LocalForward configuration for SSHHost.
type LocalForward struct {
Local string
Remote string
}
// String returns a string representation of LocalForward.
func (f LocalForward) String() string {
return fmt.Sprintf("[local=%s, remote=%s]", f.Local, f.Remote)
}
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
arborist/server_test.go | package arborist_test
import (
"bytes"
"crypto/rand"
"crypto/rsa"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/http"
"net/http/httptest"
"net/url"
"os"
"sort"
"strings"
"testing"
"time"
"github.com/jmoiron/sqlx"
"github.com/stretchr/testify/assert"
"gopkg.in/square/go-jose.v2"
"gopkg.in/square/go-jose.v2/jwt"
"github.com/uc-cdis/arborist/arborist"
)
// For testing we use a mock JWT decoder which will always just return all the
// claims without trying to make HTTP calls or validating the token. The test
// server is set up using this mock JWT app to skip validation.
type mockJWTApp struct {
}
// Decode lets us use this mock JWT decoder for testing. It does zero validation
// of any tokens it receives, and just returns the decoded claims.
func (jwtApp *mockJWTApp) Decode(token string) (*map[string]interface{}, error) {
decodedToken, err := jwt.ParseSigned(token)
if err != nil {
return nil, err
}
result := make(map[string]interface{})
err = decodedToken.UnsafeClaimsWithoutVerification(&result)
if err != nil {
return nil, err
}
return &result, nil
}
// TestJWT is a utility for making fake JWTs suitable for testing.
//
// Example:
//
// token := TestJWT{username: username}
// body := []byte(fmt.Sprintf(`{"user": {"token": "%s"}}`, token.Encode()))
// req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
//
type TestJWT struct {
username string
clientID string
policies []string
exp int64
}
// Encode takes the information in the TestJWT and creates a string of an
// encoded JWT containing some basic claims, and whatever information was
// provided in the TestJWT.
//
// To generate a signed JWT, we make up a random RSA key to sign the token ...
// and then throw away the key, because the server's mock JWT app (see above)
// doesn't care about the validation anyways.
func (testJWT *TestJWT) Encode() string {
// Make a new, random RSA key just to sign this JWT.
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
panic(err)
}
signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: key}, nil)
if err != nil {
panic(err)
}
exp := testJWT.exp
if exp == 0 {
exp = time.Now().Unix() + 10000
}
var payload []byte
	if len(testJWT.policies) == 0 {
payload = []byte(fmt.Sprintf(
`{
"scope": ["openid"],
"exp": %d,
"sub": "0",
"context": {
"user": {
"name": "%s"
}
},
"azp": "%s"
}`,
exp,
testJWT.username,
testJWT.clientID,
))
} else {
policies := fmt.Sprintf(`["%s"]`, strings.Join(testJWT.policies, `", "`))
payload = []byte(fmt.Sprintf(
`{
"scope": ["openid"],
"exp": %d,
"sub": "0",
"context": {
"user": {
"name": "%s",
"policies": %s
}
},
"azp": "%s"
}`,
			exp,
testJWT.username,
policies,
testJWT.clientID,
))
}
jws, err := signer.Sign(payload)
if err != nil {
panic(err)
}
result, err := jws.CompactSerialize()
if err != nil {
panic(err)
}
return result
}
var logTo = flag.String(
"log",
"buffer",
"where to write logs to (default is buffer flushed on errors)",
)
func TestServer(t *testing.T) {
flag.Parse()
logBuffer := bytes.NewBuffer([]byte{})
logFlags := log.Ldate | log.Ltime
var logDest io.Writer
if *logTo == "stdout" {
logDest = os.Stdout
} else {
logDest = logBuffer
}
logger := log.New(logDest, "", logFlags)
jwtApp := &mockJWTApp{}
dbUrl := os.Getenv("ARBORIST_TEST_DB")
	// if dbUrl is empty, the postgres driver falls back to the standard PG* environment variables
if dbUrl == "" {
fmt.Print("using postgres environment variables for test database\n")
} else {
fmt.Printf("using %s for test database\n", dbUrl)
}
db, err := sqlx.Open("postgres", dbUrl)
// no error so far, make sure ping returns OK
if err == nil {
err = db.Ping()
}
if err != nil {
fmt.Println("couldn't reach db; make sure arborist has correct database configuration!")
t.Fatal(err)
}
server, err := arborist.
NewServer().
WithLogger(logger).
WithJWTApp(jwtApp).
WithDB(db).
Init()
if err != nil {
t.Fatal(err)
}
handler := server.MakeRouter(logDest)
// some test data to work with
resourcePath := "/example(123)-X.Y*"
resourceBody := []byte(fmt.Sprintf(`{"path": "%s"}`, resourcePath))
serviceName := "zxcv"
roleName := "hjkl"
permissionName := "qwer"
methodName := permissionName
policyName := "asdf"
roleBody := []byte(fmt.Sprintf(
`{
"id": "%s",
"permissions": [
{"id": "%s", "action": {"service": "%s", "method": "%s"}}
]
}`,
roleName,
permissionName,
serviceName,
methodName,
))
policyBody := []byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["%s"],
"role_ids": ["%s"]
}`,
policyName,
resourcePath,
roleName,
))
username := "wasd"
userBody := []byte(fmt.Sprintf(
`{
"name": "%s"
}`,
username,
))
clientID := "qazwsx"
clientBody := []byte(fmt.Sprintf(
`{
"clientID": "%s"
}`,
clientID,
))
// httpError is a utility function which writes some useful output after an error.
httpError := func(t *testing.T, w *httptest.ResponseRecorder, msg string) {
t.Errorf("%s; got status %d, response: %s", msg, w.Code, w.Body.String())
fmt.Println("test errored, dumping logs")
fmt.Println("logs start")
_, err = logBuffer.WriteTo(os.Stdout)
fmt.Println("logs end")
if err != nil {
t.Fatal(err)
}
}
	// newRequest is a utility function which wraps creating new http requests
	// so we can ignore the errors wherever this is called.
newRequest := func(method string, url string, body io.Reader) *http.Request {
req, err := http.NewRequest(method, url, body)
if err != nil {
t.Fatal(err)
}
return req
}
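
	// The helpers below wrap common setup requests (creating users, clients,
	// resources, roles, policies, and groups) so individual tests stay short;
	// each fails the current test immediately if the server returns an
	// unexpected status code.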
createUserBytes := func(t *testing.T, body []byte) {
w := httptest.NewRecorder()
req := newRequest("POST", "/user", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create user")
}
result := struct {
Created struct {
Name string `json:"name"`
} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from user creation")
}
}
createClientBytes := func(t *testing.T, body []byte) {
w := httptest.NewRecorder()
req := newRequest("POST", "/client", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create client")
}
result := struct {
Created struct {
Name string `json:"name"`
} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from client creation")
}
}
updateUserBytes := func(t *testing.T, username string, body []byte, expectedHTTPCode int) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s", username)
req := newRequest("PATCH", url, bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
assert.Equalf(t, expectedHTTPCode, w.Code, "Wanted http response: %v \t Got: %v", expectedHTTPCode, w.Code)
}
assertUsernameAndEmail := func(t *testing.T, expectedUsername string, expectedUserEmail string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s", expectedUsername)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read user")
}
result := struct {
Name string `json:"name"`
Email string `json:"email"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from user read")
}
assert.Equalf(t, expectedUsername, result.Name, "Wanted username: %v \t Got: %v", expectedUsername, result.Name)
assert.Equalf(t, expectedUserEmail, result.Email, "Wanted email: %v \t Got: %v", expectedUserEmail, result.Email)
}
createResourceBytes := func(t *testing.T, body []byte) {
w := httptest.NewRecorder()
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
result := struct {
Created struct {
Path string `json:"path"`
} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
}
getResourceWithPath := func(t *testing.T, path string) arborist.ResourceOut {
url := fmt.Sprintf("/resource%s", path)
w := httptest.NewRecorder()
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, fmt.Sprintf("couldn't find resource %s", path))
}
result := arborist.ResourceOut{}
err := json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource get")
}
return result
}
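
	// getTagForResource reads the tag for a resource path straight from the
	// database, returning "" if the resource does not exist. (The query error
	// is deliberately ignored; the tests only care about the tag value.)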
getTagForResource := func(path string) string {
var tags []string
db.Select(&tags, "SELECT tag FROM resource WHERE path = $1", arborist.FormatPathForDb(path))
if len(tags) == 0 {
return ""
}
return tags[0]
}
createRoleBytes := func(t *testing.T, body []byte) {
w := httptest.NewRecorder()
req := newRequest("POST", "/role", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create role")
}
}
createPolicyBytes := func(t *testing.T, body []byte) {
w := httptest.NewRecorder()
req := newRequest("POST", "/policy", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create policy")
}
result := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from policy creation")
}
}
createGroupBytes := func(t *testing.T, body []byte) {
w := httptest.NewRecorder()
req := newRequest("POST", "/group", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create group")
}
result := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group creation")
}
}
resourcePathA := resourcePath + "/A"
resourcePathB := resourcePath + "/B"
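
	// setupTestPolicy creates the resource tree, role, and policy that several
	// of the tests below share.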
setupTestPolicy := func(t *testing.T) {
createResourceBytes(t, []byte(fmt.Sprintf(`{"path": "%s"}`, resourcePath)))
createResourceBytes(t, []byte(fmt.Sprintf(`{"path": "%s"}`, resourcePathA)))
createResourceBytes(t, []byte(fmt.Sprintf(`{"path": "%s"}`, resourcePathB)))
createRoleBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "%s",
"permissions": [
{"id": "%s", "action": {"service": "%s", "method": "%s"}}
]
}`,
roleName,
permissionName,
serviceName,
methodName,
)),
)
policyBody := []byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["%s"],
"role_ids": ["%s"]
}`,
policyName,
resourcePath,
roleName,
))
createPolicyBytes(t, policyBody)
}
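
	// grantUserPolicy grants the given policy to a user, optionally with an
	// expiration timestamp; pass "null" for expiresAt to grant without expiry.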
grantUserPolicy := func(t *testing.T, username string, policyName string, expiresAt string) {
if expiresAt != "null" {
expiresAt = fmt.Sprintf(`"%s"`, expiresAt)
}
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s/policy", username)
policyBody := []byte(fmt.Sprintf(
`{
"policy": "%s",
"expires_at": %s
}`,
policyName,
expiresAt,
))
req := newRequest(
"POST",
url,
bytes.NewBuffer(policyBody),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't grant policy to user")
}
}
revokeUserPolicy := func(t *testing.T, username string, policyName string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s/policy/%s", username, policyName)
req := newRequest(
"DELETE",
url,
nil,
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't delete user policy")
}
}
grantClientPolicy := func(t *testing.T, clientID string, policyName string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/client/%s/policy", clientID)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s"}`, policyName))),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't grant policy to client")
}
}
addUserToGroup := func(t *testing.T, username string, groupName string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s/user", groupName)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"username": "%s"}`, username))),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't add user to group")
}
}
grantGroupPolicy := func(t *testing.T, groupName string, policyName string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s/policy", groupName)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s"}`, policyName))),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't grant policy to group")
}
}
// setupAnonymousPolicies creates policies for the Anonymous group
// and returns the policies, resource paths, and auth mapping for the group.
setupAnonymousPolicies := func(t *testing.T) ([]arborist.Policy, []string, arborist.AuthMapping) {
// create test resources
resourcePath := "/anonymous-resource-path"
createResourceBytes(t, []byte(fmt.Sprintf(`{"path": "%s"}`, resourcePath)))
// create test role
roleName := "anonymous-test-role"
permissionName := "qwer"
serviceName := "zxcv"
methodName := permissionName
createRoleBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "%s",
"permissions": [
{"id": "%s", "action": {"service": "%s", "method": "%s"}}
]
}`,
roleName,
permissionName,
serviceName,
methodName,
)),
)
// create test policy
policyName := "anonymous-test-policy"
createPolicyBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["%s"],
"role_ids": ["%s"]
}`,
policyName,
resourcePath,
roleName,
)),
)
// assign test policy to anonymous group
grantGroupPolicy(t, arborist.AnonymousGroup, policyName)
// return policy and authMapping
policy := arborist.Policy{policyName, "", []string{resourcePath}, []string{roleName}}
authMapping := map[string][]arborist.Action{
resourcePath: []arborist.Action{arborist.Action{serviceName, methodName}},
}
return []arborist.Policy{policy}, []string{resourcePath}, authMapping
}
// setupLoggedInPolicies creates policies for the LoggedIn group
// and returns the policies, resource paths, and auth mapping of the group.
setupLoggedInPolicies := func(t *testing.T) ([]arborist.Policy, []string, arborist.AuthMapping) {
// create test resources
resourcePath := "/loggedin-resource-path"
createResourceBytes(t, []byte(fmt.Sprintf(`{"path": "%s"}`, resourcePath)))
// create test role
roleName := "loggedin-test-role"
permissionName := "qwer"
serviceName := "zxcv"
methodName := permissionName
createRoleBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "%s",
"permissions": [
{"id": "%s", "action": {"service": "%s", "method": "%s"}}
]
}`,
roleName,
permissionName,
serviceName,
methodName,
)),
)
// create test policy
policyName := "loggedin-test-policy"
createPolicyBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["%s"],
"role_ids": ["%s"]
}`,
policyName,
resourcePath,
roleName,
)),
)
// assign test policy to loggedIn group
grantGroupPolicy(t, arborist.LoggedInGroup, policyName)
// return policy and authMapping
policy := arborist.Policy{policyName, "", []string{resourcePath}, []string{roleName}}
authMapping := map[string][]arborist.Action{
resourcePath: []arborist.Action{arborist.Action{serviceName, methodName}},
}
return []arborist.Policy{policy}, []string{resourcePath}, authMapping
}
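
	// deleteEverything wipes every row the tests may have created, leaving
	// only the built-in Anonymous and LoggedIn groups in place.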
deleteEverything := func() {
_ = db.MustExec("DELETE FROM policy_role")
_ = db.MustExec("DELETE FROM policy_resource")
_ = db.MustExec("DELETE FROM permission")
_ = db.MustExec("DELETE FROM resource")
_ = db.MustExec("DELETE FROM role")
_ = db.MustExec("DELETE FROM usr_grp")
_ = db.MustExec("DELETE FROM usr_policy")
_ = db.MustExec("DELETE FROM client_policy")
_ = db.MustExec("DELETE FROM grp_policy")
_ = db.MustExec("DELETE FROM policy")
_ = db.MustExec("DELETE FROM usr")
_ = db.MustExec("DELETE FROM client")
deleteGroups := fmt.Sprintf(
"DELETE FROM grp WHERE (name != '%s' AND name != '%s')",
arborist.AnonymousGroup,
arborist.LoggedInGroup,
)
_ = db.MustExec(deleteGroups)
_ = db.MustExec("DELETE FROM usr")
}
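
	// checkAuthSuccess POSTs a request body to /auth/request and asserts that
	// the "auth" field in the response matches the expected outcome.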
checkAuthSuccess := func(t *testing.T, body []byte, outcome bool) {
w := httptest.NewRecorder()
req := newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
result := struct {
Auth bool `json:"auth"`
}{}
err := json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, outcome, result.Auth, msg)
}
// testSetup should be used for any setup or teardown that should go in all
// the tests. Use like this:
//
// tearDown := testSetup(t)
// ...
// tearDown(t)
//
// `testSetup(t)` returns the teardown function, which when passed to defer
// will run the teardown code at the end of the function.
testSetup := func(t *testing.T) func(t *testing.T) {
// ADD TEST SETUP HERE
tearDown := func(t *testing.T) {
// ADD TEST TEARDOWN HERE
// wipe the database
deleteEverything()
// clear the logs currently stored in the buffer
logBuffer.Reset()
}
return tearDown
}
// NOTE:
// - Every `t.Run` at this level should be completely isolated from the
// others, and clean up after itself.
// - Within the `t.Run` calls at this level, it's OK to have sequential
// tests that depend on results from the previous ones within that run.
// However, be careful not to shoot yourself in the foot.
t.Run("HealthCheck", func(t *testing.T) {
tearDown := testSetup(t)
w := httptest.NewRecorder()
req := newRequest("GET", "/health", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "health check failed")
}
tearDown(t)
})
t.Run("NotFound", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/bogus/url", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "didn't get 404 for nonexistent URL")
}
result := struct {
Error struct {
Message string `json:"message"`
Code int `json:"code"`
} `json:"error"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from 404 handler")
}
assert.Equal(t, 404, result.Error.Code, "unexpected response for 404")
})
t.Run("Resource", func(t *testing.T) {
tearDown := testSetup(t)
t.Run("ListEmpty", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/resource", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "can't list resources")
}
result := struct {
Resources []interface{} `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resources list")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, []interface{}{}, result.Resources, msg)
})
t.Run("CreateWithError", func(t *testing.T) {
t.Run("UnexpectedField", func(t *testing.T) {
w := httptest.NewRecorder()
				// unexpected field in the request body
body := []byte(`{"path": "/a", "barrnt": "unexpected"}`)
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "resource creation didn't fail as expected")
}
})
t.Run("BadJSON", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("POST", "/resource", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected 400 from request missing JSON")
}
})
})
// We're going to create a resource and save the tag into this variable
// so we can test looking it up using the tag.
var resourceTag string
t.Run("Create", func(t *testing.T) {
w := httptest.NewRecorder()
path := "/a"
name := "a"
body := []byte(fmt.Sprintf(`{"path": "%s"}`, path))
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
// make one-off struct to read the response into
result := struct {
Resource struct {
Name string `json:"name"`
Path string `json:"path"`
Tag string `json:"tag"`
} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, name, result.Resource.Name, msg)
assert.Equal(t, path, result.Resource.Path, msg)
assert.NotEqual(t, "", result.Resource.Tag, msg)
resourceTag = result.Resource.Tag
t.Run("Punctuation", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{"path": "/!@#punctuation$%^-_is_-&*(allowed)-==[].<>{},?\\"}`)
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource with punctuation")
}
})
t.Run("AlreadyExists", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(`{"path": "%s"}`, path))
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusConflict {
httpError(t, w, "expected error from creating resource that already exists")
}
})
t.Run("MissingParent", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{"path": "/parent/doesnt/exist"}`)
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error from creating resource before parent exists")
}
})
t.Run("CreateParents", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{
"path": "/parent/doesnt/exist",
"description": "we did it"
}`)
req := newRequest("POST", "/resource?p", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "could't create resource with parents")
}
getResourceWithPath(t, "/parent")
getResourceWithPath(t, "/parent/doesnt")
resource := getResourceWithPath(t, "/parent/doesnt/exist")
assert.Equal(
t,
"we did it",
resource.Description,
"resource description doesn't match",
)
t.Run("AlreadyExists", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{"path": "/parent/doesnt/exist"}`)
req := newRequest("POST", "/resource?p", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusConflict {
httpError(t, w, "expected conflict from creating resource again")
}
})
t.Run("SomeParentsExist", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{"path": "/parent/sometimes/exist"}`)
req := newRequest("POST", "/resource?p", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
})
})
t.Run("RedundantSlashes", func(t *testing.T) {
createResourceBytes(t, []byte(`{"path": "/too"}`))
createResourceBytes(t, []byte(`{"path": "/too/many"}`))
w := httptest.NewRecorder()
path := "/too//many////slashes"
body := []byte(fmt.Sprintf(`{"path": "%s"}`, path))
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
// make one-off struct to read the response into
result := struct {
Resource struct {
Name string `json:"name"`
Path string `json:"path"`
Tag string `json:"tag"`
} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, "slashes", result.Resource.Name, msg)
assert.Equal(t, "/too/many/slashes", result.Resource.Path, msg)
assert.NotEqual(t, "", result.Resource.Tag, msg)
})
})
t.Run("ReadByTag", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/resource/tag/%s", resourceTag)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't create resource using tag")
}
result := struct {
Name string `json:"name"`
Path string `json:"path"`
Tag string `json:"tag"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, "a", result.Name, msg)
assert.Equal(t, "/a", result.Path, msg)
assert.Equal(t, resourceTag, result.Tag, msg)
})
t.Run("CreateSubresource", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{"name": "b"}`)
// try to create under the resource created with the previous test
req := newRequest("POST", "/resource/a", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
expected := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &expected)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
})
t.Run("CreateWithSubresources", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{
"name": "x",
"subresources": [
{
"name": "y",
"subresources": [{"name": "z"}]
}
]
}`)
// try to create under the resource created with the previous test
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
expected := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &expected)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
// now check that the child resources exist
w = httptest.NewRecorder()
req = newRequest("GET", "/resource/x/y", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't find subresource")
}
w = httptest.NewRecorder()
req = newRequest("GET", "/resource/x/y/z", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't find subresource")
}
// re-POST the same x with different subresources should fail
w = httptest.NewRecorder()
body = []byte(`{
"name": "x",
"subresources": [
{
"name": "b",
"subresources": [{"name": "c"}]
}
]
}`)
req = newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusConflict {
httpError(t, w, "didn't conflict")
}
			// using PUT (force-create) should recreate the whole tree under x
w = httptest.NewRecorder()
req = newRequest("PUT", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
expected = struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &expected)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
// previous child resources should be gone
w = httptest.NewRecorder()
req = newRequest("GET", "/resource/x/y", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "could find subresource")
}
w = httptest.NewRecorder()
req = newRequest("GET", "/resource/x/y/z", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "could find subresource")
}
// now check that the new child resources exist
w = httptest.NewRecorder()
req = newRequest("GET", "/resource/x/b", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't find subresource")
}
w = httptest.NewRecorder()
req = newRequest("GET", "/resource/x/b/c", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't find subresource")
}
})
t.Run("ListSubresources", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/resource/a", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read resource")
}
result := struct {
Path string `json:"path"`
Name string `json:"name"`
Subresources []string `json:"subresources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource listing")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, "a", result.Name, msg)
assert.Equal(t, "/a", result.Path, msg)
assert.Equal(t, []string{"/a/b"}, result.Subresources, msg)
})
t.Run("Overwrite", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{
"name": "Godel",
"subresources": [
{
"name": "Escher",
"subresources": [{"name": "Bach"}]
}
]
}`)
req := newRequest("PUT", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource using PUT")
}
expected := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &expected)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
escherTag := getTagForResource("Godel.Escher")
bachTag := getTagForResource("Godel.Escher.Bach")
// now PUT over the same resource, but keep the subresources
w = httptest.NewRecorder()
body = []byte(`{
"name": "Godel,",
"subresources": [
{"name": "Escher,", "subresources": [{"name": "Bach"}]},
{"name": "completeness_theorem"}
]
}`)
req = newRequest("PUT", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't update resource using PUT")
}
newEscherTag := getTagForResource("Godel.Escher")
newBachTag := getTagForResource("Godel.Escher.Bach")
assert.Equal(t, escherTag, newEscherTag, "subresource tag changed after PUT")
assert.Equal(t, bachTag, newBachTag, "subresource tag changed after PUT")
getResourceWithPath(t, "/Godel,/completeness_theorem")
})
t.Run("Delete", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("DELETE", "/resource/a", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't delete resource")
}
})
t.Run("CheckDeleted", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/resource/a", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "deleted resource still present")
}
})
t.Run("CheckDeletedSubresource", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/resource/a/b", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "deleted subresource still present")
}
})
tearDown(t)
})
t.Run("Role", func(t *testing.T) {
tearDown := testSetup(t)
t.Run("Create", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{
"id": "foo",
"permissions": [
{"id": "foo", "action": {"service": "test", "method": "foo"}}
]
}`)
req := newRequest("POST", "/role", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create role")
}
// make one-off struct to read the response into
result := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from role creation")
}
t.Run("OverwriteCreate", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{
"id": "thisNewRole",
"permissions": [
{"id": "thisNewID", "action": {"service": "test-overwrite", "method": "bar"}}
]
}`)
req := newRequest("PUT", "/role/thisNewRole", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create role")
}
// make one-off struct to read the response into
result := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from role creation")
}
})
t.Run("AlreadyExists", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{
"id": "foo",
"permissions": [
{"id": "foo", "action": {"service": "test", "method": "foo"}}
]
}`)
req := newRequest("POST", "/role", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusConflict {
httpError(t, w, "expected conflict error from trying to create role again")
}
})
t.Run("MissingPermissions", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{"id": "no-permissions", "permissions": []}`)
req := newRequest("POST", "/role", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error from trying to create role with no permissions")
}
})
})
t.Run("Read", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/role/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't create role")
}
result := struct {
Name string `json:"id"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from role read")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, "foo", result.Name, msg)
})
t.Run("Overwrite", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{
"id": "foo",
"permissions": [
{"id": "foo", "action": {"service": "*", "method": "bar"}}
]
}`)
req := newRequest("PUT", "/role/foo", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't update role")
}
// make one-off struct to read the response into
result := struct {
_ interface{} `json:"updated"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from role overwrite")
}
})
t.Run("FailOverwrite", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{
"id": "notFoo",
"permissions": [
{"id": "foo", "action": {"service": "*", "method": "bar"}}
]
}`)
req := newRequest("PUT", "/role/foo", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "wrong response code from invalid role overwrite request")
}
})
t.Run("List", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/role", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "can't list roles")
}
result := struct {
Roles []interface{} `json:"roles"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from roles list")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, 2, len(result.Roles), msg)
})
t.Run("Delete", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("DELETE", "/role/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't delete role")
}
})
t.Run("CheckDeleted", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/role/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "role was not actually deleted")
}
})
tearDown(t)
})
t.Run("Policy", func(t *testing.T) {
tearDown := testSetup(t)
roleName := "bazgo-create"
policyName := "bazgo-create-b"
policyNameA := "bazgoA-create-b"
t.Run("Create", func(t *testing.T) {
w := httptest.NewRecorder()
// set up some resources to work with
// TODO: make more of this setup into "fixtures", not hard-coded
body := []byte(`{"path": "/a"}`)
req := newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
w = httptest.NewRecorder()
body = []byte(`{"path": "/a/b"}`)
req = newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
w = httptest.NewRecorder()
body = []byte(`{"path": "/a/b/c"}`)
req = newRequest("POST", "/resource", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create resource")
}
// set up roles
w = httptest.NewRecorder()
body = []byte(fmt.Sprintf(
`{
"id": "%s",
"permissions": [
{
"id": "foo",
"action": {"service": "bazgo", "method": "create"}
}
]
}`,
roleName,
))
req = newRequest("POST", "/role", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create role")
}
// create policies
w = httptest.NewRecorder()
body = []byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["/a/b"],
"role_ids": ["%s"]
}`,
policyName,
roleName,
))
req = newRequest("POST", "/policy", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create policy")
}
result := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
w = httptest.NewRecorder()
body = []byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["/a/b"],
"role_ids": ["%s"]
}`,
policyNameA,
roleName,
))
req = newRequest("POST", "/policy", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create policy")
}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
t.Run("RoleNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
createResourceBytes(t, []byte(`{"path": "/test_resource"}`))
body := []byte(`{
"id": "testPolicyRoleNotExist",
"resource_paths": ["/test_resource"],
"role_ids": ["does_not_exist"]
}`)
req = newRequest("POST", "/policy", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error creating policy with nonexistent role")
}
})
t.Run("ResourceNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(
`{
"id": "testPolicyResourceNotExist",
"resource_paths": ["/does/not/exist"],
"role_ids": ["%s"]
}`,
roleName,
))
req = newRequest("POST", "/policy", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error creating policy with nonexistent resource")
}
})
t.Run("BulkPolicyOverwrite", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(
`[
{
"id": "%s",
"resource_paths": ["/a/b"],
"role_ids": ["%s"]
},
{
"id": "%s",
"resource_paths": ["/a/b"],
"role_ids": ["%s"]
}
]`,
policyName, roleName, policyNameA, roleName,
))
req = newRequest("PUT", "/bulk/policy", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't put policies")
}
result := struct {
Policies struct {
					Policy []string `json:"policy"`
}
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
})
})
t.Run("Read", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/policy/%s", policyName)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "policy not found")
}
result := struct {
Name string `json:"id"`
Resources []string `json:"resource_paths"`
Roles []string `json:"role_ids"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from GET policy")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, policyName, result.Name, msg)
assert.Equal(t, []string{"/a/b"}, result.Resources, msg)
assert.Equal(t, []string{roleName}, result.Roles, msg)
})
t.Run("Overwrite", func(t *testing.T) {
createResourceBytes(t, []byte(`{"path": "/a/z"}`))
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["/a/z"],
"role_ids": ["%s"]
}`,
policyName,
roleName,
))
url := fmt.Sprintf("/policy/%s", policyName)
req := newRequest("PUT", url, bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't put policy")
}
result := struct {
Policy struct {
Paths []string `json:"resource_paths"`
} `json:"updated"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from resource creation")
}
assert.Equal(t, []string{"/a/z"}, result.Policy.Paths)
})
t.Run("List", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/policy", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "can't list policies")
}
result := struct {
Policies []arborist.Policy `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from policies list")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, 2, len(result.Policies), msg)
msg = fmt.Sprintf("non expanded policies should contain 'role_ids'. got response body: %s", w.Body.String())
assert.NotNil(t, result.Policies[0].RoleIDs, msg)
// TODO (rudyardrichter, 2019-04-15): more checks here on response
})
t.Run("ListExpanded", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/policy?expand", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "can't list policies")
}
result := struct {
Policies []arborist.ExpandedPolicy `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from policies list")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, 2, len(result.Policies), msg)
msg = fmt.Sprintf("expanded policies should contain 'roles'. got response body: %s", w.Body.String())
assert.NotNil(t, result.Policies[0].Roles, msg)
})
t.Run("Delete", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("DELETE", "/policy/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't delete policy")
}
t.Run("NotExist", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("DELETE", "/policy/does-not-exist", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "expected 204 trying delete nonexistent policy")
}
})
})
t.Run("CheckDeleted", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/policy/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "policy was not actually deleted")
}
})
tearDown(t)
})
t.Run("User", func(t *testing.T) {
tearDown := testSetup(t)
username := "foo"
userEmail := "[email protected]"
t.Run("ListEmpty", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/user", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "can't list users")
}
result := struct {
Users interface{} `json:"users"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from users list")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, []interface{}{}, result.Users, msg)
})
t.Run("NotFound", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/user/nonexistent", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "didn't get 404 for nonexistent user")
}
})
t.Run("Create", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(
`{
"name": "%s",
"email": "%s"
}`,
username,
userEmail,
))
req := newRequest("POST", "/user", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create user")
}
result := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from user creation")
}
t.Run("AlreadyExists", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(
`{
"name": "%s",
"email": "%s"
}`,
username,
userEmail,
))
req := newRequest("POST", "/user", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusConflict {
httpError(t, w, "expected 409 from trying to create same user again")
}
})
t.Run("MissingName", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{"email": "asdf"}`)
req := newRequest("POST", "/user", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected 400 from trying to create user without name")
}
})
})
anonymousPolicies, anonymousResourcePaths, _ := setupAnonymousPolicies(t)
loggedInPolicies, loggedInResourcePaths, _ := setupLoggedInPolicies(t)
t.Run("Read", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s", username)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read user")
}
result := struct {
Name string `json:"name"`
Email string `json:"email"`
Policies []struct {
Policy string `json:"policy"`
ExpiresAt *string `json:"expires_at"`
} `json:"policies"`
Groups []string `json:"groups"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from user read")
}
assert.Equalf(t, username, result.Name, "Wanted username: %v \t Got: %v", username, result.Name)
assert.Equalf(t, userEmail, result.Email, "Wanted email: %v \t Got: %v", userEmail, result.Email)
// expect to receive policies from user's groups (in this case, 0 policies
// are assigned to user) as well as policies from Anonymous and LoggedIn groups.
var expectedPolicyNames []string
for _, policy := range anonymousPolicies {
expectedPolicyNames = append(expectedPolicyNames, policy.Name)
}
for _, policy := range loggedInPolicies {
expectedPolicyNames = append(expectedPolicyNames, policy.Name)
}
var actualPolicyNames []string
for _, policy := range result.Policies {
actualPolicyNames = append(actualPolicyNames, policy.Policy)
}
assert.ElementsMatchf(t, expectedPolicyNames, actualPolicyNames, "Wanted policies: %v \t Got: %v", expectedPolicyNames, actualPolicyNames)
// expect to receive user's groups (in this case, 0 groups) as well
// as Anonymous and LoggedIn groups.
expectedGroups := []string{arborist.LoggedInGroup, arborist.AnonymousGroup}
assert.ElementsMatchf(t, expectedGroups, result.Groups, "Wanted groups: %v \t Got: %v", expectedGroups, result.Groups)
})
t.Run("Update", func(t *testing.T) {
originalUsername := "johnsmith"
originalUserEmail := "[email protected]"
createUserBytes(t, []byte(fmt.Sprintf(`{"name": "%s", "email": "%s"}`, originalUsername, originalUserEmail)))
t.Run("OnlyName", func(t *testing.T) {
newUsername := "jsmith"
updateUserBytes(
t,
originalUsername,
[]byte(fmt.Sprintf(`{"name": "%s"}`, newUsername)),
http.StatusNoContent,
)
assertUsernameAndEmail(t, newUsername, originalUserEmail)
})
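		// the rename subtests change the user's name, so keep originalUsername in
		// step with the current name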
originalUsername = "jsmith"
t.Run("OnlyEmail", func(t *testing.T) {
newUserEmail := "[email protected]"
updateUserBytes(
t,
originalUsername,
[]byte(fmt.Sprintf(`{"email": "%s"}`, newUserEmail)),
http.StatusNoContent,
)
assertUsernameAndEmail(t, originalUsername, newUserEmail)
})
t.Run("BothNameAndEmail", func(t *testing.T) {
newUsername := "janesmith"
newUserEmail := "[email protected]"
updateUserBytes(
t,
originalUsername,
[]byte(fmt.Sprintf(`{"name": "%s", "email": "%s"}`, newUsername, newUserEmail)),
http.StatusNoContent,
)
assertUsernameAndEmail(t, newUsername, newUserEmail)
})
originalUsername = "janesmith"
t.Run("NeitherNameNorEmail", func(t *testing.T) {
updateUserBytes(
t,
originalUsername,
[]byte("{}"),
http.StatusBadRequest,
)
})
t.Run("InvalidName", func(t *testing.T) {
updateUserBytes(
t,
originalUsername,
[]byte(`{"name": 42}`),
http.StatusBadRequest,
)
})
t.Run("NonExistentUsername", func(t *testing.T) {
updateUserBytes(
t,
"nonexistentuser",
[]byte(`{"name": "existentuser"}`),
http.StatusNotFound,
)
})
t.Run("ConflictingName", func(t *testing.T) {
otherUsername := "joesmith"
createUserBytes(t, []byte(fmt.Sprintf(`{"name": "%s"}`, otherUsername)))
updateUserBytes(
t,
originalUsername,
[]byte(fmt.Sprintf(`{"name": "%s"}`, otherUsername)),
http.StatusConflict,
)
})
})
// do some preliminary setup so we have a policy to work with
createResourceBytes(t, resourceBody)
createRoleBytes(t, roleBody)
createPolicyBytes(t, policyBody)
t.Run("GrantPolicy", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s/policy", username)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s"}`, policyName))),
)
req.Header.Add("X-AuthZ-Provider", "xxx")
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't grant policy to user")
}
// look up user again and check that policy is there
w = httptest.NewRecorder()
url = fmt.Sprintf("/user/%s", username)
req = newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read user")
}
result := struct {
Name string `json:"name"`
Email string `json:"email"`
Policies []struct {
Policy string `json:"policy"`
ExpiresAt *string `json:"expires_at"`
} `json:"policies"`
Groups []string `json:"groups"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from user read")
}
msg := fmt.Sprintf(
"didn't grant policy correctly; got response body: %s",
w.Body.String(),
)
// expect that policy with policyName is in response
var actualPolicy struct {
Policy string `json:"policy"`
ExpiresAt *string `json:"expires_at"`
}
for _, policy := range result.Policies {
if policy.Policy == policyName {
actualPolicy = policy
}
}
		// assert.NotNil always passes for a struct value, so check that the
		// policy was actually found by matching on its name instead
		assert.Equal(t, policyName, actualPolicy.Policy, msg)
		// expect the expires_at field to be nil, because no expiration was set
		assert.Nil(t, actualPolicy.ExpiresAt, msg)
t.Run("BulkGrantPolicy", func(t *testing.T) {
// create test policy
policyNameA := "pqrs"
createPolicyBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["%s"],
"role_ids": ["%s"]
}`,
policyNameA,
resourcePath,
roleName,
)),
)
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s/bulk/policy", username)
body := []byte(fmt.Sprintf(
`
[
{"policy": "%s"},
{"policy": "%s"}
]`,
policyName, policyNameA,
))
req := newRequest(
"POST",
url,
bytes.NewBuffer(body))
req.Header.Set("X-AuthZ-Provider", "xxx")
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't grant policies to user")
}
w = httptest.NewRecorder()
url = fmt.Sprintf("/user/%s", username)
req = newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read user")
}
})
t.Run("PolicyNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s/policy", username)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(`{"policy": "nonexistent"}`)),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "didn't get 400 for nonexistent policy")
}
})
t.Run("UserNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := "/user/nonexistent/policy"
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s"}`, policyName))),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "didn't get 404 for nonexistent user")
}
})
})
t.Run("ListResources", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s/resources", username)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't list user's authed resources")
}
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from user resources")
}
// expect to see resources from user's policies, as well as
// resources from Anonymous and LoggedIn policies.
expectedResources := append(anonymousResourcePaths, loggedInResourcePaths...)
expectedResources = append(expectedResources, resourcePath)
msg := fmt.Sprintf(
"didn't get expected resources; got response body: %s \t Wanted resources: %v",
w.Body.String(),
expectedResources,
)
assert.ElementsMatch(t, expectedResources, result.Resources, msg)
t.Run("UserNotFound", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/user/nonexistent/resources", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "expected 404 trying to list resources for fake user")
}
})
// TODO (rudyardrichter, 2019-05-09): also test response with tag
})
t.Run("RevokePolicy", func(t *testing.T) {
test := func(authzProvider string, expected bool, msg string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s/policy/%s", username, policyName)
req := newRequest("DELETE", url, nil)
req.Header.Add("X-AuthZ-Provider", authzProvider)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't revoke policy")
}
// look up user again and check if policy is still there
w = httptest.NewRecorder()
url = fmt.Sprintf("/user/%s", username)
req = newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read user")
}
result := struct {
Name string `json:"name"`
Email string `json:"email"`
Policies []struct {
Name string `json:"policy"`
ExpiresAt string `json:"expires_at"`
} `json:"policies"`
Groups []string `json:"groups"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from user read")
}
found := false
for _, policy := range result.Policies {
if policy.Name == policyName {
found = true
break
}
}
if found != expected {
assert.Fail(t, fmt.Sprintf(msg, w.Body.String()))
}
}
test("yyy", true, "shouldn't revoke policy; got response body: %s")
test("xxx", false, "didn't revoke policy correctly; got response body: %s")
})
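	// use an expiration one hour in the future (RFC3339) so the grant is
	// still active when the next subtest reads it back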
timestamp := time.Now().Add(time.Hour).Format(time.RFC3339)
t.Run("GrantPolicyWithExpiration", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s/policy", username)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s", "expires_at": "%s"}`, policyName, timestamp))),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't grant policy to user with expiration")
}
})
t.Run("CheckPolicyHasExpiration", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s", username)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
result := struct {
Policies []struct {
Policy string `json:"policy"`
ExpiresAt *string `json:"expires_at"`
} `json:"policies"`
}{}
err := json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from user info")
}
// Assert that the policy we added in GrantPolicyWithExpiration has expiration.
var addedPolicy struct {
Policy string `json:"policy"`
ExpiresAt *string `json:"expires_at"`
}
for _, policy := range result.Policies {
if policy.Policy == policyName {
addedPolicy = policy
}
}
		assert.Equalf(t, policyName, addedPolicy.Policy, "Expected to find policy %v in response: %v", policyName, result.Policies)
assert.NotNil(t, addedPolicy.ExpiresAt, "missing `expires_at` in response")
expect, _ := time.Parse(time.RFC3339, timestamp)
got, _ := time.Parse(time.RFC3339, *addedPolicy.ExpiresAt)
assert.True(t, expect.Equal(got), "wrong value for `expires_at`")
})
t.Run("Delete", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("DELETE", "/user/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't delete user")
}
t.Run("NotExist", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("DELETE", "/user/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "expected 204 from trying to delete nonexistent user")
}
})
})
t.Run("DeleteNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/user/%s", username)
req := newRequest("DELETE", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "wrong response from deleting user that doesn't exist")
}
})
t.Run("CheckDeleted", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/user/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "user was not actually deleted")
}
})
tearDown(t)
})
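	// client endpoints mirror the user endpoints: create/read/delete plus
	// policy grant and provider-scoped revocation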
t.Run("Client", func(t *testing.T) {
tearDown := testSetup(t)
clientID := "foo"
t.Run("ListEmpty", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/client", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "can't list clients")
}
result := struct {
Clients interface{} `json:"clients"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from clients list")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, []interface{}{}, result.Clients, msg)
})
t.Run("NotFound", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/client/nonexistent", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "didn't get 404 for nonexistent client")
}
})
// do some preliminary setup so we have a policy to work with
createResourceBytes(t, resourceBody)
createRoleBytes(t, roleBody)
createPolicyBytes(t, policyBody)
t.Run("Create", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(
`{
"clientID": "%s",
"policies": ["%s"]
}`,
clientID, policyName,
))
req := newRequest("POST", "/client", bytes.NewBuffer(body))
req.Header.Add("X-AuthZ-Provider", "xxx")
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create client")
}
result := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from client creation")
}
})
t.Run("Read", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/client/%s", clientID)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read client")
}
result := struct {
ClientID string `json:"clientID"`
Policies []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from client read")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, clientID, result.ClientID, msg)
assert.Equal(t, []string{policyName}, result.Policies, msg)
})
t.Run("RevokePolicy", func(t *testing.T) {
test := func(authzProvider string, expected bool, msg string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/client/%s/policy/%s", clientID, policyName)
req := newRequest("DELETE", url, nil)
req.Header.Add("X-AuthZ-Provider", authzProvider)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't revoke policy")
}
// look up client again and check that policy is gone
w = httptest.NewRecorder()
url = fmt.Sprintf("/client/%s", clientID)
req = newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read client")
}
result := struct {
ClientID string `json:"clientID"`
Policies []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from client read")
}
msg = fmt.Sprintf(msg, w.Body.String())
if expected {
assert.Contains(t, result.Policies, policyName, msg)
} else {
assert.NotContains(t, result.Policies, policyName, msg)
}
}
test("yyy", true, "shouldn't revoke policy; got response body: %s")
test("xxx", false, "didn't revoke policy correctly; got response body: %s")
})
t.Run("GrantPolicy", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/client/%s/policy", clientID)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s"}`, policyName))),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't grant policy to client")
}
// look up client again and check that policy is there
w = httptest.NewRecorder()
url = fmt.Sprintf("/client/%s", clientID)
req = newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read client")
}
result := struct {
ClientID string `json:"clientID"`
Policies []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from client read")
}
msg := fmt.Sprintf(
"didn't grant policy correctly; got response body: %s",
w.Body.String(),
)
assert.Equal(t, []string{policyName}, result.Policies, msg)
t.Run("PolicyNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/client/%s/policy", clientID)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(`{"policy": "nonexistent"}`)),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "didn't get 404 for nonexistent policy")
}
})
t.Run("ClientNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := "/client/nonexistent/policy"
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s"}`, policyName))),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "didn't get 404 for nonexistent client")
}
})
})
t.Run("Delete", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("DELETE", "/client/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't delete client")
}
})
t.Run("DeleteNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/client/%s", clientID)
req := newRequest("DELETE", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "wrong response from deleting client that doesn't exist")
}
})
t.Run("CheckDeleted", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/client/foo", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "client was not actually deleted")
}
})
tearDown(t)
})
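	// group endpoints: CRUD, user membership, policy grant/revoke, plus the
	// built-in Anonymous and LoggedIn groups, which cannot be deleted or
	// given members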
t.Run("Group", func(t *testing.T) {
tearDown := testSetup(t)
t.Run("NotFound", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/group/nonexistent", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "didn't get 404 for nonexistent group")
}
})
testGroupName := "test-group"
testGroupUser1 := "test-group-user-1"
testGroupUser2 := "test-group-user-2"
testGroupUser3 := "test-group-user-3"
testGroupUsers := []string{
testGroupUser1,
testGroupUser2,
testGroupUser3,
}
t.Run("Create", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(`{"name": "%s"}`, testGroupName))
req := newRequest("POST", "/group", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create group")
}
result := struct {
_ interface{} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group creation")
}
t.Run("MissingName", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(`{"users": ["%s"]}`, testGroupUser1))
req := newRequest("POST", "/group", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected 400 from creating group without name")
}
})
t.Run("AlreadyExists", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(`{"name": "%s"}`, testGroupName))
req := newRequest("POST", "/group", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusConflict {
httpError(t, w, "creating group that already exists didn't error as expected")
}
})
})
t.Run("List", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/group", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "can't list groups")
}
result := struct {
Groups []struct {
Name string `json:"name"`
} `json:"groups"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from groups list")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
// check test group is in the results
groupNames := []string{}
for _, group := range result.Groups {
groupNames = append(groupNames, group.Name)
}
assert.Contains(t, groupNames, testGroupName, msg)
})
t.Run("Read", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", fmt.Sprintf("/group/%s", testGroupName), nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read group")
}
result := struct {
Name string `json:"name"`
Users []string `json:"users"`
Policies []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group read")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, testGroupName, result.Name, msg)
assert.Equal(t, []string{}, result.Users, msg)
assert.Equal(t, []string{}, result.Policies, msg)
})
t.Run("AddUsers", func(t *testing.T) {
for _, testUsername := range testGroupUsers {
createUserBytes(t, []byte(fmt.Sprintf(`{"name": "%s"}`, testUsername)))
w := httptest.NewRecorder()
groupUserURL := fmt.Sprintf("/group/%s/user", testGroupName)
body := []byte(fmt.Sprintf(`{"username": "%s"}`, testUsername))
req := newRequest("POST", groupUserURL, bytes.NewBuffer(body))
req.Header.Add("X-AuthZ-Provider", "xxx")
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't add user to group")
}
}
t.Run("UserNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
groupUserURL := fmt.Sprintf("/group/%s/user", testGroupName)
body := []byte(`{"username": "does-not-exist"}`)
req := newRequest("POST", groupUserURL, bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected 400 from trying to add nonexistent user to group")
}
})
t.Run("GroupNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
groupUserURL := "/group/does-not-exist/user"
body := []byte(fmt.Sprintf(`{"username": "%s"}`, testGroupUser1))
req := newRequest("POST", groupUserURL, bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "expected 404 from trying to add user to nonexistent group")
}
})
})
t.Run("CheckUsersAdded", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", fmt.Sprintf("/group/%s", testGroupName), nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read group")
}
result := struct {
Name string `json:"name"`
Users []string `json:"users"`
Policies []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group read")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
resultUsers := make([]string, len(result.Users))
copy(resultUsers, result.Users)
sort.Strings(resultUsers)
expectUsers := make([]string, len(testGroupUsers))
copy(expectUsers, testGroupUsers)
sort.Strings(expectUsers)
assert.Equal(t, expectUsers, resultUsers, msg)
})
t.Run("CreateWithUsers", func(t *testing.T) {
groupName := "test-group-with-users"
// create a group with some users in it
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(
`{"name": "%s", "users": ["%s", "%s"]}`,
groupName,
testGroupUser1,
testGroupUser2,
))
req := newRequest("POST", "/group", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusCreated {
httpError(t, w, "couldn't create group")
}
result := struct {
Created struct {
Users []string `json:"users"`
} `json:"created"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group creation")
}
sort.Strings(result.Created.Users)
expectUsers := []string{testGroupUser1, testGroupUser2}
sort.Strings(expectUsers)
msg := fmt.Sprintf("didn't get expected users; got response body: %s", w.Body.String())
assert.Equal(t, expectUsers, result.Created.Users, msg)
// check that users were added correctly using read request
w = httptest.NewRecorder()
req = newRequest("GET", fmt.Sprintf("/group/%s", groupName), nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read group")
}
resultRead := struct {
Users []string `json:"users"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &resultRead)
if err != nil {
httpError(t, w, "couldn't read response from group read")
}
sort.Strings(resultRead.Users)
msg = fmt.Sprintf("group doesn't have users; got response body: %s", w.Body.String())
assert.Equal(t, expectUsers, resultRead.Users, msg)
})
userToRemove := testGroupUser1
t.Run("RemoveUser", func(t *testing.T) {
test := func(authzProvider string, expected bool, msg string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s/user/%s", testGroupName, userToRemove)
req := newRequest("DELETE", url, nil)
req.Header.Add("X-AuthZ-Provider", authzProvider)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't remove user from group")
}
// look up group again and check that user is gone
url = fmt.Sprintf("/group/%s", testGroupName)
w = httptest.NewRecorder()
req = newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read group")
}
result := struct {
Name string `json:"name"`
Users []string `json:"users"`
Policies []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group read")
}
msg = fmt.Sprintf(msg, w.Body.String())
if expected {
assert.Contains(t, result.Users, userToRemove, msg)
} else {
assert.NotContains(t, result.Users, userToRemove, msg)
}
}
test("yyy", true, "shouldn't remove user; got response body: %s")
test("xxx", false, "didn't remove user; got response body: %s")
})
// do some preliminary setup so we have a policy to work with
createResourceBytes(t, resourceBody)
createRoleBytes(t, roleBody)
createPolicyBytes(t, policyBody)
t.Run("GrantPolicy", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s/policy", testGroupName)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s"}`, policyName))),
)
req.Header.Add("X-AuthZ-Provider", "xxx")
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't grant policy to group")
}
// look up group again and check that policy is there
w = httptest.NewRecorder()
url = fmt.Sprintf("/group/%s", testGroupName)
req = newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read group")
}
result := struct {
Name string `json:"name"`
Users []string `json:"users"`
Policies []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group read")
}
msg := fmt.Sprintf(
"didn't grant policy correctly; got response body: %s",
w.Body.String(),
)
assert.Equal(t, []string{policyName}, result.Policies, msg)
t.Run("PolicyNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s/policy", testGroupName)
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(`{"policy": "nonexistent"}`)),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "didn't get 400 for nonexistent policy")
}
})
t.Run("GroupNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := "/group/nonexistent/policy"
req := newRequest(
"POST",
url,
bytes.NewBuffer([]byte(fmt.Sprintf(`{"policy": "%s"}`, policyName))),
)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "didn't get 404 for nonexistent group")
}
})
t.Run("InvalidJSON", func(t *testing.T) {
})
})
t.Run("RevokePolicy", func(t *testing.T) {
test := func(authzProvider string, expected bool, msg string) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s/policy/%s", testGroupName, policyName)
req := newRequest("DELETE", url, nil)
req.Header.Add("X-AuthZ-Provider", authzProvider)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't revoke policy from group")
}
w = httptest.NewRecorder()
url = fmt.Sprintf("/group/%s", testGroupName)
req = newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read group")
}
result := struct {
Name string `json:"name"`
Users []string `json:"users"`
Policies []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group read")
}
msg = fmt.Sprintf(msg, w.Body.String())
if expected {
assert.Contains(t, result.Policies, policyName, msg)
} else {
assert.NotContains(t, result.Policies, policyName, msg)
}
}
test("yyy", true, "shouldn't revoke policy; got response body: %s")
test("xxx", false, "didn't revoke policy correctly; got response body: %s")
})
t.Run("Delete", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s", testGroupName)
req := newRequest("DELETE", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "couldn't delete group")
}
})
t.Run("CheckDeleted", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s", testGroupName)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNotFound {
httpError(t, w, "group was not actually deleted")
}
})
t.Run("DeleteNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/group/%s", testGroupName)
req := newRequest("DELETE", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusNoContent {
httpError(t, w, "wrong response from deleting group that doesn't exist")
}
})
t.Run("BuiltIn", func(t *testing.T) {
		groups := [][]string{
			{arborist.AnonymousGroup, "Anonymous"},
			{arborist.LoggedInGroup, "LoggedIn"},
		}
for _, groupInfo := range groups {
groupName := groupInfo[0]
testName := groupInfo[1]
t.Run(testName, func(t *testing.T) {
t.Run("Exists", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", fmt.Sprintf("/group/%s", groupName), nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "couldn't read group")
}
result := struct {
Name string `json:"name"`
Users []string `json:"users"`
_ []string `json:"policies"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from group read")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, groupName, result.Name, msg)
assert.Equal(t, []string{}, result.Users, msg)
})
t.Run("CannotDelete", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("DELETE", fmt.Sprintf("/group/%s", groupName), nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
msg := fmt.Sprintf(
"expected error from trying to delete built-in group %s",
groupName,
)
httpError(t, w, msg)
}
})
t.Run("CannotAddUser", func(t *testing.T) {
w := httptest.NewRecorder()
username := "user-not-getting-added"
body := []byte(fmt.Sprintf(`{"name": "%s"}`, username))
req := newRequest("POST", "/user", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
w = httptest.NewRecorder()
url := fmt.Sprintf("/group/%s/user", groupName)
body = []byte(fmt.Sprintf(`{"username": "%s"}`, username))
req = newRequest("POST", url, bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error adding user to built in group")
}
})
})
}
})
tearDown(t)
})
t.Run("Auth", func(t *testing.T) {
tearDown := testSetup(t)
t.Run("Mapping", func(t *testing.T) {
setupTestPolicy(t)
_, _, anonymousAuthMapping := setupAnonymousPolicies(t)
_, _, loggedInAuthMapping := setupLoggedInPolicies(t)
createUserBytes(t, userBody)
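		// the final "null" argument is presumably spliced into the request body
		// as the grant's expires_at value, i.e. no expiration; the expiration
		// tests below pass an RFC3339 timestamp instead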
grantUserPolicy(t, username, policyName, "null")
// testAuthMappingResponse checks whether the AuthMapping in the HTTP
// response 'w' contains the correct resources and actions that belong to the user.
// This includes the resources and actions that belong to the anonymous and loggedIn groups.
testAuthMappingResponse := func(t *testing.T, w *httptest.ResponseRecorder) {
msg := fmt.Sprintf("got response body: %s", w.Body.String())
			assert.Equal(t, http.StatusOK, w.Code, msg)
result := make(map[string][]arborist.Action)
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth mapping")
}
msg = fmt.Sprintf("result does not contain expected resource %s", resourcePath)
assert.Contains(t, result, resourcePath, msg)
action := arborist.Action{Service: serviceName, Method: methodName}
msg = fmt.Sprintf("result does not contain expected action %s", action)
assert.Contains(t, result[resourcePath], action, msg)
// Expect response to also contain anonymous and loggedIn groups.
msg = fmt.Sprintf("Expected to see these auth mappings from anonymous group in response: %v", anonymousAuthMapping)
for resource, actions := range anonymousAuthMapping {
assert.Contains(t, result, resource, msg)
assert.ElementsMatch(t, result[resource], actions, msg)
}
msg = fmt.Sprintf("Expected to see these auth mappings from loggedIn group in response: %v", loggedInAuthMapping)
for resource, actions := range loggedInAuthMapping {
assert.Contains(t, result, resource, msg)
assert.ElementsMatch(t, result[resource], actions, msg)
}
}
t.Run("GET", func(t *testing.T) {
w := httptest.NewRecorder()
url := fmt.Sprintf("/auth/mapping?username=%s", username)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
// expect to receive user's auth mappings, as well as auth mappings of anonymous and logged-in policies
testAuthMappingResponse(t, w)
})
t.Run("GET_userDoesNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
badUsername := "hulkhogan12"
url := fmt.Sprintf("/auth/mapping?username=%s", badUsername)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
// expect a 200 OK response
			assert.Equal(t, http.StatusOK, w.Code, "expected a 200 OK")
// expect result to only contain anonymous and loggedIn auth mappings.
result := make(arborist.AuthMapping)
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth mapping")
}
expectedMappings := make(arborist.AuthMapping)
for k, v := range anonymousAuthMapping {
expectedMappings[k] = v
}
for k, v := range loggedInAuthMapping {
expectedMappings[k] = v
}
msg := fmt.Sprintf("Expected to see these auth mappings from anonymous and logged-in groups in response: %v", expectedMappings)
for resource, actions := range result {
assert.Contains(t, expectedMappings, resource, msg)
assert.ElementsMatch(t, expectedMappings[resource], actions, msg)
}
})
t.Run("GETwithJWT", func(t *testing.T) {
w := httptest.NewRecorder()
url := "/auth/mapping"
req := newRequest("GET", url, nil)
token := TestJWT{username: username}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
// expect to receive user's auth mappings, as well as auth mappings of anonymous and logged-in policies
testAuthMappingResponse(t, w)
})
t.Run("GETwithJWT_userDoesNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
url := "/auth/mapping"
req := newRequest("GET", url, nil)
badUsername := "hulkhogan12"
token := TestJWT{username: badUsername}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
// expect a 200 OK response
			assert.Equal(t, http.StatusOK, w.Code, "expected a 200 OK")
// expect result to only contain anonymous and loggedIn auth mappings.
result := make(arborist.AuthMapping)
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth mapping")
}
expectedMappings := make(arborist.AuthMapping)
for k, v := range anonymousAuthMapping {
expectedMappings[k] = v
}
for k, v := range loggedInAuthMapping {
expectedMappings[k] = v
}
msg := fmt.Sprintf("Expected response to be these auth mappings from anonymous and logged-in groups: %v", expectedMappings)
for resource, actions := range result {
assert.Contains(t, expectedMappings, resource, msg)
assert.ElementsMatch(t, expectedMappings[resource], actions, msg)
}
})
t.Run("GET_noUsernameProvided", func(t *testing.T) {
w := httptest.NewRecorder()
url := "/auth/mapping"
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "expected to get policies for Anonymous group; got bad response instead")
}
// expect a 200 OK response
			assert.Equal(t, http.StatusOK, w.Code, "expected a 200 OK")
// expect result to contain only authMappings of anonymous policies
result := make(arborist.AuthMapping)
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth mapping")
}
msg := fmt.Sprintf("Expected these auth mappings from anonymous group: %v \t Got: %v", anonymousAuthMapping, result)
for resource, actions := range result {
assert.Contains(t, anonymousAuthMapping, resource, msg)
assert.ElementsMatch(t, anonymousAuthMapping[resource], actions, msg)
}
})
t.Run("GET_expiredPolicy", func(t *testing.T) {
expiredTimestamp := time.Now().Add(time.Duration(-1) * time.Minute).Format(time.RFC3339)
grantUserPolicy(t, username, policyName, expiredTimestamp)
w := httptest.NewRecorder()
url := fmt.Sprintf("/auth/mapping?username=%s", username)
req := newRequest("GET", url, nil)
handler.ServeHTTP(w, req)
result := make(map[string][]arborist.Action)
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth mapping")
}
msg := fmt.Sprintf("result contains resource %s corresponding to expired policy %s", resourcePath, policyName)
assert.NotContains(t, result, resourcePath, msg)
grantUserPolicy(t, username, policyName, "null")
})
t.Run("POST", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(`{"username": "%s"}`, username))
req := newRequest("POST", "/auth/mapping", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
// expect to also receive auth mappings of anonymous and logged-in policies
testAuthMappingResponse(t, w)
})
t.Run("POST_userDoesNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
badUsername := "hulkhogan12"
body := []byte(fmt.Sprintf(`{"username": "%s"}`, badUsername))
req := newRequest("POST", "/auth/mapping", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
// expect a 200 OK response
			assert.Equal(t, http.StatusOK, w.Code, "expected a 200 OK")
// expect result to only contain anonymous and loggedIn auth mappings.
result := make(arborist.AuthMapping)
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth mapping")
}
expectedMappings := make(arborist.AuthMapping)
for k, v := range anonymousAuthMapping {
expectedMappings[k] = v
}
for k, v := range loggedInAuthMapping {
expectedMappings[k] = v
}
msg := fmt.Sprintf("Expected response to be these auth mappings from anonymous and logged-in groups: %v", expectedMappings)
for resource, actions := range result {
assert.Contains(t, expectedMappings, resource, msg)
assert.ElementsMatch(t, expectedMappings[resource], actions, msg)
}
})
t.Run("POST_noUsernameProvided", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte("")
req := newRequest("POST", "/auth/mapping", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
// expect a 400 response
			assert.Equal(t, http.StatusBadRequest, w.Code, "expected a 400 response")
})
})
deleteEverything()
t.Run("Request", func(t *testing.T) {
setupTestPolicy(t)
createUserBytes(t, userBody)
grantUserPolicy(t, username, policyName, "null")
w := httptest.NewRecorder()
token := TestJWT{username: username}
body := []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
token.Encode(),
resourcePath,
serviceName,
methodName,
))
req := newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should succeed, user has authorization
result := struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
t.Run("Tag", func(t *testing.T) {
w := httptest.NewRecorder()
tag := getTagForResource(resourcePath)
body := []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {"service": "%s", "method": "%s"}
}
}`,
token.Encode(),
tag,
serviceName,
methodName,
))
req := newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should succeed, user has authorization
result := struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
})
t.Run("Unauthorized", func(t *testing.T) {
w = httptest.NewRecorder()
token = TestJWT{username: username}
body = []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
token.Encode(),
"/wrongresource", // TODO: get errors if these contain slashes
serviceName,
methodName,
))
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should fail
result = struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, false, result.Auth, msg)
})
t.Run("ExpiredPolicy", func(t *testing.T) {
expiredTimestamp := time.Now().Add(time.Duration(-1) * time.Minute).Format(time.RFC3339)
grantUserPolicy(t, username, policyName, expiredTimestamp)
w = httptest.NewRecorder()
token = TestJWT{username: username}
body = []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
token.Encode(),
resourcePath,
serviceName,
methodName,
))
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should fail
result = struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, false, result.Auth, msg)
grantUserPolicy(t, username, policyName, "null")
})
t.Run("BadRequest", func(t *testing.T) {
t.Run("NotJSON", func(t *testing.T) {
w = httptest.NewRecorder()
token = TestJWT{username: username}
body = []byte("not real JSON")
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error")
}
})
t.Run("MissingFields", func(t *testing.T) {
w = httptest.NewRecorder()
token = TestJWT{username: username}
body = []byte(fmt.Sprintf(
`{
"user": {"token": "%s"}
}`,
token.Encode(),
))
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error from request missing fields")
}
})
})
createClientBytes(t, clientBody)
t.Run("ClientForbidden", func(t *testing.T) {
w = httptest.NewRecorder()
token = TestJWT{username: username, clientID: clientID}
body = []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
token.Encode(),
resourcePath,
serviceName,
methodName,
))
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should fail
result = struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, false, result.Auth, msg)
})
grantClientPolicy(t, clientID, policyName)
t.Run("ClientBothOK", func(t *testing.T) {
w = httptest.NewRecorder()
token = TestJWT{username: username, clientID: clientID}
body = []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
token.Encode(),
resourcePath,
serviceName,
methodName,
))
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
			// request should succeed; both the user and the client have authorization
result = struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
})
t.Run("QueryUsingUserID", func(t *testing.T) {
w = httptest.NewRecorder()
body = []byte(fmt.Sprintf(
`{
"user": {"user_id": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
username,
resourcePath,
serviceName,
methodName,
))
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
			// request should succeed, user has authorization
result = struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
})
})
deleteEverything()
t.Run("RequestMultiple", func(t *testing.T) {
setupTestPolicy(t)
createUserBytes(t, userBody)
grantUserPolicy(t, username, policyName, "null")
w := httptest.NewRecorder()
token := TestJWT{username: username}
		// TODO (rudyardrichter, 2019-04-22): repeating the same resource and
		// action suffices to exercise the `requests` array, but it would be
		// better to use distinct policies here
body := []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"requests": [
{
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
},
{
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
]
}`,
token.Encode(),
resourcePath,
serviceName,
methodName,
resourcePath,
serviceName,
methodName,
))
req := newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should succeed, user has authorization
result := struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
t.Run("UsingStar", func(t *testing.T) {
createRoleBytes(
t,
[]byte(`{
"id": "roleUsingStar",
"permissions": [
{"id": "serviceStar", "action": {"service": "*", "method": "read"}}
]
}`),
)
createPolicyBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "policyUsingStar",
"resource_paths": ["%s"],
"role_ids": ["roleUsingStar"]
}`,
resourcePath,
)),
)
grantUserPolicy(t, username, "policyUsingStar", "null")
w := httptest.NewRecorder()
token := TestJWT{username: username}
body := []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "shouldNotMatter",
"method": "read"
}
}
}`,
token.Encode(),
resourcePath,
))
req := newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should succeed, user has authorization
result := struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
})
t.Run("Unauthorized", func(t *testing.T) {
w = httptest.NewRecorder()
token = TestJWT{username: username}
body = []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"requests": [
{
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
},
{
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
]
}`,
token.Encode(),
"/wrongresource", // TODO: get errors if these contain dashes
serviceName,
methodName,
resourcePath,
serviceName,
methodName,
))
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should fail
result = struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, false, result.Auth, msg)
})
})
deleteEverything()
t.Run("Anonymous", func(t *testing.T) {
// user with a JWT also gets privileges from the anonymous group
setupTestPolicy(t)
createUserBytes(t, userBody)
grantGroupPolicy(t, arborist.AnonymousGroup, policyName)
w := httptest.NewRecorder()
token := TestJWT{username: username}
body := []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
token.Encode(),
resourcePath,
serviceName,
methodName,
))
req := newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should succeed, user has authorization
result := struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
// request with no JWT will still work if granted policy through
// the anonymous group
w = httptest.NewRecorder()
body = []byte(fmt.Sprintf(
`{
"user": {"token": ""},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
resourcePath,
serviceName,
methodName,
))
req = newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should succeed, user has authorization
result = struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
t.Run("UsingStar", func(t *testing.T) {
createRoleBytes(
t,
[]byte(`{
"id": "roleForAnonUsingStar",
"permissions": [
{"id": "serviceStar", "action": {"service": "*", "method": "read"}}
]
}`),
)
createPolicyBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "policyForAnonUsingStar",
"resource_paths": ["%s"],
"role_ids": ["roleForAnonUsingStar"]
}`,
resourcePath,
)),
)
grantGroupPolicy(t, arborist.AnonymousGroup, "policyForAnonUsingStar")
authRequestBody := []byte(fmt.Sprintf(
`{
"user": {"token": ""},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
resourcePath,
serviceName,
"read",
))
checkAuthSuccess(t, authRequestBody, true)
})
t.Run("CheckingStar", func(t *testing.T) {
createRoleBytes(
t,
[]byte(`{
"id": "roleForAnonCheckingStar",
"permissions": [
{"id": "wanabeserviceStar", "action": {"service": "*", "method": "create"}}
]
}`),
)
createPolicyBytes(
t,
[]byte(fmt.Sprintf(
`{
"id": "policyForAnonCheckingStar",
"resource_paths": ["%s"],
"role_ids": ["roleForAnonCheckingStar"]
}`,
resourcePath,
)),
)
grantGroupPolicy(t, arborist.AnonymousGroup, "policyForAnonCheckingStar")
authRequestBody := []byte(fmt.Sprintf(
`{
"user": {"token": ""},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
resourcePath,
serviceName,
"write", // Attempt to write when only allowed to create
))
checkAuthSuccess(t, authRequestBody, false)
})
})
deleteEverything()
t.Run("LoggedIn", func(t *testing.T) {
// user with a JWT gets privileges from the logged-in group
setupTestPolicy(t)
createUserBytes(t, userBody)
grantGroupPolicy(t, arborist.LoggedInGroup, policyName)
w := httptest.NewRecorder()
token := TestJWT{username: username}
body := []byte(fmt.Sprintf(
`{
"user": {"token": "%s"},
"request": {
"resource": "%s",
"action": {
"service": "%s",
"method": "%s"
}
}
}`,
token.Encode(),
resourcePath,
serviceName,
methodName,
))
req := newRequest("POST", "/auth/request", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth request failed")
}
// request should succeed, user has authorization
result := struct {
Auth bool `json:"auth"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth request")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, true, result.Auth, msg)
})
deleteEverything()
t.Run("Resources", func(t *testing.T) {
createUserBytes(t, userBody)
t.Run("Empty", func(t *testing.T) {
w := httptest.NewRecorder()
token := TestJWT{username: username}
body := []byte(fmt.Sprintf(`{"user": {"token": "%s"}}`, token.Encode()))
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, []string{}, result.Resources, msg)
})
createResourceBytes(t, resourceBody)
createRoleBytes(t, roleBody)
createPolicyBytes(t, policyBody)
grantUserPolicy(t, username, policyName, "null")
anonymousPolicies, anonymousResourcePaths, _ := setupAnonymousPolicies(t)
_, loggedInResourcePaths, _ := setupLoggedInPolicies(t)
t.Run("Granted", func(t *testing.T) {
token := TestJWT{username: username}
body := []byte(fmt.Sprintf(`{"user": {"token": "%s"}}`, token.Encode()))
t.Run("GET", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/auth/resources", nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
// expect to receive the resources from the policy granted to the user,
// as well as the resources from the policies granted to the
// anonymous and loggedin groups.
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
expectedResources := append(anonymousResourcePaths, loggedInResourcePaths...)
expectedResources = append(expectedResources, resourcePath)
msg := fmt.Sprintf("got resources: %v \t Wanted: %v", result.Resources, expectedResources)
assert.ElementsMatch(t, expectedResources, result.Resources, msg)
// check the response returning tags is also correct:
// expect to receive tags corresponding to resources from the
// policy granted to the user and from the policies granted
// to the Anonymous and LoggedIn groups.
w = httptest.NewRecorder()
req = newRequest("GET", "/auth/resources?tags", nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
expectedTags := make([]string, 0)
for _, resourcePath := range expectedResources {
resource := getResourceWithPath(t, resourcePath)
expectedTags = append(expectedTags, resource.Tag)
}
// result.Resources actually contains tags, not resources, when
// using GET `/auth/resources?tags`.
for _, tag := range result.Resources {
// assert there is some resource in expectedResources
// which has this tag.
					assert.Containsf(t, expectedTags, tag, "tag %s not found in %v", tag, expectedTags)
}
})
t.Run("GET_userDoesNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/auth/resources", nil)
badUsername := "hulkhogan12"
token := TestJWT{username: badUsername}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
// expect to receive only the resources in policies granted
// to the Anonymous and LoggedIn groups.
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
expectedResources := append(anonymousResourcePaths, loggedInResourcePaths...)
msg := fmt.Sprintf("got resources: %v \t Wanted: %v", result.Resources, expectedResources)
assert.ElementsMatch(t, expectedResources, result.Resources, msg)
// check the response returning tags is also correct:
// expect to receive only tags corresponding to resources
// in policies granted to the Anonymous and LoggedIn groups.
w = httptest.NewRecorder()
req = newRequest("GET", "/auth/resources?tags", nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
expectedTags := make([]string, 0)
for _, resourcePath := range expectedResources {
resource := getResourceWithPath(t, resourcePath)
expectedTags = append(expectedTags, resource.Tag)
}
// result.Resources actually contains tags, not resources, when
// using GET `/auth/resources?tags`.
for _, tag := range result.Resources {
// assert there is some resource in expectedResources
// which has this tag.
assert.Containsf(t, expectedTags, tag, "tag %s not found in expectedTags %v", tag, expectedTags)
}
})
t.Run("GET_noUsernameProvided", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("GET", "/auth/resources", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
// expect to receive only resources from policies granted to
// the Anonymous group.
msg := fmt.Sprintf("got resources: %v \t Wanted: %v", result.Resources, anonymousResourcePaths)
assert.ElementsMatch(t, anonymousResourcePaths, result.Resources, msg)
// check the response returning tags is also correct:
// expect to receive tags corresponding to resources from
// the policies granted to the Anonymous group.
w = httptest.NewRecorder()
req = newRequest("GET", "/auth/resources?tags", nil)
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
anonymousTags := make([]string, 0)
for _, resourcePath := range anonymousResourcePaths {
resource := getResourceWithPath(t, resourcePath)
anonymousTags = append(anonymousTags, resource.Tag)
}
// result.Resources actually contains tags, not resources, when
// using GET `/auth/resources?tags`.
for _, tag := range result.Resources {
// assert there is some resource in anonymousResourcePaths
// which has this tag.
assert.Containsf(t, anonymousTags, tag, "tag %s not found in anonymousTags %v", tag, anonymousTags)
}
})
t.Run("GET_expiredPolicy", func(t *testing.T) {
expiredTimestamp := time.Now().Add(time.Duration(-1) * time.Minute).Format(time.RFC3339)
grantUserPolicy(t, username, policyName, expiredTimestamp)
w := httptest.NewRecorder()
req := newRequest("GET", "/auth/resources", nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg := fmt.Sprintf("result contains resource %s corresponding to expired policy %s", resourcePath, policyName)
assert.NotContains(t, result.Resources, resourcePath, msg)
grantUserPolicy(t, username, policyName, "null")
})
t.Run("POST", func(t *testing.T) {
w := httptest.NewRecorder()
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
// expect to receive the resources from the policy granted to the user,
// as well as the resources from the policies granted to the
// anonymous and loggedin groups.
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
expectedResources := append(anonymousResourcePaths, loggedInResourcePaths...)
expectedResources = append(expectedResources, resourcePath)
msg := fmt.Sprintf("got resources: %v \t Wanted: %v", result.Resources, expectedResources)
assert.ElementsMatch(t, expectedResources, result.Resources, msg)
// check the response returning tags is also correct:
// expect to receive tags corresponding to resources from the
// policy granted to the user and from the policies granted
// to the Anonymous and LoggedIn groups.
w = httptest.NewRecorder()
req = newRequest("POST", "/auth/resources?tags", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
expectedTags := make([]string, 0)
for _, resourcePath := range expectedResources {
resource := getResourceWithPath(t, resourcePath)
expectedTags = append(expectedTags, resource.Tag)
}
// result.Resources actually contains tags, not resources, when
// using GET `/auth/resources?tags`.
for _, tag := range result.Resources {
// assert there is some resource in expectedResources
// which has this tag.
assert.Containsf(t, expectedTags, tag, "tag %s not found in %v", tag, expectedTags)
}
})
t.Run("POST_userDoesNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
badUsername := "hulkhogan12"
token := TestJWT{username: badUsername}
body := []byte(fmt.Sprintf(`{"user": {"token": "%s"}}`, token.Encode()))
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
// expect to receive the resources from the policies granted to the
// anonymous and loggedin groups.
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
expectedResources := append(anonymousResourcePaths, loggedInResourcePaths...)
msg := fmt.Sprintf("got resources: %v \t Wanted: %v", result.Resources, expectedResources)
assert.ElementsMatch(t, expectedResources, result.Resources, msg)
// check the response returning tags is also correct:
// expect to receive tags corresponding to resources from the
// policies granted to the Anonymous and LoggedIn groups.
w = httptest.NewRecorder()
req = newRequest("POST", "/auth/resources?tags", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg = fmt.Sprintf("got response body: %s", w.Body.String())
expectedTags := make([]string, 0)
for _, resourcePath := range expectedResources {
resource := getResourceWithPath(t, resourcePath)
expectedTags = append(expectedTags, resource.Tag)
}
// result.Resources actually contains tags, not resources, when
// using GET `/auth/resources?tags`.
for _, tag := range result.Resources {
// assert there is some resource in expectedResources
// which has this tag.
assert.Containsf(t, expectedTags, tag, "tag %s not found in %v", tag, expectedTags)
}
})
t.Run("Policies", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(fmt.Sprintf(
`{"user": {"token": "%s", "policies": ["%s"]}}`,
token.Encode(),
policyName,
))
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
// expect to receive exactly the resources for the policies listed in the request body.
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Equal(t, []string{resourcePath}, result.Resources, msg)
})
t.Run("GET_noDuplicatedMappings", func(t *testing.T) {
// Setup: Add the policies in the `anonymous` group to the user.
// Adding the policies of the `anonymous` group to the user also adds
// the resources of the `anonymous` group to the user.
for _, policy := range anonymousPolicies {
grantUserPolicy(t, username, policy.Name, "null")
}
// Expect these shared mappings not to be duplicated in the auth resources response.
w := httptest.NewRecorder()
req := newRequest("GET", "/auth/resources", nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
// expect to receive the resources from the policy granted to the user,
// as well as the resources from the policies granted to the
// anonymous and loggedin groups.
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
// Assert that there are no duplicates in the slice.
// (ElementsMatch compares slices ignoring the order of the elements,
// so it will fail if one slice contains duplicate values).
expectedResources := append(anonymousResourcePaths, loggedInResourcePaths...)
expectedResources = append(expectedResources, resourcePath)
msg := fmt.Sprintf("got resources: %v \t Expected resources: %v", result.Resources, expectedResources)
assert.ElementsMatch(t, expectedResources, result.Resources, msg)
// Teardown: remove the policies we just added.
for _, policy := range anonymousPolicies {
revokeUserPolicy(t, username, policy.Name)
}
})
})
t.Run("BadRequest", func(t *testing.T) {
t.Run("NotJSON", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte("not real JSON")
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error")
}
})
/*
t.Run("MissingFields", func(t *testing.T) {
w := httptest.NewRecorder()
body := []byte(`{}`)
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
fmt.Println(w.Body.String())
if w.Code != http.StatusBadRequest {
httpError(t, w, "expected error from request missing fields")
}
})
*/
})
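// Setup for the client subtests below: a dedicated resource and policy are
// created and granted to both the client and the user, so requests carrying
// a client ID can be told apart from plain user requests.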
policyName := "client_policy"
clientResourcePath := "/client_resource"
resourceBody := []byte(fmt.Sprintf(`{"path": "%s"}`, clientResourcePath))
policyBody := []byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["%s"],
"role_ids": ["%s"]
}`,
policyName,
clientResourcePath,
roleName,
))
createClientBytes(t, clientBody)
createResourceBytes(t, resourceBody)
createPolicyBytes(t, policyBody)
grantClientPolicy(t, clientID, policyName)
grantUserPolicy(t, username, policyName, "null")
t.Run("Client", func(t *testing.T) {
w := httptest.NewRecorder()
token := TestJWT{username: username, clientID: clientID}
body := []byte(fmt.Sprintf(`{"user": {"token": "%s"}}`, token.Encode()))
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
// expect the response to contain the resource granted to the client through its policy.
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Contains(t, result.Resources, clientResourcePath, msg)
})
t.Run("Both", func(t *testing.T) {
w := httptest.NewRecorder()
token := TestJWT{username: username, clientID: clientID}
body := []byte(fmt.Sprintf(`{"user": {"token": "%s"}}`, token.Encode()))
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Contains(t, result.Resources, clientResourcePath, msg)
assert.Contains(t, result.Resources, resourcePath, msg)
})
groupName := "test_resources_group"
policyName = "group_policy"
groupResourcePath := "/group_resource"
resourceBody = []byte(fmt.Sprintf(`{"path": "%s"}`, groupResourcePath))
policyBody = []byte(fmt.Sprintf(
`{
"id": "%s",
"resource_paths": ["%s"],
"role_ids": ["%s"]
}`,
policyName,
groupResourcePath,
roleName,
))
createResourceBytes(t, resourceBody)
createPolicyBytes(t, policyBody)
groupBody := []byte(fmt.Sprintf(
`{
"name": "%s",
"policies": ["%s"],
"users": []
}`,
groupName,
policyName,
))
createGroupBytes(t, groupBody)
addUserToGroup(t, username, groupName)
t.Run("Group", func(t *testing.T) {
w := httptest.NewRecorder()
token := TestJWT{username: username, clientID: clientID}
body := []byte(fmt.Sprintf(`{"user": {"token": "%s"}}`, token.Encode()))
req := newRequest("POST", "/auth/resources", bytes.NewBuffer(body))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth resources request failed")
}
result := struct {
Resources []string `json:"resources"`
}{}
err = json.Unmarshal(w.Body.Bytes(), &result)
if err != nil {
httpError(t, w, "couldn't read response from auth resources")
}
msg := fmt.Sprintf("got response body: %s", w.Body.String())
assert.Contains(t, result.Resources, groupResourcePath, msg)
assert.Contains(t, result.Resources, resourcePath, msg)
})
})
deleteEverything()
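// The /auth/proxy subtests exercise the three query parameters the endpoint
// expects: resource, service, and method. A request should succeed only if
// the token's policies grant the (service, method) pair on the resource.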
t.Run("Proxy", func(t *testing.T) {
createResourceBytes(t, resourceBody)
createRoleBytes(t, roleBody)
createPolicyBytes(t, policyBody)
createUserBytes(t, userBody)
grantUserPolicy(t, username, policyName, "null")
token := TestJWT{username: username}
t.Run("Authorized", func(t *testing.T) {
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(serviceName),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth proxy request failed")
}
})
t.Run("BadRequest", func(t *testing.T) {
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape("not-even-a-resource-path"),
url.QueryEscape(serviceName),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusForbidden {
httpError(t, w, "auth proxy request succeeded when it should not have")
}
})
t.Run("Unauthorized", func(t *testing.T) {
t.Run("BadHeader", func(t *testing.T) {
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(serviceName),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", "Bearer garbage")
handler.ServeHTTP(w, req)
if w.Code != http.StatusUnauthorized {
httpError(t, w, "auth proxy request succeeded when it should not have")
}
})
t.Run("TokenExpired", func(t *testing.T) {
token := TestJWT{username: username, exp: 1}
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(serviceName),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusUnauthorized {
httpError(t, w, "auth proxy request succeeded when it should not have")
}
})
t.Run("ResourceNotExist", func(t *testing.T) {
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape("/not/authorized"),
url.QueryEscape(serviceName),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusForbidden {
httpError(t, w, "auth proxy request succeeded when it should not have")
}
})
t.Run("WrongMethod", func(t *testing.T) {
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(serviceName),
url.QueryEscape("bogus_method"),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusForbidden {
httpError(t, w, "auth proxy request succeeded when it should not have")
}
})
t.Run("WrongService", func(t *testing.T) {
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape("bogus_service"),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusForbidden {
httpError(t, w, "auth proxy request succeeded when it should not have")
}
})
})
t.Run("MissingAuthHeader", func(t *testing.T) {
w := httptest.NewRecorder()
// request is good
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(serviceName),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
// but no header added to the request!
handler.ServeHTTP(w, req)
if w.Code != http.StatusUnauthorized {
httpError(t, w, "auth proxy request without auth header didn't fail as expected")
}
})
t.Run("MissingMethod", func(t *testing.T) {
w := httptest.NewRecorder()
// omit method
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(serviceName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "auth proxy request did not error as expected")
}
})
t.Run("MissingService", func(t *testing.T) {
w := httptest.NewRecorder()
// omit service
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "auth proxy request did not error as expected")
}
})
t.Run("MissingResource", func(t *testing.T) {
w := httptest.NewRecorder()
// omit resource
authUrl := fmt.Sprintf(
"/auth/proxy?&method=%sservice=%s",
url.QueryEscape(methodName),
url.QueryEscape(serviceName),
)
req := newRequest("GET", authUrl, nil)
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusBadRequest {
httpError(t, w, "auth proxy request did not error as expected")
}
})
t.Run("Client", func(t *testing.T) {
createClientBytes(t, clientBody)
t.Run("Forbidden", func(t *testing.T) {
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(serviceName),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
token := TestJWT{username: username, clientID: clientID}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusForbidden {
httpError(t, w, "auth proxy request succeeded when it should not have")
}
})
grantClientPolicy(t, clientID, policyName)
t.Run("Granted", func(t *testing.T) {
w := httptest.NewRecorder()
authUrl := fmt.Sprintf(
"/auth/proxy?resource=%s&service=%s&method=%s",
url.QueryEscape(resourcePath),
url.QueryEscape(serviceName),
url.QueryEscape(methodName),
)
req := newRequest("GET", authUrl, nil)
token := TestJWT{username: username, clientID: clientID}
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Encode()))
handler.ServeHTTP(w, req)
if w.Code != http.StatusOK {
httpError(t, w, "auth proxy request failed")
}
})
})
})
tearDown(t)
})
deleteEverything()
}
| ["\"ARBORIST_TEST_DB\""] | [] | ["ARBORIST_TEST_DB"] | [] | ["ARBORIST_TEST_DB"] | go | 1 | 0 | |
app/sql.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import funct
mysql_enable = funct.get_config_var('mysql', 'enable')
if mysql_enable == '1':
import mysql.connector as sqltool
else:
db = "/var/www/haproxy-wi/app/haproxy-wi.db"
import sqlite3 as sqltool
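# Open a database handle. A MySQL connection is used when enabled in the
# config; otherwise the bundled SQLite file is opened with autocommit
# (isolation_level=None). Returns a (connection, cursor) pair.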
def get_cur():
try:
if mysql_enable == '0':
con = sqltool.connect(db, isolation_level=None)
else:
mysql_user = funct.get_config_var('mysql', 'mysql_user')
mysql_password = funct.get_config_var('mysql', 'mysql_password')
mysql_db = funct.get_config_var('mysql', 'mysql_db')
mysql_host = funct.get_config_var('mysql', 'mysql_host')
mysql_port = funct.get_config_var('mysql', 'mysql_port')
con = sqltool.connect(user=mysql_user, password=mysql_password,
host=mysql_host, port=mysql_port,
database=mysql_db)
cur = con.cursor()
except sqltool.Error as e:
funct.logging('DB ', ' ' + str(e), haproxywi=1, login=1)
else:
return con, cur
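# Create a user row. The sentinel password 'aduser' marks the account as an
# LDAP user, stored without a local password hash.
# Hypothetical usage (illustrative values only):
#   add_user('alice', '[email protected]', 's3cret', 'admin', '1', '1')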
def add_user(user, email, password, role, group, activeuser):
con, cur = get_cur()
if password != 'aduser':
sql = """INSERT INTO user (username, email, password, role, groups, activeuser) VALUES ('%s', '%s', '%s', '%s', '%s', '%s')""" % (user, email, funct.get_hash(password), role, group, activeuser)
else:
sql = """INSERT INTO user (username, email, role, groups, ldap_user, activeuser) VALUES ('%s', '%s', '%s', '%s', '1', '%s')""" % (user, email, role, group, activeuser)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
else:
return True
cur.close()
con.close()
def update_user(user, email, role, group, id, activeuser):
con, cur = get_cur()
sql = """update user set username = '%s',
email = '%s',
role = '%s',
groups = '%s',
activeuser = '%s'
where id = '%s'""" % (user, email, role, group, activeuser, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
else:
return True
cur.close()
con.close()
def update_user_password(password, id):
con, cur = get_cur()
sql = """update user set password = '%s'
where id = '%s'""" % (funct.get_hash(password), id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
else:
return True
cur.close()
con.close()
def delete_user(id):
con, cur = get_cur()
sql = """delete from user where id = '%s'""" % (id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
else:
return True
cur.close()
con.close()
def add_group(name, description):
con, cur = get_cur()
sql = """INSERT INTO groups (name, description) VALUES ('%s', '%s')""" % (name, description)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
else:
return True
cur.close()
con.close()
def delete_group(id):
con, cur = get_cur()
sql = """ delete from groups where id = '%s'""" % (id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def update_group(name, descript, id):
con, cur = get_cur()
sql = """ update groups set
name = '%s',
description = '%s'
where id = '%s';
""" % (name, descript, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
else:
return True
cur.close()
con.close()
def add_server(hostname, ip, group, typeip, enable, master, cred, port, desc, haproxy, nginx):
con, cur = get_cur()
sql = """ INSERT INTO servers (hostname, ip, groups, type_ip, enable, master, cred, port, `desc`, haproxy, nginx)
VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')
""" % (hostname, ip, group, typeip, enable, master, cred, port, desc, haproxy, nginx)
try:
cur.execute(sql)
con.commit()
return True
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
cur.close()
con.close()
def delete_server(id):
con, cur = get_cur()
sql = """ delete from servers where id = '%s'""" % (id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def update_hapwi_server(id, alert, metrics, active):
con, cur = get_cur()
sql = """ update servers set
alert = '%s',
metrics = '%s',
active = '%s'
where id = '%s'""" % (alert, metrics, active, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def update_server(hostname, group, typeip, enable, master, id, cred, port, desc, haproxy, nginx):
con, cur = get_cur()
sql = """ update servers set
hostname = '%s',
groups = '%s',
type_ip = '%s',
enable = '%s',
master = '%s',
cred = '%s',
port = '%s',
`desc` = '%s',
haproxy = '%s',
nginx = '%s'
where id = '%s'""" % (hostname, group, typeip, enable, master, cred, port, desc, haproxy, nginx, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def update_server_master(master, slave):
con, cur = get_cur()
sql = """ select id from servers where ip = '%s' """ % master
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
for id in cur.fetchall():
sql = """ update servers set master = '%s' where ip = '%s' """ % (id[0], slave)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
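# Fetch user rows. With no kwargs every user is returned; pass user='name'
# or id=<pk> to filter, e.g. select_users(user='alice') (hypothetical value).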
def select_users(**kwargs):
con, cur = get_cur()
sql = """select * from user ORDER BY id"""
if kwargs.get("user") is not None:
sql = """select * from user where username='%s' """ % kwargs.get("user")
if kwargs.get("id") is not None:
sql = """select * from user where id='%s' """ % kwargs.get("id")
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_groups(**kwargs):
con, cur = get_cur()
sql = """select * from groups ORDER BY id"""
if kwargs.get("group") is not None:
sql = """select * from groups where name='%s' """ % kwargs.get("group")
if kwargs.get("id") is not None:
sql = """select * from groups where id='%s' """ % kwargs.get("id")
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_user_name_group(id):
con, cur = get_cur()
sql = """select name from groups where id='%s' """ % id
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for group in cur.fetchone():
return group
cur.close()
con.close()
def select_server_by_name(name):
con, cur = get_cur()
sql = """select ip from servers where hostname='%s' """ % name
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for name in cur.fetchone():
return name
cur.close()
con.close()
def select_servers(**kwargs):
con, cur = get_cur()
sql = """select * from servers where enable = '1' ORDER BY groups """
if kwargs.get("server") is not None:
sql = """select * from servers where ip='%s' """ % kwargs.get("server")
if kwargs.get("full") is not None:
sql = """select * from servers ORDER BY hostname """
if kwargs.get("get_master_servers") is not None:
sql = """select id,hostname from servers where master = 0 and type_ip = 0 and enable = 1 ORDER BY groups """
if kwargs.get("get_master_servers") is not None and kwargs.get('uuid') is not None:
sql = """ select servers.id, servers.hostname from servers
left join user as user on servers.groups = user.groups
left join uuid as uuid on user.id = uuid.user_id
where uuid.uuid = '%s' and servers.master = 0 and servers.type_ip = 0 and servers.enable = 1 ORDER BY servers.groups
""" % kwargs.get('uuid')
if kwargs.get("id"):
sql = """select * from servers where id='%s' """ % kwargs.get("id")
if kwargs.get("hostname"):
sql = """select * from servers where hostname='%s' """ % kwargs.get("hostname")
if kwargs.get("id_hostname"):
sql = """select * from servers where hostname='%s' or id = '%s' or ip = '%s'""" % (kwargs.get("id_hostname"), kwargs.get("id_hostname"), kwargs.get("id_hostname"))
if kwargs.get("server") and kwargs.get("keep_alive"):
sql = """select active from servers where ip='%s' """ % kwargs.get("server")
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
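# Persist a session UUID for a user with an expiry of session_ttl days.
# The expiry expression differs by backend: MySQL uses now() + INTERVAL,
# SQLite uses datetime('now', '+N days').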
def write_user_uuid(login, user_uuid):
con, cur = get_cur()
session_ttl = get_setting('session_ttl')
session_ttl = int(session_ttl)
sql = """ select id from user where username = '%s' """ % login
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
for id in cur.fetchall():
if mysql_enable == '1':
sql = """ insert into uuid (user_id, uuid, exp) values('%s', '%s', now()+ INTERVAL '%s' day) """ % (id[0], user_uuid, session_ttl)
else:
sql = """ insert into uuid (user_id, uuid, exp) values('%s', '%s', datetime('now', '+%s days')) """ % (id[0], user_uuid, session_ttl)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def write_user_token(login, user_token):
con, cur = get_cur()
token_ttl = get_setting('token_ttl')
sql = """ select id from user where username = '%s' """ % login
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
for id in cur.fetchall():
if mysql_enable == '1':
sql = """ insert into token (user_id, token, exp) values('%s', '%s', now()+ INTERVAL %s day) """ % (id[0], user_token, token_ttl)
else:
sql = """ insert into token (user_id, token, exp) values('%s', '%s', datetime('now', '+%s days')) """ % (id[0], user_token, token_ttl)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def get_token(uuid):
con, cur = get_cur()
sql = """ select token.token from token left join uuid as uuid on uuid.user_id = token.user_id where uuid.uuid = '%s' """ % uuid
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for token in cur.fetchall():
return token[0]
cur.close()
con.close()
def delete_uuid(uuid):
con, cur = get_cur()
sql = """ delete from uuid where uuid = '%s' """ % uuid
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
pass
cur.close()
con.close()
def delete_old_uuid():
con, cur = get_cur()
if mysql_enable == '1':
sql = """ delete from uuid where exp < now() or exp is NULL """
sql1 = """ delete from token where exp < now() or exp is NULL """
else:
sql = """ delete from uuid where exp < datetime('now') or exp is NULL"""
sql1 = """ delete from token where exp < datetime('now') or exp is NULL"""
try:
cur.execute(sql)
cur.execute(sql1)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def update_last_act_user(uuid):
con, cur = get_cur()
session_ttl = get_setting('session_ttl')
if mysql_enable == '1':
sql = """ update uuid set exp = now()+ INTERVAL %s day where uuid = '%s' """ % (session_ttl, uuid)
else:
sql = """ update uuid set exp = datetime('now', '+%s days') where uuid = '%s' """ % (session_ttl, uuid)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def get_user_name_by_uuid(uuid):
con, cur = get_cur()
sql = """ select user.username from user left join uuid as uuid on user.id = uuid.user_id where uuid.uuid = '%s' """ % uuid
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for user_id in cur.fetchall():
return user_id[0]
cur.close()
con.close()
def get_user_role_by_uuid(uuid):
con, cur = get_cur()
try:
if mysql_enable == '1':
cur.execute( """ select role.id from user left join uuid as uuid on user.id = uuid.user_id left join role on role.name = user.role where uuid.uuid = '%s' """ % uuid )
else:
cur.execute("select role.id from user left join uuid as uuid on user.id = uuid.user_id left join role on role.name = user.role where uuid.uuid = ?", (uuid,))
except sqltool.Error as e:
funct.out_error(e)
else:
for user_id in cur.fetchall():
return int(user_id[0])
cur.close()
con.close()
def get_role_id_by_name(name):
con, cur = get_cur()
sql = """ select id from role where name = '%s' """ % name
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for user_id in cur.fetchall():
return user_id[0]
cur.close()
con.close()
def get_user_group_by_uuid(uuid):
con, cur = get_cur()
sql = """ select user.groups from user left join uuid as uuid on user.id = uuid.user_id where uuid.uuid = '%s' """ % uuid
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for user_id in cur.fetchall():
return user_id[0]
cur.close()
con.close()
def get_user_telegram_by_uuid(uuid):
con, cur = get_cur()
sql = """ select telegram.* from telegram left join user as user on telegram.groups = user.groups left join uuid as uuid on user.id = uuid.user_id where uuid.uuid = '%s' """ % uuid
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def get_telegram_by_ip(ip):
con, cur = get_cur()
sql = """ select telegram.* from telegram left join servers as serv on serv.groups = telegram.groups where serv.ip = '%s' """ % ip
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
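# Build the list of servers the current user may see. The caller's group is
# resolved from the uuid cookie (or an explicit username), and optional
# kwargs (ip, haproxy, nginx, keepalived, virt, disable) narrow the query.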
def get_dick_permit(**kwargs):
import http.cookies
import os
cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
user_id = cookie.get('uuid')
disable = ''
haproxy = ''
nginx = ''
keepalived = ''
ip = ''
con, cur = get_cur()
if kwargs.get('username'):
sql = """ select * from user where username = '%s' """ % kwargs.get('username')
else:
sql = """ select * from user where username = '%s' """ % get_user_name_by_uuid(user_id.value)
if kwargs.get('virt'):
type_ip = ""
else:
type_ip = "and type_ip = 0"
if kwargs.get('disable') == 0:
disable = 'or enable = 0'
if kwargs.get('ip'):
ip = "and ip = '%s'" % kwargs.get('ip')
if kwargs.get('haproxy'):
haproxy = "and haproxy = 1"
if kwargs.get('nginx'):
nginx = "and nginx = 1"
if kwargs.get('keepalived'):
nginx = "and keepalived = 1"
try:
cur.execute(sql)
except sqltool.Error as e:
print("An error occurred:", e)
else:
for group in cur:
if group[5] == '1':
sql = """ select * from servers where enable = 1 %s %s %s """ % (disable, type_ip, nginx)
else:
sql = """ select * from servers where groups like '%{group}%' and (enable = 1 {disable}) {type_ip} {ip} {haproxy} {nginx} {keepalived}
""".format(group=group[5], disable=disable, type_ip=type_ip, ip=ip, haproxy=haproxy, nginx=nginx, keepalived=keepalived)
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def is_master(ip, **kwargs):
con, cur = get_cur()
sql = """ select slave.ip, slave.hostname from servers as master left join servers as slave on master.id = slave.master where master.ip = '%s' """ % ip
if kwargs.get('master_slave'):
sql = """ select master.hostname, master.ip, slave.hostname, slave.ip from servers as master left join servers as slave on master.id = slave.master where slave.master > 0 """
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_ssh(**kwargs):
con, cur = get_cur()
sql = """select * from cred """
if kwargs.get("name") is not None:
sql = """select * from cred where name = '%s' """ % kwargs.get("name")
if kwargs.get("id") is not None:
sql = """select * from cred where id = '%s' """ % kwargs.get("id")
if kwargs.get("serv") is not None:
sql = """select serv.cred, cred.* from servers as serv left join cred on cred.id = serv.cred where serv.ip = '%s' """ % kwargs.get("serv")
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
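# Store an SSH credential set. Hypothetical usage (illustrative values only):
#   insert_new_ssh('prod-key', '1', '1', 'deploy', 'secret')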
def insert_new_ssh(name, enable, group, username, password):
con, cur = get_cur()
sql = """insert into cred(name, enable, groups, username, password) values ('%s', '%s', '%s', '%s', '%s') """ % (name, enable, group, username, password)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def delete_ssh(id):
con, cur = get_cur()
sql = """ delete from cred where id = %s """ % (id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def update_ssh(id, name, enable, group, username, password):
con, cur = get_cur()
sql = """ update cred set
name = '%s',
enable = '%s',
groups = %s,
username = '%s',
password = '%s' where id = '%s' """ % (name, enable, group, username, password, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def insert_backup_job(server, rserver, rpath, type, time, cred, description):
con, cur = get_cur()
sql = """insert into backups(server, rhost, rpath, type, time, cred, description) values ('%s', '%s', '%s', '%s', '%s', '%s', '%s') """ % (server, rserver, rpath, type, time, cred, description)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
else:
return True
cur.close()
con.close()
def select_backups(**kwargs):
con, cur = get_cur()
sql = """select * from backups ORDER BY id"""
if kwargs.get("server") is not None and kwargs.get("rserver") is not None:
sql = """select * from backups where server='%s' and rhost = '%s' """ % (kwargs.get("server"), kwargs.get("rserver"))
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def update_backup(server, rserver, rpath, type, time, cred, description, id):
con, cur = get_cur()
sql = """update backups set server = '%s',
rhost = '%s',
rpath = '%s',
type = '%s',
time = '%s',
cred = '%s',
description = '%s' where id = '%s' """ % (server, rserver, rpath, type, time, cred, description, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
else:
return True
cur.close()
con.close()
def delete_backups(id):
con, cur = get_cur()
sql = """ delete from backups where id = %s """ % (id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def check_exists_backup(server):
con, cur = get_cur()
sql = """ select id from backups where server = '%s' """ % server
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for s in cur.fetchall():
if s[0] is not None:
return True
else:
return False
cur.close()
con.close()
def insert_new_telegram(token, chanel, group):
con, cur = get_cur()
sql = """insert into telegram(`token`, `chanel_name`, `groups`) values ('%s', '%s', '%s') """ % (token, chanel, group)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
print('<span class="alert alert-danger" id="error">An error occurred: ' + str(e.args[0]) + ' <a title="Close" id="errorMess"><b>X</b></a></span>')
con.rollback()
else:
return True
cur.close()
con.close()
def delete_telegram(id):
con, cur = get_cur()
sql = """ delete from telegram where id = %s """ % (id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def select_telegram(**kwargs):
con, cur = get_cur()
sql = """select * from telegram """
if kwargs.get('group'):
sql = """select * from telegram where groups = '%s' """ % kwargs.get('group')
if kwargs.get('token'):
sql = """select * from telegram where token = '%s' """ % kwargs.get('token')
if kwargs.get('id'):
sql = """select * from telegram where id = '%s' """ % kwargs.get('id')
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def update_telegram(token, chanel, group, id):
con, cur = get_cur()
sql = """ update telegram set
`token` = '%s',
`chanel_name` = '%s',
`groups` = '%s'
where id = '%s' """ % (token, chanel, group, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def insert_new_option(option, group):
con, cur = get_cur()
sql = """insert into options(`options`, `groups`) values ('%s', '%s') """ % (option, group)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def select_options(**kwargs):
con, cur = get_cur()
sql = """select * from options """
if kwargs.get('option'):
sql = """select * from options where options = '%s' """ % kwargs.get('option')
if kwargs.get('group'):
sql = """select options from options where groups = '{}' and options like '{}%' """.format(kwargs.get('group'), kwargs.get('term'))
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def update_options(option, id):
con, cur = get_cur()
sql = """ update options set
options = '%s'
where id = '%s' """ % (option, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def delete_option(id):
con, cur = get_cur()
sql = """ delete from options where id = %s """ % (id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def insert_new_savedserver(server, description, group):
con, cur = get_cur()
sql = """insert into saved_servers(`server`, `description`, `groups`) values ('%s', '%s', '%s') """ % (server, description, group)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
def select_saved_servers(**kwargs):
con, cur = get_cur()
sql = """select * from saved_servers """
if kwargs.get('server'):
sql = """select * from saved_servers where server = '%s' """ % kwargs.get('server')
if kwargs.get('group'):
sql = """select server,description from saved_servers where groups = '{}' and server like '{}%' """.format(kwargs.get('group'), kwargs.get('term'))
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def update_savedserver(server, description, id):
con, cur = get_cur()
sql = """ update saved_servers set
server = '%s',
description = '%s'
where id = '%s' """ % (server, description, id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def delete_savedserver(id):
con, cur = get_cur()
sql = """ delete from saved_servers where id = %s """ % (id)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
else:
return True
cur.close()
con.close()
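# Record one HAProxy metrics sample (connections and session rates) with a
# backend-appropriate timestamp. The 'mentrics' spelling is a long-standing
# typo in the original code, kept here so existing callers keep working.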
def insert_mentrics(serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate):
con, cur = get_cur()
if mysql_enable == '1':
sql = """ insert into metrics (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate, date) values('%s', '%s', '%s', '%s', '%s', now()) """ % (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate)
else:
sql = """ insert into metrics (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate, date) values('%s', '%s', '%s', '%s', '%s', datetime('now', 'localtime')) """ % (serv, curr_con, cur_ssl_con, sess_rate, max_sess_rate)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def select_waf_metrics_enable(id):
con, cur = get_cur()
sql = """ select waf.metrics from waf left join servers as serv on waf.server_id = serv.id where server_id = '%s' """ % id
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_waf_metrics_enable_server(ip):
con, cur = get_cur()
sql = """ select waf.metrics from waf left join servers as serv on waf.server_id = serv.id where ip = '%s' """ % ip
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for enable in cur.fetchall():
return enable[0]
cur.close()
con.close()
def select_waf_servers(serv):
con, cur = get_cur()
sql = """ select serv.ip from waf left join servers as serv on waf.server_id = serv.id where serv.ip = '%s' """ % serv
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_all_waf_servers():
con, cur = get_cur()
sql = """ select serv.ip from waf left join servers as serv on waf.server_id = serv.id """
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_waf_servers_metrics(uuid, **kwargs):
con, cur = get_cur()
sql = """ select * from user where username = '%s' """ % get_user_name_by_uuid(uuid)
try:
cur.execute(sql)
except sqltool.Error as e:
print("An error occurred:", e)
else:
for group in cur:
if group[5] == '1':
sql = """ select servers.ip from servers left join waf as waf on waf.server_id = servers.id where servers.enable = 1 and waf.metrics = '1' """
else:
sql = """ select servers.ip from servers left join waf as waf on waf.server_id = servers.id where servers.enable = 1 and waf.metrics = '1' and servers.groups like '%{group}%' """.format(group=group[5])
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_waf_metrics(serv, **kwargs):
con, cur = get_cur()
sql = """ select * from (select * from waf_metrics where serv = '%s' order by `date` desc limit 60) order by `date`""" % serv
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def insert_waf_metrics_enable(serv, enable):
con, cur = get_cur()
sql = """ insert into waf (server_id, metrics) values((select id from servers where ip = '%s'), '%s') """ % (serv, enable)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def delete_waf_server(id):
con, cur = get_cur()
sql = """ delete from waf where server_id = '%s' """ % id
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def insert_waf_mentrics(serv, conn):
con, cur = get_cur()
if mysql_enable == '1':
sql = """ insert into waf_metrics (serv, conn, date) values('%s', '%s', now()) """ % (serv, conn)
else:
sql = """ insert into waf_metrics (serv, conn, date) values('%s', '%s', datetime('now', 'localtime')) """ % (serv, conn)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def delete_waf_mentrics():
con, cur = get_cur()
if mysql_enable == '1':
sql = """ delete from metrics where date < now() - INTERVAL 3 day """
else:
sql = """ delete from metrics where date < datetime('now', '-3 days') """
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def update_waf_metrics_enable(name, enable):
con, cur = get_cur()
sql = """ update waf set metrics = %s where server_id = (select id from servers where hostname = '%s') """ % (enable, name)
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def delete_mentrics():
con, cur = get_cur()
if mysql_enable == '1':
sql = """ delete from metrics where date < now() - INTERVAL 3 day """
else:
sql = """ delete from metrics where date < datetime('now', '-3 days') """
try:
cur.execute(sql)
con.commit()
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
cur.close()
con.close()
def select_metrics(serv, **kwargs):
con, cur = get_cur()
sql = """ select * from (select * from metrics where serv = '%s' order by `date` desc limit 60) order by `date` """ % serv
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_servers_metrics_for_master():
con, cur = get_cur()
sql = """select ip from servers where metrics = 1 """
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_servers_metrics(uuid, **kwargs):
con, cur = get_cur()
sql = """ select * from user where username = '%s' """ % get_user_name_by_uuid(uuid)
try:
cur.execute(sql)
except sqltool.Error as e:
print("An error occurred:", e)
else:
for group in cur:
if group[5] == '1':
sql = """ select ip from servers where enable = 1 and metrics = '1' """
else:
sql = """ select ip from servers where groups like '%{group}%' and metrics = '1'""".format(group=group[5])
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
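# Build the metrics summary table: for every server with metrics enabled,
# average and maximum session rates and connection counts are computed over
# 1-hour, 24-hour and 3-day windows via one large self-joined query.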
def select_table_metrics(uuid):
con, cur = get_cur()
groups = ""
sql = """ select * from user where username = '%s' """ % get_user_name_by_uuid(uuid)
try:
cur.execute(sql)
except sqltool.Error as e:
print("An error occurred:", e)
else:
for group in cur:
if group[5] == '1':
groups = ""
else:
groups = "and servers.groups like '%{group}%' ".format(group=group[5])
if mysql_enable == '1':
sql = """
select ip.ip, hostname, avg_sess_1h, avg_sess_24h, avg_sess_3d, max_sess_1h, max_sess_24h, max_sess_3d, avg_cur_1h, avg_cur_24h, avg_cur_3d, max_con_1h, max_con_24h, max_con_3d from
(select servers.ip from servers where metrics = 1 ) as ip,
(select servers.ip, servers.hostname as hostname from servers left join metrics as metr on servers.ip = metr.serv where servers.metrics = 1 %s) as hostname,
(select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_1h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(), INTERVAL -1 HOUR)
group by servers.ip) as avg_sess_1h,
(select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_24h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -24 HOUR)
group by servers.ip) as avg_sess_24h,
(select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_3d from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(), INTERVAL -3 DAY)
group by servers.ip ) as avg_sess_3d,
(select servers.ip,max(metr.sess_rate) as max_sess_1h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -1 HOUR)
group by servers.ip) as max_sess_1h,
(select servers.ip,max(metr.sess_rate) as max_sess_24h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -24 HOUR)
group by servers.ip) as max_sess_24h,
(select servers.ip,max(metr.sess_rate) as max_sess_3d from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -3 DAY)
group by servers.ip ) as max_sess_3d,
(select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_1h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -1 HOUR)
group by servers.ip) as avg_cur_1h,
(select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_24h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -24 HOUR)
group by servers.ip) as avg_cur_24h,
(select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_3d from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -3 DAY)
group by servers.ip ) as avg_cur_3d,
(select servers.ip,max(metr.curr_con) as max_con_1h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -1 HOUR)
group by servers.ip) as max_con_1h,
(select servers.ip,max(metr.curr_con) as max_con_24h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -24 HOUR)
group by servers.ip) as max_con_24h,
(select servers.ip,max(metr.curr_con) as max_con_3d from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= now() and metr.date >= DATE_ADD(NOW(),INTERVAL -3 DAY)
group by servers.ip ) as max_con_3d
where ip.ip=hostname.ip
and ip.ip=avg_sess_1h.ip
and ip.ip=avg_sess_24h.ip
and ip.ip=avg_sess_3d.ip
and ip.ip=max_sess_1h.ip
and ip.ip=max_sess_24h.ip
and ip.ip=max_sess_3d.ip
and ip.ip=avg_cur_1h.ip
and ip.ip=avg_cur_24h.ip
and ip.ip=avg_cur_3d.ip
and ip.ip=max_con_1h.ip
and ip.ip=max_con_24h.ip
and ip.ip=max_con_3d.ip
group by hostname.ip """ % groups
else:
sql = """
select ip.ip, hostname, avg_sess_1h, avg_sess_24h, avg_sess_3d, max_sess_1h, max_sess_24h, max_sess_3d, avg_cur_1h, avg_cur_24h, avg_cur_3d, max_con_1h, max_con_24h, max_con_3d from
(select servers.ip from servers where metrics = 1 ) as ip,
(select servers.ip, servers.hostname as hostname from servers left join metrics as metr on servers.ip = metr.serv where servers.metrics = 1 %s) as hostname,
(select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_1h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-1 hours', 'localtime')
group by servers.ip) as avg_sess_1h,
(select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_24h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-24 hours', 'localtime')
group by servers.ip) as avg_sess_24h,
(select servers.ip,round(avg(metr.sess_rate), 1) as avg_sess_3d from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-3 days', 'localtime')
group by servers.ip ) as avg_sess_3d,
(select servers.ip,max(metr.sess_rate) as max_sess_1h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-1 hours', 'localtime')
group by servers.ip) as max_sess_1h,
(select servers.ip,max(metr.sess_rate) as max_sess_24h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-24 hours', 'localtime')
group by servers.ip) as max_sess_24h,
(select servers.ip,max(metr.sess_rate) as max_sess_3d from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-3 days', 'localtime')
group by servers.ip ) as max_sess_3d,
(select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_1h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-1 hours', 'localtime')
group by servers.ip) as avg_cur_1h,
(select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_24h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-24 hours', 'localtime')
group by servers.ip) as avg_cur_24h,
(select servers.ip,round(avg(metr.curr_con+metr.cur_ssl_con), 1) as avg_cur_3d from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-3 days', 'localtime')
group by servers.ip ) as avg_cur_3d,
(select servers.ip,max(metr.curr_con) as max_con_1h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-1 hours', 'localtime')
group by servers.ip) as max_con_1h,
(select servers.ip,max(metr.curr_con) as max_con_24h from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-24 hours', 'localtime')
group by servers.ip) as max_con_24h,
(select servers.ip,max(metr.curr_con) as max_con_3d from servers
left join metrics as metr on metr.serv = servers.ip
where servers.metrics = 1 and
metr.date <= datetime('now', 'localtime') and metr.date >= datetime('now', '-3 days', 'localtime')
group by servers.ip ) as max_con_3d
where ip.ip=hostname.ip
and ip.ip=avg_sess_1h.ip
and ip.ip=avg_sess_24h.ip
and ip.ip=avg_sess_3d.ip
and ip.ip=max_sess_1h.ip
and ip.ip=max_sess_24h.ip
and ip.ip=max_sess_3d.ip
and ip.ip=avg_cur_1h.ip
and ip.ip=avg_cur_24h.ip
and ip.ip=avg_cur_3d.ip
and ip.ip=max_con_1h.ip
and ip.ip=max_con_24h.ip
and ip.ip=max_con_3d.ip
group by hostname.ip """ % groups
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
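# Read a value from the settings table, or every row when all=True.
# Hypothetical usage: get_setting('session_ttl') returns the TTL in days.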
def get_setting(param, **kwargs):
con, cur = get_cur()
sql = """select value from `settings` where param='%s' """ % param
if kwargs.get('all'):
sql = """select * from `settings` order by section desc"""
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
if kwargs.get('all'):
return cur.fetchall()
else:
for value in cur.fetchone():
return value
cur.close()
con.close()
def update_setting(param, val):
con, cur = get_cur()
sql = """update `settings` set `value` = '%s' where param = '%s' """ % (val, param)
try:
cur.execute(sql)
con.commit()
return True
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
cur.close()
con.close()
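# Illustrative sketch, not part of the original module: the helpers above build
# SQL with Python string formatting, which is open to SQL injection, and the
# cur.close()/con.close() calls placed after return are never reached. A safer,
# hypothetical variant would bind parameters and close in a finally block
# (the '?' placeholder style assumes an sqlite3-compatible driver behind sqltool):
def update_setting_safe(param, val):
	con, cur = get_cur()
	try:
		cur.execute("update `settings` set `value` = ? where param = ?", (val, param))
		con.commit()
		return True
	except sqltool.Error as e:
		funct.out_error(e)
		con.rollback()
		return False
	finally:
		cur.close()
		con.close()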
def get_ver():
con, cur = get_cur()
sql = """ select * from version; """
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for ver in cur.fetchall():
return ver[0]
cur.close()
con.close()
def select_roles(**kwargs):
con, cur = get_cur()
sql = """select * from role ORDER BY id"""
if kwargs.get("roles") is not None:
sql = """select * from role where name='%s' """ % kwargs.get("roles")
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_alert(**kwargs):
con, cur = get_cur()
sql = """select ip from servers where alert = 1 """
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_keep_alive(**kwargs):
con, cur = get_cur()
sql = """select ip from servers where active = 1 """
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
return cur.fetchall()
cur.close()
con.close()
def select_keealived(serv, **kwargs):
con, cur = get_cur()
sql = """select keepalived from `servers` where ip='%s' """ % serv
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for value in cur.fetchone():
return value
cur.close()
con.close()
def update_keepalived(serv):
con, cur = get_cur()
sql = """update `servers` set `keepalived` = '1' where ip = '%s' """ % serv
try:
cur.execute(sql)
con.commit()
return True
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
cur.close()
con.close()
def select_nginx(serv, **kwargs):
con, cur = get_cur()
sql = """select nginx from `servers` where ip='%s' """ % serv
try:
cur.execute(sql)
except sqltool.Error as e:
funct.out_error(e)
else:
for value in cur.fetchone():
return value
cur.close()
con.close()
def update_nginx(serv):
con, cur = get_cur()
sql = """update `servers` set `nginx` = '1' where ip = '%s' """ % serv
try:
cur.execute(sql)
con.commit()
return True
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
cur.close()
con.close()
def update_haproxy(serv):
con, cur = get_cur()
sql = """update `servers` set `haproxy` = '1' where ip = '%s' """ % serv
try:
cur.execute(sql)
con.commit()
return True
except sqltool.Error as e:
funct.out_error(e)
con.rollback()
return False
cur.close()
con.close()
def check_token_exists(token):
try:
import http.cookies
import os
cookie = http.cookies.SimpleCookie(os.environ.get("HTTP_COOKIE"))
user_id = cookie.get('uuid')
if get_token(user_id.value) == token:
return True
else:
try:
				funct.logging('localhost', ' tried to do an action with a wrong token', haproxywi=1, login=1)
			except Exception:
				funct.logging('localhost', ' An action with a wrong token', haproxywi=1)
return False
	except Exception:
		try:
			funct.logging('localhost', ' cannot check token', haproxywi=1, login=1)
		except Exception:
			funct.logging('localhost', ' Cannot check token', haproxywi=1)
return False
form = funct.form
error_mess = '<span class="alert alert-danger" id="error">All fields must be completed <a title="Close" id="errorMess"><b>X</b></a></span>'
def check_token():
if not check_token_exists(form.getvalue('token')):
print('Content-type: text/html\n')
print("Your token has been expired")
import sys
sys.exit()
def show_update_option(option):
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('templates/ajax'), autoescape=True)
template = env.get_template('/new_option.html')
print('Content-type: text/html\n')
template = template.render(options=select_options(option=option))
print(template)
def show_update_savedserver(server):
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('templates/ajax'), autoescape=True)
template = env.get_template('/new_saved_servers.html')
print('Content-type: text/html\n')
template = template.render(server=select_saved_servers(server=server))
print(template)
if form.getvalue('getoption'):
group = form.getvalue('getoption')
term = form.getvalue('term')
print('Content-type: application/json\n')
check_token()
options = select_options(group=group,term=term)
	a = {}
	for v, option in enumerate(options):
		a[v] = option[0]
import json
print(json.dumps(a))
if form.getvalue('newtoption'):
option = form.getvalue('newtoption')
group = form.getvalue('newoptiongroup')
print('Content-type: text/html\n')
check_token()
if option is None or group is None:
print(error_mess)
else:
if insert_new_option(option, group):
show_update_option(option)
if form.getvalue('updateoption') is not None:
option = form.getvalue('updateoption')
id = form.getvalue('id')
check_token()
if option is None or id is None:
print('Content-type: text/html\n')
print(error_mess)
else:
update_options(option, id)
if form.getvalue('optiondel') is not None:
print('Content-type: text/html\n')
check_token()
if delete_option(form.getvalue('optiondel')):
print("Ok")
if form.getvalue('getsavedserver'):
group = form.getvalue('getsavedserver')
term = form.getvalue('term')
print('Content-type: application/json\n')
check_token()
servers = select_saved_servers(group=group,term=term)
	a = {}
	for v, server in enumerate(servers):
		a[v] = {'value': server[0], 'desc': server[1]}
import json
print(json.dumps(a))
if form.getvalue('newsavedserver'):
savedserver = form.getvalue('newsavedserver')
description = form.getvalue('newsavedserverdesc')
group = form.getvalue('newsavedservergroup')
print('Content-type: text/html\n')
check_token()
if savedserver is None or group is None:
print(error_mess)
else:
if insert_new_savedserver(savedserver, description, group):
show_update_savedserver(savedserver)
if form.getvalue('updatesavedserver') is not None:
savedserver = form.getvalue('updatesavedserver')
description = form.getvalue('description')
id = form.getvalue('id')
print('Content-type: text/html\n')
check_token()
if savedserver is None or id is None:
print(error_mess)
else:
update_savedserver(savedserver, description, id)
if form.getvalue('savedserverdel') is not None:
print('Content-type: text/html\n')
check_token()
if delete_savedserver(form.getvalue('savedserverdel')):
print("Ok")
| []
| []
| [
"HTTP_COOKIE"
]
| [] | ["HTTP_COOKIE"] | python | 1 | 0 | |
CLUSTER_MANAGEMENT/ClusterManagement/ClusterManagement/asgi.py | """
ASGI config for ClusterManagement project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ClusterManagement.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
learning/alembic/env.py | # type: ignore
import os
from logging.config import fileConfig
from alembic import context
from sqlalchemy import engine_from_config, pool
from learning.entities import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
target_metadata = Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = os.environ["DB_URL"]
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
url=os.environ["ALEMBIC_DB_URL"],
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
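# Illustrative note (not part of the upstream file): both connection URLs are
# read from the environment, so a typical invocation would look like
#   DB_URL=postgresql://... ALEMBIC_DB_URL=postgresql://... alembic upgrade head
# where the exact URLs are placeholders.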
| []
| []
| [
"ALEMBIC_DB_URL",
"DB_URL"
]
| [] | ["ALEMBIC_DB_URL", "DB_URL"] | python | 2 | 0 | |
vendor/github.com/gofiber/fiber/v2/app.go | // ⚡️ Fiber is an Express inspired web framework written in Go with ☕️
// 🤖 Github Repository: https://github.com/gofiber/fiber
// 📌 API Documentation: https://docs.gofiber.io
// Package fiber
// Fiber is an Express inspired web framework built on top of Fasthttp,
// the fastest HTTP engine for Go. Designed to ease things up for fast
// development with zero memory allocation and performance in mind.
package fiber
import (
"bufio"
"crypto/tls"
"errors"
"fmt"
"net"
"net/http"
"net/http/httputil"
"os"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gofiber/fiber/v2/internal/colorable"
"github.com/gofiber/fiber/v2/internal/go-json"
"github.com/gofiber/fiber/v2/internal/isatty"
"github.com/gofiber/fiber/v2/utils"
"github.com/valyala/fasthttp"
)
// Version of current fiber package
const Version = "2.23.0"
// Handler defines a function to serve HTTP requests.
type Handler = func(*Ctx) error
// Map is a shortcut for map[string]interface{}, useful for JSON returns
type Map map[string]interface{}
// Storage interface for communicating with different database/key-value
// providers
type Storage interface {
// Get gets the value for the given key.
// It returns ErrNotFound if the storage does not contain the key.
Get(key string) ([]byte, error)
// Set stores the given value for the given key along with a
// time-to-live expiration value, 0 means live for ever
// Empty key or value will be ignored without an error.
Set(key string, val []byte, ttl time.Duration) error
// Delete deletes the value for the given key.
// It returns no error if the storage does not contain the key,
Delete(key string) error
// Reset resets the storage and delete all keys.
Reset() error
// Close closes the storage and will stop any running garbage
// collectors and open connections.
Close() error
}
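// Illustrative sketch (not part of the upstream source): a minimal in-memory
// Storage implementation. For brevity it ignores TTL expiry and returns
// (nil, nil) for missing keys instead of the ErrNotFound described above:
//
//	type memStorage struct{ m map[string][]byte }
//
//	func (s *memStorage) Get(key string) ([]byte, error) { return s.m[key], nil }
//	func (s *memStorage) Set(key string, val []byte, ttl time.Duration) error {
//		s.m[key] = val
//		return nil
//	}
//	func (s *memStorage) Delete(key string) error { delete(s.m, key); return nil }
//	func (s *memStorage) Reset() error            { s.m = map[string][]byte{}; return nil }
//	func (s *memStorage) Close() error            { return nil }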
// ErrorHandler defines a function that will process all errors
// returned from any handlers in the stack
// cfg := fiber.Config{}
// cfg.ErrorHandler = func(c *Ctx, err error) error {
// code := StatusInternalServerError
// if e, ok := err.(*Error); ok {
// code = e.Code
// }
// c.Set(HeaderContentType, MIMETextPlainCharsetUTF8)
// return c.Status(code).SendString(err.Error())
// }
// app := fiber.New(cfg)
type ErrorHandler = func(*Ctx, error) error
// Error represents an error that occurred while handling a request.
type Error struct {
Code int `json:"code"`
Message string `json:"message"`
}
// App denotes the Fiber application.
type App struct {
mutex sync.Mutex
// Route stack divided by HTTP methods
stack [][]*Route
// Route stack divided by HTTP methods and route prefixes
treeStack []map[string][]*Route
// contains the information if the route stack has been changed to build the optimized tree
routesRefreshed bool
// Amount of registered routes
routesCount uint32
// Amount of registered handlers
handlerCount uint32
// Ctx pool
pool sync.Pool
// Fasthttp server
server *fasthttp.Server
// App config
config Config
// Converts string to a byte slice
getBytes func(s string) (b []byte)
// Converts byte slice to a string
getString func(b []byte) string
// mount prefix -> error handler
errorHandlers map[string]ErrorHandler
}
// Config is a struct holding the server settings.
type Config struct {
// When set to true, this will spawn multiple Go processes listening on the same port.
//
// Default: false
Prefork bool `json:"prefork"`
// Enables the "Server: value" HTTP header.
//
// Default: ""
ServerHeader string `json:"server_header"`
// When set to true, the router treats "/foo" and "/foo/" as different.
// By default this is disabled and both "/foo" and "/foo/" will execute the same handler.
//
// Default: false
StrictRouting bool `json:"strict_routing"`
// When set to true, enables case sensitive routing.
// E.g. "/FoO" and "/foo" are treated as different routes.
// By default this is disabled and both "/FoO" and "/foo" will execute the same handler.
//
// Default: false
CaseSensitive bool `json:"case_sensitive"`
// When set to true, this relinquishes the 0-allocation promise in certain
// cases in order to access the handler values (e.g. request bodies) in an
	// immutable fashion so that these values are available even after you return
	// from the handler.
//
// Default: false
Immutable bool `json:"immutable"`
// When set to true, converts all encoded characters in the route back
// before setting the path for the context, so that the routing,
// the returning of the current url from the context `ctx.Path()`
// and the parameters `ctx.Params(%key%)` with decoded characters will work
//
// Default: false
UnescapePath bool `json:"unescape_path"`
// Enable or disable ETag header generation, since both weak and strong etags are generated
// using the same hashing method (CRC-32). Weak ETags are the default when enabled.
//
// Default: false
ETag bool `json:"etag"`
// Max body size that the server accepts.
// -1 will decline any body size
//
// Default: 4 * 1024 * 1024
BodyLimit int `json:"body_limit"`
// Maximum number of concurrent connections.
//
// Default: 256 * 1024
Concurrency int `json:"concurrency"`
// Views is the interface that wraps the Render function.
//
// Default: nil
Views Views `json:"-"`
// Views Layout is the global layout for all template render until override on Render function.
//
// Default: ""
ViewsLayout string `json:"views_layout"`
// The amount of time allowed to read the full request including body.
// It is reset after the request handler has returned.
// The connection's read deadline is reset when the connection opens.
//
// Default: unlimited
ReadTimeout time.Duration `json:"read_timeout"`
// The maximum duration before timing out writes of the response.
// It is reset after the request handler has returned.
//
// Default: unlimited
WriteTimeout time.Duration `json:"write_timeout"`
// The maximum amount of time to wait for the next request when keep-alive is enabled.
// If IdleTimeout is zero, the value of ReadTimeout is used.
//
// Default: unlimited
IdleTimeout time.Duration `json:"idle_timeout"`
// Per-connection buffer size for requests' reading.
// This also limits the maximum header size.
// Increase this buffer if your clients send multi-KB RequestURIs
// and/or multi-KB headers (for example, BIG cookies).
//
// Default: 4096
ReadBufferSize int `json:"read_buffer_size"`
// Per-connection buffer size for responses' writing.
//
// Default: 4096
WriteBufferSize int `json:"write_buffer_size"`
// CompressedFileSuffix adds suffix to the original file name and
// tries saving the resulting compressed file under the new file name.
//
// Default: ".fiber.gz"
CompressedFileSuffix string `json:"compressed_file_suffix"`
// ProxyHeader will enable c.IP() to return the value of the given header key
// By default c.IP() will return the Remote IP from the TCP connection
// This property can be useful if you are behind a load balancer: X-Forwarded-*
// NOTE: headers are easily spoofed and the detected IP addresses are unreliable.
//
// Default: ""
ProxyHeader string `json:"proxy_header"`
// GETOnly rejects all non-GET requests if set to true.
// This option is useful as anti-DoS protection for servers
// accepting only GET requests. The request size is limited
// by ReadBufferSize if GETOnly is set.
//
// Default: false
GETOnly bool `json:"get_only"`
// ErrorHandler is executed when an error is returned from fiber.Handler.
//
// Default: DefaultErrorHandler
ErrorHandler ErrorHandler `json:"-"`
// When set to true, disables keep-alive connections.
// The server will close incoming connections after sending the first response to client.
//
// Default: false
DisableKeepalive bool `json:"disable_keepalive"`
// When set to true, causes the default date header to be excluded from the response.
//
// Default: false
DisableDefaultDate bool `json:"disable_default_date"`
// When set to true, causes the default Content-Type header to be excluded from the response.
//
// Default: false
DisableDefaultContentType bool `json:"disable_default_content_type"`
// When set to true, disables header normalization.
// By default all header names are normalized: conteNT-tYPE -> Content-Type.
//
// Default: false
DisableHeaderNormalizing bool `json:"disable_header_normalizing"`
// When set to true, it will not print out the «Fiber» ASCII art and listening address.
//
// Default: false
DisableStartupMessage bool `json:"disable_startup_message"`
	// AppName allows setting a custom application name for the app.
	//
	// Default: ""
AppName string `json:"app_name"`
// StreamRequestBody enables request body streaming,
	// and calls the handler sooner when the given body is
	// larger than the current limit.
StreamRequestBody bool
// Will not pre parse Multipart Form data if set to true.
//
// This option is useful for servers that desire to treat
// multipart form data as a binary blob, or choose when to parse the data.
//
// Server pre parses multipart form data by default.
DisablePreParseMultipartForm bool
// Aggressively reduces memory usage at the cost of higher CPU usage
// if set to true.
//
// Try enabling this option only if the server consumes too much memory
// serving mostly idle keep-alive connections. This may reduce memory
// usage by more than 50%.
//
// Default: false
ReduceMemoryUsage bool `json:"reduce_memory_usage"`
// FEATURE: v2.3.x
// The router executes the same handler by default if StrictRouting or CaseSensitive is disabled.
// Enabling RedirectFixedPath will change this behaviour into a client redirect to the original route path.
// Using the status code 301 for GET requests and 308 for all other request methods.
//
// Default: false
// RedirectFixedPath bool
// When set by an external client of Fiber it will use the provided implementation of a
// JSONMarshal
//
// Allowing for flexibility in using another json library for encoding
// Default: json.Marshal
JSONEncoder utils.JSONMarshal `json:"-"`
// When set by an external client of Fiber it will use the provided implementation of a
// JSONUnmarshal
//
// Allowing for flexibility in using another json library for encoding
// Default: json.Unmarshal
JSONDecoder utils.JSONUnmarshal `json:"-"`
// Known networks are "tcp", "tcp4" (IPv4-only), "tcp6" (IPv6-only)
	// WARNING: When prefork is set to true, only "tcp4" and "tcp6" can be chosen.
//
// Default: NetworkTCP4
Network string
// If you find yourself behind some sort of proxy, like a load balancer,
// then certain header information may be sent to you using special X-Forwarded-* headers or the Forwarded header.
// For example, the Host HTTP header is usually used to return the requested host.
// But when you’re behind a proxy, the actual host may be stored in an X-Forwarded-Host header.
//
// If you are behind a proxy, you should enable TrustedProxyCheck to prevent header spoofing.
// If you enable EnableTrustedProxyCheck and leave TrustedProxies empty Fiber will skip
// all headers that could be spoofed.
// If request ip in TrustedProxies whitelist then:
// 1. c.Protocol() get value from X-Forwarded-Proto, X-Forwarded-Protocol, X-Forwarded-Ssl or X-Url-Scheme header
// 2. c.IP() get value from ProxyHeader header.
// 3. c.Hostname() get value from X-Forwarded-Host header
// But if request ip NOT in Trusted Proxies whitelist then:
	// 1. c.Protocol() WON'T get value from X-Forwarded-Proto, X-Forwarded-Protocol, X-Forwarded-Ssl or X-Url-Scheme header,
	// will return https in case when tls connection is handled by the app, or http otherwise
// 2. c.IP() WON'T get value from ProxyHeader header, will return RemoteIP() from fasthttp context
// 3. c.Hostname() WON'T get value from X-Forwarded-Host header, fasthttp.Request.URI().Host()
// will be used to get the hostname.
//
// Default: false
EnableTrustedProxyCheck bool `json:"enable_trusted_proxy_check"`
// Read EnableTrustedProxyCheck doc.
//
// Default: []string
TrustedProxies []string `json:"trusted_proxies"`
trustedProxiesMap map[string]struct{}
}
// Static defines configuration options when defining static assets.
type Static struct {
// When set to true, the server tries minimizing CPU usage by caching compressed files.
// This works differently than the github.com/gofiber/compression middleware.
// Optional. Default value false
Compress bool `json:"compress"`
// When set to true, enables byte range requests.
// Optional. Default value false
ByteRange bool `json:"byte_range"`
// When set to true, enables directory browsing.
// Optional. Default value false.
Browse bool `json:"browse"`
// The name of the index file for serving a directory.
// Optional. Default value "index.html".
Index string `json:"index"`
// Expiration duration for inactive file handlers.
// Use a negative time.Duration to disable it.
//
// Optional. Default value 10 * time.Second.
CacheDuration time.Duration `json:"cache_duration"`
// The value for the Cache-Control HTTP-header
// that is set on the file response. MaxAge is defined in seconds.
//
// Optional. Default value 0.
MaxAge int `json:"max_age"`
// Next defines a function to skip this middleware when returned true.
//
// Optional. Default: nil
Next func(c *Ctx) bool
}
// Default Config values
const (
DefaultBodyLimit = 4 * 1024 * 1024
DefaultConcurrency = 256 * 1024
DefaultReadBufferSize = 4096
DefaultWriteBufferSize = 4096
DefaultCompressedFileSuffix = ".fiber.gz"
)
// DefaultErrorHandler that process return errors from handlers
var DefaultErrorHandler = func(c *Ctx, err error) error {
code := StatusInternalServerError
if e, ok := err.(*Error); ok {
code = e.Code
}
c.Set(HeaderContentType, MIMETextPlainCharsetUTF8)
return c.Status(code).SendString(err.Error())
}
// New creates a new Fiber named instance.
// app := fiber.New()
// You can pass optional configuration options by passing a Config struct:
// app := fiber.New(fiber.Config{
// Prefork: true,
// ServerHeader: "Fiber",
// })
func New(config ...Config) *App {
// Create a new app
app := &App{
// Create router stack
stack: make([][]*Route, len(intMethod)),
treeStack: make([]map[string][]*Route, len(intMethod)),
// Create Ctx pool
pool: sync.Pool{
New: func() interface{} {
return new(Ctx)
},
},
// Create config
config: Config{},
getBytes: utils.UnsafeBytes,
getString: utils.UnsafeString,
errorHandlers: make(map[string]ErrorHandler),
}
// Override config if provided
if len(config) > 0 {
app.config = config[0]
}
if app.config.ETag {
if !IsChild() {
fmt.Println("[Warning] Config.ETag is deprecated since v2.0.6, please use 'middleware/etag'.")
}
}
// Override default values
if app.config.BodyLimit == 0 {
app.config.BodyLimit = DefaultBodyLimit
}
if app.config.Concurrency <= 0 {
app.config.Concurrency = DefaultConcurrency
}
if app.config.ReadBufferSize <= 0 {
app.config.ReadBufferSize = DefaultReadBufferSize
}
if app.config.WriteBufferSize <= 0 {
app.config.WriteBufferSize = DefaultWriteBufferSize
}
if app.config.CompressedFileSuffix == "" {
app.config.CompressedFileSuffix = DefaultCompressedFileSuffix
}
if app.config.Immutable {
app.getBytes, app.getString = getBytesImmutable, getStringImmutable
}
if app.config.ErrorHandler == nil {
app.config.ErrorHandler = DefaultErrorHandler
}
if app.config.JSONEncoder == nil {
app.config.JSONEncoder = json.Marshal
}
if app.config.JSONDecoder == nil {
app.config.JSONDecoder = json.Unmarshal
}
if app.config.Network == "" {
app.config.Network = NetworkTCP4
}
app.config.trustedProxiesMap = make(map[string]struct{}, len(app.config.TrustedProxies))
for _, ip := range app.config.TrustedProxies {
app.handleTrustedProxy(ip)
}
// Init app
app.init()
// Return app
return app
}
// handleTrustedProxy checks whether the given IP address is a CIDR range and adds it (or every address in the range) to the trustedProxiesMap
func (app *App) handleTrustedProxy(ipAddress string) {
	// Detect whether the IP address is a CIDR range
if strings.Contains(ipAddress, "/") {
// Parsing IP address
ip, ipnet, err := net.ParseCIDR(ipAddress)
if err != nil {
fmt.Printf("[Warning] IP range `%s` could not be parsed. \n", ipAddress)
return
}
// Iterates IP address which is between range
for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); utils.IncrementIPRange(ip) {
app.config.trustedProxiesMap[ip.String()] = struct{}{}
}
return
}
app.config.trustedProxiesMap[ipAddress] = struct{}{}
}
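// Illustrative configuration (not part of the upstream source) showing how the
// trusted-proxy handling above is typically driven; both single addresses and
// CIDR ranges are accepted, and ranges are expanded address-by-address:
//
//  app := fiber.New(fiber.Config{
//  	EnableTrustedProxyCheck: true,
//  	TrustedProxies:          []string{"10.0.0.1", "192.168.0.0/24"},
//  })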
// Mount attaches another app instance as a sub-router along a routing path.
// It's very useful to split up a large API into many independent routers and
// compose them as a single service using Mount. The fiber's error handler and
// any of the fiber's sub apps are added to the application's error handlers
// to be invoked on errors that happen within the prefix route.
func (app *App) Mount(prefix string, fiber *App) Router {
stack := fiber.Stack()
for m := range stack {
for r := range stack[m] {
route := app.copyRoute(stack[m][r])
app.addRoute(route.Method, app.addPrefixToRoute(prefix, route))
}
}
// Save the fiber's error handler and its sub apps
prefix = strings.TrimRight(prefix, "/")
if fiber.config.ErrorHandler != nil {
app.errorHandlers[prefix] = fiber.config.ErrorHandler
}
for mountedPrefixes, errHandler := range fiber.errorHandlers {
app.errorHandlers[prefix+mountedPrefixes] = errHandler
}
atomic.AddUint32(&app.handlerCount, fiber.handlerCount)
return app
}
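// Illustrative usage of Mount (mirroring the doc-comment style used elsewhere
// in this file; handler is a placeholder):
//
//  api := fiber.New()
//  api.Get("/list", handler)
//
//  app := fiber.New()
//  app.Mount("/api", api) // GET /api/list is now served by the sub app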
// Use registers a middleware route that will match requests
// with the provided prefix (which is optional and defaults to "/").
//
// app.Use(func(c *fiber.Ctx) error {
// return c.Next()
// })
// app.Use("/api", func(c *fiber.Ctx) error {
// return c.Next()
// })
// app.Use("/api", handler, func(c *fiber.Ctx) error {
// return c.Next()
// })
//
// This method will match all HTTP verbs: GET, POST, PUT, HEAD etc...
func (app *App) Use(args ...interface{}) Router {
var prefix string
var handlers []Handler
for i := 0; i < len(args); i++ {
switch arg := args[i].(type) {
case string:
prefix = arg
case Handler:
handlers = append(handlers, arg)
default:
panic(fmt.Sprintf("use: invalid handler %v\n", reflect.TypeOf(arg)))
}
}
app.register(methodUse, prefix, handlers...)
return app
}
// Get registers a route for GET methods that requests a representation
// of the specified resource. Requests using GET should only retrieve data.
func (app *App) Get(path string, handlers ...Handler) Router {
return app.Add(MethodHead, path, handlers...).Add(MethodGet, path, handlers...)
}
// Head registers a route for HEAD methods that asks for a response identical
// to that of a GET request, but without the response body.
func (app *App) Head(path string, handlers ...Handler) Router {
return app.Add(MethodHead, path, handlers...)
}
// Post registers a route for POST methods that is used to submit an entity to the
// specified resource, often causing a change in state or side effects on the server.
func (app *App) Post(path string, handlers ...Handler) Router {
return app.Add(MethodPost, path, handlers...)
}
// Put registers a route for PUT methods that replaces all current representations
// of the target resource with the request payload.
func (app *App) Put(path string, handlers ...Handler) Router {
return app.Add(MethodPut, path, handlers...)
}
// Delete registers a route for DELETE methods that deletes the specified resource.
func (app *App) Delete(path string, handlers ...Handler) Router {
return app.Add(MethodDelete, path, handlers...)
}
// Connect registers a route for CONNECT methods that establishes a tunnel to the
// server identified by the target resource.
func (app *App) Connect(path string, handlers ...Handler) Router {
return app.Add(MethodConnect, path, handlers...)
}
// Options registers a route for OPTIONS methods that is used to describe the
// communication options for the target resource.
func (app *App) Options(path string, handlers ...Handler) Router {
return app.Add(MethodOptions, path, handlers...)
}
// Trace registers a route for TRACE methods that performs a message loop-back
// test along the path to the target resource.
func (app *App) Trace(path string, handlers ...Handler) Router {
return app.Add(MethodTrace, path, handlers...)
}
// Patch registers a route for PATCH methods that is used to apply partial
// modifications to a resource.
func (app *App) Patch(path string, handlers ...Handler) Router {
return app.Add(MethodPatch, path, handlers...)
}
// Add allows you to specify a HTTP method to register a route
func (app *App) Add(method, path string, handlers ...Handler) Router {
return app.register(method, path, handlers...)
}
// Static will create a file server serving static files
func (app *App) Static(prefix, root string, config ...Static) Router {
return app.registerStatic(prefix, root, config...)
}
// All will register the handler on all HTTP methods
func (app *App) All(path string, handlers ...Handler) Router {
for _, method := range intMethod {
_ = app.Add(method, path, handlers...)
}
return app
}
// Group is used for Routes with common prefix to define a new sub-router with optional middleware.
// api := app.Group("/api")
// api.Get("/users", handler)
func (app *App) Group(prefix string, handlers ...Handler) Router {
if len(handlers) > 0 {
app.register(methodUse, prefix, handlers...)
}
return &Group{prefix: prefix, app: app}
}
// Error makes it compatible with the `error` interface.
func (e *Error) Error() string {
return e.Message
}
// NewError creates a new Error instance with an optional message
func NewError(code int, message ...string) *Error {
e := &Error{
Code: code,
}
if len(message) > 0 {
e.Message = message[0]
} else {
e.Message = utils.StatusMessage(code)
}
return e
}
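// Illustrative usage (hypothetical route): returning a *Error lets the
// configured ErrorHandler pick up the status code:
//
//  app.Get("/forbidden", func(c *fiber.Ctx) error {
//  	return fiber.NewError(fiber.StatusForbidden, "custom message")
//  })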
// Listener can be used to pass a custom listener.
func (app *App) Listener(ln net.Listener) error {
// Prefork is supported for custom listeners
if app.config.Prefork {
addr, tlsConfig := lnMetadata(app.config.Network, ln)
return app.prefork(app.config.Network, addr, tlsConfig)
}
// prepare the server for the start
app.startupProcess()
// Print startup message
if !app.config.DisableStartupMessage {
app.startupMessage(ln.Addr().String(), getTlsConfig(ln) != nil, "")
}
// Start listening
return app.server.Serve(ln)
}
// Listen serves HTTP requests from the given addr.
//
// app.Listen(":8080")
// app.Listen("127.0.0.1:8080")
func (app *App) Listen(addr string) error {
// Start prefork
if app.config.Prefork {
return app.prefork(app.config.Network, addr, nil)
}
// Setup listener
ln, err := net.Listen(app.config.Network, addr)
if err != nil {
return err
}
// prepare the server for the start
app.startupProcess()
// Print startup message
if !app.config.DisableStartupMessage {
app.startupMessage(ln.Addr().String(), false, "")
}
// Start listening
return app.server.Serve(ln)
}
// ListenTLS serves HTTPs requests from the given addr.
// certFile and keyFile are the paths to TLS certificate and key file.
// app.ListenTLS(":8080", "./cert.pem", "./cert.key")
// app.ListenTLS(":8080", "./cert.pem", "./cert.key")
func (app *App) ListenTLS(addr, certFile, keyFile string) error {
// Check for valid cert/key path
if len(certFile) == 0 || len(keyFile) == 0 {
return errors.New("tls: provide a valid cert or key path")
}
// Prefork is supported
if app.config.Prefork {
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
return fmt.Errorf("tls: cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err)
}
config := &tls.Config{
MinVersion: tls.VersionTLS12,
PreferServerCipherSuites: true,
Certificates: []tls.Certificate{
cert,
},
}
return app.prefork(app.config.Network, addr, config)
}
// Setup listener
ln, err := net.Listen(app.config.Network, addr)
if err != nil {
return err
}
// prepare the server for the start
app.startupProcess()
// Print startup message
if !app.config.DisableStartupMessage {
app.startupMessage(ln.Addr().String(), true, "")
}
// Start listening
return app.server.ServeTLS(ln, certFile, keyFile)
}
// Config returns the app config as value ( read-only ).
func (app *App) Config() Config {
return app.config
}
// Handler returns the server handler.
func (app *App) Handler() fasthttp.RequestHandler {
// prepare the server for the start
app.startupProcess()
return app.handler
}
// Stack returns the raw router stack.
func (app *App) Stack() [][]*Route {
return app.stack
}
// Shutdown gracefully shuts down the server without interrupting any active connections.
// Shutdown works by first closing all open listeners and then waiting indefinitely for all connections to return to idle and then shut down.
//
// Make sure the program doesn't exit and waits instead for Shutdown to return.
//
// Shutdown does not close keepalive connections, so it's recommended to set ReadTimeout to something other than 0.
func (app *App) Shutdown() error {
app.mutex.Lock()
defer app.mutex.Unlock()
if app.server == nil {
return fmt.Errorf("shutdown: server is not running")
}
return app.server.Shutdown()
}
// Server returns the underlying fasthttp server
func (app *App) Server() *fasthttp.Server {
return app.server
}
// Test is used for internal debugging by passing a *http.Request.
// Timeout is optional and defaults to 1s, -1 will disable it completely.
func (app *App) Test(req *http.Request, msTimeout ...int) (resp *http.Response, err error) {
// Set timeout
timeout := 1000
if len(msTimeout) > 0 {
timeout = msTimeout[0]
}
// Add Content-Length if not provided with body
if req.Body != http.NoBody && req.Header.Get(HeaderContentLength) == "" {
req.Header.Add(HeaderContentLength, strconv.FormatInt(req.ContentLength, 10))
}
// Dump raw http request
dump, err := httputil.DumpRequest(req, true)
if err != nil {
return nil, err
}
// Create test connection
conn := new(testConn)
// Write raw http request
if _, err = conn.r.Write(dump); err != nil {
return nil, err
}
// prepare the server for the start
app.startupProcess()
// Serve conn to server
channel := make(chan error)
go func() {
channel <- app.server.ServeConn(conn)
}()
// Wait for callback
if timeout >= 0 {
// With timeout
select {
case err = <-channel:
case <-time.After(time.Duration(timeout) * time.Millisecond):
return nil, fmt.Errorf("test: timeout error %vms", timeout)
}
} else {
// Without timeout
err = <-channel
}
// Check for errors
if err != nil && err != fasthttp.ErrGetOnly {
return nil, err
}
// Read response
buffer := bufio.NewReader(&conn.w)
// Convert raw http response to *http.Response
return http.ReadResponse(buffer, req)
}
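// Illustrative usage of Test (assumes net/http/httptest from the standard
// library; -1 disables the timeout):
//
//  req := httptest.NewRequest(fiber.MethodGet, "/", nil)
//  resp, err := app.Test(req, -1)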
type disableLogger struct{}
func (dl *disableLogger) Printf(_ string, _ ...interface{}) {
// fmt.Println(fmt.Sprintf(format, args...))
}
func (app *App) init() *App {
// lock application
app.mutex.Lock()
// Only load templates if a view engine is specified
if app.config.Views != nil {
if err := app.config.Views.Load(); err != nil {
fmt.Printf("views: %v\n", err)
}
}
// create fasthttp server
app.server = &fasthttp.Server{
Logger: &disableLogger{},
LogAllErrors: false,
ErrorHandler: app.serverErrorHandler,
}
// fasthttp server settings
app.server.Handler = app.handler
app.server.Name = app.config.ServerHeader
app.server.Concurrency = app.config.Concurrency
app.server.NoDefaultDate = app.config.DisableDefaultDate
app.server.NoDefaultContentType = app.config.DisableDefaultContentType
app.server.DisableHeaderNamesNormalizing = app.config.DisableHeaderNormalizing
app.server.DisableKeepalive = app.config.DisableKeepalive
app.server.MaxRequestBodySize = app.config.BodyLimit
app.server.NoDefaultServerHeader = app.config.ServerHeader == ""
app.server.ReadTimeout = app.config.ReadTimeout
app.server.WriteTimeout = app.config.WriteTimeout
app.server.IdleTimeout = app.config.IdleTimeout
app.server.ReadBufferSize = app.config.ReadBufferSize
app.server.WriteBufferSize = app.config.WriteBufferSize
app.server.GetOnly = app.config.GETOnly
app.server.ReduceMemoryUsage = app.config.ReduceMemoryUsage
app.server.StreamRequestBody = app.config.StreamRequestBody
app.server.DisablePreParseMultipartForm = app.config.DisablePreParseMultipartForm
// unlock application
app.mutex.Unlock()
return app
}
// ErrorHandler is the application's method in charge of finding the
// appropriate handler for the given request. It searches any mounted
// sub fibers by their prefixes and if it finds a match, it uses that
// error handler. Otherwise it uses the configured error handler for
// the app, which if not set is the DefaultErrorHandler.
func (app *App) ErrorHandler(ctx *Ctx, err error) error {
var (
mountedErrHandler ErrorHandler
mountedPrefixParts int
)
for prefix, errHandler := range app.errorHandlers {
if strings.HasPrefix(ctx.path, prefix) {
parts := len(strings.Split(prefix, "/"))
if mountedPrefixParts <= parts {
mountedErrHandler = errHandler
mountedPrefixParts = parts
}
}
}
if mountedErrHandler != nil {
return mountedErrHandler(ctx, err)
}
return app.config.ErrorHandler(ctx, err)
}
// serverErrorHandler is a wrapper around the application's error handler method
// used for the fasthttp server configuration. It maps a set of fasthttp errors to fiber
// errors before calling the application's error handler method.
func (app *App) serverErrorHandler(fctx *fasthttp.RequestCtx, err error) {
c := app.AcquireCtx(fctx)
if _, ok := err.(*fasthttp.ErrSmallBuffer); ok {
err = ErrRequestHeaderFieldsTooLarge
} else if netErr, ok := err.(*net.OpError); ok && netErr.Timeout() {
err = ErrRequestTimeout
} else if err == fasthttp.ErrBodyTooLarge {
err = ErrRequestEntityTooLarge
} else if err == fasthttp.ErrGetOnly {
err = ErrMethodNotAllowed
} else if strings.Contains(err.Error(), "timeout") {
err = ErrRequestTimeout
} else {
err = ErrBadRequest
}
if catch := app.ErrorHandler(c, err); catch != nil {
_ = c.SendStatus(StatusInternalServerError)
}
app.ReleaseCtx(c)
}
// startupProcess Is the method which executes all the necessary processes just before the start of the server.
func (app *App) startupProcess() *App {
app.mutex.Lock()
app.buildTree()
app.mutex.Unlock()
return app
}
// startupMessage prepares the startup message with the handler number, port, address and other information
func (app *App) startupMessage(addr string, tls bool, pids string) {
// ignore child processes
if IsChild() {
return
}
const (
cBlack = "\u001b[90m"
// cRed = "\u001b[91m"
cCyan = "\u001b[96m"
// cGreen = "\u001b[92m"
// cYellow = "\u001b[93m"
// cBlue = "\u001b[94m"
// cMagenta = "\u001b[95m"
// cWhite = "\u001b[97m"
cReset = "\u001b[0m"
)
value := func(s string, width int) string {
pad := width - len(s)
str := ""
for i := 0; i < pad; i++ {
str += "."
}
if s == "Disabled" {
str += " " + s
} else {
str += fmt.Sprintf(" %s%s%s", cCyan, s, cBlack)
}
return str
}
center := func(s string, width int) string {
pad := strconv.Itoa((width - len(s)) / 2)
str := fmt.Sprintf("%"+pad+"s", " ")
str += s
str += fmt.Sprintf("%"+pad+"s", " ")
if len(str) < width {
str += " "
}
return str
}
centerValue := func(s string, width int) string {
pad := strconv.Itoa((width - len(s)) / 2)
str := fmt.Sprintf("%"+pad+"s", " ")
str += fmt.Sprintf("%s%s%s", cCyan, s, cBlack)
str += fmt.Sprintf("%"+pad+"s", " ")
if len(str)-10 < width {
str += " "
}
return str
}
pad := func(s string, width int) (str string) {
toAdd := width - len(s)
str += s
for i := 0; i < toAdd; i++ {
str += " "
}
return
}
host, port := parseAddr(addr)
if host == "" {
if app.config.Network == NetworkTCP6 {
host = "[::1]"
} else {
host = "0.0.0.0"
}
}
scheme := "http"
if tls {
scheme = "https"
}
isPrefork := "Disabled"
if app.config.Prefork {
isPrefork = "Enabled"
}
procs := strconv.Itoa(runtime.GOMAXPROCS(0))
if !app.config.Prefork {
procs = "1"
}
mainLogo := cBlack + " ┌───────────────────────────────────────────────────┐\n"
if app.config.AppName != "" {
mainLogo += " │ " + centerValue(app.config.AppName, 49) + " │\n"
}
mainLogo += " │ " + centerValue(" Fiber v"+Version, 49) + " │\n"
if host == "0.0.0.0" {
mainLogo +=
" │ " + center(fmt.Sprintf("%s://127.0.0.1:%s", scheme, port), 49) + " │\n" +
" │ " + center(fmt.Sprintf("(bound on host 0.0.0.0 and port %s)", port), 49) + " │\n"
} else {
mainLogo +=
" │ " + center(fmt.Sprintf("%s://%s:%s", scheme, host, port), 49) + " │\n"
}
mainLogo += fmt.Sprintf(
" │ │\n"+
" │ Handlers %s Processes %s │\n"+
" │ Prefork .%s PID ....%s │\n"+
" └───────────────────────────────────────────────────┘"+
cReset,
value(strconv.Itoa(int(app.handlerCount)), 14), value(procs, 12),
value(isPrefork, 14), value(strconv.Itoa(os.Getpid()), 14),
)
var childPidsLogo string
if app.config.Prefork {
var childPidsTemplate string
childPidsTemplate += "%s"
childPidsTemplate += " ┌───────────────────────────────────────────────────┐\n%s"
childPidsTemplate += " └───────────────────────────────────────────────────┘"
childPidsTemplate += "%s"
newLine := " │ %s%s%s │"
// Turn the `pids` variable (in the form ",a,b,c,d,e,f,etc") into a slice of PIDs
var pidSlice []string
for _, v := range strings.Split(pids, ",") {
if v != "" {
pidSlice = append(pidSlice, v)
}
}
var lines []string
thisLine := "Child PIDs ... "
var itemsOnThisLine []string
addLine := func() {
lines = append(lines,
fmt.Sprintf(
newLine,
cBlack,
thisLine+cCyan+pad(strings.Join(itemsOnThisLine, ", "), 49-len(thisLine)),
cBlack,
),
)
}
for _, pid := range pidSlice {
if len(thisLine+strings.Join(append(itemsOnThisLine, pid), ", ")) > 49 {
addLine()
thisLine = ""
itemsOnThisLine = []string{pid}
} else {
itemsOnThisLine = append(itemsOnThisLine, pid)
}
}
// Add left over items to their own line
if len(itemsOnThisLine) != 0 {
addLine()
}
// Form logo
childPidsLogo = fmt.Sprintf(childPidsTemplate,
cBlack,
strings.Join(lines, "\n")+"\n",
cReset,
)
}
// Combine both the child PID logo and the main Fiber logo
// Pad the shorter logo to the length of the longer one
splitMainLogo := strings.Split(mainLogo, "\n")
splitChildPidsLogo := strings.Split(childPidsLogo, "\n")
mainLen := len(splitMainLogo)
childLen := len(splitChildPidsLogo)
if mainLen > childLen {
diff := mainLen - childLen
for i := 0; i < diff; i++ {
splitChildPidsLogo = append(splitChildPidsLogo, "")
}
} else {
diff := childLen - mainLen
for i := 0; i < diff; i++ {
splitMainLogo = append(splitMainLogo, "")
}
}
// Combine the two logos, line by line
output := "\n"
for i := range splitMainLogo {
output += cBlack + splitMainLogo[i] + " " + splitChildPidsLogo[i] + "\n"
}
out := colorable.NewColorableStdout()
if os.Getenv("TERM") == "dumb" || (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) {
out = colorable.NewNonColorable(os.Stdout)
}
_, _ = fmt.Fprintln(out, output)
}
| [
"\"TERM\""
]
| []
| [
"TERM"
]
| [] | ["TERM"] | go | 1 | 0 | |
check.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from core import urlproc
from core import fileproc
NEWLINE_ESCAPED = "%0A"
def clone_repo(git_path):
"""
clone and name a git repository.
"""
base_path = os.path.basename(git_path)
os.system("git clone " + git_path + " " + base_path)
return base_path
def del_repo(base_path):
"""
delete repository.
"""
os.system("rm -R -f " + base_path)
return True
def white_listed(url, white_listed_urls, white_listed_patterns):
"""
    Check whether a URL should be ignored because it is white-listed, either by exact URL or by matching a pattern.
"""
# check white listed urls
if url in white_listed_urls:
return True
    # check white listed patterns
    for pattern in white_listed_patterns:
        if pattern in url:
            return True
# default return
return False
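# Illustrative usage with made-up values: a URL is skipped on an exact match
# or when any pattern is a substring of it, e.g.
#   white_listed("https://example.com/a", ["https://example.com/a"], [])  -> True
#   white_listed("https://example.com/a", [], ["example.com"])            -> True
#   white_listed("https://example.com/a", [], [])                         -> False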
def check_repo(file_paths, print_all, white_listed_urls, white_listed_patterns):
"""
check all urls extracted from all files in a repository.
"""
# loop files
results = []
for file in file_paths:
# collect links from each file
urls = fileproc.collect_links_from_file(file)
        # eliminate white-listed urls and urls matching white-listed patterns
if len(white_listed_urls) > 0 or len(white_listed_patterns) > 0:
urls = [url for url in urls
if not white_listed(url,
white_listed_urls,
white_listed_patterns)]
# if some links are found, check them
if urls != []:
print("\n", file, "\n", "-" * len(file))
file_results = urlproc.check_urls(file, urls)
results.append(f"#### {file}{NEWLINE_ESCAPED}{file_results}")
# if no urls are found, mention it if required
else:
if print_all == "True":
print("\n", file, "\n", "-" * len(file))
print("No urls found.")
output = f"{NEWLINE_ESCAPED}{NEWLINE_ESCAPED}".join(results)
os.system(f"echo \"::set-output name=urls::{output}\"")
if __name__ == "__main__":
# read input variables
git_path = os.getenv("INPUT_GIT_PATH", "")
file_types = os.getenv("INPUT_FILE_TYPES", "").split(",")
print_all = os.getenv("INPUT_PRINT_ALL", "")
white_listed_urls = os.getenv("INPUT_WHITE_LISTED_URLS", "").split(",")
white_listed_patterns = os.getenv(
"INPUT_WHITE_LISTED_PATTERNS", "").split(",")
    # resolve the local repo directory name from the git path
base_path = os.path.basename(git_path)
# get all file paths
file_paths = fileproc.get_file_paths(base_path, file_types)
# check repo urls
check_repo(file_paths, print_all, white_listed_urls, white_listed_patterns)
# delete repo when done
deletion_status = del_repo(base_path)
print("Finished")
| []
| []
| [
"INPUT_WHITE_LISTED_PATTERNS",
"INPUT_PRINT_ALL",
"INPUT_GIT_PATH",
"INPUT_FILE_TYPES",
"INPUT_WHITE_LISTED_URLS"
]
| [] | ["INPUT_WHITE_LISTED_PATTERNS", "INPUT_PRINT_ALL", "INPUT_GIT_PATH", "INPUT_FILE_TYPES", "INPUT_WHITE_LISTED_URLS"] | python | 5 | 0 | |
vendor/github.com/hashicorp/terraform/builtin/providers/aws/auth_helpers.go | package aws
import (
"errors"
"fmt"
"log"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
awsCredentials "github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/sts"
"github.com/hashicorp/errwrap"
"github.com/hashicorp/go-cleanhttp"
)
func GetAccountInfo(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) (string, string, error) {
// If we have creds from instance profile, we can use metadata API
if authProviderName == ec2rolecreds.ProviderName {
log.Println("[DEBUG] Trying to get account ID via AWS Metadata API")
cfg := &aws.Config{}
setOptionalEndpoint(cfg)
sess, err := session.NewSession(cfg)
if err != nil {
return "", "", errwrap.Wrapf("Error creating AWS session: {{err}}", err)
}
metadataClient := ec2metadata.New(sess)
info, err := metadataClient.IAMInfo()
if err != nil {
// This can be triggered when no IAM Role is assigned
// or AWS just happens to return invalid response
return "", "", fmt.Errorf("Failed getting EC2 IAM info: %s", err)
}
return parseAccountInfoFromArn(info.InstanceProfileArn)
}
// Then try IAM GetUser
log.Println("[DEBUG] Trying to get account ID via iam:GetUser")
outUser, err := iamconn.GetUser(nil)
if err == nil {
return parseAccountInfoFromArn(*outUser.User.Arn)
}
awsErr, ok := err.(awserr.Error)
// AccessDenied and ValidationError can be raised
// if credentials belong to federated profile, so we ignore these
if !ok || (awsErr.Code() != "AccessDenied" && awsErr.Code() != "ValidationError") {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:GetUser': %s", err)
}
log.Printf("[DEBUG] Getting account ID via iam:GetUser failed: %s", err)
// Then try STS GetCallerIdentity
log.Println("[DEBUG] Trying to get account ID via sts:GetCallerIdentity")
outCallerIdentity, err := stsconn.GetCallerIdentity(&sts.GetCallerIdentityInput{})
if err == nil {
return parseAccountInfoFromArn(*outCallerIdentity.Arn)
}
log.Printf("[DEBUG] Getting account ID via sts:GetCallerIdentity failed: %s", err)
// Then try IAM ListRoles
log.Println("[DEBUG] Trying to get account ID via iam:ListRoles")
outRoles, err := iamconn.ListRoles(&iam.ListRolesInput{
MaxItems: aws.Int64(int64(1)),
})
if err != nil {
return "", "", fmt.Errorf("Failed getting account ID via 'iam:ListRoles': %s", err)
}
if len(outRoles.Roles) < 1 {
return "", "", errors.New("Failed getting account ID via 'iam:ListRoles': No roles available")
}
return parseAccountInfoFromArn(*outRoles.Roles[0].Arn)
}
func parseAccountInfoFromArn(arn string) (string, string, error) {
parts := strings.Split(arn, ":")
if len(parts) < 5 {
return "", "", fmt.Errorf("Unable to parse ID from invalid ARN: %q", arn)
}
return parts[1], parts[4], nil
}
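// Worked example (illustrative): for the ARN
//
//	arn:aws:iam::123456789012:user/alice
//
// strings.Split on ":" yields parts[1] == "aws" (the partition) and
// parts[4] == "123456789012" (the account ID), which are the two values
// returned above.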
// This function is responsible for reading credentials from the
// environment in the case that they're not explicitly specified
// in the Terraform configuration.
func GetCredentials(c *Config) (*awsCredentials.Credentials, error) {
	// build a chain provider, lazy-evaluated by aws-sdk
providers := []awsCredentials.Provider{
&awsCredentials.StaticProvider{Value: awsCredentials.Value{
AccessKeyID: c.AccessKey,
SecretAccessKey: c.SecretKey,
SessionToken: c.Token,
}},
&awsCredentials.EnvProvider{},
&awsCredentials.SharedCredentialsProvider{
Filename: c.CredsFilename,
Profile: c.Profile,
},
}
// Build isolated HTTP client to avoid issues with globally-shared settings
client := cleanhttp.DefaultClient()
// Keep the timeout low as we don't want to wait in non-EC2 environments
client.Timeout = 100 * time.Millisecond
cfg := &aws.Config{
HTTPClient: client,
}
usedEndpoint := setOptionalEndpoint(cfg)
if !c.SkipMetadataApiCheck {
// Real AWS should reply to a simple metadata request.
// We check it actually does to ensure something else didn't just
// happen to be listening on the same IP:Port
metadataClient := ec2metadata.New(session.New(cfg))
if metadataClient.Available() {
providers = append(providers, &ec2rolecreds.EC2RoleProvider{
Client: metadataClient,
})
log.Print("[INFO] AWS EC2 instance detected via default metadata" +
" API endpoint, EC2RoleProvider added to the auth chain")
} else {
if usedEndpoint == "" {
usedEndpoint = "default location"
}
log.Printf("[INFO] Ignoring AWS metadata API endpoint at %s "+
"as it doesn't return any instance-id", usedEndpoint)
}
}
// This is the "normal" flow (i.e. not assuming a role)
if c.AssumeRoleARN == "" {
return awsCredentials.NewChainCredentials(providers), nil
}
	// Otherwise we need to construct an STS client with the main credentials, and verify
// that we can assume the defined role.
log.Printf("[INFO] Attempting to AssumeRole %s (SessionName: %q, ExternalId: %q, Policy: %q)",
c.AssumeRoleARN, c.AssumeRoleSessionName, c.AssumeRoleExternalID, c.AssumeRolePolicy)
creds := awsCredentials.NewChainCredentials(providers)
cp, err := creds.Get()
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
return nil, errors.New(`No valid credential sources found for AWS Provider.
Please see https://terraform.io/docs/providers/aws/index.html for more information on
providing credentials for the AWS Provider`)
}
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
}
log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName)
awsConfig := &aws.Config{
Credentials: creds,
Region: aws.String(c.Region),
MaxRetries: aws.Int(c.MaxRetries),
HTTPClient: cleanhttp.DefaultClient(),
S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle),
}
stsclient := sts.New(session.New(awsConfig))
assumeRoleProvider := &stscreds.AssumeRoleProvider{
Client: stsclient,
RoleARN: c.AssumeRoleARN,
}
if c.AssumeRoleSessionName != "" {
assumeRoleProvider.RoleSessionName = c.AssumeRoleSessionName
}
if c.AssumeRoleExternalID != "" {
assumeRoleProvider.ExternalID = aws.String(c.AssumeRoleExternalID)
}
if c.AssumeRolePolicy != "" {
assumeRoleProvider.Policy = aws.String(c.AssumeRolePolicy)
}
providers = []awsCredentials.Provider{assumeRoleProvider}
assumeRoleCreds := awsCredentials.NewChainCredentials(providers)
_, err = assumeRoleCreds.Get()
if err != nil {
if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NoCredentialProviders" {
return nil, fmt.Errorf("The role %q cannot be assumed.\n\n"+
" There are a number of possible causes of this - the most common are:\n"+
" * The credentials used in order to assume the role are invalid\n"+
" * The credentials do not have appropriate permission to assume the role\n"+
" * The role ARN is not valid",
c.AssumeRoleARN)
}
return nil, fmt.Errorf("Error loading credentials for AWS Provider: %s", err)
}
return assumeRoleCreds, nil
}
func setOptionalEndpoint(cfg *aws.Config) string {
endpoint := os.Getenv("AWS_METADATA_URL")
if endpoint != "" {
log.Printf("[INFO] Setting custom metadata endpoint: %q", endpoint)
cfg.Endpoint = aws.String(endpoint)
return endpoint
}
return ""
}
| [
"\"AWS_METADATA_URL\""
]
| []
| [
"AWS_METADATA_URL"
]
| [] | ["AWS_METADATA_URL"] | go | 1 | 0 | |
common/gobuild.py.launcher.go | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"os"
"strings"
)
func main() {
// debugging
var debug = os.Getenv("OW_DEBUG") != ""
if debug {
filename := os.Getenv("OW_DEBUG")
f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err == nil {
log.SetOutput(f)
defer f.Close()
}
log.Printf("ACTION ENV: %v", os.Environ())
}
// assign the main function
type Action func(event map[string]interface{}) map[string]interface{}
var action Action
action = Main
// input
out := os.NewFile(3, "pipe")
defer out.Close()
reader := bufio.NewReader(os.Stdin)
// read-eval-print loop
if debug {
log.Println("started")
}
for {
// read one line
inbuf, err := reader.ReadBytes('\n')
if err != nil {
if err != io.EOF {
log.Println(err)
}
break
}
if debug {
log.Printf(">>>'%s'>>>", inbuf)
}
// parse one line
var input map[string]interface{}
err = json.Unmarshal(inbuf, &input)
if err != nil {
log.Println(err.Error())
fmt.Fprintf(out, "{ error: %q}\n", err.Error())
continue
}
if debug {
log.Printf("%v\n", input)
}
// set environment variables
for k, v := range input {
if k == "value" {
continue
}
if s, ok := v.(string); ok {
os.Setenv("__OW_"+strings.ToUpper(k), s)
}
}
// get payload if not empty
var payload map[string]interface{}
if value, ok := input["value"].(map[string]interface{}); ok {
payload = value
}
// process the request
result := action(payload)
// encode the answer
output, err := json.Marshal(&result)
if err != nil {
log.Println(err.Error())
fmt.Fprintf(out, "{ error: %q}\n", err.Error())
continue
}
output = bytes.Replace(output, []byte("\n"), []byte(""), -1)
if debug {
log.Printf("'<<<%s'<<<", output)
}
fmt.Fprintf(out, "%s\n", output)
}
}
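// Illustrative protocol exchange with made-up values: each request arrives as
// one JSON line on stdin, every top-level key except "value" is exported as a
// __OW_* environment variable, and the single-line JSON response is written to
// file descriptor 3:
//
//	stdin: {"value":{"name":"world"},"api_key":"abc"}   // sets __OW_API_KEY=abc
//	fd 3:  {"greeting":"Hello, world"}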
| [
"\"OW_DEBUG\"",
"\"OW_DEBUG\""
]
| []
| [
"OW_DEBUG"
]
| [] | ["OW_DEBUG"] | go | 1 | 0 | |
holobot/extensions/moderation/repositories/__init__.py | from .ilog_settings_repository import ILogSettingsRepository
from .imutes_repository import IMutesRepository
from .ipermission_repository import IPermissionRepository
from .iwarn_repository import IWarnRepository
from .iwarn_settings_repository import IWarnSettingsRepository
from .log_settings_repository import LogSettingsRepository
from .mutes_repository import MutesRepository
from .permission_repository import PermissionRepository
from .warn_repository import WarnRepository
from .warn_settings_repository import WarnSettingsRepository
| []
| []
| []
| [] | [] | python | null | null | null |
cmd/rmrebuttal/rmrebuttal.go | package main
import (
"encoding/csv"
"fmt"
"io"
"os"
"regexp"
"strconv"
"strings"
"time"
)
// mergeCSVs gets columns from results/prefix_top_I.csv files, I={n1, n2, ..., nN}, and saves them to ofn.
// Prefix is the project name (like kubernetes or prometheus); it is taken from the PG_DB env variable.
func mergeCSVs(stat, cols, ns, rowRegexp, ofn string) error {
// Static columns do not depend on N; they occupy the leftmost positions.
nStatic, err := strconv.Atoi(stat)
if err != nil {
return err
}
// Debug/verbose mode
debug := os.Getenv("DEBUG") != ""
// Get filename prefix
prefix := os.Getenv("PG_DB")
if prefix == "" || prefix == "gha" {
prefix = "kubernetes"
}
// Row regexp handle
ary := strings.Split(rowRegexp, ";;;")
lAry := len(ary)
if lAry > 2 || (lAry == 1 && rowRegexp != "") {
return fmt.Errorf("'%s' should be einter empty or in 'colname;;;regexp' format", rowRegexp)
}
var (
reColumn *string
colRegexp *regexp.Regexp
)
if lAry == 2 {
if ary[0] != "" && ary[1] != "" {
colRegexp = regexp.MustCompile(ary[1])
reColumn = &ary[0]
}
}
// column names set
colsMap := make(map[string]struct{})
// Column number - to be able to detect if this is a static column
colNum := make(map[string]int)
colsAry := strings.Split(cols, ";")
for i, col := range colsAry {
if col == "" {
return fmt.Errorf("empty column definition in '%s'", cols)
}
colsMap[col] = struct{}{}
colNum[col] = i
}
lColsAry := len(colsAry)
// No dynamic columns
if nStatic >= lColsAry {
return fmt.Errorf("no dynamic columns, all columns: %d, static columns: %d", lColsAry, nStatic)
}
// n values set
nMap := make(map[int]struct{})
nAry := strings.Split(ns, ";")
for _, col := range nAry {
iCol, err := strconv.Atoi(col)
if err != nil {
return err
}
nMap[iCol] = struct{}{}
}
// Column name mappings, column types and formats
// "colName1,newName1,type1,fmt1;...;colNameN,newNameN,typeN,fmtN"
// colNameI - required, column name to apply the mapping to
// newNameI - new name for the column, optional; it can contain '%s', which is replaced with N if the column is dynamic
// typeI - type of the column (to apply the format): can be n (numeric) or d (datetime), optional
// fmtI - format of the column (if a type is given): a Sprintf format for n, or a Go reference-time layout for d, optional
// fmtI for an "n" (numeric) column can be, for example, "%.1f%%".
// fmtI for a "d" (datetime) column can be, for example, "2006-01-02" (Go reference-time layout).
// I = {1,2,...,N}
// Example: COLFMT="release,Release,,;date_from,Date,d,2006-01-02;top_commits_perc,Percent of top %s committers commits,n,%.1f%%"
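// Illustrative outcomes (assumed inputs): with type "n" and fmt "%.1f%%",
// "12.345" renders as "12.3%"; with type "d" and fmt "2006-01-02",
// "2021-05-01T00:00:00Z" renders as "2021-05-01".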
colNameMap := make(map[string]string)
colFmtMap := make(map[string]func(string) string)
colFmt := os.Getenv("COLFMT")
if colFmt != "" {
colFmtAry := strings.Split(colFmt, ";")
for i, data := range colFmtAry {
if data == "" {
return fmt.Errorf("empty column format definition: '%s'", colFmt)
}
ary := strings.Split(data, ",")
lAry := len(ary)
if lAry != 4 {
return fmt.Errorf("#%d column format must contain 4 values: '%s', all: '%s'", i, data, colFmt)
}
col := ary[0]
if col == "" {
return fmt.Errorf("#%d column format must contain column name: '%s', all: '%s'", i, data, colFmt)
}
applied := false
if ary[1] != "" && ary[1] != col {
colNameMap[col] = ary[1]
applied = true
}
if ary[2] != "" && ary[3] != "" {
typ := ary[2]
form := ary[3]
switch typ {
case "n":
colFmtMap[col] = func(in string) string {
if in == "" {
return ""
}
fl, err := strconv.ParseFloat(in, 64)
if err != nil {
fmt.Printf("Cannot parse number '%s'\n", in)
return ""
}
if debug {
fmt.Printf("n_func: form=%s in=%s, fl=%f, out=%s\n", form, in, fl, fmt.Sprintf(form, fl))
}
return fmt.Sprintf(form, fl)
}
applied = true
case "d":
colFmtMap[col] = func(in string) string {
//tm, e := time.Parse("2006-01-02T15:04:05Z", in)
tm, err := time.Parse(time.RFC3339, in)
if err != nil {
fmt.Printf("Cannot parse datetime '%s'\n", in)
return ""
}
if debug {
fmt.Printf("d_func: form=%s in=%s, tm=%v, out=%s\n", form, in, tm, tm.Format(form))
}
return tm.Format(form)
}
applied = true
default:
return fmt.Errorf("#%d column contains unknown type specification (allowed: n, d): '%s', all: '%s'", i, data, colFmt)
}
}
if !applied {
return fmt.Errorf("#%d column contains no usable transformation(s): '%s', all: '%s'", i, data, colFmt)
}
}
}
// main output: column name --> values
// each column name is "columnI J" for the I-th column and J-th N
// The first nStatic columns do not have N added.
values := make(map[string][]string)
nN := 0
for n := range nMap {
// Read Top N file (current n)
ifn := fmt.Sprintf("results/%s_top_%d.csv", prefix, n)
iFile, err := os.Open(ifn)
if err != nil {
return err
}
defer func() { _ = iFile.Close() }()
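// Note: this defer runs when mergeCSVs returns, not at the end of each
// loop iteration, so every opened input file stays open until the merge
// completes.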
reader := csv.NewReader(iFile)
rows := 0
// Will keep column name -> data index map
colIndex := make(map[string]int)
for {
record, err := reader.Read()
// No more rows
if err == io.EOF {
break
} else if err != nil {
return err
}
rows++
// Get column -> data index map (from the header row)
if rows == 1 {
for i, col := range record {
_, ok := colsMap[col]
if ok {
colIndex[col] = i
}
}
// Check if all columns found
for col := range colsMap {
_, ok := colIndex[col]
if !ok {
return fmt.Errorf("column '%s' not found in data files", col)
}
}
continue
}
// Handle filtering rows by row[reColumn] matching colRegexp
if reColumn != nil && colRegexp != nil {
cNum, ok := colNum[*reColumn]
if !ok {
return fmt.Errorf("regexp filtering column '%s' not found", *reColumn)
}
if cNum >= nStatic {
return fmt.Errorf(
"regexp filtering column '%s' is not static (there are %d static cols, this column is #%d)",
*reColumn,
nStatic,
cNum+1,
)
}
index, ok := colIndex[*reColumn]
if !ok {
return fmt.Errorf("regexp filtering column '%s' not found by index", *reColumn)
}
value := record[index]
if !colRegexp.MatchString(value) {
if debug {
fmt.Printf("Skipping %s=%s, not matching %s\n", *reColumn, value, colRegexp.String())
}
continue
}
}
for col, i := range colIndex {
// Column is "ColName I J" for the I-th column and J-th N
// The first nStatic columns do not have N added.
colName := col
cNum := colNum[col]
if cNum >= nStatic {
colName = fmt.Sprintf("%s %d", col, n)
} else {
// Static column should only be inserted once
if nN > 0 {
continue
}
}
_, ok := values[colName]
if !ok {
values[colName] = []string{}
}
values[colName] = append(values[colName], record[i])
}
}
nN++
}
// Write output CSV
oFile, err := os.Create(ofn)
if err != nil {
return err
}
defer func() { _ = oFile.Close() }()
writer := csv.NewWriter(oFile)
defer writer.Flush()
// Create header row
hdr := []string{}
hdrNoMap := []string{}
hdrNoN := []string{}
for _, col := range colsAry {
name, ok := colNameMap[col]
// Handle static columns
cNum := colNum[col]
if cNum >= nStatic {
for _, n := range nAry {
if ok {
hdr = append(hdr, fmt.Sprintf(name, n))
} else {
hdr = append(hdr, fmt.Sprintf("%s %s", col, n))
}
hdrNoMap = append(hdrNoMap, fmt.Sprintf("%s %s", col, n))
hdrNoN = append(hdrNoN, col)
}
} else {
if ok {
hdr = append(hdr, name)
} else {
hdr = append(hdr, col)
}
hdrNoMap = append(hdrNoMap, col)
hdrNoN = append(hdrNoN, col)
}
}
// Write header
err = writer.Write(hdr)
if err != nil {
return err
}
// Get length of data and write data
dataLen := len(values[hdrNoMap[0]])
for i := 0; i < dataLen; i++ {
data := []string{}
for j, col := range hdrNoMap {
fmtFunc, ok := colFmtMap[hdrNoN[j]]
if ok {
data = append(data, fmtFunc(values[col][i]))
} else {
data = append(data, values[col][i])
}
}
err = writer.Write(data)
if err != nil {
return err
}
}
return nil
}
func main() {
dtStart := time.Now()
if len(os.Args) < 6 {
fmt.Printf("%s: required nStaticCols 'col1;col2;..;colN' 'n1;n2;..;nN' 'colname;;;regexp' output.csv\n", os.Args[0])
fmt.Printf(
"Example: %s 3 'date_from;date_to;release;n_top_contributing_coms;top_contributions_perc;"+
"n_top_committing_coms;top_commits_perc' '10;30;100' 'release;;;(?im)cncf'\n",
os.Args[0],
)
fmt.Printf("%s: use empty colname or empty regexp to skip selecting rows\n", os.Args[0])
os.Exit(1)
return
}
err := mergeCSVs(os.Args[1], os.Args[2], os.Args[3], os.Args[4], os.Args[5])
if err != nil {
fmt.Printf("Error: %s\n", err)
}
dtEnd := time.Now()
fmt.Printf("Time: %v\n", dtEnd.Sub(dtStart))
}
| [
"\"DEBUG\"",
"\"PG_DB\"",
"\"COLFMT\""
]
| []
| [
"COLFMT",
"PG_DB",
"DEBUG"
]
| [] | ["COLFMT", "PG_DB", "DEBUG"] | go | 3 | 0 | |
ai_flow/test/test_settings.py | #
# Copyright 2022 The AI Flow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import unittest
import ai_flow.settings
from ai_flow.settings import get_configuration, get_aiflow_home, get_configuration_file_path
class TestSettings(unittest.TestCase):
def setUp(self) -> None:
self.prev_AIFLOW_HOME = ai_flow.settings.AIFLOW_HOME
def tearDown(self) -> None:
ai_flow.settings.AIFLOW_HOME = self.prev_AIFLOW_HOME
def test_get_configuration(self):
ai_flow.settings.AIFLOW_HOME = os.path.dirname(__file__)
config = get_configuration()
self.assertEqual('sqlite:///aiflow.db', config.get_db_uri())
self.assertEqual(50051, config.get_server_port())
self.assertEqual('localhost:50052', config.get_notification_server_uri())
def test_get_configuration_file_path(self):
aiflow_home = os.path.dirname(__file__)
ai_flow.settings.AIFLOW_HOME = aiflow_home
self.assertEqual(os.path.join(aiflow_home, "aiflow_server.yaml"), get_configuration_file_path())
def test_get_non_exist_configuration_file_path(self):
ai_flow.settings.AIFLOW_HOME = '/non-exist-home'
with self.assertRaises(FileNotFoundError):
get_configuration_file_path()
def test_get_aiflow_home(self):
prev_home = os.environ['HOME']
try:
os.environ['HOME'] = '/home'
self.assertEqual(os.path.join('/home', 'aiflow'), get_aiflow_home())
os.environ['AIFLOW_HOME'] = '/aiflow_home'
self.assertEqual('/aiflow_home', get_aiflow_home())
finally:
os.environ['HOME'] = prev_home
if 'AIFLOW_HOME' in os.environ:
del os.environ['AIFLOW_HOME']
| []
| []
| [
"HOME",
"AIFLOW_HOME"
]
| [] | ["HOME", "AIFLOW_HOME"] | python | 2 | 0 | |
fstest/fstest.go | // Package fstest provides utilities for testing the Fs
package fstest
// FIXME put name of test FS in Fs structure
import (
"bytes"
"context"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/text/unicode/norm"
)
// Globals
var (
RemoteName = flag.String("remote", "", "Remote to test with, defaults to local filesystem")
Verbose = flag.Bool("verbose", false, "Set to enable logging")
DumpHeaders = flag.Bool("dump-headers", false, "Set to dump headers (needs -verbose)")
DumpBodies = flag.Bool("dump-bodies", false, "Set to dump bodies (needs -verbose)")
Individual = flag.Bool("individual", false, "Make individual bucket/container/directory for each test - much slower")
LowLevelRetries = flag.Int("low-level-retries", 10, "Number of low level retries")
UseListR = flag.Bool("fast-list", false, "Use recursive list if available. Uses more memory but fewer transactions.")
// SizeLimit signals tests to cap the maximum test file size and to skip inappropriate runs
SizeLimit = flag.Int64("size-limit", 0, "Limit maximum test file size")
// ListRetries is the number of times to retry a listing to overcome eventual consistency
ListRetries = flag.Int("list-retries", 3, "Number of times to retry listing")
// MatchTestRemote matches the remote names used for testing
MatchTestRemote = regexp.MustCompile(`^rclone-test-[abcdefghijklmnopqrstuvwxyz0123456789]{24}$`)
)
// Seed the random number generator
func init() {
rand.Seed(time.Now().UnixNano())
}
// Initialise rclone for testing
func Initialise() {
ctx := context.Background()
ci := fs.GetConfig(ctx)
// Never ask for passwords, fail instead.
// If your local config is encrypted set environment variable
// "RCLONE_CONFIG_PASS=hunter2" (or your password)
ci.AskPassword = false
// Override the config file from the environment - we don't
// parse the flags any more so this doesn't happen
// automatically
if envConfig := os.Getenv("RCLONE_CONFIG"); envConfig != "" {
_ = config.SetConfigPath(envConfig)
}
configfile.Install()
accounting.Start(ctx)
if *Verbose {
ci.LogLevel = fs.LogLevelDebug
}
if *DumpHeaders {
ci.Dump |= fs.DumpHeaders
}
if *DumpBodies {
ci.Dump |= fs.DumpBodies
}
ci.LowLevelRetries = *LowLevelRetries
ci.UseListR = *UseListR
}
// Item represents an item for checking
type Item struct {
Path string
Hashes map[hash.Type]string
ModTime time.Time
Size int64
}
// NewItem creates an item from a string content
func NewItem(Path, Content string, modTime time.Time) Item {
i := Item{
Path: Path,
ModTime: modTime,
Size: int64(len(Content)),
}
hash := hash.NewMultiHasher()
buf := bytes.NewBufferString(Content)
_, err := io.Copy(hash, buf)
if err != nil {
log.Fatalf("Failed to create item: %v", err)
}
i.Hashes = hash.Sums()
return i
}
// CheckTimeEqualWithPrecision checks the times are equal within the
// precision, returns the delta and a flag
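//
// For example (illustrative): two times 300ms apart compare equal under a
// 1s precision, but unequal under a 100ms precision.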
func CheckTimeEqualWithPrecision(t0, t1 time.Time, precision time.Duration) (time.Duration, bool) {
dt := t0.Sub(t1)
if dt >= precision || dt <= -precision {
return dt, false
}
return dt, true
}
// AssertTimeEqualWithPrecision checks that want is within precision
// of got, asserting that with t and logging remote
func AssertTimeEqualWithPrecision(t *testing.T, remote string, want, got time.Time, precision time.Duration) {
dt, ok := CheckTimeEqualWithPrecision(want, got, precision)
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (want %s vs got %s) (precision %s)", remote, dt, precision, want, got, precision))
}
// CheckModTime checks the mod time to the given precision
func (i *Item) CheckModTime(t *testing.T, obj fs.Object, modTime time.Time, precision time.Duration) {
AssertTimeEqualWithPrecision(t, obj.Remote(), i.ModTime, modTime, precision)
}
// CheckHashes checks all the hashes the object supports are correct
func (i *Item) CheckHashes(t *testing.T, obj fs.Object) {
require.NotNil(t, obj)
types := obj.Fs().Hashes().Array()
for _, Hash := range types {
// Check attributes
sum, err := obj.Hash(context.Background(), Hash)
require.NoError(t, err)
assert.True(t, hash.Equals(i.Hashes[Hash], sum), fmt.Sprintf("%s/%s: %v hash incorrect - expecting %q got %q", obj.Fs().String(), obj.Remote(), Hash, i.Hashes[Hash], sum))
}
}
// Check checks all the attributes of the object are correct
func (i *Item) Check(t *testing.T, obj fs.Object, precision time.Duration) {
i.CheckHashes(t, obj)
assert.Equal(t, i.Size, obj.Size(), fmt.Sprintf("%s: size incorrect file=%d vs obj=%d", i.Path, i.Size, obj.Size()))
i.CheckModTime(t, obj, obj.ModTime(context.Background()), precision)
}
// Normalize runs a utf8 normalization on the string if running on OS
// X. This is because OS X denormalizes file names it writes to the
// local file system.
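//
// Example (illustrative): the decomposed sequence "e" + U+0301 (combining
// acute) becomes the single precomposed rune "é" under NFC.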
func Normalize(name string) string {
if runtime.GOOS == "darwin" {
name = norm.NFC.String(name)
}
return name
}
// Items represents all items for checking
type Items struct {
byName map[string]*Item
byNameAlt map[string]*Item
items []Item
}
// NewItems makes an Items
func NewItems(items []Item) *Items {
is := &Items{
byName: make(map[string]*Item),
byNameAlt: make(map[string]*Item),
items: items,
}
// Fill up byName
for i := range items {
is.byName[Normalize(items[i].Path)] = &items[i]
}
return is
}
// Find checks off an item
func (is *Items) Find(t *testing.T, obj fs.Object, precision time.Duration) {
remote := Normalize(obj.Remote())
i, ok := is.byName[remote]
if !ok {
i, ok = is.byNameAlt[remote]
assert.True(t, ok, fmt.Sprintf("Unexpected file %q", remote))
}
if i != nil {
delete(is.byName, i.Path)
i.Check(t, obj, precision)
}
}
// Done checks all finished
func (is *Items) Done(t *testing.T) {
if len(is.byName) != 0 {
for name := range is.byName {
t.Logf("Not found %q", name)
}
}
assert.Equal(t, 0, len(is.byName), fmt.Sprintf("%d objects not found", len(is.byName)))
}
// makeListingFromItems returns a string representation of the items
func makeListingFromItems(items []Item) string {
nameLengths := make([]string, len(items))
for i, item := range items {
remote := Normalize(item.Path)
nameLengths[i] = fmt.Sprintf("%s (%d)", remote, item.Size)
}
sort.Strings(nameLengths)
return strings.Join(nameLengths, ", ")
}
// makeListingFromObjects returns a string representation of the objects
func makeListingFromObjects(objs []fs.Object) string {
nameLengths := make([]string, len(objs))
for i, obj := range objs {
nameLengths[i] = fmt.Sprintf("%s (%d)", Normalize(obj.Remote()), obj.Size())
}
sort.Strings(nameLengths)
return strings.Join(nameLengths, ", ")
}
// filterEmptyDirs removes from expectedDirs any directories that are empty
// (or that contain only other directories)
func filterEmptyDirs(t *testing.T, items []Item, expectedDirs []string) (newExpectedDirs []string) {
dirs := map[string]struct{}{"": {}}
for _, item := range items {
base := item.Path
for {
base = path.Dir(base)
if base == "." || base == "/" {
break
}
dirs[base] = struct{}{}
}
}
for _, expectedDir := range expectedDirs {
if _, found := dirs[expectedDir]; found {
newExpectedDirs = append(newExpectedDirs, expectedDir)
} else {
t.Logf("Filtering empty directory %q", expectedDir)
}
}
return newExpectedDirs
}
// CheckListingWithRoot checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non-nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
//
// dir is the directory used for the listing.
func CheckListingWithRoot(t *testing.T, f fs.Fs, dir string, items []Item, expectedDirs []string, precision time.Duration) {
if expectedDirs != nil && !f.Features().CanHaveEmptyDirectories {
expectedDirs = filterEmptyDirs(t, items, expectedDirs)
}
is := NewItems(items)
ctx := context.Background()
oldErrors := accounting.Stats(ctx).GetErrors()
var objs []fs.Object
var dirs []fs.Directory
var err error
var retries = *ListRetries
sleep := time.Second / 2
wantListing := makeListingFromItems(items)
gotListing := "<unset>"
listingOK := false
for i := 1; i <= retries; i++ {
objs, dirs, err = walk.GetAll(ctx, f, dir, true, -1)
if err != nil && err != fs.ErrorDirNotFound {
t.Fatalf("Error listing: %v", err)
}
gotListing = makeListingFromObjects(objs)
listingOK = wantListing == gotListing
if listingOK && (expectedDirs == nil || len(dirs) == len(expectedDirs)) {
// Put an extra sleep in if we did any retries just to make sure it really
// is consistent (here is looking at you Amazon Drive!)
if i != 1 {
extraSleep := 5*time.Second + sleep
t.Logf("Sleeping for %v just to make sure", extraSleep)
time.Sleep(extraSleep)
}
break
}
sleep *= 2
t.Logf("Sleeping for %v for list eventual consistency: %d/%d", sleep, i, retries)
time.Sleep(sleep)
if doDirCacheFlush := f.Features().DirCacheFlush; doDirCacheFlush != nil {
t.Logf("Flushing the directory cache")
doDirCacheFlush()
}
}
assert.True(t, listingOK, fmt.Sprintf("listing wrong, want\n %s got\n %s", wantListing, gotListing))
for _, obj := range objs {
require.NotNil(t, obj)
is.Find(t, obj, precision)
}
is.Done(t)
// Don't notice an error when listing an empty directory
if len(items) == 0 && oldErrors == 0 && accounting.Stats(ctx).GetErrors() == 1 {
accounting.Stats(ctx).ResetErrors()
}
// Check the directories
if expectedDirs != nil {
expectedDirsCopy := make([]string, len(expectedDirs))
for i, dir := range expectedDirs {
expectedDirsCopy[i] = Normalize(dir)
}
actualDirs := []string{}
for _, dir := range dirs {
actualDirs = append(actualDirs, Normalize(dir.Remote()))
}
sort.Strings(actualDirs)
sort.Strings(expectedDirsCopy)
assert.Equal(t, expectedDirsCopy, actualDirs, "directories")
}
}
// CheckListingWithPrecision checks the fs to see if it has the
// expected contents with the given precision.
//
// If expectedDirs is non-nil then we check those too. Note that no
// directories returned is also OK as some remotes don't return
// directories.
func CheckListingWithPrecision(t *testing.T, f fs.Fs, items []Item, expectedDirs []string, precision time.Duration) {
CheckListingWithRoot(t, f, "", items, expectedDirs, precision)
}
// CheckListing checks the fs to see if it has the expected contents
func CheckListing(t *testing.T, f fs.Fs, items []Item) {
precision := f.Precision()
CheckListingWithPrecision(t, f, items, nil, precision)
}
// CheckItems checks the fs to see if it has only the items passed in
// using a precision of fs.Config.ModifyWindow
func CheckItems(t *testing.T, f fs.Fs, items ...Item) {
CheckListingWithPrecision(t, f, items, nil, fs.GetModifyWindow(context.TODO(), f))
}
// CompareItems compares a set of DirEntries to a slice of items and a list of dirs
// The modtimes are compared with the precision supplied
func CompareItems(t *testing.T, entries fs.DirEntries, items []Item, expectedDirs []string, precision time.Duration, what string) {
is := NewItems(items)
var objs []fs.Object
var dirs []fs.Directory
wantListing := makeListingFromItems(items)
for _, entry := range entries {
switch x := entry.(type) {
case fs.Directory:
dirs = append(dirs, x)
case fs.Object:
objs = append(objs, x)
// do nothing
default:
t.Fatalf("unknown object type %T", entry)
}
}
gotListing := makeListingFromObjects(objs)
listingOK := wantListing == gotListing
assert.True(t, listingOK, fmt.Sprintf("%s not equal, want\n %s got\n %s", what, wantListing, gotListing))
for _, obj := range objs {
require.NotNil(t, obj)
is.Find(t, obj, precision)
}
is.Done(t)
// Check the directories
if expectedDirs != nil {
expectedDirsCopy := make([]string, len(expectedDirs))
for i, dir := range expectedDirs {
expectedDirsCopy[i] = Normalize(dir)
}
actualDirs := []string{}
for _, dir := range dirs {
actualDirs = append(actualDirs, Normalize(dir.Remote()))
}
sort.Strings(actualDirs)
sort.Strings(expectedDirsCopy)
assert.Equal(t, expectedDirsCopy, actualDirs, "directories not equal")
}
}
// Time parses a time string or logs a fatal error
func Time(timeString string) time.Time {
t, err := time.Parse(time.RFC3339Nano, timeString)
if err != nil {
log.Fatalf("Failed to parse time %q: %v", timeString, err)
}
return t
}
// LocalRemote creates a temporary directory name for local remotes
func LocalRemote() (path string, err error) {
path, err = ioutil.TempDir("", "rclone")
if err == nil {
// Now remove the directory
err = os.Remove(path)
}
path = filepath.ToSlash(path)
return
}
// RandomRemoteName makes a random bucket or subdirectory name
//
// Returns a random remote name plus the leaf name
func RandomRemoteName(remoteName string) (string, string, error) {
var err error
var leafName string
// Make a local directory if the remote name is empty
if remoteName == "" {
remoteName, err = LocalRemote()
if err != nil {
return "", "", err
}
} else {
if !strings.HasSuffix(remoteName, ":") {
remoteName += "/"
}
leafName = "rclone-test-" + random.String(24)
if !MatchTestRemote.MatchString(leafName) {
log.Fatalf("%q didn't match the test remote name regexp", leafName)
}
remoteName += leafName
}
return remoteName, leafName, nil
}
// RandomRemote makes a random bucket or subdirectory on the remote
// from the -remote parameter
//
// Call the returned finalise function to Purge the fs at the end (and
// the parent if necessary)
//
// Returns the remote, its url, a finaliser and an error
func RandomRemote() (fs.Fs, string, func(), error) {
var err error
var parentRemote fs.Fs
remoteName := *RemoteName
remoteName, _, err = RandomRemoteName(remoteName)
if err != nil {
return nil, "", nil, err
}
remote, err := fs.NewFs(context.Background(), remoteName)
if err != nil {
return nil, "", nil, err
}
finalise := func() {
Purge(remote)
if parentRemote != nil {
Purge(parentRemote)
if err != nil {
log.Printf("Failed to purge %v: %v", parentRemote, err)
}
}
}
return remote, remoteName, finalise, nil
}
// Purge is a simplified re-implementation of operations.Purge for the
// test routine cleanup to avoid circular dependencies.
//
// It logs errors rather than returning them
func Purge(f fs.Fs) {
ctx := context.Background()
var err error
doFallbackPurge := true
if doPurge := f.Features().Purge; doPurge != nil {
doFallbackPurge = false
fs.Debugf(f, "Purge remote")
err = doPurge(ctx, "")
if err == fs.ErrorCantPurge {
doFallbackPurge = true
}
}
if doFallbackPurge {
dirs := []string{""}
err = walk.ListR(ctx, f, "", true, -1, walk.ListAll, func(entries fs.DirEntries) error {
var err error
entries.ForObject(func(obj fs.Object) {
fs.Debugf(f, "Purge object %q", obj.Remote())
err = obj.Remove(ctx)
if err != nil {
log.Printf("purge failed to remove %q: %v", obj.Remote(), err)
}
})
entries.ForDir(func(dir fs.Directory) {
dirs = append(dirs, dir.Remote())
})
return nil
})
sort.Strings(dirs)
for i := len(dirs) - 1; i >= 0; i-- {
dir := dirs[i]
fs.Debugf(f, "Purge dir %q", dir)
err := f.Rmdir(ctx, dir)
if err != nil {
log.Printf("purge failed to rmdir %q: %v", dir, err)
}
}
}
if err != nil {
log.Printf("purge failed: %v", err)
}
}
| [
"\"RCLONE_CONFIG\""
]
| []
| [
"RCLONE_CONFIG"
]
| [] | ["RCLONE_CONFIG"] | go | 1 | 0 | |
tools/report-converter/tests/unit/report_hash/__init__.py | # coding=utf-8
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
""" Setup for the test package analyze. """
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import os
import shutil
import tempfile
# Test workspace should be initialized in this module.
TEST_WORKSPACE = None
def get_workspace(test_id='test'):
""" Return a temporary workspace for the tests. """
workspace_root = os.environ.get("CC_REPORT_HASH_TEST_WORKSPACE_ROOT")
if not workspace_root:
# if no external workspace is set create under the build dir
workspace_root = os.path.join(os.environ['REPO_ROOT'], 'build',
'workspace')
if not os.path.exists(workspace_root):
os.makedirs(workspace_root)
if test_id:
return tempfile.mkdtemp(prefix=test_id + "-", dir=workspace_root)
else:
return workspace_root
def setup_package():
""" Setup the environment for the tests. """
global TEST_WORKSPACE
TEST_WORKSPACE = get_workspace('report_hash')
print(TEST_WORKSPACE)
os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE
def teardown_package():
""" Delete the workspace associated with this test. """
# TODO: If environment variable is set keep the workspace
# and print out the path.
global TEST_WORKSPACE
print("Removing: " + TEST_WORKSPACE)
shutil.rmtree(TEST_WORKSPACE)
| []
| []
| [
"TEST_WORKSPACE",
"CC_REPORT_HASH_TEST_WORKSPACE_ROOT",
"REPO_ROOT"
]
| [] | ["TEST_WORKSPACE", "CC_REPORT_HASH_TEST_WORKSPACE_ROOT", "REPO_ROOT"] | python | 3 | 0 | |
test/integration/test_build.py | import pytest
import os
def test_definition_syntax_error(cli, data_dir):
ee_def = os.path.join(data_dir, 'definition_files', 'invalid.yml')
r = cli(
f'ansible-builder build -f {ee_def} --container-runtime podman',
allow_error=True
)
assert r.rc != 0
assert 'An error occured while parsing the definition file' in (r.stdout + r.stderr), (r.stdout + r.stderr)
def test_build_fail_exitcode(cli, container_runtime, ee_tag, tmpdir, data_dir):
"""Test that when a build fails, the ansible-builder exits with non-zero exit code.
Example: https://github.com/ansible/ansible-builder/issues/51
"""
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'build_fail', 'execution-environment.yml')
r = cli(
f"ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime} -v3",
allow_error=True
)
assert r.rc != 0, (r.stdout + r.stderr)
assert 'RUN thisisnotacommand' in (r.stdout + r.stderr), (r.stdout + r.stderr)
assert 'thisisnotacommand: command not found' in (r.stdout + r.stderr), (r.stdout + r.stderr)
def test_missing_python_requirements_file():
"""If a user specifies a python requirements file, but we can't find it, fail sanely."""
pytest.skip("Not implemented")
def test_missing_galaxy_requirements_file():
"""If a user specifies a galaxy requirements file, but we can't find it, fail sanely."""
pytest.skip("Not implemented")
def test_build_streams_output_with_verbosity_on(cli, container_runtime, build_dir_and_ee_yml, ee_tag):
"""Test that 'ansible-builder build' streams build output."""
tmpdir, eeyml = build_dir_and_ee_yml("")
result = cli(f"ansible-builder build -c {tmpdir} -f {eeyml} -t {ee_tag} --container-runtime {container_runtime} -v 3")
assert f'{container_runtime} build -f {tmpdir}' in result.stdout
assert f'Ansible Builder is building your execution environment image, "{ee_tag}".' in result.stdout
assert f'The build context can be found at: {tmpdir}' in result.stdout
def test_build_streams_output_with_verbosity_off(cli, container_runtime, build_dir_and_ee_yml, ee_tag):
"""
Like the test_build_streams_output_with_verbosity_on test but making sure less output is shown with default verbosity level of 2.
"""
tmpdir, eeyml = build_dir_and_ee_yml("")
result = cli(f"ansible-builder build -c {tmpdir} -f {eeyml} -t {ee_tag} --container-runtime {container_runtime}")
assert f'Ansible Builder is building your execution environment image, "{ee_tag}".' not in result.stdout
assert f'The build context can be found at: {tmpdir}' in result.stdout
def test_build_streams_output_with_invalid_verbosity(cli, container_runtime, build_dir_and_ee_yml, ee_tag):
"""
Like the test_build_streams_output_with_verbosity_off test but making sure it errors out correctly with invalid verbosity level.
"""
tmpdir, eeyml = build_dir_and_ee_yml("")
result = cli(f"ansible-builder build -c {tmpdir} -f {eeyml} -t {ee_tag} --container-runtime {container_runtime} -v 6", allow_error=True)
assert result.rc != 0
assert 'invalid choice: 6 (choose from 0, 1, 2, 3)' in (result.stdout + result.stderr)
def test_blank_execution_environment(cli, container_runtime, ee_tag, tmpdir, data_dir):
"""Just makes sure that the buld process does not require any particular input"""
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'blank', 'execution-environment.yml')
cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}'
)
result = cli(f'{container_runtime} run --rm {ee_tag} echo "This is a simple test"')
assert 'This is a simple test' in result.stdout, result.stdout
def test_user_system_requirement(cli, container_runtime, ee_tag, tmpdir, data_dir):
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'subversion', 'execution-environment.yml')
command = f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}'
cli(command)
result = cli(
f'{container_runtime} run --rm {ee_tag} svn --help'
)
assert 'Subversion is a tool for version control' in result.stdout, result.stdout
def test_collection_system_requirement(cli, container_runtime, ee_tag, tmpdir, data_dir):
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'ansible.posix.at', 'execution-environment.yml')
cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime} -v3'
)
result = cli(
f'{container_runtime} run --rm {ee_tag} at -V'
)
assert 'at version' in result.stderr, result.stderr
def test_user_python_requirement(cli, container_runtime, ee_tag, tmpdir, data_dir):
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'pip', 'execution-environment.yml')
command = f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}'
cli(command)
result = cli(
f'{container_runtime} run --rm {ee_tag} pip3 show awxkit'
)
assert 'The official command line interface for Ansible AWX' in result.stdout, result.stdout
def test_prepended_steps(cli, container_runtime, ee_tag, tmpdir, data_dir):
"""
Tests that prepended steps are in the final stage
"""
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'prepend_steps', 'execution-environment.yml')
cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime}'
)
_file = 'Dockerfile' if container_runtime == 'docker' else 'Containerfile'
content = open(os.path.join(bc, _file), 'r').read()
stages_content = content.split('FROM')
assert 'RUN whoami' in stages_content[-1]
def test_build_args_basic(cli, container_runtime, ee_tag, tmpdir, data_dir):
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'build_args', 'execution-environment.yml')
result = cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime} --build-arg FOO=bar -v3'
)
assert 'FOO=bar' in result.stdout
def test_build_args_from_environment(cli, container_runtime, ee_tag, tmpdir, data_dir):
if container_runtime == 'podman':
pytest.skip('Skipped. Podman does not support this')
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'build_args', 'execution-environment.yml')
os.environ['FOO'] = 'secretsecret'
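# With no value given, ``--build-arg FOO`` makes the container runtime read
# FOO from the calling environment, so the secret set above should appear in
# the build output checked below.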
result = cli(
f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime} --build-arg FOO -v3'
)
assert 'secretsecret' in result.stdout
def test_base_image_build_arg(cli, container_runtime, ee_tag, tmpdir, data_dir):
bc = str(tmpdir)
ee_def = os.path.join(data_dir, 'build_args', 'base-image.yml')
os.environ['FOO'] = 'secretsecret'
# Build with custom image tag, then use that as input to --build-arg ANSIBLE_RUNNER_IMAGE
cli(f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag}-custom --container-runtime {container_runtime} -v3')
cli(f'ansible-builder build -c {bc} -f {ee_def} -t {ee_tag}-custom '
f'--container-runtime {container_runtime} --build-arg ANSIBLE_RUNNER_IMAGE={ee_tag}-custom -v3')
result = cli(f"{container_runtime} run {ee_tag}-custom cat /base_image")
assert f"{ee_tag}-custom" in result.stdout
class TestPytz:
@pytest.fixture(scope='class')
def pytz(self, cli_class, container_runtime, ee_tag_class, data_dir, tmpdir_factory):
bc_folder = str(tmpdir_factory.mktemp('bc'))
ee_def = os.path.join(data_dir, 'pytz', 'execution-environment.yml')
r = cli_class(
f'ansible-builder build -c {bc_folder} -f {ee_def} -t {ee_tag_class} --container-runtime {container_runtime} -v 3'
)
# Because of test multi-processing, this may or may not use cache, so allow either
assert 'RUN /output/install-from-bindep && rm -rf /output/wheels' in r.stdout, r.stdout
return (ee_tag_class, bc_folder)
def test_has_pytz(self, cli, container_runtime, pytz):
ee_tag, bc_folder = pytz
r = cli(f'{container_runtime} run --rm {ee_tag} pip3 show pytz')
assert 'World timezone definitions, modern and historical' in r.stdout, r.stdout
def test_build_layer_reuse(self, cli, container_runtime, data_dir, pytz):
ee_tag, bc_folder = pytz
ee_def = os.path.join(data_dir, 'pytz', 'execution-environment.yml')
r = cli(
f'ansible-builder build -c {bc_folder} -f {ee_def} -t {ee_tag} --container-runtime {container_runtime} -v 3'
)
assert 'Collecting pytz' not in r.stdout, r.stdout
assert 'requirements_combined.txt is already up-to-date' in r.stdout, r.stdout
stdout_no_whitespace = r.stdout.replace('--->', '-->').replace('\n', ' ').replace(' ', ' ').replace(' ', ' ')
assert 'RUN /output/install-from-bindep && rm -rf /output/wheels --> Using cache' in stdout_no_whitespace, r.stdout
| []
| []
| [
"FOO"
]
| [] | ["FOO"] | python | 1 | 0 | |
google/cloud/translate_v3/services/translation_service/client.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.translate_v3.services.translation_service import pagers
from google.cloud.translate_v3.types import translation_service
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import TranslationServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import TranslationServiceGrpcTransport
from .transports.grpc_asyncio import TranslationServiceGrpcAsyncIOTransport
class TranslationServiceClientMeta(type):
"""Metaclass for the TranslationService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[TranslationServiceTransport]]
_transport_registry["grpc"] = TranslationServiceGrpcTransport
_transport_registry["grpc_asyncio"] = TranslationServiceGrpcAsyncIOTransport
def get_transport_class(
cls, label: str = None,
) -> Type[TranslationServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class TranslationServiceClient(metaclass=TranslationServiceClientMeta):
"""Provides natural language translation operations."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "translate.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranslationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TranslationServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> TranslationServiceTransport:
"""Returns the transport used by the client instance.
Returns:
TranslationServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def glossary_path(project: str, location: str, glossary: str,) -> str:
"""Returns a fully-qualified glossary string."""
return "projects/{project}/locations/{location}/glossaries/{glossary}".format(
project=project, location=location, glossary=glossary,
)
@staticmethod
def parse_glossary_path(path: str) -> Dict[str, str]:
"""Parses a glossary path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/glossaries/(?P<glossary>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, TranslationServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the translation service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, TranslationServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, TranslationServiceTransport):
# transport is a TranslationServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=(
Transport == type(self).get_transport_class("grpc")
or Transport == type(self).get_transport_class("grpc_asyncio")
),
)
def translate_text(
self,
request: translation_service.TranslateTextRequest = None,
*,
parent: str = None,
target_language_code: str = None,
contents: Sequence[str] = None,
model: str = None,
mime_type: str = None,
source_language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.TranslateTextResponse:
r"""Translates input text and returns translated text.
Args:
request (google.cloud.translate_v3.types.TranslateTextRequest):
The request object. The request message for synchronous
translation.
parent (str):
Required. Project or location to make a call. Must refer
to a caller's project.
Format: ``projects/{project-number-or-id}`` or
``projects/{project-number-or-id}/locations/{location-id}``.
For global calls, use
``projects/{project-number-or-id}/locations/global`` or
``projects/{project-number-or-id}``.
Non-global location is required for requests using
AutoML models or custom glossaries.
Models and glossaries must be within the same region
(have same location-id), otherwise an INVALID_ARGUMENT
(400) error is returned.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
target_language_code (str):
Required. The BCP-47 language code to
use for translation of the input text,
set to one of the language codes listed
in Language Support.
This corresponds to the ``target_language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
contents (Sequence[str]):
Required. The content of the input in
string format. We recommend the total
content be less than 30k codepoints. The
max length of this field is 1024.
Use BatchTranslateText for larger text.
This corresponds to the ``contents`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (str):
Optional. The ``model`` type requested for this
translation.
The format depends on model type:
- AutoML Translation models:
``projects/{project-number-or-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-number-or-id}/locations/{location-id}/models/general/nmt``,
For global (non-regionalized) requests, use
``location-id`` ``global``. For example,
``projects/{project-number-or-id}/locations/global/models/general/nmt``.
If not provided, the default Google model (NMT) will be
used.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
mime_type (str):
Optional. The format of the source
text, for example, "text/html",
"text/plain". If left blank, the MIME
type defaults to "text/html".
This corresponds to the ``mime_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
source_language_code (str):
Optional. The BCP-47 language code of
the input text if known, for example,
"en-US" or "sr-Latn". Supported language
codes are listed in Language Support. If
the source language isn't specified, the
API attempts to identify the source
language automatically and returns the
source language within the response.
This corresponds to the ``source_language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.TranslateTextResponse:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any(
[
parent,
target_language_code,
contents,
model,
mime_type,
source_language_code,
]
)
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.TranslateTextRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.TranslateTextRequest):
request = translation_service.TranslateTextRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if target_language_code is not None:
request.target_language_code = target_language_code
if contents is not None:
request.contents = contents
if model is not None:
request.model = model
if mime_type is not None:
request.mime_type = mime_type
if source_language_code is not None:
request.source_language_code = source_language_code
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.translate_text]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def detect_language(
self,
request: translation_service.DetectLanguageRequest = None,
*,
parent: str = None,
model: str = None,
mime_type: str = None,
content: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.DetectLanguageResponse:
r"""Detects the language of text within a request.
Args:
request (google.cloud.translate_v3.types.DetectLanguageRequest):
The request object. The request message for language
detection.
parent (str):
Required. Project or location to make a call. Must refer
to a caller's project.
Format:
``projects/{project-number-or-id}/locations/{location-id}``
or ``projects/{project-number-or-id}``.
For global calls, use
``projects/{project-number-or-id}/locations/global`` or
``projects/{project-number-or-id}``.
Only models within the same region (has same
location-id) can be used. Otherwise an INVALID_ARGUMENT
(400) error is returned.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (str):
Optional. The language detection model to be used.
Format:
``projects/{project-number-or-id}/locations/{location-id}/models/language-detection/{model-id}``
Only one language detection model is currently
supported:
``projects/{project-number-or-id}/locations/{location-id}/models/language-detection/default``.
If not specified, the default model is used.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
mime_type (str):
Optional. The format of the source
text, for example, "text/html",
"text/plain". If left blank, the MIME
type defaults to "text/html".
This corresponds to the ``mime_type`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
content (str):
The content of the input stored as a
string.
This corresponds to the ``content`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.DetectLanguageResponse:
The response message for language
detection.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, model, mime_type, content])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.DetectLanguageRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.DetectLanguageRequest):
request = translation_service.DetectLanguageRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if model is not None:
request.model = model
if mime_type is not None:
request.mime_type = mime_type
if content is not None:
request.content = content
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.detect_language]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
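    # Example (hypothetical sketch, not part of the generated surface): using
    # the flattened parameters, a detect_language call might look like:
    #
    #   client = TranslationServiceClient()
    #   response = client.detect_language(
    #       parent="projects/my-project/locations/global",  # placeholder project
    #       content="Bonjour tout le monde",
    #       mime_type="text/plain",
    #   )
    #   for language in response.languages:
    #       print(language.language_code, language.confidence)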
def get_supported_languages(
self,
request: translation_service.GetSupportedLanguagesRequest = None,
*,
parent: str = None,
model: str = None,
display_language_code: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.SupportedLanguages:
r"""Returns a list of supported languages for
translation.
Args:
request (google.cloud.translate_v3.types.GetSupportedLanguagesRequest):
The request object. The request message for discovering
supported languages.
parent (str):
Required. Project or location to make a call. Must refer
to a caller's project.
Format: ``projects/{project-number-or-id}`` or
``projects/{project-number-or-id}/locations/{location-id}``.
For global calls, use
``projects/{project-number-or-id}/locations/global`` or
``projects/{project-number-or-id}``.
Non-global location is required for AutoML models.
                Only models within the same region (that is, with the
                same location-id) can be used; otherwise an
                INVALID_ARGUMENT (400) error is returned.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
model (str):
Optional. Get supported languages of this model.
The format depends on model type:
- AutoML Translation models:
``projects/{project-number-or-id}/locations/{location-id}/models/{model-id}``
- General (built-in) models:
``projects/{project-number-or-id}/locations/{location-id}/models/general/nmt``,
Returns languages supported by the specified model. If
missing, we get supported languages of Google general
NMT model.
This corresponds to the ``model`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
display_language_code (str):
Optional. The language to use to
return localized, human readable names
of supported languages. If missing, then
display names are not returned in a
response.
This corresponds to the ``display_language_code`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.SupportedLanguages:
The response message for discovering
supported languages.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, model, display_language_code])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.GetSupportedLanguagesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.GetSupportedLanguagesRequest):
request = translation_service.GetSupportedLanguagesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if model is not None:
request.model = model
if display_language_code is not None:
request.display_language_code = display_language_code
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_supported_languages]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def translate_document(
self,
request: translation_service.TranslateDocumentRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.TranslateDocumentResponse:
r"""Translates documents in synchronous mode.
Args:
request (google.cloud.translate_v3.types.TranslateDocumentRequest):
The request object. A document translation request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.TranslateDocumentResponse:
A translated document response
message.
"""
# Create or coerce a protobuf request object.
        # Minor optimization to avoid making a copy if the user passes
        # in a translation_service.TranslateDocumentRequest.
        # This method exposes no flattened fields, so there is no risk
        # of modifying the caller's input.
if not isinstance(request, translation_service.TranslateDocumentRequest):
request = translation_service.TranslateDocumentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.translate_document]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def batch_translate_text(
self,
request: translation_service.BatchTranslateTextRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Translates a large volume of text in asynchronous
batch mode. This function provides real-time output as
the inputs are being processed. If caller cancels a
request, the partial results (for an input file, it's
all or nothing) may still be available on the specified
output location.
This call returns immediately and you can
use google.longrunning.Operation.name to poll the status
of the call.
Args:
request (google.cloud.translate_v3.types.BatchTranslateTextRequest):
The request object. The batch translation request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.translate_v3.types.BatchTranslateResponse` Stored in the
[google.longrunning.Operation.response][google.longrunning.Operation.response]
field returned by BatchTranslateText if at least one
sentence is translated successfully.
"""
# Create or coerce a protobuf request object.
        # Minor optimization to avoid making a copy if the user passes
        # in a translation_service.BatchTranslateTextRequest.
        # This method exposes no flattened fields, so there is no risk
        # of modifying the caller's input.
if not isinstance(request, translation_service.BatchTranslateTextRequest):
request = translation_service.BatchTranslateTextRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_translate_text]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
translation_service.BatchTranslateResponse,
metadata_type=translation_service.BatchTranslateMetadata,
)
# Done; return the response.
return response
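    # Example (hypothetical sketch): batch_translate_text returns a
    # long-running operation future, so callers typically block on result():
    #
    #   operation_future = client.batch_translate_text(request=batch_request)
    #   batch_response = operation_future.result(timeout=3600)  # placeholder timeout
    #   print(batch_response.translated_characters)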
def batch_translate_document(
self,
request: translation_service.BatchTranslateDocumentRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Translates a large volume of document in asynchronous
batch mode. This function provides real-time output as
the inputs are being processed. If caller cancels a
request, the partial results (for an input file, it's
all or nothing) may still be available on the specified
output location.
This call returns immediately and you can use
google.longrunning.Operation.name to poll the status of
the call.
Args:
request (google.cloud.translate_v3.types.BatchTranslateDocumentRequest):
The request object. The BatchTranslateDocument request.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.translate_v3.types.BatchTranslateDocumentResponse` Stored in the
[google.longrunning.Operation.response][google.longrunning.Operation.response]
field returned by BatchTranslateDocument if at least
one document is translated successfully.
"""
# Create or coerce a protobuf request object.
        # Minor optimization to avoid making a copy if the user passes
        # in a translation_service.BatchTranslateDocumentRequest.
        # This method exposes no flattened fields, so there is no risk
        # of modifying the caller's input.
if not isinstance(request, translation_service.BatchTranslateDocumentRequest):
request = translation_service.BatchTranslateDocumentRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.batch_translate_document]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
translation_service.BatchTranslateDocumentResponse,
metadata_type=translation_service.BatchTranslateDocumentMetadata,
)
# Done; return the response.
return response
def create_glossary(
self,
request: translation_service.CreateGlossaryRequest = None,
*,
parent: str = None,
glossary: translation_service.Glossary = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates a glossary and returns the long-running operation.
Returns NOT_FOUND, if the project doesn't exist.
Args:
request (google.cloud.translate_v3.types.CreateGlossaryRequest):
The request object. Request message for CreateGlossary.
parent (str):
Required. The project name.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
glossary (google.cloud.translate_v3.types.Glossary):
Required. The glossary to create.
This corresponds to the ``glossary`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.translate_v3.types.Glossary`
Represents a glossary built from user provided data.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, glossary])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.CreateGlossaryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.CreateGlossaryRequest):
request = translation_service.CreateGlossaryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if glossary is not None:
request.glossary = glossary
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_glossary]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
translation_service.Glossary,
metadata_type=translation_service.CreateGlossaryMetadata,
)
# Done; return the response.
return response
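    # Example (hypothetical sketch; every resource name below is a placeholder):
    #
    #   glossary = translation_service.Glossary(
    #       name="projects/my-project/locations/us-central1/glossaries/my-glossary",
    #   )
    #   operation_future = client.create_glossary(
    #       parent="projects/my-project/locations/us-central1", glossary=glossary,
    #   )
    #   created = operation_future.result(timeout=600)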
def list_glossaries(
self,
request: translation_service.ListGlossariesRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListGlossariesPager:
r"""Lists glossaries in a project. Returns NOT_FOUND, if the project
doesn't exist.
Args:
request (google.cloud.translate_v3.types.ListGlossariesRequest):
The request object. Request message for ListGlossaries.
parent (str):
Required. The name of the project
from which to list all of the
glossaries.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.services.translation_service.pagers.ListGlossariesPager:
Response message for ListGlossaries.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.ListGlossariesRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.ListGlossariesRequest):
request = translation_service.ListGlossariesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_glossaries]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListGlossariesPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
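    # Example (hypothetical sketch): the returned pager fetches additional
    # pages transparently while it is iterated:
    #
    #   for glossary in client.list_glossaries(
    #           parent="projects/my-project/locations/us-central1"):  # placeholder
    #       print(glossary.name)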
def get_glossary(
self,
request: translation_service.GetGlossaryRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> translation_service.Glossary:
r"""Gets a glossary. Returns NOT_FOUND, if the glossary doesn't
exist.
Args:
request (google.cloud.translate_v3.types.GetGlossaryRequest):
The request object. Request message for GetGlossary.
name (str):
Required. The name of the glossary to
retrieve.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.translate_v3.types.Glossary:
Represents a glossary built from user
provided data.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.GetGlossaryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.GetGlossaryRequest):
request = translation_service.GetGlossaryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_glossary]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def delete_glossary(
self,
request: translation_service.DeleteGlossaryRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deletes a glossary, or cancels glossary construction if the
glossary isn't created yet. Returns NOT_FOUND, if the glossary
doesn't exist.
Args:
request (google.cloud.translate_v3.types.DeleteGlossaryRequest):
The request object. Request message for DeleteGlossary.
name (str):
Required. The name of the glossary to
delete.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.translate_v3.types.DeleteGlossaryResponse` Stored in the
[google.longrunning.Operation.response][google.longrunning.Operation.response]
field returned by DeleteGlossary.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a translation_service.DeleteGlossaryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, translation_service.DeleteGlossaryRequest):
request = translation_service.DeleteGlossaryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_glossary]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
translation_service.DeleteGlossaryResponse,
metadata_type=translation_service.DeleteGlossaryMetadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-translate",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("TranslationServiceClient",)
| []
| []
| [
"GOOGLE_API_USE_MTLS_ENDPOINT",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
]
| [] | ["GOOGLE_API_USE_MTLS_ENDPOINT", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 2 | 0 | |
config.py | # MIT License
# Copyright (c) 2021 SUBIN
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
import os
import re
from youtube_dl import YoutubeDL
ydl_opts = {"geo-bypass": True, "nocheckcertificate": True}
ydl = YoutubeDL(ydl_opts)
links = []
finalurl = ""
C_PLAY = False
Y_PLAY = False
STREAM = os.environ.get("STREAM_URL", "https://t.me/DumpPlaylist/30")
regex = r"^(https?\:\/\/)?(www\.youtube\.com|youtu\.?be)\/.+"
match = re.match(regex, STREAM)
regex_ = r"http.*"
match_ = re.match(regex_, STREAM)
if match:
meta = ydl.extract_info(STREAM, download=False)
formats = meta.get("formats", [meta])
for f in formats:
links.append(f["url"])
finalurl = links[-1]
elif STREAM.startswith("https://t.me/DumpPlaylist"):
try:
msg_id = STREAM.split("/", 4)[4]
finalurl = int(msg_id)
Y_PLAY = True
    except (IndexError, ValueError):
        finalurl = "https://eu10.fastcast4u.com/clubfmuae"
        print("Unable to fetch YouTube playlist, starting CLUB FM")
elif match_:
finalurl = STREAM
else:
C_PLAY = True
finalurl = STREAM
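# Illustrative STREAM_URL values and how the logic above classifies them:
#   https://www.youtube.com/watch?v=abc123  -> resolved through youtube_dl
#   https://t.me/DumpPlaylist/30            -> Telegram playlist message (Y_PLAY)
#   https://example.com/radio.mp3           -> direct HTTP(S) stream
#   anything else                           -> custom/local source (C_PLAY)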
class Config:
ADMIN = os.environ.get("ADMINS", None)
    ADMINS = [
        int(admin) if re.search(r"^\d+$", admin) else admin
        for admin in (ADMIN or "").split()
    ]
API_ID = int(os.environ.get("API_ID", 0))
CHAT = int(os.environ.get("CHAT", 0))
LOG_GROUP = os.environ.get("LOG_GROUP", 0)
if LOG_GROUP:
LOG_GROUP = int(LOG_GROUP)
else:
LOG_GROUP = None
STREAM_URL = finalurl
CPLAY = C_PLAY
YPLAY = Y_PLAY
    # bool() on any non-empty string is True, so compare the raw value instead.
    SHUFFLE = os.environ.get("SHUFFLE", "True").lower() not in ("false", "no", "0")
    DELETE_HISTORY = os.environ.get("DELETE_HISTORY", "True").lower() not in ("false", "no", "0")
LIMIT = int(os.environ.get("LIMIT", 1500))
ADMIN_ONLY = os.environ.get("ADMIN_ONLY", "N")
    # An unset or empty REPLY_MESSAGE is normalised to None.
    REPLY_MESSAGE = os.environ.get("REPLY_MESSAGE") or None
EDIT_TITLE = os.environ.get("EDIT_TITLE", True)
if EDIT_TITLE == "NO":
EDIT_TITLE = None
DURATION_LIMIT = int(os.environ.get("MAXIMUM_DURATION", 15))
DELAY = int(os.environ.get("DELAY", 10))
API_HASH = os.environ.get("API_HASH", "")
BOT_TOKEN = os.environ.get("BOT_TOKEN", "")
SESSION = os.environ.get("SESSION_STRING", "")
playlist = []
msg = {}
CONV = {}
| []
| []
| [
"ADMINS",
"LOG_GROUP",
"CHAT",
"SHUFFLE",
"API_HASH",
"DELAY",
"BOT_TOKEN",
"DELETE_HISTORY",
"EDIT_TITLE",
"SESSION_STRING",
"STREAM_URL",
"API_ID",
"MAXIMUM_DURATION",
"REPLY_MESSAGE",
"LIMIT",
"ADMIN_ONLY"
]
| [] | ["ADMINS", "LOG_GROUP", "CHAT", "SHUFFLE", "API_HASH", "DELAY", "BOT_TOKEN", "DELETE_HISTORY", "EDIT_TITLE", "SESSION_STRING", "STREAM_URL", "API_ID", "MAXIMUM_DURATION", "REPLY_MESSAGE", "LIMIT", "ADMIN_ONLY"] | python | 16 | 0 | |
server/src/test/java/umm3601/product/ProductControllerPutSpec.java | package umm3601.product;
import static com.mongodb.client.model.Filters.eq;
import static io.javalin.plugin.json.JsonMapperKt.JSON_MAPPER_KEY;
import static java.util.Map.entry;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.mockrunner.mock.web.MockHttpServletRequest;
import com.mockrunner.mock.web.MockHttpServletResponse;
import com.mongodb.MongoClientSettings;
import com.mongodb.ServerAddress;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoDatabase;
import org.bson.Document;
import org.bson.types.ObjectId;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import io.javalin.core.JavalinConfig;
import io.javalin.http.Context;
import io.javalin.http.HandlerType;
import io.javalin.http.util.ContextUtil;
import io.javalin.plugin.json.JavalinJackson;
/**
 * Tests the logic of the ProductController.
 */
// The tests here include a ton of "magic numbers" (numeric constants).
// It wasn't clear to me that giving all of them names would actually
// help things. The fact that it wasn't obvious what to call some
// of them says a lot. Maybe what this ultimately means is that
// these tests can/should be restructured so the constants (there are
// also a lot of "magic strings" that Checkstyle doesn't actually
// flag as a problem) make more sense.
@SuppressWarnings({ "MagicNumber", "NoWhitespaceAfter", "LineLength" })
public class ProductControllerPutSpec {
// Mock requests and responses that will be reset in `setupEach()`
// and then (re)used in each of the tests.
private MockHttpServletRequest mockReq = new MockHttpServletRequest();
private MockHttpServletResponse mockRes = new MockHttpServletResponse();
// An instance of the controller we're testing that is prepared in
// `setupEach()`, and then exercised in the various tests below.
private ProductController productController;
// A Mongo object ID that is initialized in `setupEach()` and used
// in a few of the tests. It isn't used all that often, though,
// which suggests that maybe we should extract the tests that
// care about it into their own spec file?
private ObjectId milksId;
// The client and database that will be used
// for all the tests in this spec file.
private static MongoClient mongoClient;
private static MongoDatabase db;
// Used to translate between JSON and POJOs.
private static JavalinJackson javalinJackson = new JavalinJackson();
/**
* Sets up (the connection to the) DB once; that connection and DB will
* then be (re)used for all the tests, and closed in the `teardown()`
* method. It's somewhat expensive to establish a connection to the
* database, and there are usually limits to how many connections
* a database will support at once. Limiting ourselves to a single
* connection that will be shared across all the tests in this spec
* file helps both speed things up and reduce the load on the DB
* engine.
*/
@BeforeAll
public static void setupAll() {
String mongoAddr = System.getenv().getOrDefault("MONGO_ADDR", "localhost");
mongoClient = MongoClients.create(
MongoClientSettings.builder()
.applyToClusterSettings(builder -> builder
.hosts(Arrays.asList(new ServerAddress(mongoAddr))))
.build());
db = mongoClient.getDatabase("test");
}
@AfterAll
public static void teardown() {
db.drop();
mongoClient.close();
}
@BeforeEach
public void setupEach() throws IOException {
// Reset our mock request and response objects
mockReq.resetAll();
mockRes.resetAll();
// Setup database
MongoCollection<Document> productDocuments = db.getCollection("products");
productDocuments.drop();
List<Document> testProducts = new ArrayList<>();
testProducts.add(
new Document()
.append("productName", "Banana")
.append("description", "A yellow fruit")
.append("brand", "Dole")
.append("category", "produce")
.append("store", "Willies")
.append("location", "They're In A Wall")
.append("lifespan", 14)
.append("image", "https://gravatar.com/avatar/8c9616d6cc5de638ea6920fb5d65fc6c?d=identicon")
.append("notes", "I eat these with toothpaste, yum-yum.")
.append("tags", new ArrayList<String>(Arrays
.asList(new String[] { "yellow fruit", "potassium" })))
.append("lifespan", 4)
.append("threshold", 40));
testProducts.add(
new Document()
.append("productName", "Canned Pinto Beans")
.append("description", "A can of pinto beans")
.append("brand", "Our Family")
.append("category", "canned goods")
.append("store", "Willies")
.append("location", "They're In the Walls")
.append("lifespan", 2000)
.append("image", "https://gravatar.com/avatar/8c9616d6cc5de638ea6920fb5d65fc6c?d=identicon")
.append("notes", "I eat these with toothpaste, yum-yum.")
.append("tags", new ArrayList<String>(Arrays.asList(new String[] {
"canned food", "non-perishable", "beans" })))
.append("lifespan", 4)
.append("threshold", 4));
testProducts.add(
new Document()
.append("productName", "Bread")
.append("description", "You know what this is.")
.append("brand", "Richard's Castle")
.append("category", "bakery")
.append("store", "Willies")
.append("location", "They're In the Walls")
.append("lifespan", 14)
.append("image", "https://gravatar.com/avatar/8c9616d6cc5de638ea6920fb5d65fc6c?d=identicon")
.append("notes", "I eat these with toothpaste, yum-yum.")
.append("tags", new ArrayList<String>(Arrays.asList(
new String[] { "Yeast", "contains gluten", "toast" })))
.append("lifespan", 2)
.append("threshold", 3));
testProducts.add(
new Document()
.append("productName", "Rock")
.append("description", "")
.append("brand", "Hurt Ball")
.append("category", "miscellaneous")
.append("store", "Co-op")
.append("location", "")
.append("lifespan", "")
.append("image", "")
.append("notes", "")
.append("tags", new ArrayList<String>() {
})
.append("lifespan", 6)
.append("threshold", 0));
milksId = new ObjectId();
Document milk = new Document()
.append("_id", milksId)
.append("productName", "Milk")
.append("description",
"A dairy liquid obtained from the teat of an unsuspecting animal")
.append("brand", "Gerbil Goods")
.append("category", "dairy")
.append("store", "Co-op")
.append("location", "They're In the Walls")
.append("lifespan", 14)
.append("image", "https://gravatar.com/avatar/8c9616d6cc5de638ea6920fb5d65fc6c?d=identicon")
.append("notes", "check on gerbils every 3 days")
.append("tags", new ArrayList<String>(
Arrays.asList(new String[] { "dairy", "perishable", "cold storage" })))
.append("lifespan", 4)
.append("threshold", 2);
productDocuments.insertMany(testProducts);
productDocuments.insertOne(milk);
productController = new ProductController(db);
}
/**
* Construct an instance of `Context` using `ContextUtil`, providing a mock
* context in Javalin. We need to provide a couple of attributes, which is
* the fifth argument, which forces us to also provide the (default) value
* for the fourth argument. There are two attributes we need to provide:
*
* - One is a `JsonMapper` that is used to translate between POJOs and JSON
* objects. This is needed by almost every test.
* - The other is `maxRequestSize`, which is needed for all the ADD requests,
* since `ContextUtil` checks to make sure that the request isn't "too big".
* Those tests fails if you don't provide a value for `maxRequestSize` for
* it to use in those comparisons.
*/
private Context mockContext(String path, Map<String, String> pathParams) {
return ContextUtil.init(
mockReq, mockRes,
path,
pathParams,
HandlerType.INVALID,
Map.ofEntries(
entry(JSON_MAPPER_KEY, javalinJackson),
entry(ContextUtil.maxRequestSizeKey,
new JavalinConfig().maxRequestSize)));
}
@Test
public void testEditingItem() {
String testId = milksId.toHexString();
String testNewProduct = "{"
+ "\"_id\": \"" + testId + "\","
+ "\"productName\": \"Other Milk\","
+ "\"description\":\"A dairy liquid obtained from the teat of an unsuspecting animal\","
+ "\"brand\": \"test brand\","
+ "\"category\": \"test category\","
+ "\"store\": \"test store\","
+ "\"location\": \"test location\","
+ "\"notes\": \"tastes like test\","
+ "\"tags\": [\"test tag\"],"
+ "\"lifespan\": 10,"
+ "\"threshold\": 84,"
+ "\"image\": \"https://gravatar.com/avatar/8c9616d6cc5de638ea6920fb5d65fc6c?d=identicon\""
+ "}";
mockReq.setBodyContent(testNewProduct);
mockReq.setMethod("PUT");
Context ctx = mockContext("api/products/{id}", Map.of("id", milksId.toHexString()));
productController.editProduct(ctx);
String result = ctx.resultString();
String id = javalinJackson.fromJsonString(result, ObjectNode.class).get("_id").asText();
assertEquals(HttpURLConnection.HTTP_CREATED, mockRes.getStatus());
// Make sure that the id of the newly edited product doesn't change
assertEquals(milksId.toHexString(), id);
assertEquals(1, db.getCollection("products").countDocuments(eq("_id", milksId)));
// Verify that the product was added to the database with the correct ID
Document addedProduct = db.getCollection("products").find(eq("_id", new ObjectId(id))).first();
assertNotNull(addedProduct);
assertEquals("Other Milk", addedProduct.getString("productName"));
assertEquals("A dairy liquid obtained from the teat of an unsuspecting animal",
addedProduct.getString("description"));
assertEquals("test brand", addedProduct.getString("brand"));
assertEquals("test category", addedProduct.getString("category"));
assertEquals("test store", addedProduct.getString("store"));
assertEquals("test location", addedProduct.getString("location"));
assertEquals("tastes like test", addedProduct.getString("notes"));
assertEquals(10, addedProduct.getInteger("lifespan"));
assertEquals(84, addedProduct.getInteger("threshold"));
assertTrue(addedProduct.containsKey("image"));
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
conf/config.go | // This file "config.go" is created by Jermine Hu at 6/22/16.
// Copyright © 2016 - Jermine Hu. All rights reserved
package conf
import "os"
const (
ZIPKIN_ADDRESS = "ENV_ZIPKIN_ADDR"
KAFKA_ADDR = "ENV_KAFKA_ADDR"
REDIS_ADDRESSES = "ENV_REDIS_ADDR"
REDIS_PORT = "ENV_REDIS_PORT"
MGO_ADDRESSES = "ENV_MGO_ADDR"
MGO_PORT = "ENV_MGO_PORT"
MGO_DATABASE = "ENV_MGO_SMS_DATABASE"
MGO_USERNAME = "ENV_MGO_USERNAME"
MGO_PASSWORD = "ENV_MGO_PASSWORD"
ENV_RESOURCE_ADDR = "ENV_RESOURCE_ADDR"
ENV_RESOURCE_PATH = "ENV_RESOURCE_PATH"
)
func Conf_GetValue(key string) string {
return conf.Get(key).String("")
}
func Conf_GetValues(key string) []string {
var vals []string
return conf.Get(key).StringSlice(vals)
}
func GetWXAppID() string {
return os.Getenv("WX_AppID")
}
func GetWXAppSecret() string {
return os.Getenv("WX_AppSecret")
}
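// Note: `conf` is assumed to be a package-level configuration store defined
// in another file of this package; it is not declared in this file.
//
// Example usage from a calling package (hypothetical):
//
//	addr := conf.Conf_GetValue(conf.ZIPKIN_ADDRESS)    // single value
//	redis := conf.Conf_GetValues(conf.REDIS_ADDRESSES) // list of values
//	appID := conf.GetWXAppID()                         // read from the environment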
| [
"\"WX_AppID\"",
"\"WX_AppSecret\""
]
| []
| [
"WX_AppID",
"WX_AppSecret"
]
| [] | ["WX_AppID", "WX_AppSecret"] | go | 2 | 0 | |
backend/young_art_29039/wsgi.py | """
WSGI config for young_art_29039 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'young_art_29039.settings')
application = get_wsgi_application()
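# Example (illustrative): any WSGI server can serve this module, e.g.
#   gunicorn young_art_29039.wsgi:application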
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/test_clustering.py | import os
import sys
import tempfile
import logging
from pathlib import Path
import numpy as np
from dtaidistance import dtw, clustering
logger = logging.getLogger("be.kuleuven.dtai.distance")
directory = None
def test_clustering():
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (1, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, 2, merge_hook=test_hook,
show_progress=False)
cluster_idx = model.fit(s)
assert cluster_idx[0] == {0, 1, 3, 4}
assert cluster_idx[2] == {2, 5}
def test_clustering_tree():
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
def test_hook(from_idx, to_idx, distance):
assert (from_idx, to_idx) in [(3, 0), (4, 1), (5, 2), (6, 2), (1, 0), (2, 0)]
model = clustering.Hierarchical(dtw.distance_matrix_fast, {}, merge_hook=test_hook,
show_progress=False)
modelw = clustering.HierarchicalTree(model)
cluster_idx = modelw.fit(s)
assert cluster_idx[0] == {0, 1, 2, 3, 4, 5, 6}
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
modelw.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(modelw.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
def test_linkage_tree():
s = np.array([
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[0., 0, 1, 2, 1, 0, 1, 0, 0],
[0., 1, 2, 0, 0, 0, 0, 0, 0],
[1., 2, 0, 0, 0, 0, 0, 1, 1],
[1., 2, 0, 0, 0, 0, 0, 1, 1]])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {})
cluster_idx = model.fit(s)
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
graphviz_fn = os.path.join(directory, "hierarchy.dot")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
graphviz_fn = file.name + "_hierarchy.dot"
model.plot(hierarchy_fn)
print("Figure saved to", hierarchy_fn)
with open(graphviz_fn, "w") as ofile:
print(model.to_dot(), file=ofile)
print("Dot saved to", graphviz_fn)
def test_controlchart():
import matplotlib.pyplot as plt
series = np.zeros((600, 60))
rsrc_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'rsrc', 'synthetic_control.data')
with open(rsrc_fn, 'r') as ifile:
for idx, line in enumerate(ifile.readlines()):
series[idx, :] = line.split()
s = []
for idx in range(0, 600, 20):
s.append(series[idx, :])
model = clustering.LinkageTree(dtw.distance_matrix_fast, {})
cluster_idx = model.fit(s)
if directory:
hierarchy_fn = os.path.join(directory, "hierarchy.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_hierarchy.png"
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 10))
show_ts_label = lambda idx: "ts-" + str(idx)
# show_ts_label = list(range(len(s)))
model.plot(hierarchy_fn, axes=ax, show_ts_label=show_ts_label,
show_tr_label=True, ts_label_margin=-10,
ts_left_margin=10, ts_sample_length=1)
print("Figure saved to", hierarchy_fn)
def test_plotbug1():
s1 = np.array([0., 0, 1, 2, 1, 0, 1, 0, 0, 2, 1, 0, 0])
s2 = np.array([0., 1, 2, 3, 1, 0, 0, 0, 2, 1, 0, 0])
series = s1, s2
m = clustering.LinkageTree(dtw.distance_matrix, {})
m.fit(series)
if directory:
hierarchy_fn = os.path.join(directory, "clustering.png")
else:
file = tempfile.NamedTemporaryFile()
hierarchy_fn = file.name + "_clustering.png"
m.plot(hierarchy_fn)
print("Figure save to", hierarchy_fn)
if __name__ == "__main__":
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
directory = Path(os.environ.get('TESTDIR', Path(__file__).parent))
print(f"Saving files to {directory}")
# test_clustering_tree(directory="/Users/wannes/Desktop/")
# test_linkage_tree(directory="/Users/wannes/Desktop/")
# test_controlchart()
test_plotbug1()
| []
| []
| [
"TESTDIR"
]
| [] | ["TESTDIR"] | python | 1 | 0 | |
tests/core/test_impersonation_tests.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import errno
import functools
import logging
import os
import subprocess
import sys
import unittest
from tests.compat import mock
from copy import deepcopy
import pytest
from airflow import jobs, models
from airflow.utils.db import add_default_pool_if_not_exists
from airflow.utils.state import State
from airflow.utils.timezone import datetime
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
TEST_DAG_CORRUPTED_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags_corrupted')
TEST_UTILS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'test_utils')
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_USER = 'airflow_test_user'
logger = logging.getLogger(__name__)
def mock_custom_module_path(path):
"""
This decorator adds a path to sys.path to simulate running the current script with the ``PYTHONPATH``
environment variable set and sets the environment variable ``PYTHONPATH`` to change the
module load directory for child scripts.
"""
def wrapper(func):
@functools.wraps(func)
def decorator(*args, **kwargs):
copy_sys_path = deepcopy(sys.path)
sys.path.append(path)
try:
with mock.patch.dict('os.environ', {'PYTHONPATH': path}):
return func(*args, **kwargs)
finally:
sys.path = copy_sys_path
return decorator
return wrapper
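# Example (mirrors the usage in the test classes below): the decorator makes
# the extra path visible to both this process (sys.path) and any child
# processes (PYTHONPATH).
#
#   @mock_custom_module_path(TEST_UTILS_FOLDER)
#   def test_something(self):
#       ...  # imports may now resolve against TEST_UTILS_FOLDER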
def grant_permissions():
airflow_home = os.environ['AIRFLOW_HOME']
subprocess.check_call(
'find "%s" -exec sudo chmod og+w {} +; sudo chmod og+rx /root' % airflow_home, shell=True)
def revoke_permissions():
airflow_home = os.environ['AIRFLOW_HOME']
subprocess.check_call(
'find "%s" -exec sudo chmod og-w {} +; sudo chmod og-rx /root' % airflow_home, shell=True)
def check_original_docker_image():
if not os.path.isfile('/.dockerenv') or os.environ.get('PYTHON_BASE_IMAGE') is None:
raise unittest.SkipTest("""Adding/removing a user as part of a test is very bad for host os
(especially if the user already existed to begin with on the OS), therefore we check if we run inside a
the official docker container and only allow to run the test there. This is done by checking /.dockerenv
file (always present inside container) and checking for PYTHON_BASE_IMAGE variable.
""")
def create_user():
    try:
        subprocess.check_output(['sudo', 'useradd', '-m', TEST_USER, '-g',
                                 str(os.getegid())])
    except subprocess.CalledProcessError:
        # check_output raises CalledProcessError (not OSError) on a
        # non-zero exit status.
        raise unittest.SkipTest(
            "The 'useradd' command exited non-zero; Skipping tests. Does the "
            "current user have permission to run 'useradd' without a password "
            "prompt (check sudoers file)?"
        )
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise unittest.SkipTest(
                "The 'useradd' command did not exist so unable to test "
                "impersonation; Skipping Test. These tests can only be run on a "
                "linux host that supports 'useradd'."
            )
        raise unittest.SkipTest(
            "Running 'useradd' failed with an unexpected OS error: %s" % e
        )
@pytest.mark.heisentests
class TestImpersonation(unittest.TestCase):
def setUp(self):
check_original_docker_image()
grant_permissions()
add_default_pool_if_not_exists()
self.dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
logger.info('Loaded DAGS:')
logger.info(self.dagbag.dagbag_report())
create_user()
def tearDown(self):
subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])
revoke_permissions()
def run_backfill(self, dag_id, task_id):
dag = self.dagbag.get_dag(dag_id)
dag.clear()
jobs.BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE).run()
ti = models.TaskInstance(
task=dag.get_task(task_id),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_impersonation(self):
"""
Tests that impersonating a unix user works
"""
self.run_backfill(
'test_impersonation',
'test_impersonated_user'
)
def test_no_impersonation(self):
"""
If default_impersonation=None, tests that the job is run
as the current user (which will be a sudoer)
"""
self.run_backfill(
'test_no_impersonation',
'test_superuser',
)
def test_default_impersonation(self):
"""
If default_impersonation=TEST_USER, tests that the job defaults
to running as TEST_USER for a test without run_as_user set
"""
os.environ['AIRFLOW__CORE__DEFAULT_IMPERSONATION'] = TEST_USER
try:
self.run_backfill(
'test_default_impersonation',
'test_deelevated_user'
)
finally:
del os.environ['AIRFLOW__CORE__DEFAULT_IMPERSONATION']
def test_impersonation_subdag(self):
"""
Tests that impersonation using a subdag correctly passes the right configuration
:return:
"""
self.run_backfill(
'impersonation_subdag',
'test_subdag_operation'
)
@pytest.mark.quarantined
class TestImpersonationWithCustomPythonPath(unittest.TestCase):
@mock_custom_module_path(TEST_UTILS_FOLDER)
def setUp(self):
check_original_docker_image()
grant_permissions()
add_default_pool_if_not_exists()
self.dagbag = models.DagBag(
dag_folder=TEST_DAG_CORRUPTED_FOLDER,
include_examples=False,
)
logger.info('Loaded DAGS:')
logger.info(self.dagbag.dagbag_report())
create_user()
def tearDown(self):
subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])
revoke_permissions()
def run_backfill(self, dag_id, task_id):
dag = self.dagbag.get_dag(dag_id)
dag.clear()
jobs.BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE).run()
ti = models.TaskInstance(
task=dag.get_task(task_id),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
@mock_custom_module_path(TEST_UTILS_FOLDER)
def test_impersonation_custom(self):
"""
Tests that impersonation using a unix user works with custom packages in
PYTHONPATH
"""
# PYTHONPATH is already set in script triggering tests
assert 'PYTHONPATH' in os.environ
self.run_backfill(
'impersonation_with_custom_pkg',
'exec_python_fn'
)
| []
| []
| [
"PYTHON_BASE_IMAGE",
"AIRFLOW_HOME",
"AIRFLOW__CORE__DEFAULT_IMPERSONATION"
]
| [] | ["PYTHON_BASE_IMAGE", "AIRFLOW_HOME", "AIRFLOW__CORE__DEFAULT_IMPERSONATION"] | python | 3 | 0 | |
src/fever_cs.py | import json
from logging.config import dictConfig
from typing import List, Dict
from allennlp.models import load_archive
from allennlp.predictors import Predictor
from fever.api.web_server import fever_web_api
from fever.evidence.retrieval_methods.retrieval_method import RetrievalMethod
import os
import logging
from fever.evidence.retrieval_methods.top_docs import TopNDocsTopNSents
from fever.reader import FEVERDocumentDatabase
def predict_single(predictor, retrieval_method, instance):
evidence = retrieval_method.get_sentences_for_claim(instance["claim"])
test_instance = predictor._json_to_instance({"claim": instance["claim"], "predicted_sentences": evidence})
predicted = predictor.predict_instance(test_instance)
max_id = predicted["label_logits"].index(max(predicted["label_logits"]))
return {
"predicted_label": predictor._model.vocab.get_token_from_index(max_id, namespace="labels"),
"predicted_evidence": evidence
}
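# Example (hypothetical input/output shapes):
#   predict_single(predictor, retrieval_method,
#                  {"claim": "The Eiffel Tower is in Paris."})
#   -> {"predicted_label": "SUPPORTS",
#       "predicted_evidence": [["Eiffel_Tower", 0], ...]}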
def make_api():
logger = logging.getLogger()
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stderr',
'formatter': 'default'
}},
'root': {
'level': 'INFO',
'handlers': ['wsgi']
},
        # Named loggers belong under the 'loggers' key; a top-level
        # 'allennlp' entry is silently ignored by dictConfig.
        'loggers': {
            'allennlp': {
                'level': 'INFO',
                'handlers': ['wsgi'],
                'propagate': False
            },
        },
    })
logger.info("My sample FEVER application")
    config_path = os.getenv("CONFIG_PATH", "configs/predict_docker.json")
    with open(config_path) as config_file:
        config = json.load(config_file)
# Create document retrieval model
logger.info("Load FEVER Document database from {0}".format(config["database"]))
db = FEVERDocumentDatabase(config["database"])
logger.info("Load DrQA Document retrieval index from {0}".format(config['index']))
retrieval_method = RetrievalMethod.by_name("top_docs")(db,
config["index"],
config["n_docs"],
config["n_sents"])
# Load the pre-trained predictor and model from the .tar.gz in the config file.
# Override the database location for our model as this now comes from a read-only volume
logger.info("Load Model from {0}".format(config['model']))
archive = load_archive(config["model"],
cuda_device=config["cuda_device"],
overrides='{"dataset_reader":{"database":"' + config["database"] + '" }}')
predictor = Predictor.from_archive(archive, predictor_name="fever")
def baseline_predict(instances):
predictions = []
for instance in instances:
predictions.append(predict_single(predictor, retrieval_method, instance))
return predictions
return fever_web_api(baseline_predict)
| []
| []
| [
"CONFIG_PATH"
]
| [] | ["CONFIG_PATH"] | python | 1 | 0 | |
test/unit_tests/test_catkin_make_isolated.py | from __future__ import print_function
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
try:
from catkin.builder import extract_cmake_and_make_arguments
except ImportError as e:
raise ImportError(
'Please adjust your pythonpath before running this test: %s' % str(e)
)
import imp
imp.load_source('catkin_make_isolated',
os.path.join(os.path.dirname(__file__),
'..', '..', 'bin', 'catkin_make_isolated'))
from catkin_make_isolated import handle_cmake_args
from catkin_make_isolated import main
from catkin_make_isolated import parse_args
class CatkinMakeIsolatedTests(unittest.TestCase):
def test_extract_cmake_and_make_arguments(self):
args = []
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == []
assert args == []
args = ['-DCMAKE_INSTALL_PREFIX=install']
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCMAKE_INSTALL_PREFIX=install']
assert args == []
args = ['-DCMAKE_INSTALL_PREFIX=install', '--install']
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCMAKE_INSTALL_PREFIX=install']
assert args == ['--install']
args = [
'-DCMAKE_INSTALL_PREFIX=install', '--install', '--install-space',
'install_isolated'
]
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCMAKE_INSTALL_PREFIX=install']
assert args == ['--install', '--install-space', 'install_isolated']
args = ['-DCATKIN_DEVEL_PREFIX=devel']
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCATKIN_DEVEL_PREFIX=devel']
assert args == []
args = ['-DCATKIN_DEVEL_PREFIX=devel']
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCATKIN_DEVEL_PREFIX=devel']
assert args == []
args = [
'-DCATKIN_DEVEL_PREFIX=devel', '--devel-space',
'devel_isolated'
]
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCATKIN_DEVEL_PREFIX=devel']
assert args == ['--devel-space', 'devel_isolated']
def test_handle_cmake_args(self):
args = ['-DCMAKE_INSTALL_PREFIX=install', '--install']
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCMAKE_INSTALL_PREFIX=install'], cmake_args
opts = parse_args(args)
cmake_args, opts = handle_cmake_args(cmake_args, opts)
assert cmake_args == [], cmake_args
assert opts.install == True
assert opts.install_space == 'install'
args = [
'-DCMAKE_INSTALL_PREFIX=install', '--install', '--install-space',
'install_isolated'
]
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCMAKE_INSTALL_PREFIX=install'], cmake_args
opts = parse_args(args)
cmake_args, opts = handle_cmake_args(cmake_args, opts)
assert cmake_args == [], cmake_args
assert opts.install == True
assert opts.install_space == 'install_isolated'
args = ['-DCATKIN_DEVEL_PREFIX=devel']
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCATKIN_DEVEL_PREFIX=devel'], cmake_args
opts = parse_args(args)
cmake_args, opts = handle_cmake_args(cmake_args, opts)
assert cmake_args == [], cmake_args
assert opts.devel == 'devel'
args = [
'-DCATKIN_DEVEL_PREFIX=devel', '--devel-space',
'devel_isolated'
]
args, cmake_args, make_args = extract_cmake_and_make_arguments(args)
assert cmake_args == ['-DCATKIN_DEVEL_PREFIX=devel'], cmake_args
opts = parse_args(args)
cmake_args, opts = handle_cmake_args(cmake_args, opts)
assert cmake_args == [], cmake_args
assert opts.devel == 'devel_isolated'
def test_empty_workspace(self):
argv = sys.argv
        environ = os.environ.copy()  # snapshot so the environment can be restored
error_msg = None
try:
ws_dir = tempfile.mkdtemp()
src_dir = os.path.join(ws_dir, 'src')
os.mkdir(src_dir)
sys.argv = ['catkin_make_isolated', '-C', ws_dir]
            os.environ['CMAKE_PREFIX_PATH'] = os.path.join(ws_dir, 'install')
main()
except Exception as e:
error_msg = str(e)
finally:
shutil.rmtree(ws_dir)
sys.argv = argv
            os.environ.clear()
            os.environ.update(environ)
assert error_msg is None, error_msg
def test_symlinked_src(self):
argv = sys.argv
        environ = os.environ.copy()  # mutate a copy; os.environ itself stays untouched
cwd = os.getcwd()
error_msg = None
try:
base_dir = tempfile.mkdtemp()
ws_dir = os.path.join(base_dir, 'ws')
os.mkdir(ws_dir)
other_dir = os.path.join(base_dir, 'other')
os.mkdir(other_dir)
src_dir = os.path.join(ws_dir, 'src')
os.symlink(other_dir, src_dir)
cmi = os.path.join(os.path.dirname(__file__), '..', '..', 'bin', 'catkin_make_isolated')
environ['CMAKE_PREFIX_PATH'] = os.path.join(ws_dir, 'install')
environ['PWD'] = src_dir
subprocess.check_output(' '.join([cmi, '-C', '..']), cwd=src_dir, env=environ, shell=True)
except Exception as e:
error_msg = str(e)
finally:
shutil.rmtree(ws_dir)
sys.argv = argv
os.chdir(cwd)
assert error_msg is None, error_msg
| []
| []
| []
| [] | [] | python | 0 | 0 | |
myproject/settings.py | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY')
"""SECRET_KEY = 'put your secret key here' """
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'portfolio',
'analytical',
'widget_tweaks',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(
default='sqlite:////{0}'.format(os.path.join(BASE_DIR, 'db.sqlite3'))
)
}
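# dj_database_url reads the DATABASE_URL environment variable when present,
# e.g. (illustrative value only):
#   DATABASE_URL=postgres://user:password@localhost:5432/mydb
# and falls back to the local SQLite file configured above otherwise.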
ACCOUNT_ACTIVATION_DAYS = 2
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
'social_core.backends.github.GithubOAuth2',
'social_core.backends.twitter.TwitterOAuth',
'social_core.backends.facebook.FacebookOAuth2',
'social_core.backends.google.GoogleOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'portfolio/static'),
)
LOGIN_REDIRECT_URL = "/"
"""smtp details here(sensitive_email_data)"""
EMAIL_BACKEND = os.environ.get('EMAIL_BACKEND')
EMAIL_USE_TLS = os.environ.get('EMAIL_USE_TLS')
EMAIL_HOST = os.environ.get('EMAIL_HOST')
EMAIL_PORT = os.environ.get('EMAIL_PORT')
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
SERVER_EMAIL = os.environ.get('SERVER_EMAIL')
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL')
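# Note that os.environ.get() returns strings (or None), so boolean/integer
# settings may need explicit coercion, e.g. (illustrative only):
#   EMAIL_USE_TLS = os.environ.get('EMAIL_USE_TLS', 'True') == 'True'
#   EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '587'))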
"""oauth and analytics here"""
GOOGLE_ANALYTICS_PROPERTY_ID = os.environ.get('GOOGLE_ANALYTICS_PROPERTY_ID')
GOOGLE_ANALYTICS_SITE_SPEED = os.environ.get('GOOGLE_ANALYTICS_SITE_SPEED')
SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('SOCIAL_AUTH_FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('SOCIAL_AUTH_FACEBOOK_SECRET')
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = os.environ.get('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = os.environ.get('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')
| []
| []
| [
"EMAIL_HOST_PASSWORD",
"EMAIL_HOST_USER",
"EMAIL_USE_TLS",
"SOCIAL_AUTH_FACEBOOK_SECRET",
"DEFULT_FROM_EMAIL",
"GOOGLE_ANALYTICS_PROPERTY_ID",
"SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET",
"GOOGLE_ANALYTICS_SITE_SPEED",
"SECRET_KEY",
"SOCIAL_AUTH_FACEBOOK_KEY",
"EMAIL_PORT",
"EMAIL_HOST",
"SERVER_EMAIL",
"SOCIAL_AUTH_GOOGLE_OAUTH2_KEY",
"EMAIL_BACKEND"
]
| [] | ["EMAIL_HOST_PASSWORD", "EMAIL_HOST_USER", "EMAIL_USE_TLS", "SOCIAL_AUTH_FACEBOOK_SECRET", "DEFULT_FROM_EMAIL", "GOOGLE_ANALYTICS_PROPERTY_ID", "SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET", "GOOGLE_ANALYTICS_SITE_SPEED", "SECRET_KEY", "SOCIAL_AUTH_FACEBOOK_KEY", "EMAIL_PORT", "EMAIL_HOST", "SERVER_EMAIL", "SOCIAL_AUTH_GOOGLE_OAUTH2_KEY", "EMAIL_BACKEND"] | python | 15 | 0 | |
cmds/raid/drp-raid/perccli.go | package main
import (
"bytes"
"errors"
"fmt"
"log"
"os/exec"
"regexp"
"strconv"
"strings"
)
type PercCli struct {
name string
executable string
order int
log *log.Logger
}
var (
percEidLine = regexp.MustCompile(`^([0-9]+):([0-9]+)[\t ]+([0-9]+)[\t ]+([^ ]+)[\t ]+([^ ]+)[\t ]+([0-9.]+ [TG]B)[\t ]+([^ \t]+)[ \t]+([^ \t]+)[ \t]+([^ \t]+)[ \t]+([^ \t]+)[ \t]+([^ \t]+)[ \t]+([^ \t]+)`)
percArrayRE = regexp.MustCompile(`^Enclosure Information :$`)
percPdRE = regexp.MustCompile(`^Drive ([^ ]+) :$`)
percLdRE = regexp.MustCompile(`^Logical Drive (.*) :$`)
percCliCSlotRE = regexp.MustCompile(`^Basics :$`)
percPciRE = regexp.MustCompile(`([[:xdigit:]]{2}):([[:xdigit:]]{2}):([[:xdigit:]]{2}):([[:xdigit:]]{2})`)
)
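// Illustrative only: percEidLine is intended to match drive summary rows of
// the form
//	"252:0   10   Onln   0   1.817 TB   SATA   HDD   N   N   512B   <model>   U"
// capturing enclosure (252), slot (0), state (Onln), interface (SATA) and
// media type (HDD); the exact column layout depends on the perccli version.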
func (s *PercCli) Logger(l *log.Logger) {
s.log = l
}
func (s *PercCli) Order() int { return s.order }
func (s *PercCli) Name() string { return s.name }
func (s *PercCli) Executable() string { return s.executable }
func (s *PercCli) checkLinesForError(lines []string) error {
if len(lines) > 1 && strings.HasPrefix(lines[1], "Error:") {
return errors.New(lines[1])
}
return nil
}
func (s *PercCli) run(args ...string) ([]string, error) {
if fake {
return []string{}, nil
}
cmd := exec.Command(s.executable, args...)
outBuf := &bytes.Buffer{}
cmd.Stdout, cmd.Stderr = outBuf, outBuf
if err := cmd.Run(); err != nil {
return nil, err
}
out := strings.Split(outBuf.String(), "\n")
return out, s.checkLinesForError(out)
}
func (s *PercCli) Useable() bool {
_, err := s.run("/call", "show")
return err == nil
}
func (s *PercCli) fillDisk(d *PhysicalDisk, lines []string) {
d.Info = map[string]string{}
for _, line := range lines[1:] {
k, v := kv(line, "= ")
if k == "" {
// Check for EID Line
if percEidLine.MatchString(line) {
pieces := percEidLine.FindStringSubmatch(line)
d.Enclosure = pieces[1]
d.Slot, _ = strconv.ParseUint(pieces[2], 10, 64)
d.Status = pieces[4]
d.Protocol = pieces[7]
d.MediaType = pieces[8]
}
continue
}
d.Info[k] = v
switch k {
case "Raw size":
d.Size, _ = sizeParser(v)
case "Number of Blocks":
d.SectorCount, _ = strconv.ParseUint(v, 10, 64)
case "Sector Size":
d.LogicalSectorSize, _ = sizeParser(v)
d.PhysicalSectorSize, _ = sizeParser(v)
case "Drive exposed to OS":
d.JBOD = v == "True"
}
}
}
// XXX: This is not currently supported.
func (s *PercCli) fillVolume(vol *Volume, lines []string) {
vol.Info = map[string]string{}
for _, line := range lines[1:] {
k, v := kv(line, "= ")
if k == "" {
continue
}
vol.Info[k] = v
switch k {
case "Logical Drive Label":
vol.Name = v
case "Logical Drive":
vol.ID = v
case "Status":
vol.Status = v
case "Fault Tolerance":
switch v {
case "0":
vol.RaidLevel = "raid0"
case "1", "1adm":
vol.RaidLevel = "raid1"
case "5":
vol.RaidLevel = "raid5"
case "6":
vol.RaidLevel = "raid6"
case "1+0":
vol.RaidLevel = "raid10"
case "1+0adm":
vol.RaidLevel = "raid10"
case "50":
vol.RaidLevel = "raid50"
case "60":
vol.RaidLevel = "raid60"
default:
vol.RaidLevel = "raid" + v
}
case "Size":
vol.Size, _ = sizeParser(v)
case "Strip Size":
vol.StripeSize, _ = sizeParser(v)
}
}
}
func (s *PercCli) fillArray(c *Controller, array []string) {
ldstuff, phys := partitionAt(array, percPdRE)
arrayInfo, lds := partitionAt(ldstuff, percLdRE)
disks := make([]*PhysicalDisk, len(phys))
for i, phy := range phys {
d := &PhysicalDisk{
controller: c,
ControllerID: c.ID,
ControllerDriver: c.Driver,
driver: s,
}
s.fillDisk(d, phy)
disks[i] = d
if d.JBOD {
c.addJBODVolume(d)
}
}
c.Disks = append(c.Disks, disks...)
if len(lds) > 0 {
for _, ld := range lds {
vol := &Volume{
ControllerID: c.ID,
ControllerDriver: c.Driver,
Disks: disks,
controller: c,
driver: s,
}
s.fillVolume(vol, append(ld, arrayInfo...))
c.Volumes = append(c.Volumes, vol)
}
}
// Make fake jbods
seenDisks := map[string][]string{}
for _, vol := range c.Volumes {
for _, vd := range vol.Disks {
as, ok := seenDisks[vol.ID]
if !ok {
as = []string{}
}
as = append(as, vd.Name())
seenDisks[vol.ID] = as
}
}
for vid, vpids := range seenDisks {
for _, vpid := range vpids {
for i, d := range c.Disks {
if d.Name() == vpid {
c.Disks[i].VolumeID = vid
}
}
}
}
for _, d := range c.Disks {
if d.VolumeID == "" {
c.addJBODVolume(d)
}
}
}
func (s *PercCli) fillController(c *Controller, lines []string) {
c.Disks = []*PhysicalDisk{}
c.Volumes = []*Volume{}
controller, arrays := partitionAt(lines, percArrayRE)
c.JBODCapable = true
c.RaidCapable = false
c.AutoJBOD = true
c.RaidLevels = []string{"jbod"}
c.Info = map[string]string{}
for _, line := range controller[1:] {
k, v := kv(line, "= ")
if k == "" {
continue
}
c.Info[k] = v
switch k {
case "Controller":
c.ID = v
case "PCI Address":
pciParts := percPciRE.FindStringSubmatch(v)
if len(pciParts) != 5 {
s.log.Printf("Error parsing PCI address %s", v)
} else {
c.PCI.Bus, _ = strconv.ParseInt(pciParts[2], 16, 64)
c.PCI.Device, _ = strconv.ParseInt(pciParts[3], 16, 64)
c.PCI.Function, _ = strconv.ParseInt(pciParts[4], 16, 64)
}
}
}
for _, array := range arrays {
s.fillArray(c, array)
}
}
func (s *PercCli) Controllers() []*Controller {
out, err := s.run("/call", "show", "all")
if err != nil {
return nil
}
_, controllers := partitionAt(out, percCliCSlotRE)
res := make([]*Controller, len(controllers))
for i, controller := range controllers {
res[i] = &Controller{
Driver: s.name,
driver: s,
idx: i,
}
s.fillController(res[i], controller)
}
return res
}
func (s *PercCli) canBeCleared(c *Controller) bool {
for _, vol := range c.Volumes {
if vol.Fake {
continue
}
return true
}
return false
}
// XXX: This is somewhat implemented - initial testing done on HBA only controller
func (s *PercCli) Clear(c *Controller, onlyForeign bool) error {
if onlyForeign {
		// Foreign-config handling is not implemented in this driver, so if we
		// are asked to clear just the foreign config, do nothing.
return nil
}
if !s.canBeCleared(c) {
return nil
}
out, err := s.run("controller", "slot="+c.ID, "delete", "forced", "override")
s.log.Printf("GREG: perccli: clear: %v %v\n", out, err)
return err
}
func (s *PercCli) Refresh(c *Controller) {
lines, err := s.run("/c"+c.ID, "show", "all")
if err != nil {
return
}
s.fillController(c, lines)
}
func (s *PercCli) diskList(disks []VolSpecDisk) string {
parts := make([]string, len(disks))
for i := range disks {
parts[i] = fmt.Sprintf("%s:%d", disks[i].Enclosure, disks[i].Slot)
}
return fmt.Sprintf("drives=%s", strings.Join(parts, "|"))
}
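// For example (illustrative), a VolSpec spanning enclosure 252, slots 0 and 1
// would yield "drives=252:0|252:1", matching perccli's drives= syntax.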
// XXX: This is somewhat implemented - initial testing done on HBA only controller
func (s *PercCli) Create(c *Controller, v *VolSpec, forceGood bool) error {
if !v.compiled {
return fmt.Errorf("Cannot create a VolSpec that has not been compiled")
}
cmdLine := []string{
"add",
"/c" + c.ID,
"vd",
fmt.Sprintf("name=%s", v.Name),
fmt.Sprintf("Strip=%d", v.stripeSize()>>10),
}
switch v.RaidLevel {
case "jbod":
if len(v.Disks) == 1 {
// Controller will automatically expose non-configured drives to the OS
// So, do nothing.
s.log.Printf("Controller in mixed, drive already exposed to OS")
return nil
}
// Yes, I know this is wrong for jbod, but ssacli Is Not Helpful.
cmdLine = append(cmdLine, "r0")
case "raid0":
cmdLine = append(cmdLine, "r0")
case "raid1":
if len(v.Disks) == 2 {
cmdLine = append(cmdLine, "r1")
} else {
cmdLine = append(cmdLine, "r1adm")
}
case "raid5":
cmdLine = append(cmdLine, "r5")
case "raid6":
cmdLine = append(cmdLine, "r6")
case "raid10":
cmdLine = append(cmdLine, "r10")
case "raid50":
cmdLine = append(cmdLine, "r50")
case "raid60":
cmdLine = append(cmdLine, "r60")
default:
return fmt.Errorf("Raid level %s not supported", v.RaidLevel)
}
if v.Name != "" {
cmdLine = append(cmdLine, fmt.Sprintf("logicaldrivelabel=%s", v.Name))
}
cmdLine = append(cmdLine, s.diskList(v.Disks), "forced")
s.log.Printf("Running %s %s", s.executable, strings.Join(cmdLine, " "))
res, err := s.run(cmdLine...)
if len(res) > 0 {
s.log.Println(strings.Join(res, "\n"))
}
if err != nil {
s.log.Printf("Error running command: %s", strings.Join(cmdLine, " "))
}
return err
}
func (s *PercCli) Encrypt(c *Controller, key, password string) error {
return fmt.Errorf("Encryption is not currently supported")
}
| []
| []
| []
| [] | [] | go | null | null | null |
tkge/models/model.py | import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from enum import Enum
import os
from collections import defaultdict
from typing import Mapping, Dict
import random
from tkge.common.registry import Registrable
from tkge.common.config import Config
from tkge.common.error import ConfigurationError
from tkge.data.dataset import DatasetProcessor
from tkge.models.layers import LSTMModel
from tkge.models.utils import *
class BaseModel(nn.Module, Registrable):
def __init__(self, config: Config, dataset: DatasetProcessor):
nn.Module.__init__(self)
Registrable.__init__(self, config=config)
self.dataset = dataset
@staticmethod
def create(config: Config, dataset: DatasetProcessor):
"""Factory method for sampler creation"""
model_type = config.get("model.name")
if model_type in BaseModel.list_available():
# kwargs = config.get("model.args") # TODO: 需要改成key的格式
return BaseModel.by_name(model_type)(config, dataset)
else:
raise ConfigurationError(
f"{model_type} specified in configuration file is not supported"
f"implement your model class with `BaseModel.register(name)"
)
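    # Illustrative usage, assuming a config whose model.name names a model
    # registered below (e.g. "de_simple"):
    #   model = BaseModel.create(config, dataset)  # dispatches via the registry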
def load_config(self):
        # TODO(gengyuan): load parameters when provided, otherwise fall back to
        # defaults; ideally read the config file directly and use setattr.
        # Do we need asserts here?
raise NotImplementedError
def prepare_embedding(self):
raise NotImplementedError
def get_embedding(self, **kwargs):
raise NotImplementedError
def forward(self, samples, **kwargs):
raise NotImplementedError
def predict(self, queries: torch.Tensor):
"""
Should be a wrapper of method forward or a computation flow same as that in forward.
Particularly for prediction task with incomplete queries as inputs.
New modules or learnable parameter constructed in this namespace should be avoided since it's not evolved in training procedure.
"""
raise NotImplementedError
def fit(self, samples: torch.Tensor):
# TODO(gengyuan): wrapping all the models
"""
Should be a wrapper of forward or a computation flow same as that in forward.
This method is intended to handle arbitrarily-shaped samples due to negative sampling, either matrix or flatteded.
Especially when training procedure and prediction procedure are different.
Samples should be processed in this method and then passed to forward.
Input samples are the direct output of the negative sampling.
"""
raise NotImplementedError
@BaseModel.register(name='de_simple')
class DeSimplEModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
self.prepare_embedding()
self.time_nl = torch.sin # TODO add to configuration file
def prepare_embedding(self):
num_ent = self.dataset.num_entities()
num_rel = self.dataset.num_relations()
emb_dim = self.config.get("model.embedding.emb_dim")
se_prop = self.config.get("model.embedding.se_prop")
s_emb_dim = int(se_prop * emb_dim)
t_emb_dim = emb_dim - s_emb_dim
# torch.manual_seed(0)
# torch.cuda.manual_seed_all(0)
# np.random.seed(0)
# random.seed(0)
# torch.backends.cudnn.deterministic = True
# os.environ['PYTHONHASHSEED'] = str(0)
self.embedding: Dict[str, nn.Module] = defaultdict(dict)
self.embedding.update({'ent_embs_h': nn.Embedding(num_ent, s_emb_dim)})
self.embedding.update({'ent_embs_t': nn.Embedding(num_ent, s_emb_dim)})
self.embedding.update({'rel_embs_f': nn.Embedding(num_rel, s_emb_dim + t_emb_dim)})
self.embedding.update({'rel_embs_i': nn.Embedding(num_rel, s_emb_dim + t_emb_dim)})
# frequency embeddings for the entities
self.embedding.update({'m_freq_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'m_freq_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_freq_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_freq_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_freq_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_freq_t': nn.Embedding(num_ent, t_emb_dim)})
# phi embeddings for the entities
self.embedding.update({'m_phi_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'m_phi_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_phi_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_phi_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_phi_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_phi_t': nn.Embedding(num_ent, t_emb_dim)})
# frequency embeddings for the entities
self.embedding.update({'m_amps_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'m_amps_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_amps_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'d_amps_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_amps_h': nn.Embedding(num_ent, t_emb_dim)})
self.embedding.update({'y_amps_t': nn.Embedding(num_ent, t_emb_dim)})
self.embedding = nn.ModuleDict(self.embedding)
for k, v in self.embedding.items():
nn.init.xavier_uniform_(v.weight)
# nn.init.xavier_uniform_(self.ent_embs_h.weight)
# nn.init.xavier_uniform_(self.ent_embs_t.weight)
# nn.init.xavier_uniform_(self.rel_embs_f.weight)
# nn.init.xavier_uniform_(self.rel_embs_i.weight)
#
# nn.init.xavier_uniform_(self.m_freq_h.weight)
# nn.init.xavier_uniform_(self.d_freq_h.weight)
# nn.init.xavier_uniform_(self.y_freq_h.weight)
# nn.init.xavier_uniform_(self.m_freq_t.weight)
# nn.init.xavier_uniform_(self.d_freq_t.weight)
# nn.init.xavier_uniform_(self.y_freq_t.weight)
#
# nn.init.xavier_uniform_(self.m_phi_h.weight)
# nn.init.xavier_uniform_(self.d_phi_h.weight)
# nn.init.xavier_uniform_(self.y_phi_h.weight)
# nn.init.xavier_uniform_(self.m_phi_t.weight)
# nn.init.xavier_uniform_(self.d_phi_t.weight)
# nn.init.xavier_uniform_(self.y_phi_t.weight)
#
# nn.init.xavier_uniform_(self.m_amps_h.weight)
# nn.init.xavier_uniform_(self.d_amps_h.weight)
# nn.init.xavier_uniform_(self.y_amps_h.weight)
# nn.init.xavier_uniform_(self.m_amps_t.weight)
# nn.init.xavier_uniform_(self.d_amps_t.weight)
# nn.init.xavier_uniform_(self.y_amps_t.weight)
# nn.init.xavier_uniform_(self.embedding['ent_embs_h'].weight)
# nn.init.xavier_uniform_(self.embedding['ent_embs_t'].weight)
# nn.init.xavier_uniform_(self.embedding['rel_embs_f'].weight)
# nn.init.xavier_uniform_(self.embedding['rel_embs_i'].weight)
#
# nn.init.xavier_uniform_(self.embedding['m_freq_h'].weight)
# nn.init.xavier_uniform_(self.embedding['d_freq_h'].weight)
# nn.init.xavier_uniform_(self.embedding['y_freq_h'].weight)
# nn.init.xavier_uniform_(self.embedding['m_freq_t'].weight)
# nn.init.xavier_uniform_(self.embedding['d_freq_t'].weight)
# nn.init.xavier_uniform_(self.embedding['y_freq_t'].weight)
#
# nn.init.xavier_uniform_(self.embedding['m_phi_h'].weight)
# nn.init.xavier_uniform_(self.embedding['d_phi_h'].weight)
# nn.init.xavier_uniform_(self.embedding['y_phi_h'].weight)
# nn.init.xavier_uniform_(self.embedding['m_phi_t'].weight)
# nn.init.xavier_uniform_(self.embedding['d_phi_t'].weight)
# nn.init.xavier_uniform_(self.embedding['y_phi_t'].weight)
#
# nn.init.xavier_uniform_(self.embedding['m_amps_h'].weight)
# nn.init.xavier_uniform_(self.embedding['d_amps_h'].weight)
# nn.init.xavier_uniform_(self.embedding['y_amps_h'].weight)
# nn.init.xavier_uniform_(self.embedding['m_amps_t'].weight)
# nn.init.xavier_uniform_(self.embedding['d_amps_t'].weight)
# nn.init.xavier_uniform_(self.embedding['y_amps_t'].weight)
# for name, params in self.named_parameters():
# print(name)
# print(params)
# print(params.size())
#
# assert False
def get_time_embedding(self, ent, year, month, day, ent_pos):
# TODO: enum
if ent_pos == "head":
time_emb = self.embedding['y_amps_h'](ent) * self.time_nl(
self.embedding['y_freq_h'](ent) * year + self.embedding['y_phi_h'](ent))
time_emb += self.embedding['m_amps_h'](ent) * self.time_nl(
self.embedding['m_freq_h'](ent) * month + self.embedding['m_phi_h'](ent))
time_emb += self.embedding['d_amps_h'](ent) * self.time_nl(
self.embedding['d_freq_h'](ent) * day + self.embedding['d_phi_h'](ent))
else:
time_emb = self.embedding['y_amps_t'](ent) * self.time_nl(
self.embedding['y_freq_t'](ent) * year + self.embedding['y_phi_t'](ent))
time_emb += self.embedding['m_amps_t'](ent) * self.time_nl(
self.embedding['m_freq_t'](ent) * month + self.embedding['m_phi_t'](ent))
time_emb += self.embedding['d_amps_t'](ent) * self.time_nl(
self.embedding['d_freq_t'](ent) * day + self.embedding['d_phi_t'](ent))
return time_emb
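    # Sketch of the diachronic embedding computed above (DE-SimplE style): for
    # an entity e at date (y, m, d),
    #   t_emb(e) = a_y(e) * sin(w_y(e) * y + phi_y(e))
    #            + a_m(e) * sin(w_m(e) * m + phi_m(e))
    #            + a_d(e) * sin(w_d(e) * d + phi_d(e))
    # with per-entity amplitude (a), frequency (w) and phase (phi) embeddings.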
def get_embedding(self, head, rel, tail, year, month, day):
year = year.view(-1, 1)
month = month.view(-1, 1)
day = day.view(-1, 1)
h_emb1 = self.embedding['ent_embs_h'](head)
r_emb1 = self.embedding['rel_embs_f'](rel)
t_emb1 = self.embedding['ent_embs_t'](tail)
h_emb2 = self.embedding['ent_embs_h'](tail)
r_emb2 = self.embedding['rel_embs_i'](rel)
t_emb2 = self.embedding['ent_embs_t'](head)
h_emb1 = torch.cat((h_emb1, self.get_time_embedding(head, year, month, day, 'head')), 1)
t_emb1 = torch.cat((t_emb1, self.get_time_embedding(tail, year, month, day, 'tail')), 1)
h_emb2 = torch.cat((h_emb2, self.get_time_embedding(tail, year, month, day, 'head')), 1)
t_emb2 = torch.cat((t_emb2, self.get_time_embedding(head, year, month, day, 'tail')), 1)
return h_emb1, r_emb1, t_emb1, h_emb2, r_emb2, t_emb2
def forward(self, samples, **kwargs):
head = samples[:, 0].long()
rel = samples[:, 1].long()
tail = samples[:, 2].long()
year = samples[:, 3]
month = samples[:, 4]
day = samples[:, 5]
h_emb1, r_emb1, t_emb1, h_emb2, r_emb2, t_emb2 = self.get_embedding(head, rel, tail, year, month, day)
p = self.config.get('model.dropout')
scores = ((h_emb1 * r_emb1) * t_emb1 + (h_emb2 * r_emb2) * t_emb2) / 2.0
scores = F.dropout(scores, p=p, training=self.training) # TODO training
scores = torch.sum(scores, dim=1)
return scores, None
def fit(self, samples: torch.Tensor):
bs = samples.size(0)
dim = samples.size(1) // (1 + self.config.get("negative_sampling.num_samples"))
samples = samples.view(-1, dim)
scores, factor = self.forward(samples)
scores = scores.view(bs, -1)
return scores, factor
def predict(self, queries: torch.Tensor):
assert torch.isnan(queries).sum(1).byte().all(), "Either head or tail should be absent."
bs = queries.size(0)
dim = queries.size(0)
candidates = all_candidates_of_ent_queries(queries, self.dataset.num_entities())
scores, _ = self.forward(candidates)
scores = scores.view(bs, -1)
return scores
@BaseModel.register(name="tcomplex")
class TComplExModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
self.rank = self.config.get("model.rank")
self.no_time_emb = self.config.get("model.no_time_emb")
self.init_size = self.config.get("model.init_size")
self.num_ent = self.dataset.num_entities()
self.num_rel = self.dataset.num_relations()
self.num_ts = self.dataset.num_timestamps()
self.prepare_embedding()
def prepare_embedding(self):
self.embeddings = nn.ModuleList([
nn.Embedding(s, 2 * self.rank, sparse=True)
for s in [self.num_ent, self.num_rel, self.num_ts]
])
for emb in self.embeddings:
emb.weight.data *= self.init_size
def forward(self, x):
"""
x is spot
"""
lhs = self.embeddings[0](x[:, 0].long())
rel = self.embeddings[1](x[:, 1].long())
rhs = self.embeddings[0](x[:, 2].long())
time = self.embeddings[2](x[:, 3].long())
lhs = lhs[:, :self.rank], lhs[:, self.rank:]
rel = rel[:, :self.rank], rel[:, self.rank:]
rhs = rhs[:, :self.rank], rhs[:, self.rank:]
time = time[:, :self.rank], time[:, self.rank:]
right = self.embeddings[0].weight # all ent tensor
right = right[:, :self.rank], right[:, self.rank:]
rt = rel[0] * time[0], rel[1] * time[0], rel[0] * time[1], rel[1] * time[1]
full_rel = rt[0] - rt[3], rt[1] + rt[2]
# 1st item: scores
# 2nd item: reg item factors
# 3rd item: time
scores = (lhs[0] * full_rel[0] - lhs[1] * full_rel[1]) @ right[0].t() + \
(lhs[1] * full_rel[0] + lhs[0] * full_rel[1]) @ right[1].t()
factors = {
"n3": (torch.sqrt(lhs[0] ** 2 + lhs[1] ** 2),
torch.sqrt(full_rel[0] ** 2 + full_rel[1] ** 2),
torch.sqrt(rhs[0] ** 2 + rhs[1] ** 2)),
"lambda3": (self.embeddings[2].weight[:-1] if self.no_time_emb else self.embeddings[2].weight)
}
return scores, factors
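    # The real/imag splits above implement complex arithmetic: with
    # lhs = a + ib, full_rel = c + id, and candidate entities e + if, the two
    # matrix products compute (a*c - b*d) @ e.T + (b*c + a*d) @ f.T, i.e. the
    # real part of the Hermitian product, scored against all entities at once.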
def predict(self, x):
assert torch.isnan(x).sum(1).byte().all(), "Either head or tail should be absent."
missing_head_ind = torch.isnan(x)[:, 0].byte().unsqueeze(1)
reversed_x = x.clone()
reversed_x[:, 1] += 1
reversed_x[:, (0, 2)] = reversed_x[:, (2, 0)]
x = torch.where(missing_head_ind,
reversed_x,
x)
lhs = self.embeddings[0](x[:, 0].long())
rel = self.embeddings[1](x[:, 1].long())
time = self.embeddings[2](x[:, 3].long())
lhs = lhs[:, :self.rank], lhs[:, self.rank:]
rel = rel[:, :self.rank], rel[:, self.rank:]
time = time[:, :self.rank], time[:, self.rank:]
right = self.embeddings[0].weight
right = right[:, :self.rank], right[:, self.rank:]
scores = (lhs[0] * rel[0] * time[0] - lhs[1] * rel[1] * time[0] -
lhs[1] * rel[0] * time[1] - lhs[0] * rel[1] * time[1]) @ right[0].t() + \
(lhs[1] * rel[0] * time[0] + lhs[0] * rel[1] * time[0] +
lhs[0] * rel[0] * time[1] - lhs[1] * rel[1] * time[1]) @ right[1].t()
return scores
def forward_over_time(self, x):
lhs = self.embeddings[0](x[:, 0])
rel = self.embeddings[1](x[:, 1])
rhs = self.embeddings[0](x[:, 2])
time = self.embeddings[2].weight
lhs = lhs[:, :self.rank], lhs[:, self.rank:]
rel = rel[:, :self.rank], rel[:, self.rank:]
rhs = rhs[:, :self.rank], rhs[:, self.rank:]
time = time[:, :self.rank], time[:, self.rank:]
return (
(lhs[0] * rel[0] * rhs[0] - lhs[1] * rel[1] * rhs[0] -
lhs[1] * rel[0] * rhs[1] + lhs[0] * rel[1] * rhs[1]) @ time[0].t() +
(lhs[1] * rel[0] * rhs[0] - lhs[0] * rel[1] * rhs[0] +
lhs[0] * rel[0] * rhs[1] - lhs[1] * rel[1] * rhs[1]) @ time[1].t()
)
@BaseModel.register(name="hyte")
class HyTEModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
@BaseModel.register(name="atise")
class ATiSEModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
# TODO(gengyuan) load params before initialize
self.cmin = self.config.get("model.cmin")
self.cmax = self.config.get("model.cmax")
self.emb_dim = self.config.get("model.embedding_dim")
self.prepare_embedding()
def prepare_embedding(self):
num_ent = self.dataset.num_entities()
num_rel = self.dataset.num_relations()
self.embedding: Dict[str, nn.Module] = defaultdict(None)
self.embedding.update({'emb_E': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_E_var': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_R': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_R_var': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_TE': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'alpha_E': nn.Embedding(num_ent, 1, padding_idx=0)})
self.embedding.update({'beta_E': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'omega_E': nn.Embedding(num_ent, self.emb_dim, padding_idx=0)})
self.embedding.update({'emb_TR': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding.update({'alpha_R': nn.Embedding(num_rel, 1, padding_idx=0)})
self.embedding.update({'beta_R': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding.update({'omega_R': nn.Embedding(num_rel, self.emb_dim, padding_idx=0)})
self.embedding = nn.ModuleDict(self.embedding)
r = 6 / np.sqrt(self.emb_dim)
self.embedding['emb_E'].weight.data.uniform_(-r, r)
self.embedding['emb_E_var'].weight.data.uniform_(self.cmin, self.cmax)
self.embedding['emb_R'].weight.data.uniform_(-r, r)
self.embedding['emb_R_var'].weight.data.uniform_(self.cmin, self.cmax)
self.embedding['emb_TE'].weight.data.uniform_(-r, r)
self.embedding['alpha_E'].weight.data.uniform_(0, 0)
self.embedding['beta_E'].weight.data.uniform_(0, 0)
self.embedding['omega_E'].weight.data.uniform_(-r, r)
self.embedding['emb_TR'].weight.data.uniform_(-r, r)
self.embedding['alpha_R'].weight.data.uniform_(0, 0)
self.embedding['beta_R'].weight.data.uniform_(0, 0)
self.embedding['omega_R'].weight.data.uniform_(-r, r)
self.embedding['emb_E'].weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.embedding['emb_E_var'].weight.data.uniform_(self.cmin, self.cmax)
self.embedding['emb_R'].weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.embedding['emb_R_var'].weight.data.uniform_(self.cmin, self.cmax)
self.embedding['emb_TE'].weight.data.renorm_(p=2, dim=0, maxnorm=1)
self.embedding['emb_TR'].weight.data.renorm_(p=2, dim=0, maxnorm=1)
def forward(self, sample: torch.Tensor):
bs = sample.size(0)
# TODO(gengyuan)
dim = sample.size(1) // (1 + self.config.get("negative_sampling.num_samples"))
sample = sample.view(-1, dim)
# TODO(gengyuan) type conversion when feeding the data instead of running the models
h_i, t_i, r_i, d_i = sample[:, 0].long(), sample[:, 2].long(), sample[:, 1].long(), sample[:, 3]
pi = 3.14159265358979323846
h_mean = self.embedding['emb_E'](h_i).view(-1, self.emb_dim) + \
d_i.view(-1, 1) * self.embedding['alpha_E'](h_i).view(-1, 1) * self.embedding['emb_TE'](h_i).view(-1,
self.emb_dim) \
+ self.embedding['beta_E'](h_i).view(-1, self.emb_dim) * torch.sin(
2 * pi * self.embedding['omega_E'](h_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
t_mean = self.embedding['emb_E'](t_i).view(-1, self.emb_dim) + \
d_i.view(-1, 1) * self.embedding['alpha_E'](t_i).view(-1, 1) * self.embedding['emb_TE'](t_i).view(-1,
self.emb_dim) \
+ self.embedding['beta_E'](t_i).view(-1, self.emb_dim) * torch.sin(
2 * pi * self.embedding['omega_E'](t_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
r_mean = self.embedding['emb_R'](r_i).view(-1, self.emb_dim) + \
d_i.view(-1, 1) * self.embedding['alpha_R'](r_i).view(-1, 1) * self.embedding['emb_TR'](r_i).view(-1,
self.emb_dim) \
+ self.embedding['beta_R'](r_i).view(-1, self.emb_dim) * torch.sin(
2 * pi * self.embedding['omega_R'](r_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
h_var = self.embedding['emb_E_var'](h_i).view(-1, self.emb_dim)
t_var = self.embedding['emb_E_var'](t_i).view(-1, self.emb_dim)
r_var = self.embedding['emb_R_var'](r_i).view(-1, self.emb_dim)
out1 = torch.sum((h_var + t_var) / r_var, 1) + torch.sum(((r_mean - h_mean + t_mean) ** 2) / r_var,
1) - self.emb_dim
out2 = torch.sum(r_var / (h_var + t_var), 1) + torch.sum(((h_mean - t_mean - r_mean) ** 2) / (h_var + t_var),
1) - self.emb_dim
scores = (out1 + out2) / 4
scores = scores.view(bs, -1)
factors = {
"renorm": (self.embedding['emb_E'].weight,
self.embedding['emb_R'].weight,
self.embedding['emb_TE'].weight,
self.embedding['emb_TR'].weight),
"clamp": (self.embedding['emb_E_var'].weight,
self.embedding['emb_R_var'].weight)
}
return scores, factors
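    # The score above follows ATiSE's Gaussian KL formulation (a sketch, up to
    # constant factors): out1 and out2 are the two directed KL divergences
    # between N(h_mean - t_mean, h_var + t_var) and N(r_mean, r_var); the
    # log-determinant terms cancel in the symmetric sum, so they are omitted.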
    # TODO(gengyuan): workaround
def predict(self, sample: torch.Tensor):
bs = sample.size(0)
# TODO(gengyuan)
dim = sample.size(1) // (self.dataset.num_entities())
sample = sample.view(-1, dim)
# TODO(gengyuan) type conversion when feeding the data instead of running the models
h_i, t_i, r_i, d_i = sample[:, 0].long(), sample[:, 2].long(), sample[:, 1].long(), sample[:, 3]
pi = 3.14159265358979323846
h_mean = self.embedding['emb_E'](h_i).view(-1, self.emb_dim) + \
d_i.view(-1, 1) * self.embedding['alpha_E'](h_i).view(-1, 1) * self.embedding['emb_TE'](h_i).view(-1,
self.emb_dim) \
+ self.embedding['beta_E'](h_i).view(-1, self.emb_dim) * torch.sin(
2 * pi * self.embedding['omega_E'](h_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
t_mean = self.embedding['emb_E'](t_i).view(-1, self.emb_dim) + \
d_i.view(-1, 1) * self.embedding['alpha_E'](t_i).view(-1, 1) * self.embedding['emb_TE'](t_i).view(-1,
self.emb_dim) \
+ self.embedding['beta_E'](t_i).view(-1, self.emb_dim) * torch.sin(
2 * pi * self.embedding['omega_E'](t_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
r_mean = self.embedding['emb_R'](r_i).view(-1, self.emb_dim) + \
d_i.view(-1, 1) * self.embedding['alpha_R'](r_i).view(-1, 1) * self.embedding['emb_TR'](r_i).view(-1,
self.emb_dim) \
+ self.embedding['beta_R'](r_i).view(-1, self.emb_dim) * torch.sin(
2 * pi * self.embedding['omega_R'](r_i).view(-1, self.emb_dim) * d_i.view(-1, 1))
h_var = self.embedding['emb_E_var'](h_i).view(-1, self.emb_dim)
t_var = self.embedding['emb_E_var'](t_i).view(-1, self.emb_dim)
r_var = self.embedding['emb_R_var'](r_i).view(-1, self.emb_dim)
out1 = torch.sum((h_var + t_var) / r_var, 1) + torch.sum(((r_mean - h_mean + t_mean) ** 2) / r_var,
1) - self.emb_dim
out2 = torch.sum(r_var / (h_var + t_var), 1) + torch.sum(((h_mean - t_mean - r_mean) ** 2) / (h_var + t_var),
1) - self.emb_dim
scores = (out1 + out2) / 4
scores = scores.view(bs, -1)
factors = {
"renorm": (self.embedding['emb_E'].weight,
self.embedding['emb_R'].weight,
self.embedding['emb_TE'].weight,
self.embedding['emb_TR'].weight),
"clamp": (self.embedding['emb_E_var'].weight,
self.embedding['emb_R_var'].weight)
}
return scores, factors
# reference: https://github.com/bsantraigi/TA_TransE/blob/master/model.py
# reference: https://github.com/jimmywangheng/knowledge_representation_pytorch
@BaseModel.register(name="ta_transe")
class TATransEModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
# model params from files
self.emb_dim = self.config.get("model.emb_dim")
self.l1_flag = self.config.get("model.l1_flag")
self.p = self.config.get("model.p")
self.dropout = torch.nn.Dropout(p=self.p)
self.lstm = LSTMModel(self.emb_dim, n_layer=1)
self.prepare_embedding()
def prepare_embedding(self):
num_ent = self.dataset.num_entities()
num_rel = self.dataset.num_relations()
num_tem = 32 # should be 32
self.embedding: Dict[str, torch.nn.Embedding] = defaultdict(None)
self.embedding['ent'] = torch.nn.Embedding(num_ent, self.emb_dim)
self.embedding['rel'] = torch.nn.Embedding(num_rel, self.emb_dim)
self.embedding['tem'] = torch.nn.Embedding(num_tem, self.emb_dim)
self.embedding = nn.ModuleDict(self.embedding)
for _, emb in self.embedding.items():
torch.nn.init.xavier_uniform_(emb.weight)
            emb.weight.data.renorm_(p=2, dim=1, maxnorm=1)
def get_rseq(self, rel: torch.LongTensor, tem: torch.LongTensor):
r_e = self.embedding['rel'](rel)
r_e = r_e.unsqueeze(0).transpose(0, 1)
bs = tem.size(0)
tem_len = tem.size(1)
tem = tem.contiguous()
tem = tem.view(bs * tem_len)
token_e = self.embedding['tem'](tem)
token_e = token_e.view(bs, tem_len, self.emb_dim)
seq_e = torch.cat((r_e, token_e), 1)
hidden_tem = self.lstm(seq_e)
hidden_tem = hidden_tem[0, :, :]
rseq_e = hidden_tem
return rseq_e
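    # get_rseq runs [relation embedding; temporal token embeddings] through
    # the LSTM and uses its output as the time-aware relation embedding,
    # following the TA-TransE sequence-encoding idea.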
def forward(self, samples: torch.Tensor):
h, r, t, tem = samples[:, 0].long(), samples[:, 1].long(), samples[:, 2].long(), samples[:, 3:].long()
h_e = self.embedding['ent'](h)
t_e = self.embedding['ent'](t)
rseq_e = self.get_rseq(r, tem)
h_e = self.dropout(h_e)
t_e = self.dropout(t_e)
rseq_e = self.dropout(rseq_e)
if self.l1_flag:
scores = torch.sum(torch.abs(h_e + rseq_e - t_e), 1)
else:
scores = torch.sum((h_e + rseq_e - t_e) ** 2, 1)
factors = {
"norm": (h_e,
t_e,
rseq_e)
}
return scores, factors
def fit(self, samples: torch.Tensor):
bs = samples.size(0)
dim = samples.size(1) // (1 + self.config.get("negative_sampling.num_samples"))
samples = samples.view(-1, dim)
scores, factor = self.forward(samples)
scores = scores.view(bs, -1)
return scores, factor
def predict(self, queries: torch.Tensor):
assert torch.isnan(queries).sum(1).byte().all(), "Either head or tail should be absent."
bs = queries.size(0)
dim = queries.size(0)
candidates = all_candidates_of_ent_queries(queries, self.dataset.num_entities())
scores, _ = self.forward(candidates)
scores = scores.view(bs, -1)
return scores
# reference: https://github.com/bsantraigi/TA_TransE/blob/master/model.py
@BaseModel.register(name="ta_distmult")
class TADistmultModel(BaseModel):
def __init__(self, config: Config, dataset: DatasetProcessor):
super().__init__(config, dataset)
# model params from files
self.emb_dim = self.config.get("model.emb_dim")
self.l1_flag = self.config.get("model.l1_flag")
self.p = self.config.get("model.p")
self.dropout = torch.nn.Dropout(p=self.p)
self.lstm = LSTMModel(self.emb_dim, n_layer=1)
self.criterion = nn.Softplus()
self.prepare_embedding()
def prepare_embedding(self):
num_ent = self.dataset.num_entities()
num_rel = self.dataset.num_relations()
num_tem = 32 # should be 32
self.embedding: Dict[str, torch.nn.Embedding] = defaultdict(None)
self.embedding['ent'] = torch.nn.Embedding(num_ent, self.emb_dim)
self.embedding['rel'] = torch.nn.Embedding(num_rel, self.emb_dim)
self.embedding['tem'] = torch.nn.Embedding(num_tem, self.emb_dim)
self.embedding = nn.ModuleDict(self.embedding)
for _, emb in self.embedding.items():
torch.nn.init.xavier_uniform_(emb.weight)
            emb.weight.data.renorm_(p=2, dim=1, maxnorm=1)
def forward(self, samples: torch.Tensor):
h, r, t, tem = samples[:, 0].long(), samples[:, 1].long(), samples[:, 2].long(), samples[:, 3:].long()
h_e = self.embedding['ent'](h)
t_e = self.embedding['ent'](t)
rseq_e = self.get_rseq(r, tem)
h_e = self.dropout(h_e)
t_e = self.dropout(t_e)
rseq_e = self.dropout(rseq_e)
scores = torch.sum(h_e * t_e * rseq_e, 1, False)
factors = {
"norm": (self.embedding['ent'].weight,
self.embedding['rel'].weight,
self.embedding['tem'].weight)
}
return scores, factors
def get_rseq(self, rel, tem):
r_e = self.embedding['rel'](rel)
r_e = r_e.unsqueeze(0).transpose(0, 1)
bs = tem.size(0)
tem_len = tem.size(1)
tem = tem.contiguous()
tem = tem.view(bs * tem_len)
token_e = self.embedding['tem'](tem)
token_e = token_e.view(bs, tem_len, self.emb_dim)
seq_e = torch.cat((r_e, token_e), 1)
hidden_tem = self.lstm(seq_e)
hidden_tem = hidden_tem[0, :, :]
rseq_e = hidden_tem
return rseq_e
def fit(self, samples: torch.Tensor):
bs = samples.size(0)
dim = samples.size(1) // (1 + self.config.get("negative_sampling.num_samples"))
samples = samples.view(-1, dim)
scores, factor = self.forward(samples)
scores = scores.view(bs, -1)
return scores, factor
def predict(self, queries: torch.Tensor):
assert torch.isnan(queries).sum(1).byte().all(), "Either head or tail should be absent."
bs = queries.size(0)
dim = queries.size(0)
candidates = all_candidates_of_ent_queries(queries, self.dataset.num_entities())
scores, _ = self.forward(candidates)
scores = scores.view(bs, -1)
return scores
| []
| []
| [
"PYTHONHASHSEED"
]
| [] | ["PYTHONHASHSEED"] | python | 1 | 0 | |
backend/server.go | package main
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"
oauth1Login "github.com/dghubble/gologin/oauth1"
twitterLogin "github.com/dghubble/gologin/twitter"
"github.com/dghubble/oauth1"
twitterOAuth1 "github.com/dghubble/oauth1/twitter"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"github.com/wipeinc/wipeinc/model"
"github.com/wipeinc/wipeinc/repository"
"github.com/wipeinc/wipeinc/twitter"
)
// Config struct for backend server
type Config struct {
TwitterConsumerKey string
TwitterConsumerSecret string
TwitterCallbackURL string
CookieSecretToken string
}
const (
sessionName = "wipeinc"
sessionUserKey = "twitterID"
userAccessTokenKey = "twitterAccessToken"
userAccessTokenSecretKey = "twitterAccessTokenSecret"
)
var sessionStore sessions.Store
var config *Config
func init() {
config = &Config{
TwitterConsumerKey: os.Getenv("TWITTER_CONSUMER_KEY"),
TwitterConsumerSecret: os.Getenv("TWITTER_CONSUMER_SECRET"),
TwitterCallbackURL: os.Getenv("TWITTER_CALLBACK_URL"),
}
if config.TwitterConsumerKey == "" {
log.Fatal("twitter consumer key not set")
}
if config.TwitterConsumerSecret == "" {
log.Fatal("twitter consumer secret not set")
}
if _, err := url.Parse(config.TwitterCallbackURL); err != nil {
log.Fatalf("invalid twitter callback url : %s", err.Error())
}
sessionSecret := os.Getenv("SESSION_SECRET_KEY")
if sessionSecret == "" {
log.Fatal("session secret not set")
}
if len(sessionSecret) != 32 && len(sessionSecret) != 64 {
log.Fatalf("invalid session secret size: %d\n", len(sessionSecret))
}
sessionStore = sessions.NewCookieStore([]byte(sessionSecret), nil)
}
func main() {
oauth1Config := &oauth1.Config{
ConsumerKey: config.TwitterConsumerKey,
ConsumerSecret: config.TwitterConsumerSecret,
CallbackURL: config.TwitterCallbackURL,
Endpoint: twitterOAuth1.AuthorizeEndpoint,
}
mux := mux.NewRouter()
mux.Handle("/twitter/login", twitterLogin.LoginHandler(oauth1Config, nil))
mux.Handle("/twitter/callback", twitterLogin.CallbackHandler(oauth1Config, issueSession(), nil))
mux.HandleFunc("/api/profile/{name}", showProfile)
mux.HandleFunc("/api/profile/{name}/analyze", showProfile)
mux.HandleFunc("/api/sessions/logout", logoutHandler)
mux.PathPrefix("/").HandlerFunc(showIndex)
log.Fatal(http.ListenAndServe(":8080", mux))
}
// issueSession issues a cookie session after successful Twitter login
func issueSession() http.Handler {
fn := func(w http.ResponseWriter, req *http.Request) {
ctx := req.Context()
twitterUser, err := twitterLogin.UserFromContext(ctx)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
accessToken, accessTokenSecret, err := oauth1Login.AccessTokenFromContext(ctx)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session, err := sessionStore.New(req, sessionName)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
session.Values[sessionUserKey] = twitterUser.ID
session.Values[userAccessTokenKey] = accessToken
session.Values[userAccessTokenSecretKey] = accessTokenSecret
err = session.Save(req, w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
http.Redirect(w, req, "/", http.StatusFound)
}
return http.HandlerFunc(fn)
}
// logoutHandler destroys the session on POSTs and redirects to home.
func logoutHandler(w http.ResponseWriter, req *http.Request) {
if req.Method == "POST" {
session, err := sessionStore.Get(req, "session")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
delete(session.Values, sessionUserKey)
session.Options.MaxAge = -1
err = session.Save(req, w)
if err != nil {
data, errJSON := json.Marshal(session)
if errJSON != nil {
log.Printf("could not delete session %s\n", data)
}
log.Printf("could not delete session err: %s\n", err)
}
}
http.Redirect(w, req, "/", http.StatusFound)
}
// showIndex show empty page with js scripts
func showIndex(w http.ResponseWriter, r *http.Request) {
index, err := Asset("static/index.html")
if err != nil {
w.WriteHeader(http.StatusNotFound)
}
indexReader := bytes.NewBuffer(index)
if _, err := io.Copy(w, indexReader); err != nil {
log.Printf("error writing index to http connection: %s\n", err)
}
}
func getUserTwitterClient(req *http.Request) (*twitter.Client, error) {
session, err := sessionStore.Get(req, sessionName)
if err != nil {
return nil, err
}
var accessToken string
var accessTokenSecret string
var ok bool
val := session.Values[userAccessTokenKey]
if accessToken, ok = val.(string); !ok {
return nil, errors.New("user token key absent")
}
val = session.Values[userAccessTokenSecretKey]
if accessTokenSecret, ok = val.(string); !ok {
return nil, errors.New("user token key secret absent")
}
ctx := req.Context()
return twitter.NewUserClient(ctx, accessToken, accessTokenSecret), nil
}
// showProfile route for /api/profile/{screenName}
func showProfile(w http.ResponseWriter, req *http.Request) {
var err error
var user *model.User
params := mux.Vars(req)
screenName := params["name"]
user, err = repository.DB.GetUserByScreenName(screenName)
if err == nil {
log.Println("cache hit")
}
if err != nil {
var client *twitter.Client
client, err = getUserTwitterClient(req)
if err != nil {
http.Error(w, "unauthroized", http.StatusUnauthorized)
return
}
user, err = client.GetUserShow(screenName)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
go saveUser(*user)
}
if err = json.NewEncoder(w).Encode(user); err != nil {
log.Printf("error trying to serialize twitter user profile: %s", err.Error())
}
}
// analyzeProfile route for /api/profile/{screenName}/analyze
func analyzeProfile(w http.ResponseWriter, req *http.Request) {
	client, err := getUserTwitterClient(req)
	if err != nil {
		http.Error(w, "unauthorized", http.StatusUnauthorized)
		return
	}
	// note: the original code referenced undeclared `user` and `since`; they
	// are assumed here to be the screen name from the route and a zero
	// starting tweet ID.
	user := mux.Vars(req)["name"]
	var since int64
	stats := twitter.NewTweetStats()
	for i := 0; i < 4; i++ {
		fmt.Printf("[%d/4] fetching user since: %d\n", i+1, since)
		tweets, limit, err := client.GetUserTimeline(user, since)
		if err != nil {
			fmt.Println("error")
			fmt.Printf("%+v\n", err)
			break
		}
		stats.AnalyzeTweets(tweets)
		if len(tweets) < 199 {
			fmt.Printf("got %d tweets\n", len(tweets))
			break
		}
		since = tweets[len(tweets)-1].ID
		limit.Delay()
	}
}
func saveUser(user model.User) {
err := repository.DB.AddUser(&user)
if err != nil {
log.Printf("Error callling AddUser: %s", err.Error())
}
}
| [
"\"TWITTER_CONSUMER_KEY\"",
"\"TWITTER_CONSUMER_SECRET\"",
"\"TWITTER_CALLBACK_URL\"",
"\"SESSION_SECRET_KEY\""
]
| []
| [
"TWITTER_CONSUMER_SECRET",
"SESSION_SECRET_KEY",
"TWITTER_CONSUMER_KEY",
"TWITTER_CALLBACK_URL"
]
| [] | ["TWITTER_CONSUMER_SECRET", "SESSION_SECRET_KEY", "TWITTER_CONSUMER_KEY", "TWITTER_CALLBACK_URL"] | go | 4 | 0 | |
plugins/global/launcher/frame_range_setter.py |
import os
import sys
from avalon import api, lib, io
class FrameRangeSetterAction(api.Action):
"""Only project admin can access
"""
name = "framerangesetter"
label = "Frame Range"
icon = "scissors"
color = "#FA9576"
order = 999 # at the end
def is_compatible(self, session):
required = ["AVALON_PROJECTS",
"AVALON_PROJECT",
"AVALON_SILO"]
missing = [x for x in required
if session.get(x) in (None, "placeholder")]
return not missing
def process(self, session, **kwargs):
env = os.environ.copy()
env.update(session)
return lib.launch(executable="python",
environment=env,
args=[__file__])
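# When launched through the Action above, this file is re-executed with
# "python" and the Avalon session merged into os.environ, so the dialog below
# can read values such as AVALON_SILO from the environment (via avalon.Session).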
if __name__ == "__main__":
from avalon import style, Session
from avalon.vendor.Qt import QtWidgets, QtCore
from reveries import utils
class FrameRangeSetter(QtWidgets.QDialog):
MAX = 9999
def __init__(self, parent=None):
super(FrameRangeSetter, self).__init__(parent)
self.setWindowTitle("Set Frame Range")
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
asset_menu = QtWidgets.QComboBox()
asset_grp = self.make_group(asset_menu, "Asset Name")
start_box = QtWidgets.QSpinBox()
start_grp = self.make_group(start_box, "Start Frame")
end_box = QtWidgets.QSpinBox()
end_grp = self.make_group(end_box, "End Frame")
handle_box = QtWidgets.QSpinBox()
handle_grp = self.make_group(handle_box, "Handles")
save_btn = QtWidgets.QPushButton("Save")
body = QtWidgets.QVBoxLayout(self)
body.addLayout(asset_grp)
body.addLayout(start_grp)
body.addLayout(end_grp)
body.addLayout(handle_grp)
body.addWidget(save_btn)
asset_menu.currentIndexChanged.connect(self.on_asset_changed)
start_box.valueChanged.connect(self.on_value_changed)
end_box.valueChanged.connect(self.on_value_changed)
handle_box.valueChanged.connect(self.on_value_changed)
save_btn.clicked.connect(self.save_range)
self.assets = asset_menu
self.start = start_box
self.end = end_box
self.handles = handle_box
project = io.find_one({"type": "project"})
self.handles_min = project["data"]["handles"]
self.end.setMaximum(self.MAX)
self.find_assets()
def make_group(self, widget, label):
group = QtWidgets.QHBoxLayout()
label = QtWidgets.QLabel(label)
group.addWidget(label)
group.addWidget(widget)
return group
def min_handles(self, handles):
return handles if handles < self.handles_min else self.handles_min
def on_asset_changed(self):
asset = self.assets.currentText()
start, end, handles, _ = utils.get_timeline_data(asset_name=asset)
self.end.setValue(end)
self.start.setValue(start)
self.handles.setValue(handles)
def on_value_changed(self):
start = self.start.value()
end = self.end.value()
handles = self.handles.value()
self.start.setMaximum(end - 1)
self.start.setMinimum(handles)
self.end.setMaximum(self.MAX)
self.end.setMinimum(start + 1)
self.handles.setMaximum(start)
self.handles.setMinimum(self.min_handles(handles))
def find_assets(self):
for asset in io.find({"silo": Session["AVALON_SILO"]},
{"name": True},
sort=[("name", 1)]):
self.assets.addItem(asset["name"])
def save_range(self):
asset = self.assets.currentText()
update = {
"data.edit_in": self.start.value(),
"data.edit_out": self.end.value(),
"data.handles": self.handles.value(),
}
io.update_many({"type": "asset", "name": asset},
update={"$set": update})
io.install()
app = QtWidgets.QApplication(sys.argv)
dialog = FrameRangeSetter()
dialog.setStyleSheet(style.load_stylesheet())
dialog.show()
sys.exit(app.exec_())
| []
| []
| []
| [] | [] | python | 0 | 0 | |
test/integration_test.go | package test
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/gruntwork-io/terragrunt/aws_helper"
"github.com/gruntwork-io/terragrunt/cli"
"github.com/gruntwork-io/terragrunt/config"
"github.com/gruntwork-io/terragrunt/configstack"
terragruntDynamoDb "github.com/gruntwork-io/terragrunt/dynamodb"
"github.com/gruntwork-io/terragrunt/errors"
"github.com/gruntwork-io/terragrunt/options"
"github.com/gruntwork-io/terragrunt/remote"
"github.com/gruntwork-io/terragrunt/shell"
"github.com/gruntwork-io/terragrunt/util"
"github.com/stretchr/testify/assert"
)
// hard-code this to match the test fixture for now
const (
TERRAFORM_REMOTE_STATE_S3_REGION = "us-west-2"
TEST_FIXTURE_PATH = "fixture/"
TEST_FIXTURE_INCLUDE_PATH = "fixture-include/"
TEST_FIXTURE_INCLUDE_CHILD_REL_PATH = "qa/my-app"
TEST_FIXTURE_STACK = "fixture-stack/"
TEST_FIXTURE_OUTPUT_ALL = "fixture-output-all"
TEST_FIXTURE_STDOUT = "fixture-download/stdout-test"
TEST_FIXTURE_EXTRA_ARGS_PATH = "fixture-extra-args/"
TEST_FIXTURE_ENV_VARS_BLOCK_PATH = "fixture-env-vars-block/"
TEST_FIXTURE_LOCAL_DOWNLOAD_PATH = "fixture-download/local"
TEST_FIXTURE_REMOTE_DOWNLOAD_PATH = "fixture-download/remote"
TEST_FIXTURE_OVERRIDE_DOWNLOAD_PATH = "fixture-download/override"
TEST_FIXTURE_LOCAL_RELATIVE_DOWNLOAD_PATH = "fixture-download/local-relative"
TEST_FIXTURE_REMOTE_RELATIVE_DOWNLOAD_PATH = "fixture-download/remote-relative"
TEST_FIXTURE_LOCAL_WITH_BACKEND = "fixture-download/local-with-backend"
TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR = "fixture-download/local-with-exclude-dir"
TEST_FIXTURE_REMOTE_WITH_BACKEND = "fixture-download/remote-with-backend"
TEST_FIXTURE_REMOTE_MODULE_IN_ROOT = "fixture-download/remote-module-in-root"
TEST_FIXTURE_LOCAL_MISSING_BACKEND = "fixture-download/local-with-missing-backend"
TEST_FIXTURE_LOCAL_WITH_HIDDEN_FOLDER = "fixture-download/local-with-hidden-folder"
TEST_FIXTURE_LOCAL_PREVENT_DESTROY = "fixture-download/local-with-prevent-destroy"
TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES = "fixture-download/local-with-prevent-destroy-dependencies"
TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES = "fixture-download/local-include-with-prevent-destroy-dependencies"
TEST_FIXTURE_OLD_CONFIG_INCLUDE_PATH = "fixture-old-terragrunt-config/include"
TEST_FIXTURE_OLD_CONFIG_INCLUDE_CHILD_UPDATED_PATH = "fixture-old-terragrunt-config/include-child-updated"
TEST_FIXTURE_OLD_CONFIG_INCLUDE_PARENT_UPDATED_PATH = "fixture-old-terragrunt-config/include-parent-updated"
TEST_FIXTURE_OLD_CONFIG_STACK_PATH = "fixture-old-terragrunt-config/stack"
TEST_FIXTURE_OLD_CONFIG_DOWNLOAD_PATH = "fixture-old-terragrunt-config/download"
TEST_FIXTURE_HOOKS_BEFORE_ONLY_PATH = "fixture-hooks/before-only"
TEST_FIXTURE_HOOKS_AFTER_ONLY_PATH = "fixture-hooks/after-only"
TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_PATH = "fixture-hooks/before-and-after"
TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_MERGE_PATH = "fixture-hooks/before-and-after-merge"
TEST_FIXTURE_HOOKS_SKIP_ON_ERROR_PATH = "fixture-hooks/skip-on-error"
TEST_FIXTURE_HOOKS_ONE_ARG_ACTION_PATH = "fixture-hooks/one-arg-action"
TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH = "fixture-hooks/bad-arg-action/empty-string-command"
TEST_FIXTURE_HOOKS_EMPTY_COMMAND_LIST_PATH = "fixture-hooks/bad-arg-action/empty-command-list"
TEST_FIXTURE_HOOKS_INTERPOLATIONS_PATH = "fixture-hooks/interpolations"
TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_NO_BACKEND = "fixture-hooks/init-once/no-source-no-backend"
TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_WITH_BACKEND = "fixture-hooks/init-once/no-source-with-backend"
TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_NO_BACKEND = "fixture-hooks/init-once/with-source-no-backend"
TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_WITH_BACKEND = "fixture-hooks/init-once/with-source-with-backend"
TEST_FIXTURE_FAILED_TERRAFORM = "fixture-failure"
TEST_FIXTURE_EXIT_CODE = "fixture-exit-code"
TEST_FIXTURE_AUTO_RETRY_RERUN = "fixture-auto-retry/re-run"
TEST_FIXTURE_AUTO_RETRY_EXHAUST = "fixture-auto-retry/exhaust"
TEST_FIXTURE_AUTO_RETRY_APPLY_ALL_RETRIES = "fixture-auto-retry/apply-all"
TERRAFORM_FOLDER = ".terraform"
TERRAFORM_STATE = "terraform.tfstate"
TERRAFORM_STATE_BACKUP = "terraform.tfstate.backup"
TERRAGRUNT_CACHE = ".terragrunt-cache"
)
func init() {
rand.Seed(time.Now().UnixNano())
}
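// The tests below each copy their fixture into a fresh temp dir and, when
// remote state is involved, create a uniquely named S3 bucket (via uniqueId,
// which depends on the rand seed above) so they can run with t.Parallel().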
func TestTerragruntInitHookNoSourceNoBackend(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_NO_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_NO_BACKEND)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
// `init` hook should execute only once (2 occurrences due to the echo and its output)
assert.Equal(t, 2, strings.Count(output, "AFTER_INIT_ONLY_ONCE"), "Hooks on init command executed more than once")
// With no source, `init-from-module` should not execute
assert.NotContains(t, output, "AFTER_INIT_FROM_MODULE_ONLY_ONCE", "Hooks on init-from-module command executed when no source was specified")
}
func TestTerragruntInitHookNoSourceWithBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_WITH_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_NO_SOURCE_WITH_BACKEND)
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", TERRAFORM_REMOTE_STATE_S3_REGION)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
// `init` hook should execute only once (2 occurrences due to the echo and its output)
assert.Equal(t, 2, strings.Count(output, "AFTER_INIT_ONLY_ONCE"), "Hooks on init command executed more than once")
// With no source, `init-from-module` should not execute
assert.NotContains(t, output, "AFTER_INIT_FROM_MODULE_ONLY_ONCE", "Hooks on init-from-module command executed when no source was specified")
}
func TestTerragruntInitHookWithSourceNoBackend(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_NO_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_NO_BACKEND)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
// `init` hook should execute only once (2 occurrences due to the echo and its output)
assert.Equal(t, 2, strings.Count(output, "AFTER_INIT_ONLY_ONCE"), "Hooks on init command executed more than once")
// `init-from-module` hook should execute only once (2 occurrences due to the echo and its output)
assert.Equal(t, 2, strings.Count(output, "AFTER_INIT_FROM_MODULE_ONLY_ONCE"), "Hooks on init-from-module command executed more than once")
}
func TestTerragruntInitHookWithSourceWithBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_WITH_BACKEND)
tmpEnvPath := copyEnvironment(t, "fixture-hooks/init-once")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INIT_ONCE_WITH_SOURCE_WITH_BACKEND)
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", TERRAFORM_REMOTE_STATE_S3_REGION)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
// `init` hook should execute only once (2 occurrences due to the echo and its output)
assert.Equal(t, 2, strings.Count(output, "AFTER_INIT_ONLY_ONCE"), "Hooks on init command executed more than once")
// `init-from-module` hook should execute only once (2 occurrences due to the echo and its output)
assert.Equal(t, 2, strings.Count(output, "AFTER_INIT_FROM_MODULE_ONLY_ONCE"), "Hooks on init-from-module command executed more than once")
}
func TestTerragruntBeforeHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_BEFORE_ONLY_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_BEFORE_ONLY_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_BEFORE_ONLY_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
_, err := ioutil.ReadFile(rootPath + "/file.out")
assert.NoError(t, err)
}
func TestTerragruntAfterHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_AFTER_ONLY_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_AFTER_ONLY_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_AFTER_ONLY_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
_, err := ioutil.ReadFile(rootPath + "/file.out")
assert.NoError(t, err)
}
func TestTerragruntBeforeAndAfterHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
_, beforeErr := ioutil.ReadFile(rootPath + "/before.out")
_, afterErr := ioutil.ReadFile(rootPath + "/after.out")
assert.NoError(t, beforeErr)
assert.NoError(t, afterErr)
}
func TestTerragruntBeforeAndAfterMergeHook(t *testing.T) {
t.Parallel()
childPath := util.JoinPath(TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_MERGE_PATH, TEST_FIXTURE_INCLUDE_CHILD_REL_PATH)
cleanupTerraformFolder(t, childPath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
t.Logf("bucketName: %s", s3BucketName)
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpTerragruntConfigPath := createTmpTerragruntConfigWithParentAndChild(t, TEST_FIXTURE_HOOKS_BEFORE_AND_AFTER_MERGE_PATH, TEST_FIXTURE_INCLUDE_CHILD_REL_PATH, s3BucketName, config.DefaultTerragruntConfigPath, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, childPath))
_, beforeErr := ioutil.ReadFile(childPath + "/before.out")
_, beforeChildErr := ioutil.ReadFile(childPath + "/before-child.out")
_, beforeOverriddenParentErr := ioutil.ReadFile(childPath + "/before-parent.out")
_, afterErr := ioutil.ReadFile(childPath + "/after.out")
_, afterParentErr := ioutil.ReadFile(childPath + "/after-parent.out")
assert.NoError(t, beforeErr)
assert.NoError(t, beforeChildErr)
assert.NoError(t, afterErr)
assert.NoError(t, afterParentErr)
// Expect a PathError here: the parent's before hook was overridden by the child,
// so before-parent.out was never created.
assert.Error(t, beforeOverriddenParentErr)
}
func TestTerragruntSkipOnError(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_SKIP_ON_ERROR_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_SKIP_ON_ERROR_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_SKIP_ON_ERROR_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
assert.Contains(t, output, "BEFORE_SHOULD_DISPLAY")
assert.NotContains(t, output, "BEFORE_NODISPLAY")
assert.Contains(t, output, "AFTER_SHOULD_DISPLAY")
assert.NotContains(t, output, "AFTER_NODISPLAY")
} else {
t.Error("Expected NO terragrunt execution due to previous errors but it did run.")
}
}
func TestTerragruntBeforeOneArgAction(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_ONE_ARG_ACTION_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_ONE_ARG_ACTION_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_ONE_ARG_ACTION_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
output := stderr.String()
if err != nil {
t.Error("Expected successful execution of terragrunt with 1 before hook execution.")
} else {
assert.Contains(t, output, "Running command: date")
}
}
func TestTerragruntEmptyStringCommandHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_EMPTY_STRING_COMMAND_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
if err != nil {
assert.Contains(t, err.Error(), "Need at least one non-empty argument in 'execute'.")
} else {
t.Error("Expected an Error with message: 'Need at least one argument'")
}
}
func TestTerragruntEmptyCommandListHook(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_EMPTY_COMMAND_LIST_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_EMPTY_COMMAND_LIST_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_EMPTY_COMMAND_LIST_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
if err != nil {
assert.Contains(t, err.Error(), "Need at least one non-empty argument in 'execute'.")
} else {
t.Error("Expected an Error with message: 'Need at least one argument'")
}
}
func TestTerragruntHookInterpolation(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_HOOKS_INTERPOLATIONS_PATH)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_HOOKS_INTERPOLATIONS_PATH)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_HOOKS_INTERPOLATIONS_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), &stdout, &stderr)
erroutput := stderr.String()
homePath := os.Getenv("HOME")
if homePath == "" {
homePath = "HelloWorld"
}
if err != nil {
t.Errorf("Did not expect to get error: %s", err.Error())
}
assert.Contains(t, erroutput, homePath)
}
func TestTerragruntWorksWithLocalTerraformVersion(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_PATH)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpTerragruntConfigPath := createTmpTerragruntConfig(t, TEST_FIXTURE_PATH, s3BucketName, lockTableName, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, TEST_FIXTURE_PATH))
var expectedS3Tags = map[string]string{
"owner": "terragrunt integration test",
"name": "Terraform state storage"}
validateS3BucketExistsAndIsTagged(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName, expectedS3Tags)
var expectedDynamoDBTableTags = map[string]string{
"owner": "terragrunt integration test",
"name": "Terraform lock table"}
validateDynamoDBTableExistsAndIsTagged(t, TERRAFORM_REMOTE_STATE_S3_REGION, lockTableName, expectedDynamoDBTableTags)
}
func TestTerragruntWorksWithIncludes(t *testing.T) {
t.Parallel()
childPath := util.JoinPath(TEST_FIXTURE_INCLUDE_PATH, TEST_FIXTURE_INCLUDE_CHILD_REL_PATH)
cleanupTerraformFolder(t, childPath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpTerragruntConfigPath := createTmpTerragruntConfigWithParentAndChild(t, TEST_FIXTURE_INCLUDE_PATH, TEST_FIXTURE_INCLUDE_CHILD_REL_PATH, s3BucketName, config.DefaultTerragruntConfigPath, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, childPath))
}
func TestTerragruntWorksWithIncludesAndOldConfig(t *testing.T) {
t.Parallel()
childPath := util.JoinPath(TEST_FIXTURE_OLD_CONFIG_INCLUDE_PATH, "child")
cleanupTerraformFolder(t, childPath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpTerragruntConfigPath := createTmpTerragruntConfigWithParentAndChild(t, TEST_FIXTURE_OLD_CONFIG_INCLUDE_PATH, "child", s3BucketName, config.OldTerragruntConfigPath, config.OldTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, childPath))
}
func TestTerragruntWorksWithIncludesChildUpdatedAndOldConfig(t *testing.T) {
t.Parallel()
childPath := util.JoinPath(TEST_FIXTURE_OLD_CONFIG_INCLUDE_CHILD_UPDATED_PATH, "child")
cleanupTerraformFolder(t, childPath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpTerragruntConfigPath := createTmpTerragruntConfigWithParentAndChild(t, TEST_FIXTURE_OLD_CONFIG_INCLUDE_CHILD_UPDATED_PATH, "child", s3BucketName, config.OldTerragruntConfigPath, config.DefaultTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, childPath))
}
func TestTerragruntWorksWithIncludesParentUpdatedAndOldConfig(t *testing.T) {
t.Parallel()
childPath := util.JoinPath(TEST_FIXTURE_OLD_CONFIG_INCLUDE_PARENT_UPDATED_PATH, "child")
cleanupTerraformFolder(t, childPath)
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpTerragruntConfigPath := createTmpTerragruntConfigWithParentAndChild(t, TEST_FIXTURE_OLD_CONFIG_INCLUDE_PARENT_UPDATED_PATH, "child", s3BucketName, config.DefaultTerragruntConfigPath, config.OldTerragruntConfigPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-config %s --terragrunt-working-dir %s", tmpTerragruntConfigPath, childPath))
}
func TestTerragruntReportsTerraformErrorsWithPlanAll(t *testing.T) {
cleanupTerraformFolder(t, TEST_FIXTURE_FAILED_TERRAFORM)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_FAILED_TERRAFORM)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, "fixture-failure")
cmd := fmt.Sprintf("terragrunt plan-all --terragrunt-non-interactive --terragrunt-working-dir %s", rootTerragruntConfigPath)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
// Call runTerragruntCommand directly because this command is expected to fail, and runTerragruntRedirectOutput would abort the test on the failure we want to inspect.
if err := runTerragruntCommand(t, cmd, &stdout, &stderr); err == nil {
t.Fatalf("Failed to properly fail command: %v. The terraform should be bad", cmd)
}
output := stdout.String()
errOutput := stderr.String()
fmt.Printf("STDERR is %s.\n STDOUT is %s", errOutput, output)
assert.True(t, strings.Contains(errOutput, "missingvar1") || strings.Contains(output, "missingvar1"))
assert.True(t, strings.Contains(errOutput, "missingvar2") || strings.Contains(output, "missingvar2"))
}
func TestTerragruntOutputAllCommand(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OUTPUT_ALL)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/env1", tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", environmentPath, s3BucketName))
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt output-all --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath), &stdout, &stderr)
output := stdout.String()
assert.True(t, strings.Contains(output, "app1 output"))
assert.True(t, strings.Contains(output, "app2 output"))
assert.True(t, strings.Contains(output, "app3 output"))
assert.True(t, (strings.Index(output, "app3 output") < strings.Index(output, "app1 output")) && (strings.Index(output, "app1 output") < strings.Index(output, "app2 output")))
}
func TestTerragruntValidateAllCommand(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OUTPUT_ALL)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/env1", tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL)
runTerragrunt(t, fmt.Sprintf("terragrunt validate-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", environmentPath, s3BucketName))
}
// Check that Terragrunt does not pollute stdout with anything
func TestTerragruntStdOut(t *testing.T) {
t.Parallel()
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_STDOUT))
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt output foo --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_STDOUT), &stdout, &stderr)
output := stdout.String()
assert.Equal(t, "foo\n", output)
}
func TestTerragruntOutputAllCommandSpecificVariableIgnoreDependencyErrors(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OUTPUT_ALL)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
environmentPath := fmt.Sprintf("%s/%s/env1", tmpEnvPath, TEST_FIXTURE_OUTPUT_ALL)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", environmentPath, s3BucketName))
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
// Call runTerragruntCommand directly because this command produces dependency errors that we intentionally ignore; runTerragruntRedirectOutput would abort the test on them.
runTerragruntCommand(t, fmt.Sprintf("terragrunt output-all app2_text --terragrunt-ignore-dependency-errors --terragrunt-non-interactive --terragrunt-working-dir %s", environmentPath), &stdout, &stderr)
output := stdout.String()
// Without --terragrunt-ignore-dependency-errors, app2 would never run: its dependencies "error" because they do not define the "app2_text" output.
assert.True(t, strings.Contains(output, "app2 output"))
}
func TestTerragruntStackCommands(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-test-locks-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_STACK)
rootTerragruntConfigPath := util.JoinPath(tmpEnvPath, "fixture-stack", config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, "not-used")
mgmtEnvironmentPath := fmt.Sprintf("%s/fixture-stack/mgmt", tmpEnvPath)
stageEnvironmentPath := fmt.Sprintf("%s/fixture-stack/stage", tmpEnvPath)
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", mgmtEnvironmentPath, s3BucketName))
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", stageEnvironmentPath, s3BucketName))
runTerragrunt(t, fmt.Sprintf("terragrunt output-all --terragrunt-non-interactive --terragrunt-working-dir %s", mgmtEnvironmentPath))
runTerragrunt(t, fmt.Sprintf("terragrunt output-all --terragrunt-non-interactive --terragrunt-working-dir %s", stageEnvironmentPath))
runTerragrunt(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", stageEnvironmentPath, s3BucketName))
runTerragrunt(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", mgmtEnvironmentPath, s3BucketName))
}
func TestTerragruntStackCommandsWithOldConfig(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_OLD_CONFIG_STACK_PATH)
rootPath := util.JoinPath(tmpEnvPath, "fixture-old-terragrunt-config/stack")
stagePath := util.JoinPath(rootPath, "stage")
rootTerragruntConfigPath := util.JoinPath(rootPath, config.OldTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, "not-used", "not-used")
runTerragrunt(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", stagePath, s3BucketName))
runTerragrunt(t, fmt.Sprintf("terragrunt output-all --terragrunt-non-interactive --terragrunt-working-dir %s", stagePath))
runTerragrunt(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s -var terraform_remote_state_s3_bucket=\"%s\"", stagePath, s3BucketName))
}
func TestLocalDownload(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_DOWNLOAD_PATH))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_DOWNLOAD_PATH))
}
func TestLocalDownloadWithHiddenFolder(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_WITH_HIDDEN_FOLDER)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_WITH_HIDDEN_FOLDER))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_WITH_HIDDEN_FOLDER))
}
func TestLocalDownloadWithOldConfig(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_OLD_CONFIG_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_OLD_CONFIG_DOWNLOAD_PATH))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_OLD_CONFIG_DOWNLOAD_PATH))
}
func TestLocalDownloadWithRelativePath(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_RELATIVE_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_RELATIVE_DOWNLOAD_PATH))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_RELATIVE_DOWNLOAD_PATH))
}
func TestRemoteDownload(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_REMOTE_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_REMOTE_DOWNLOAD_PATH))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_REMOTE_DOWNLOAD_PATH))
}
func TestRemoteDownloadWithRelativePath(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_REMOTE_RELATIVE_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_REMOTE_RELATIVE_DOWNLOAD_PATH))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_REMOTE_RELATIVE_DOWNLOAD_PATH))
}
func TestRemoteDownloadOverride(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_OVERRIDE_DOWNLOAD_PATH)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s --terragrunt-source %s", TEST_FIXTURE_OVERRIDE_DOWNLOAD_PATH, "../hello-world"))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s --terragrunt-source %s", TEST_FIXTURE_OVERRIDE_DOWNLOAD_PATH, "../hello-world"))
}
func TestLocalWithBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-lock-table-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpEnvPath := copyEnvironment(t, "fixture-download")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_LOCAL_WITH_BACKEND)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, "not-used")
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
}
func TestLocalWithMissingBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-lock-table-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpEnvPath := copyEnvironment(t, "fixture-download")
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_LOCAL_MISSING_BACKEND)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, "not-used")
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath), os.Stdout, os.Stderr)
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, cli.BackendNotDefined{}, underlying)
}
}
func TestRemoteWithBackend(t *testing.T) {
t.Parallel()
s3BucketName := fmt.Sprintf("terragrunt-test-bucket-%s", strings.ToLower(uniqueId()))
lockTableName := fmt.Sprintf("terragrunt-lock-table-%s", strings.ToLower(uniqueId()))
defer deleteS3Bucket(t, TERRAFORM_REMOTE_STATE_S3_REGION, s3BucketName)
defer cleanupTableForTest(t, lockTableName, TERRAFORM_REMOTE_STATE_S3_REGION)
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_REMOTE_WITH_BACKEND)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_REMOTE_WITH_BACKEND)
rootTerragruntConfigPath := util.JoinPath(rootPath, config.DefaultTerragruntConfigPath)
copyTerragruntConfigAndFillPlaceholders(t, rootTerragruntConfigPath, rootTerragruntConfigPath, s3BucketName, lockTableName, "not-used")
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
}
func TestRemoteWithModuleInRoot(t *testing.T) {
t.Parallel()
tmpEnvPath := copyEnvironment(t, TEST_FIXTURE_REMOTE_MODULE_IN_ROOT)
rootPath := util.JoinPath(tmpEnvPath, TEST_FIXTURE_REMOTE_MODULE_IN_ROOT)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
// Run a second time to make sure the temporary folder can be reused without errors
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", rootPath))
}
// Run terragrunt plan -detailed-exitcode on a folder with some uncreated resources and make sure that you get an exit
// code of "2", which means there are changes to apply.
func TestExitCode(t *testing.T) {
t.Parallel()
rootPath := copyEnvironment(t, TEST_FIXTURE_EXIT_CODE)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_EXIT_CODE)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt plan -detailed-exitcode --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), os.Stdout, os.Stderr)
exitCode, exitCodeErr := shell.GetExitCode(err)
assert.Nil(t, exitCodeErr)
assert.Equal(t, 2, exitCode)
}
func TestExtraArguments(t *testing.T) {
// Do not use t.Parallel() on this test; it will interfere with the other TestExtraArguments.* tests
out := new(bytes.Buffer)
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_EXTRA_ARGS_PATH), out, os.Stderr)
t.Log(out.String())
assert.Contains(t, out.String(), "Hello, World from dev!")
}
func TestExtraArgumentsWithEnv(t *testing.T) {
// Do not use t.Parallel() on this test; it will interfere with the other TestExtraArguments.* tests
out := new(bytes.Buffer)
os.Setenv("TF_VAR_env", "prod")
defer os.Unsetenv("TF_VAR_env")
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_EXTRA_ARGS_PATH), out, os.Stderr)
t.Log(out.String())
assert.Contains(t, out.String(), "Hello, World!")
}
func TestExtraArgumentsWithEnvVarBlock(t *testing.T) {
out := new(bytes.Buffer)
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_ENV_VARS_BLOCK_PATH), out, os.Stderr)
t.Log(out.String())
assert.Contains(t, out.String(), "I'm set in extra_arguments env_vars")
}
func TestExtraArgumentsWithRegion(t *testing.T) {
// Do not use t.Parallel() on this test; it will interfere with the other TestExtraArguments.* tests
out := new(bytes.Buffer)
os.Setenv("TF_VAR_region", "us-west-2")
defer os.Unsetenv("TF_VAR_region")
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_EXTRA_ARGS_PATH), out, os.Stderr)
t.Log(out.String())
assert.Contains(t, out.String(), "Hello, World from Oregon!")
}
func TestPriorityOrderOfArgument(t *testing.T) {
// Do not use t.Parallel() on this test; it will interfere with the other TestExtraArguments.* tests
out := new(bytes.Buffer)
injectedValue := "Injected-directly-by-argument"
runTerragruntRedirectOutput(t, fmt.Sprintf("terragrunt apply -var extra_var=%s --terragrunt-non-interactive --terragrunt-working-dir %s", injectedValue, TEST_FIXTURE_EXTRA_ARGS_PATH), out, os.Stderr)
t.Log(out.String())
// The resulting test value should be the injected variable: extra_arguments are injected before the
// user-supplied parameters, so our -var override of extra_var is the last argument and wins.
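// Illustratively, the final command is assumed to look like:
//   terraform apply -var extra_var=<value from extra_arguments> ... -var extra_var=Injected-directly-by-argument
// and terraform honors the last occurrence of a repeated -var flag.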
assert.Contains(t, out.String(), fmt.Sprintf("test = %s", injectedValue))
}
func TestAutoRetryBasicRerun(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_RERUN)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_RERUN)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.Nil(t, err)
assert.Contains(t, out.String(), "Apply complete!")
}
func TestAutoRetrySkip(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_RERUN)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_RERUN)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --auto-approve --terragrunt-no-auto-retry --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.NotContains(t, out.String(), "Apply complete!")
}
func TestAutoRetryExhaustRetries(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_EXHAUST)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_EXHAUST)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.Contains(t, out.String(), "Failed to load backend")
assert.NotContains(t, out.String(), "Apply complete!")
}
func TestAutoRetryFlagWithRecoverableError(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_RERUN)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_RERUN)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --auto-approve --terragrunt-no-auto-retry --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.NotContains(t, out.String(), "Apply complete!")
}
func TestAutoRetryEnvVarWithRecoverableError(t *testing.T) {
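// No t.Parallel() here: this test mutates process-wide environment state via
// TERRAGRUNT_AUTO_RETRY, which would race with tests running in parallel.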
os.Setenv("TERRAGRUNT_AUTO_RETRY", "false")
defer os.Unsetenv("TERRAGRUNT_AUTO_RETRY")
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_RERUN)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_RERUN)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply --auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.NotNil(t, err)
assert.NotContains(t, out.String(), "Apply complete!")
}
func TestAutoRetryApplyAllDependentModuleRetries(t *testing.T) {
t.Parallel()
out := new(bytes.Buffer)
rootPath := copyEnvironment(t, TEST_FIXTURE_AUTO_RETRY_APPLY_ALL_RETRIES)
modulePath := util.JoinPath(rootPath, TEST_FIXTURE_AUTO_RETRY_APPLY_ALL_RETRIES)
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --auto-approve --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), out, os.Stderr)
assert.Nil(t, err)
s := out.String()
assert.Contains(t, s, "app1 output")
assert.Contains(t, s, "app2 output")
assert.Contains(t, s, "app3 output")
assert.Contains(t, s, "Apply complete!")
}
// This tests that terragrunt properly passes through terraform commands and any number of specified args
func TestTerraformCommandCliArgs(t *testing.T) {
t.Parallel()
testCases := []struct {
command []string
expected string
}{
{
[]string{"version"},
"terraform version",
},
{
[]string{"version", "foo"},
"terraform version foo",
},
{
[]string{"version", "foo", "bar", "baz"},
"terraform version foo bar baz",
},
{
[]string{"version", "foo", "bar", "baz", "foobar"},
"terraform version foo bar baz foobar",
},
}
for _, testCase := range testCases {
cmd := fmt.Sprintf("terragrunt %s --terragrunt-non-interactive --terragrunt-working-dir %s", strings.Join(testCase.command, " "), TEST_FIXTURE_EXTRA_ARGS_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
runTerragruntRedirectOutput(t, cmd, &stdout, &stderr)
output := stdout.String()
errOutput := stderr.String()
assert.True(t, strings.Contains(errOutput, testCase.expected) || strings.Contains(output, testCase.expected))
}
}
// This tests that terragrunt properly passes through terraform commands with sub commands
// and any number of specified args
func TestTerraformSubcommandCliArgs(t *testing.T) {
t.Parallel()
testCases := []struct {
command []string
expected string
}{
{
[]string{"force-unlock"},
"terraform force-unlock",
},
{
[]string{"force-unlock", "foo"},
"terraform force-unlock foo",
},
{
[]string{"force-unlock", "foo", "bar", "baz"},
"terraform force-unlock foo bar baz",
},
{
[]string{"force-unlock", "foo", "bar", "baz", "foobar"},
"terraform force-unlock foo bar baz foobar",
},
}
for _, testCase := range testCases {
cmd := fmt.Sprintf("terragrunt %s --terragrunt-non-interactive --terragrunt-working-dir %s", strings.Join(testCase.command, " "), TEST_FIXTURE_EXTRA_ARGS_PATH)
var (
stdout bytes.Buffer
stderr bytes.Buffer
)
// Call runTerragruntCommand directly because this command is expected to fail, and runTerragruntRedirectOutput would abort the test on the failure we want to inspect.
if err := runTerragruntCommand(t, cmd, &stdout, &stderr); err == nil {
t.Fatalf("Failed to properly fail command: %v.", cmd)
}
output := stdout.String()
errOutput := stderr.String()
assert.True(t, strings.Contains(errOutput, testCase.expected) || strings.Contains(output, testCase.expected))
}
}
func TestPreventDestroy(t *testing.T) {
t.Parallel()
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_PREVENT_DESTROY)
runTerragrunt(t, fmt.Sprintf("terragrunt apply --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_PREVENT_DESTROY))
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt destroy --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_PREVENT_DESTROY), os.Stdout, os.Stderr)
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, cli.ModuleIsProtected{}, underlying)
}
}
func TestPreventDestroyDependencies(t *testing.T) {
t.Parallel()
// Populate module paths.
moduleNames := []string{
"module-a",
"module-b",
"module-c",
"module-d",
"module-e",
}
modulePaths := make(map[string]string, len(moduleNames))
for _, moduleName := range moduleNames {
modulePaths[moduleName] = util.JoinPath(TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES, moduleName)
}
// Cleanup all modules directories.
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES)
for _, modulePath := range modulePaths {
cleanupTerraformFolder(t, modulePath)
}
var (
applyAllStdout bytes.Buffer
applyAllStderr bytes.Buffer
)
// Apply and destroy all modules.
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
if err != nil {
t.Fatalf("apply-all in TestPreventDestroyDependencies failed with error: %v. Full std", err)
}
var (
destroyAllStdout bytes.Buffer
destroyAllStderr bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_PREVENT_DESTROY_DEPENDENCIES), &destroyAllStdout, &destroyAllStderr)
logBufferContentsLineByLine(t, destroyAllStdout, "destroy-all stdout")
logBufferContentsLineByLine(t, destroyAllStderr, "destroy-all stderr")
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, configstack.MultiError{}, underlying)
}
// Check that modules C, D and E were deleted and modules A and B weren't.
for moduleName, modulePath := range modulePaths {
var (
showStdout bytes.Buffer
showStderr bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt show --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, fmt.Sprintf("show stdout for %s", modulePath))
logBufferContentsLineByLine(t, showStderr, fmt.Sprintf("show stderr for %s", modulePath))
assert.NoError(t, err)
output := showStdout.String()
switch moduleName {
case "module-a":
assert.Contains(t, output, "Hello, Module A")
case "module-b":
assert.Contains(t, output, "Hello, Module B")
case "module-c":
assert.NotContains(t, output, "Hello, Module C")
case "module-d":
assert.NotContains(t, output, "Hello, Module D")
case "module-e":
assert.NotContains(t, output, "Hello, Module E")
}
}
}
func TestPreventDestroyDependenciesIncludedConfig(t *testing.T) {
t.Parallel()
// Populate module paths.
moduleNames := []string{
"module-a",
"module-b",
"module-c",
}
modulePaths := make(map[string]string, len(moduleNames))
for _, moduleName := range moduleNames {
modulePaths[moduleName] = util.JoinPath(TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES, moduleName)
}
// Cleanup all modules directories.
cleanupTerraformFolder(t, TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES)
for _, modulePath := range modulePaths {
cleanupTerraformFolder(t, modulePath)
}
var (
applyAllStdout bytes.Buffer
applyAllStderr bytes.Buffer
)
// Apply and destroy all modules.
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
if err != nil {
t.Fatalf("apply-all in TestPreventDestroyDependenciesIncludedConfig failed with error: %v. Full std", err)
}
var (
destroyAllStdout bytes.Buffer
destroyAllStderr bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt destroy-all --terragrunt-non-interactive --terragrunt-working-dir %s", TEST_FIXTURE_LOCAL_INCLUDE_PREVENT_DESTROY_DEPENDENCIES), &destroyAllStdout, &destroyAllStderr)
logBufferContentsLineByLine(t, destroyAllStdout, "destroy-all stdout")
logBufferContentsLineByLine(t, destroyAllStderr, "destroy-all stderr")
if assert.Error(t, err) {
underlying := errors.Unwrap(err)
assert.IsType(t, configstack.MultiError{}, underlying)
}
// Check that module C was destroyed and modules A and B weren't.
for moduleName, modulePath := range modulePaths {
var (
showStdout bytes.Buffer
showStderr bytes.Buffer
)
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt show --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, fmt.Sprintf("show stdout for %s", modulePath))
logBufferContentsLineByLine(t, showStderr, fmt.Sprintf("show stderr for %s", modulePath))
assert.NoError(t, err)
output := showStdout.String()
switch moduleName {
case "module-a":
assert.Contains(t, output, "Hello, Module A")
case "module-b":
assert.Contains(t, output, "Hello, Module B")
case "module-c":
assert.NotContains(t, output, "Hello, Module C")
}
}
}
func TestExcludeDirs(t *testing.T) {
t.Parallel()
// Populate module paths.
moduleNames := []string{
"integration-env/aws/module-aws-a",
"integration-env/gce/module-gce-b",
"integration-env/gce/module-gce-c",
"production-env/aws/module-aws-d",
"production-env/gce/module-gce-e",
}
testCases := []struct {
workingDir string
excludeArgs string
excludedModuleOutputs []string
}{
{TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR, "--terragrunt-exclude-dir */gce", []string{"Module GCE B", "Module GCE C", "Module GCE E"}},
{TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR, "--terragrunt-exclude-dir production-env --terragrunt-exclude-dir **/module-gce-c", []string{"Module GCE C", "Module AWS D", "Module GCE E"}},
{TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR, "--terragrunt-exclude-dir integration-env/gce/module-gce-b --terragrunt-exclude-dir integration-env/gce/module-gce-c --terragrunt-exclude-dir **/module-aws*", []string{"Module AWS A", "Module GCE B", "Module GCE C", "Module AWS D"}},
}
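// The exclude flags above are treated as glob patterns relative to the working
// dir: "*/gce" matches gce directories one level down, while "**/module-aws*"
// matches at any depth (behavior inferred from the expected outputs listed in
// the cases above).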
modulePaths := make(map[string]string, len(moduleNames))
for _, moduleName := range moduleNames {
modulePaths[moduleName] = util.JoinPath(TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR, moduleName)
}
for _, testCase := range testCases {
applyAllStdout := bytes.Buffer{}
applyAllStderr := bytes.Buffer{}
// Cleanup all modules directories.
cleanupTerragruntFolder(t, TEST_FIXTURE_LOCAL_WITH_EXCLUDE_DIR)
for _, modulePath := range modulePaths {
cleanupTerragruntFolder(t, modulePath)
}
// Apply modules according to test cases
err := runTerragruntCommand(t, fmt.Sprintf("terragrunt apply-all --terragrunt-non-interactive --terragrunt-working-dir %s %s", testCase.workingDir, testCase.excludeArgs), &applyAllStdout, &applyAllStderr)
logBufferContentsLineByLine(t, applyAllStdout, "apply-all stdout")
logBufferContentsLineByLine(t, applyAllStderr, "apply-all stderr")
if err != nil {
t.Fatalf("apply-all in TestExcludeDirs failed with error: %v. Full std", err)
}
// Check that the excluded module output is not present
for _, modulePath := range modulePaths {
showStdout := bytes.Buffer{}
showStderr := bytes.Buffer{}
err = runTerragruntCommand(t, fmt.Sprintf("terragrunt show --terragrunt-non-interactive --terragrunt-working-dir %s", modulePath), &showStdout, &showStderr)
logBufferContentsLineByLine(t, showStdout, fmt.Sprintf("show stdout for %s", modulePath))
logBufferContentsLineByLine(t, showStderr, fmt.Sprintf("show stderr for %s", modulePath))
assert.NoError(t, err)
output := showStdout.String()
for _, excludedModuleOutput := range testCase.excludedModuleOutputs {
assert.NotContains(t, output, excludedModuleOutput)
}
}
}
}
func logBufferContentsLineByLine(t *testing.T, out bytes.Buffer, label string) {
t.Logf("[%s] Full contents of %s:", t.Name(), label)
lines := strings.Split(out.String(), "\n")
for _, line := range lines {
t.Logf("[%s] %s", t.Name(), line)
}
}
func cleanupTerraformFolder(t *testing.T, templatesPath string) {
removeFile(t, util.JoinPath(templatesPath, TERRAFORM_STATE))
removeFile(t, util.JoinPath(templatesPath, TERRAFORM_STATE_BACKUP))
removeFolder(t, util.JoinPath(templatesPath, TERRAFORM_FOLDER))
}
func cleanupTerragruntFolder(t *testing.T, templatesPath string) {
removeFolder(t, util.JoinPath(templatesPath, TERRAGRUNT_CACHE))
}
func removeFile(t *testing.T, path string) {
if util.FileExists(path) {
if err := os.Remove(path); err != nil {
t.Fatalf("Error while removing %s: %v", path, err)
}
}
}
func removeFolder(t *testing.T, path string) {
if util.FileExists(path) {
if err := os.RemoveAll(path); err != nil {
t.Fatalf("Error while removing %s: %v", path, err)
}
}
}
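// runTerragruntCommand drives Terragrunt in-process through its CLI app rather
// than exec-ing a separate binary, which is what lets these tests capture
// stdout and stderr with plain bytes.Buffer writers.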
func runTerragruntCommand(t *testing.T, command string, writer io.Writer, errwriter io.Writer) error {
args := strings.Split(command, " ")
app := cli.CreateTerragruntCli("TEST", writer, errwriter)
return app.Run(args)
}
func runTerragrunt(t *testing.T, command string) {
runTerragruntRedirectOutput(t, command, os.Stdout, os.Stderr)
}
func runTerragruntRedirectOutput(t *testing.T, command string, writer io.Writer, errwriter io.Writer) {
if err := runTerragruntCommand(t, command, writer, errwriter); err != nil {
t.Fatalf("Failed to run Terragrunt command '%s' due to error: %s", command, err)
}
}
func copyEnvironment(t *testing.T, environmentPath string) string {
tmpDir, err := ioutil.TempDir("", "terragrunt-test")
if err != nil {
t.Fatalf("Failed to create temp dir due to error: %v", err)
}
t.Logf("Copying %s to %s", environmentPath, tmpDir)
err = filepath.Walk(environmentPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
// The info.Mode() check is to catch symlinks to directories:
// https://stackoverflow.com/questions/32908277/fileinfo-isdir-isnt-detecting-directory
if info.IsDir() || (info.Mode()&os.ModeSymlink) == os.ModeSymlink {
return nil
}
destPath := util.JoinPath(tmpDir, path)
destPathDir := filepath.Dir(destPath)
if err := os.MkdirAll(destPathDir, 0777); err != nil {
return err
}
return copyFile(path, destPath)
})
if err != nil {
t.Fatalf("Error walking file path %s due to error: %v", environmentPath, err)
}
return tmpDir
}
func copyFile(srcPath string, destPath string) error {
contents, err := ioutil.ReadFile(srcPath)
if err != nil {
return err
}
return ioutil.WriteFile(destPath, contents, 0644)
}
func createTmpTerragruntConfigWithParentAndChild(t *testing.T, parentPath string, childRelPath string, s3BucketName string, parentConfigFileName string, childConfigFileName string) string {
tmpDir, err := ioutil.TempDir("", "terragrunt-parent-child-test")
if err != nil {
t.Fatalf("Failed to create temp dir due to error: %v", err)
}
childDestPath := util.JoinPath(tmpDir, childRelPath)
if err := os.MkdirAll(childDestPath, 0777); err != nil {
t.Fatalf("Failed to create temp dir %s due to error %v", childDestPath, err)
}
parentTerragruntSrcPath := util.JoinPath(parentPath, parentConfigFileName)
parentTerragruntDestPath := util.JoinPath(tmpDir, parentConfigFileName)
copyTerragruntConfigAndFillPlaceholders(t, parentTerragruntSrcPath, parentTerragruntDestPath, s3BucketName, "not-used", "not-used")
childTerragruntSrcPath := util.JoinPath(util.JoinPath(parentPath, childRelPath), childConfigFileName)
childTerragruntDestPath := util.JoinPath(childDestPath, childConfigFileName)
copyTerragruntConfigAndFillPlaceholders(t, childTerragruntSrcPath, childTerragruntDestPath, s3BucketName, "not-used", "not-used")
return childTerragruntDestPath
}
func createTmpTerragruntConfig(t *testing.T, templatesPath string, s3BucketName string, lockTableName string, configFileName string) string {
tmpFolder, err := ioutil.TempDir("", "terragrunt-test")
if err != nil {
t.Fatalf("Failed to create temp folder due to error: %v", err)
}
tmpTerragruntConfigFile := util.JoinPath(tmpFolder, configFileName)
originalTerragruntConfigPath := util.JoinPath(templatesPath, configFileName)
copyTerragruntConfigAndFillPlaceholders(t, originalTerragruntConfigPath, tmpTerragruntConfigFile, s3BucketName, lockTableName, "not-used")
return tmpTerragruntConfigFile
}
func copyTerragruntConfigAndFillPlaceholders(t *testing.T, configSrcPath string, configDestPath string, s3BucketName string, lockTableName string, region string) {
contents, err := util.ReadFileAsString(configSrcPath)
if err != nil {
t.Fatalf("Error reading Terragrunt config at %s: %v", configSrcPath, err)
}
contents = strings.Replace(contents, "__FILL_IN_BUCKET_NAME__", s3BucketName, -1)
contents = strings.Replace(contents, "__FILL_IN_LOCK_TABLE_NAME__", lockTableName, -1)
contents = strings.Replace(contents, "__FILL_IN_REGION__", region, -1)
if err := ioutil.WriteFile(configDestPath, []byte(contents), 0444); err != nil {
t.Fatalf("Error writing temp Terragrunt config to %s: %v", configDestPath, err)
}
}
// Returns a unique (ish) id we can attach to resources and tfstate files so they don't conflict with each other
// Uses base 62 to generate a 6 character string that's unlikely to collide with the handful of tests we run in
// parallel. Based on code here: http://stackoverflow.com/a/9543797/483528
func uniqueId() string {
const BASE_62_CHARS = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
const UNIQUE_ID_LENGTH = 6 // Should be good for 62^6 = 56+ billion combinations
var out bytes.Buffer
for i := 0; i < UNIQUE_ID_LENGTH; i++ {
out.WriteByte(BASE_62_CHARS[rand.Intn(len(BASE_62_CHARS))])
}
return out.String()
}
// Check that the S3 Bucket of the given name and region exists. Terragrunt should create this bucket during the test.
// Also check that the bucket was tagged properly.
func validateS3BucketExistsAndIsTagged(t *testing.T, awsRegion string, bucketName string, expectedTags map[string]string) {
mockOptions, err := options.NewTerragruntOptionsForTest("integration_test")
if err != nil {
t.Fatalf("Error creating mockOptions: %v", err)
}
sessionConfig := &aws_helper.AwsSessionConfig{
Region: awsRegion,
}
s3Client, err := remote.CreateS3Client(sessionConfig, mockOptions)
if err != nil {
t.Fatalf("Error creating S3 client: %v", err)
}
remoteStateConfig := remote.RemoteStateConfigS3{Bucket: bucketName, Region: awsRegion}
assert.True(t, remote.DoesS3BucketExist(s3Client, &remoteStateConfig), "Terragrunt failed to create remote state S3 bucket %s", bucketName)
if expectedTags != nil {
assertS3Tags(expectedTags, bucketName, s3Client, t)
}
}
// Check that the DynamoDB table of the given name and region exists. Terragrunt should create this table during the test.
// Also check that the table was tagged properly.
func validateDynamoDBTableExistsAndIsTagged(t *testing.T, awsRegion string, tableName string, expectedTags map[string]string) {
client := createDynamoDbClientForTest(t, awsRegion)
description, err := client.DescribeTable(&dynamodb.DescribeTableInput{TableName: aws.String(tableName)})
if err != nil {
// DescribeTable returns a ResourceNotFoundException if the table does not exist
t.Fatal(err)
}
tags, err := client.ListTagsOfResource(&dynamodb.ListTagsOfResourceInput{ResourceArn: description.Table.TableArn})
if err != nil {
t.Fatal(err)
}
actualTags := make(map[string]string)
for _, element := range tags.Tags {
actualTags[*element.Key] = *element.Value
}
assert.Equal(t, expectedTags, actualTags, "Did not find expected tags on dynamo table.")
}
func assertS3Tags(expectedTags map[string]string, bucketName string, client *s3.S3, t *testing.T) {
in := s3.GetBucketTaggingInput{}
in.SetBucket(bucketName)
tags, err := client.GetBucketTagging(&in)
if err != nil {
t.Fatal(err)
}
actualTags := make(map[string]string)
for _, element := range tags.TagSet {
actualTags[*element.Key] = *element.Value
}
assert.Equal(t, expectedTags, actualTags, "Did not find expected tags on s3 bucket.")
}
// Delete the specified S3 bucket to clean up after a test
func deleteS3Bucket(t *testing.T, awsRegion string, bucketName string) {
mockOptions, err := options.NewTerragruntOptionsForTest("integration_test")
if err != nil {
t.Fatalf("Error creating mockOptions: %v", err)
}
sessionConfig := &aws_helper.AwsSessionConfig{
Region: awsRegion,
}
s3Client, err := remote.CreateS3Client(sessionConfig, mockOptions)
if err != nil {
t.Fatalf("Error creating S3 client: %v", err)
}
t.Logf("Deleting test s3 bucket %s", bucketName)
out, err := s3Client.ListObjectVersions(&s3.ListObjectVersionsInput{Bucket: aws.String(bucketName)})
if err != nil {
t.Fatalf("Failed to list object versions in s3 bucket %s: %v", bucketName, err)
}
objectIdentifiers := []*s3.ObjectIdentifier{}
for _, version := range out.Versions {
objectIdentifiers = append(objectIdentifiers, &s3.ObjectIdentifier{
Key: version.Key,
VersionId: version.VersionId,
})
}
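// Note: ListObjectVersions also returns DeleteMarkers, which this loop does not
// collect; a heavily versioned bucket might need them appended as well. A sketch,
// assuming the same SDK types:
//   for _, marker := range out.DeleteMarkers {
//       objectIdentifiers = append(objectIdentifiers, &s3.ObjectIdentifier{Key: marker.Key, VersionId: marker.VersionId})
//   }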
if len(objectIdentifiers) > 0 {
deleteInput := &s3.DeleteObjectsInput{
Bucket: aws.String(bucketName),
Delete: &s3.Delete{Objects: objectIdentifiers},
}
if _, err := s3Client.DeleteObjects(deleteInput); err != nil {
t.Fatalf("Error deleting all versions of all objects in bucket %s: %v", bucketName, err)
}
}
if _, err := s3Client.DeleteBucket(&s3.DeleteBucketInput{Bucket: aws.String(bucketName)}); err != nil {
t.Fatalf("Failed to delete S3 bucket %s: %v", bucketName, err)
}
}
// Create an authenticated client for DynamoDB
func createDynamoDbClient(awsRegion, awsProfile string, iamRoleArn string) (*dynamodb.DynamoDB, error) {
mockOptions, err := options.NewTerragruntOptionsForTest("integration_test")
if err != nil {
return nil, err
}
sessionConfig := &aws_helper.AwsSessionConfig{
Region: awsRegion,
Profile: awsProfile,
RoleArn: iamRoleArn,
}
session, err := aws_helper.CreateAwsSession(sessionConfig, mockOptions)
if err != nil {
return nil, err
}
return dynamodb.New(session), nil
}
func createDynamoDbClientForTest(t *testing.T, awsRegion string) *dynamodb.DynamoDB {
client, err := createDynamoDbClient(awsRegion, "", "")
if err != nil {
t.Fatal(err)
}
return client
}
func cleanupTableForTest(t *testing.T, tableName string, awsRegion string) {
client := createDynamoDbClientForTest(t, awsRegion)
err := terragruntDynamoDb.DeleteTable(tableName, client)
assert.Nil(t, err, "Unexpected error: %v", err)
}
google/cloud/recaptchaenterprise/v1beta1/recaptchaenterprise-v1beta1-py/tests/unit/gapic/recaptchaenterprise_v1beta1/test_recaptcha_enterprise_service_v1_beta1.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import packaging.version
import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1 import RecaptchaEnterpriseServiceV1Beta1AsyncClient
from google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1 import RecaptchaEnterpriseServiceV1Beta1Client
from google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1 import pagers
from google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1 import transports
from google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1.transports.base import _GOOGLE_AUTH_VERSION
from google.cloud.recaptchaenterprise_v1beta1.types import recaptchaenterprise
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
import google.auth
# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
# through google-api-core:
# - Delete the auth "less than" test cases
# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
reason="This test requires google-auth < 1.25.0",
)
requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
reason="This test requires google-auth >= 1.25.0",
)
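# Usage sketch (illustrative only): version-sensitive tests are decorated
# with one of the markers above, e.g.
#
#     @requires_google_auth_gte_1_25_0
#     def test_some_newer_auth_behavior():
#         ...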
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert RecaptchaEnterpriseServiceV1Beta1Client._get_default_mtls_endpoint(None) is None
assert RecaptchaEnterpriseServiceV1Beta1Client._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert RecaptchaEnterpriseServiceV1Beta1Client._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert RecaptchaEnterpriseServiceV1Beta1Client._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert RecaptchaEnterpriseServiceV1Beta1Client._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert RecaptchaEnterpriseServiceV1Beta1Client._get_default_mtls_endpoint(non_googleapi) == non_googleapi
@pytest.mark.parametrize("client_class", [
RecaptchaEnterpriseServiceV1Beta1Client,
RecaptchaEnterpriseServiceV1Beta1AsyncClient,
])
def test_recaptcha_enterprise_service_v1_beta1_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'recaptchaenterprise.googleapis.com:443'
@pytest.mark.parametrize("transport_class,transport_name", [
(transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, "grpc"),
(transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_recaptcha_enterprise_service_v1_beta1_client_service_account_always_use_jwt(transport_class, transport_name):
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [
RecaptchaEnterpriseServiceV1Beta1Client,
RecaptchaEnterpriseServiceV1Beta1AsyncClient,
])
def test_recaptcha_enterprise_service_v1_beta1_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == 'recaptchaenterprise.googleapis.com:443'
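# Note: from_service_account_json is an alias of from_service_account_file in
# generated clients, which is why both constructions above are checked against
# the same mocked factory.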
def test_recaptcha_enterprise_service_v1_beta1_client_get_transport_class():
transport = RecaptchaEnterpriseServiceV1Beta1Client.get_transport_class()
available_transports = [
transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport,
]
assert transport in available_transports
transport = RecaptchaEnterpriseServiceV1Beta1Client.get_transport_class("grpc")
assert transport == transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(RecaptchaEnterpriseServiceV1Beta1Client, transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, "grpc"),
(RecaptchaEnterpriseServiceV1Beta1AsyncClient, transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport, "grpc_asyncio"),
])
@mock.patch.object(RecaptchaEnterpriseServiceV1Beta1Client, "DEFAULT_ENDPOINT", modify_default_endpoint(RecaptchaEnterpriseServiceV1Beta1Client))
@mock.patch.object(RecaptchaEnterpriseServiceV1Beta1AsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RecaptchaEnterpriseServiceV1Beta1AsyncClient))
def test_recaptcha_enterprise_service_v1_beta1_client_client_options(client_class, transport_class, transport_name):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(RecaptchaEnterpriseServiceV1Beta1Client, 'get_transport_class') as gtc:
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials()
)
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(RecaptchaEnterpriseServiceV1Beta1Client, 'get_transport_class') as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = client_class()
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
(RecaptchaEnterpriseServiceV1Beta1Client, transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, "grpc", "true"),
(RecaptchaEnterpriseServiceV1Beta1AsyncClient, transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport, "grpc_asyncio", "true"),
(RecaptchaEnterpriseServiceV1Beta1Client, transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, "grpc", "false"),
(RecaptchaEnterpriseServiceV1Beta1AsyncClient, transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport, "grpc_asyncio", "false"),
])
@mock.patch.object(RecaptchaEnterpriseServiceV1Beta1Client, "DEFAULT_ENDPOINT", modify_default_endpoint(RecaptchaEnterpriseServiceV1Beta1Client))
@mock.patch.object(RecaptchaEnterpriseServiceV1Beta1AsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RecaptchaEnterpriseServiceV1Beta1AsyncClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_recaptcha_enterprise_service_v1_beta1_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch.object(transport_class, '__init__') as patched:
with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
patched.return_value = None
client = client_class()
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(RecaptchaEnterpriseServiceV1Beta1Client, transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, "grpc"),
(RecaptchaEnterpriseServiceV1Beta1AsyncClient, transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_recaptcha_enterprise_service_v1_beta1_client_client_options_scopes(client_class, transport_class, transport_name):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class,transport_class,transport_name", [
(RecaptchaEnterpriseServiceV1Beta1Client, transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, "grpc"),
(RecaptchaEnterpriseServiceV1Beta1AsyncClient, transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport, "grpc_asyncio"),
])
def test_recaptcha_enterprise_service_v1_beta1_client_client_options_credentials_file(client_class, transport_class, transport_name):
# Check the case credentials file is provided.
options = client_options.ClientOptions(
credentials_file="credentials.json"
)
with mock.patch.object(transport_class, '__init__') as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
def test_recaptcha_enterprise_service_v1_beta1_client_client_options_from_dict():
with mock.patch('google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1.transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = RecaptchaEnterpriseServiceV1Beta1Client(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
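# The per-RPC tests below all follow the same pattern: patch '__call__' on the
# transport's stub wrapper, invoke the client method, then inspect
# call.mock_calls for the request object and metadata. Condensed sketch, taken
# from the tests themselves:
#
#     with mock.patch.object(type(client.transport.create_assessment),
#                            '__call__') as call:
#         call.return_value = recaptchaenterprise.Assessment()
#         client.create_assessment(request)
#         _, args, _ = call.mock_calls[0]
#         assert args[0] == request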
def test_create_assessment(transport: str = 'grpc', request_type=recaptchaenterprise.CreateAssessmentRequest):
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_assessment),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recaptchaenterprise.Assessment(
name='name_value',
score=0.54,
reasons=[recaptchaenterprise.Assessment.ClassificationReason.AUTOMATION],
)
response = client.create_assessment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.CreateAssessmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.Assessment)
assert response.name == 'name_value'
assert math.isclose(response.score, 0.54, rel_tol=1e-6)
assert response.reasons == [recaptchaenterprise.Assessment.ClassificationReason.AUTOMATION]
def test_create_assessment_from_dict():
test_create_assessment(request_type=dict)
def test_create_assessment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_assessment),
'__call__') as call:
client.create_assessment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.CreateAssessmentRequest()
@pytest.mark.asyncio
async def test_create_assessment_async(transport: str = 'grpc_asyncio', request_type=recaptchaenterprise.CreateAssessmentRequest):
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_assessment),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Assessment(
name='name_value',
score=0.54,
reasons=[recaptchaenterprise.Assessment.ClassificationReason.AUTOMATION],
))
response = await client.create_assessment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.CreateAssessmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.Assessment)
assert response.name == 'name_value'
assert math.isclose(response.score, 0.54, rel_tol=1e-6)
assert response.reasons == [recaptchaenterprise.Assessment.ClassificationReason.AUTOMATION]
@pytest.mark.asyncio
async def test_create_assessment_async_from_dict():
await test_create_assessment_async(request_type=dict)
def test_create_assessment_field_headers():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.CreateAssessmentRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_assessment),
'__call__') as call:
call.return_value = recaptchaenterprise.Assessment()
client.create_assessment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
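# The 'x-goog-request-params' entry asserted above is the gRPC routing header;
# the GAPIC layer builds it from URI path fields on the request via
# gapic_v1.routing_header.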
@pytest.mark.asyncio
async def test_create_assessment_field_headers_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.CreateAssessmentRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_assessment),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Assessment())
await client.create_assessment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_create_assessment_flattened():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_assessment),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recaptchaenterprise.Assessment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_assessment(
parent='parent_value',
assessment=recaptchaenterprise.Assessment(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].assessment == recaptchaenterprise.Assessment(name='name_value')
def test_create_assessment_flattened_error():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_assessment(
recaptchaenterprise.CreateAssessmentRequest(),
parent='parent_value',
assessment=recaptchaenterprise.Assessment(name='name_value'),
)
@pytest.mark.asyncio
async def test_create_assessment_flattened_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_assessment),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Assessment())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_assessment(
parent='parent_value',
assessment=recaptchaenterprise.Assessment(name='name_value'),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].parent == 'parent_value'
assert args[0].assessment == recaptchaenterprise.Assessment(name='name_value')
@pytest.mark.asyncio
async def test_create_assessment_flattened_error_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_assessment(
recaptchaenterprise.CreateAssessmentRequest(),
parent='parent_value',
assessment=recaptchaenterprise.Assessment(name='name_value'),
)
def test_annotate_assessment(transport: str = 'grpc', request_type=recaptchaenterprise.AnnotateAssessmentRequest):
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.annotate_assessment),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recaptchaenterprise.AnnotateAssessmentResponse(
)
response = client.annotate_assessment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.AnnotateAssessmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.AnnotateAssessmentResponse)
def test_annotate_assessment_from_dict():
test_annotate_assessment(request_type=dict)
def test_annotate_assessment_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.annotate_assessment),
'__call__') as call:
client.annotate_assessment()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.AnnotateAssessmentRequest()
@pytest.mark.asyncio
async def test_annotate_assessment_async(transport: str = 'grpc_asyncio', request_type=recaptchaenterprise.AnnotateAssessmentRequest):
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.annotate_assessment),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.AnnotateAssessmentResponse(
))
response = await client.annotate_assessment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.AnnotateAssessmentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.AnnotateAssessmentResponse)
@pytest.mark.asyncio
async def test_annotate_assessment_async_from_dict():
await test_annotate_assessment_async(request_type=dict)
def test_annotate_assessment_field_headers():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.AnnotateAssessmentRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.annotate_assessment),
'__call__') as call:
call.return_value = recaptchaenterprise.AnnotateAssessmentResponse()
client.annotate_assessment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_annotate_assessment_field_headers_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.AnnotateAssessmentRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.annotate_assessment),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.AnnotateAssessmentResponse())
await client.annotate_assessment(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_annotate_assessment_flattened():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.annotate_assessment),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recaptchaenterprise.AnnotateAssessmentResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.annotate_assessment(
name='name_value',
annotation=recaptchaenterprise.AnnotateAssessmentRequest.Annotation.LEGITIMATE,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].annotation == recaptchaenterprise.AnnotateAssessmentRequest.Annotation.LEGITIMATE
def test_annotate_assessment_flattened_error():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.annotate_assessment(
recaptchaenterprise.AnnotateAssessmentRequest(),
name='name_value',
annotation=recaptchaenterprise.AnnotateAssessmentRequest.Annotation.LEGITIMATE,
)
@pytest.mark.asyncio
async def test_annotate_assessment_flattened_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.annotate_assessment),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.AnnotateAssessmentResponse())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.annotate_assessment(
name='name_value',
annotation=recaptchaenterprise.AnnotateAssessmentRequest.Annotation.LEGITIMATE,
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0].name == 'name_value'
assert args[0].annotation == recaptchaenterprise.AnnotateAssessmentRequest.Annotation.LEGITIMATE
@pytest.mark.asyncio
async def test_annotate_assessment_flattened_error_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.annotate_assessment(
recaptchaenterprise.AnnotateAssessmentRequest(),
name='name_value',
annotation=recaptchaenterprise.AnnotateAssessmentRequest.Annotation.LEGITIMATE,
)
def test_create_key(transport: str = 'grpc', request_type=recaptchaenterprise.CreateKeyRequest):
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_key),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recaptchaenterprise.Key(
name='name_value',
display_name='display_name_value',
web_settings=recaptchaenterprise.WebKeySettings(enforce_allowed_domains=True),
)
response = client.create_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.CreateKeyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.Key)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
def test_create_key_from_dict():
test_create_key(request_type=dict)
def test_create_key_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_key),
'__call__') as call:
client.create_key()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.CreateKeyRequest()
@pytest.mark.asyncio
async def test_create_key_async(transport: str = 'grpc_asyncio', request_type=recaptchaenterprise.CreateKeyRequest):
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_key),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Key(
name='name_value',
display_name='display_name_value',
))
response = await client.create_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.CreateKeyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.Key)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
@pytest.mark.asyncio
async def test_create_key_async_from_dict():
await test_create_key_async(request_type=dict)
def test_create_key_field_headers():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.CreateKeyRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_key),
'__call__') as call:
call.return_value = recaptchaenterprise.Key()
client.create_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_create_key_field_headers_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.CreateKeyRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_key),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Key())
await client.create_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_keys(transport: str = 'grpc', request_type=recaptchaenterprise.ListKeysRequest):
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recaptchaenterprise.ListKeysResponse(
next_page_token='next_page_token_value',
)
response = client.list_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.ListKeysRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListKeysPager)
assert response.next_page_token == 'next_page_token_value'
def test_list_keys_from_dict():
test_list_keys(request_type=dict)
def test_list_keys_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__') as call:
client.list_keys()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.ListKeysRequest()
@pytest.mark.asyncio
async def test_list_keys_async(transport: str = 'grpc_asyncio', request_type=recaptchaenterprise.ListKeysRequest):
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.ListKeysResponse(
next_page_token='next_page_token_value',
))
response = await client.list_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.ListKeysRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListKeysAsyncPager)
assert response.next_page_token == 'next_page_token_value'
@pytest.mark.asyncio
async def test_list_keys_async_from_dict():
await test_list_keys_async(request_type=dict)
def test_list_keys_field_headers():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.ListKeysRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__') as call:
call.return_value = recaptchaenterprise.ListKeysResponse()
client.list_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_list_keys_field_headers_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.ListKeysRequest()
request.parent = 'parent/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.ListKeysResponse())
await client.list_keys(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'parent=parent/value',
) in kw['metadata']
def test_list_keys_pager():
client = RecaptchaEnterpriseServiceV1Beta1Client(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
],
next_page_token='abc',
),
recaptchaenterprise.ListKeysResponse(
keys=[],
next_page_token='def',
),
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
],
next_page_token='ghi',
),
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
],
),
RuntimeError,
)
metadata = ()
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('parent', ''),
)),
)
pager = client.list_keys(request={})
assert pager._metadata == metadata
results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, recaptchaenterprise.Key)
for i in results)
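# The pager above transparently issues follow-up ListKeys calls as iteration
# crosses page boundaries, driven by next_page_token; the trailing
# RuntimeError in side_effect would surface any unexpected extra fetch.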
def test_list_keys_pages():
client = RecaptchaEnterpriseServiceV1Beta1Client(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__') as call:
# Set the response to a series of pages.
call.side_effect = (
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
],
next_page_token='abc',
),
recaptchaenterprise.ListKeysResponse(
keys=[],
next_page_token='def',
),
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
],
next_page_token='ghi',
),
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
],
),
RuntimeError,
)
pages = list(client.list_keys(request={}).pages)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_keys_async_pager():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
],
next_page_token='abc',
),
recaptchaenterprise.ListKeysResponse(
keys=[],
next_page_token='def',
),
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
],
next_page_token='ghi',
),
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
],
),
RuntimeError,
)
        async_pager = await client.list_keys(request={})
assert async_pager.next_page_token == 'abc'
responses = []
async for response in async_pager:
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, recaptchaenterprise.Key)
for i in responses)
@pytest.mark.asyncio
async def test_list_keys_async_pages():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.list_keys),
'__call__', new_callable=mock.AsyncMock) as call:
# Set the response to a series of pages.
call.side_effect = (
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
],
next_page_token='abc',
),
recaptchaenterprise.ListKeysResponse(
keys=[],
next_page_token='def',
),
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
],
next_page_token='ghi',
),
recaptchaenterprise.ListKeysResponse(
keys=[
recaptchaenterprise.Key(),
recaptchaenterprise.Key(),
],
),
RuntimeError,
)
pages = []
async for page_ in (await client.list_keys(request={})).pages:
pages.append(page_)
for page_, token in zip(pages, ['abc','def','ghi', '']):
assert page_.raw_page.next_page_token == token
def test_get_key(transport: str = 'grpc', request_type=recaptchaenterprise.GetKeyRequest):
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_key),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recaptchaenterprise.Key(
name='name_value',
display_name='display_name_value',
web_settings=recaptchaenterprise.WebKeySettings(enforce_allowed_domains=True),
)
response = client.get_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.GetKeyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.Key)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
def test_get_key_from_dict():
test_get_key(request_type=dict)
def test_get_key_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_key),
'__call__') as call:
client.get_key()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.GetKeyRequest()
@pytest.mark.asyncio
async def test_get_key_async(transport: str = 'grpc_asyncio', request_type=recaptchaenterprise.GetKeyRequest):
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_key),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Key(
name='name_value',
display_name='display_name_value',
))
response = await client.get_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.GetKeyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.Key)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
@pytest.mark.asyncio
async def test_get_key_async_from_dict():
await test_get_key_async(request_type=dict)
def test_get_key_field_headers():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.GetKeyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_key),
'__call__') as call:
call.return_value = recaptchaenterprise.Key()
client.get_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_get_key_field_headers_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.GetKeyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_key),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Key())
await client.get_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_update_key(transport: str = 'grpc', request_type=recaptchaenterprise.UpdateKeyRequest):
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_key),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = recaptchaenterprise.Key(
name='name_value',
display_name='display_name_value',
web_settings=recaptchaenterprise.WebKeySettings(enforce_allowed_domains=True),
)
response = client.update_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.UpdateKeyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.Key)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
def test_update_key_from_dict():
test_update_key(request_type=dict)
def test_update_key_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_key),
'__call__') as call:
client.update_key()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.UpdateKeyRequest()
@pytest.mark.asyncio
async def test_update_key_async(transport: str = 'grpc_asyncio', request_type=recaptchaenterprise.UpdateKeyRequest):
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_key),
'__call__') as call:
# Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Key(
name='name_value',
display_name='display_name_value',
))
response = await client.update_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.UpdateKeyRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, recaptchaenterprise.Key)
assert response.name == 'name_value'
assert response.display_name == 'display_name_value'
@pytest.mark.asyncio
async def test_update_key_async_from_dict():
await test_update_key_async(request_type=dict)
def test_update_key_field_headers():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.UpdateKeyRequest()
request.key.name = 'key.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_key),
'__call__') as call:
call.return_value = recaptchaenterprise.Key()
client.update_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'key.name=key.name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_update_key_field_headers_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.UpdateKeyRequest()
request.key.name = 'key.name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.update_key),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(recaptchaenterprise.Key())
await client.update_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'key.name=key.name/value',
) in kw['metadata']
def test_delete_key(transport: str = 'grpc', request_type=recaptchaenterprise.DeleteKeyRequest):
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_key),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = None
response = client.delete_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.DeleteKeyRequest()
# Establish that the response is the type that we expect.
assert response is None
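# DeleteKey maps to google.protobuf.Empty on the wire, which the client
# surfaces as None -- hence the identity check above.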
def test_delete_key_from_dict():
test_delete_key(request_type=dict)
def test_delete_key_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport='grpc',
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_key),
'__call__') as call:
client.delete_key()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.DeleteKeyRequest()
@pytest.mark.asyncio
async def test_delete_key_async(transport: str = 'grpc_asyncio', request_type=recaptchaenterprise.DeleteKeyRequest):
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_key),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
response = await client.delete_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == recaptchaenterprise.DeleteKeyRequest()
# Establish that the response is the type that we expect.
assert response is None
@pytest.mark.asyncio
async def test_delete_key_async_from_dict():
await test_delete_key_async(request_type=dict)
def test_delete_key_field_headers():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.DeleteKeyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_key),
'__call__') as call:
call.return_value = None
client.delete_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
@pytest.mark.asyncio
async def test_delete_key_field_headers_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = recaptchaenterprise.DeleteKeyRequest()
request.name = 'name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.delete_key),
'__call__') as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
await client.delete_key(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'name=name/value',
) in kw['metadata']
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RecaptchaEnterpriseServiceV1Beta1Client(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide scopes and a transport instance.
transport = transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RecaptchaEnterpriseServiceV1Beta1Client(
client_options={"scopes": ["1", "2"]},
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RecaptchaEnterpriseServiceV1Beta1Client(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
@pytest.mark.parametrize("transport_class", [
transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport,
transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport,
)
def test_recaptcha_enterprise_service_v1_beta1_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.RecaptchaEnterpriseServiceV1Beta1Transport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json"
)
def test_recaptcha_enterprise_service_v1_beta1_base_transport():
# Instantiate the base transport.
with mock.patch('google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1.transports.RecaptchaEnterpriseServiceV1Beta1Transport.__init__') as Transport:
Transport.return_value = None
transport = transports.RecaptchaEnterpriseServiceV1Beta1Transport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'create_assessment',
'annotate_assessment',
'create_key',
'list_keys',
'get_key',
'update_key',
'delete_key',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
@requires_google_auth_gte_1_25_0
def test_recaptcha_enterprise_service_v1_beta1_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1.transports.RecaptchaEnterpriseServiceV1Beta1Transport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RecaptchaEnterpriseServiceV1Beta1Transport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json",
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@requires_google_auth_lt_1_25_0
def test_recaptcha_enterprise_service_v1_beta1_base_transport_with_credentials_file_old_google_auth():
# Instantiate the base transport with a credentials file
with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1.transports.RecaptchaEnterpriseServiceV1Beta1Transport._prep_wrapped_messages') as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RecaptchaEnterpriseServiceV1Beta1Transport(
credentials_file="credentials.json",
quota_project_id="octopus",
)
load_creds.assert_called_once_with("credentials.json", scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
def test_recaptcha_enterprise_service_v1_beta1_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.recaptchaenterprise_v1beta1.services.recaptcha_enterprise_service_v1_beta1.transports.RecaptchaEnterpriseServiceV1Beta1Transport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RecaptchaEnterpriseServiceV1Beta1Transport()
adc.assert_called_once()
@requires_google_auth_gte_1_25_0
def test_recaptcha_enterprise_service_v1_beta1_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RecaptchaEnterpriseServiceV1Beta1Client()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id=None,
)
@requires_google_auth_lt_1_25_0
def test_recaptcha_enterprise_service_v1_beta1_auth_adc_old_google_auth():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RecaptchaEnterpriseServiceV1Beta1Client()
adc.assert_called_once_with(
scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id=None,
)
@pytest.mark.parametrize(
"transport_class",
[
transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport,
transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport,
],
)
@requires_google_auth_gte_1_25_0
def test_recaptcha_enterprise_service_v1_beta1_transport_auth_adc(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default', autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus", scopes=["1", "2"])
adc.assert_called_once_with(
scopes=["1", "2"],
default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class",
[
transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport,
transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport,
],
)
@requires_google_auth_lt_1_25_0
def test_recaptcha_enterprise_service_v1_beta1_transport_auth_adc_old_google_auth(transport_class):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class(quota_project_id="octopus")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
quota_project_id="octopus",
)
@pytest.mark.parametrize(
"transport_class,grpc_helpers",
[
(transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, grpc_helpers),
(transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport, grpc_helpers_async)
],
)
def test_recaptcha_enterprise_service_v1_beta1_transport_create_channel(transport_class, grpc_helpers):
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
grpc_helpers, "create_channel", autospec=True
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
adc.return_value = (creds, None)
transport_class(
quota_project_id="octopus",
scopes=["1", "2"]
)
create_channel.assert_called_with(
"recaptchaenterprise.googleapis.com:443",
credentials=creds,
credentials_file=None,
quota_project_id="octopus",
default_scopes=(
'https://www.googleapis.com/auth/cloud-platform',
),
scopes=["1", "2"],
default_host="recaptchaenterprise.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize("transport_class", [transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport])
def test_recaptcha_enterprise_service_v1_beta1_grpc_transport_client_cert_source_for_mtls(
transport_class
):
cred = ga_credentials.AnonymousCredentials()
# Check ssl_channel_credentials is used if provided.
with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
mock_ssl_channel_creds = mock.Mock()
transport_class(
host="squid.clam.whelk",
credentials=cred,
ssl_channel_credentials=mock_ssl_channel_creds
)
mock_create_channel.assert_called_once_with(
"squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_channel_creds,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
# is used.
with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
transport_class(
credentials=cred,
client_cert_source_for_mtls=client_cert_source_callback
)
expected_cert, expected_key = client_cert_source_callback()
mock_ssl_cred.assert_called_once_with(
certificate_chain=expected_cert,
private_key=expected_key
)
def test_recaptcha_enterprise_service_v1_beta1_host_no_port():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='recaptchaenterprise.googleapis.com'),
)
assert client.transport._host == 'recaptchaenterprise.googleapis.com:443'
def test_recaptcha_enterprise_service_v1_beta1_host_with_port():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='recaptchaenterprise.googleapis.com:8000'),
)
assert client.transport._host == 'recaptchaenterprise.googleapis.com:8000'
def test_recaptcha_enterprise_service_v1_beta1_grpc_transport_channel():
channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
def test_recaptcha_enterprise_service_v1_beta1_grpc_asyncio_transport_channel():
channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
# Check that channel is used if provided.
transport = transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport])
def test_recaptcha_enterprise_service_v1_beta1_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize("transport_class", [transports.RecaptchaEnterpriseServiceV1Beta1GrpcTransport, transports.RecaptchaEnterpriseServiceV1Beta1GrpcAsyncIOTransport])
def test_recaptcha_enterprise_service_v1_beta1_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=None,
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
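# The *_path helpers below build canonical resource names; each builder test
# is paired with a parse_* test to prove the path construction is reversible.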
def test_assessment_path():
project = "squid"
assessment = "clam"
expected = "projects/{project}/assessments/{assessment}".format(project=project, assessment=assessment, )
actual = RecaptchaEnterpriseServiceV1Beta1Client.assessment_path(project, assessment)
assert expected == actual
def test_parse_assessment_path():
expected = {
"project": "whelk",
"assessment": "octopus",
}
path = RecaptchaEnterpriseServiceV1Beta1Client.assessment_path(**expected)
# Check that the path construction is reversible.
actual = RecaptchaEnterpriseServiceV1Beta1Client.parse_assessment_path(path)
assert expected == actual
def test_key_path():
project = "oyster"
key = "nudibranch"
expected = "projects/{project}/keys/{key}".format(project=project, key=key, )
actual = RecaptchaEnterpriseServiceV1Beta1Client.key_path(project, key)
assert expected == actual
def test_parse_key_path():
expected = {
"project": "cuttlefish",
"key": "mussel",
}
path = RecaptchaEnterpriseServiceV1Beta1Client.key_path(**expected)
# Check that the path construction is reversible.
actual = RecaptchaEnterpriseServiceV1Beta1Client.parse_key_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "winkle"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = RecaptchaEnterpriseServiceV1Beta1Client.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nautilus",
}
path = RecaptchaEnterpriseServiceV1Beta1Client.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RecaptchaEnterpriseServiceV1Beta1Client.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "scallop"
expected = "folders/{folder}".format(folder=folder, )
actual = RecaptchaEnterpriseServiceV1Beta1Client.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "abalone",
}
path = RecaptchaEnterpriseServiceV1Beta1Client.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RecaptchaEnterpriseServiceV1Beta1Client.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "squid"
expected = "organizations/{organization}".format(organization=organization, )
actual = RecaptchaEnterpriseServiceV1Beta1Client.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "clam",
}
path = RecaptchaEnterpriseServiceV1Beta1Client.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RecaptchaEnterpriseServiceV1Beta1Client.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "whelk"
expected = "projects/{project}".format(project=project, )
actual = RecaptchaEnterpriseServiceV1Beta1Client.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "octopus",
}
path = RecaptchaEnterpriseServiceV1Beta1Client.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RecaptchaEnterpriseServiceV1Beta1Client.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "oyster"
location = "nudibranch"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = RecaptchaEnterpriseServiceV1Beta1Client.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "cuttlefish",
"location": "mussel",
}
path = RecaptchaEnterpriseServiceV1Beta1Client.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RecaptchaEnterpriseServiceV1Beta1Client.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.RecaptchaEnterpriseServiceV1Beta1Transport, '_prep_wrapped_messages') as prep:
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.RecaptchaEnterpriseServiceV1Beta1Transport, '_prep_wrapped_messages') as prep:
transport_class = RecaptchaEnterpriseServiceV1Beta1Client.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
client = RecaptchaEnterpriseServiceV1Beta1AsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc_asyncio",
)
with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
async with client:
close.assert_not_called()
close.assert_called_once()
def test_transport_close():
transports = {
"grpc": "_grpc_channel",
}
for transport, close_name in transports.items():
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
'grpc',
]
for transport in transports:
client = RecaptchaEnterpriseServiceV1Beta1Client(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
lit.go | package main
import (
"fmt"
"log"
"os"
"time"
flags "github.com/jessevdk/go-flags"
"github.com/mit-dci/lit/coinparam"
"github.com/mit-dci/lit/litbamf"
"github.com/mit-dci/lit/litrpc"
"github.com/mit-dci/lit/lnutil"
"github.com/mit-dci/lit/qln"
)
type config struct { // define a struct for usage with go-flags
Tn3host string `long:"tn3" description:"Connect to bitcoin testnet3."`
Bc2host string `long:"bc2" description:"bc2 full node."`
Lt4host string `long:"lt4" description:"Connect to litecoin testnet4."`
Reghost string `long:"reg" description:"Connect to bitcoin regtest."`
Litereghost string `long:"litereg" description:"Connect to litecoin regtest."`
Tvtchost string `long:"tvtc" description:"Connect to Vertcoin test node."`
Vtchost string `long:"vtc" description:"Connect to Vertcoin."`
LitHomeDir string `long:"dir" description:"Specify Home Directory of lit as an absolute path."`
TrackerURL string `long:"tracker" description:"LN address tracker URL http|https://host:port"`
ConfigFile string
ReSync bool `short:"r" long:"reSync" description:"Resync from the given tip."`
Tower bool `long:"tower" description:"Watchtower: Run a watching node"`
Hard bool `short:"t" long:"hard" description:"Flag to set networks."`
Verbose bool `short:"v" long:"verbose" description:"Set verbosity to true."`
Rpcport uint16 `short:"p" long:"rpcport" description:"Set RPC port to connect to"`
Rpchost string `short:"h" long:"rpchost" description:"Set RPC host to listen to"`
Params *coinparam.Params
}
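// Example invocation (hypothetical endpoints):
//   lit --tn3 my.testnet3.node:18333 --dir /tmp/lit -v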
var (
defaultLitHomeDirName = os.Getenv("HOME") + "/.lit"
defaultTrackerURL = "http://ni.media.mit.edu:46580"
defaultKeyFileName = "privkey.hex"
defaultConfigFilename = "lit.conf"
defaultHomeDir = os.Getenv("HOME")
defaultRpcport = uint16(8001)
defaultRpchost = "localhost"
)
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
// newConfigParser returns a new command line flags parser.
func newConfigParser(conf *config, options flags.Options) *flags.Parser {
parser := flags.NewParser(conf, options)
return parser
}
func linkWallets(node *qln.LitNode, key *[32]byte, conf *config) error {
// for now, wallets are linked to the litnode on startup, and
// can't appear / disappear while it's running. Later
// could support dynamically adding / removing wallets
// order matters; the first registered wallet becomes the default
var err error
// try regtest
if !lnutil.NopeString(conf.Reghost) {
p := &coinparam.RegressionNetParams
fmt.Printf("reg: %s\n", conf.Reghost)
err = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Reghost, p)
if err != nil {
return err
}
}
// try testnet3
if !lnutil.NopeString(conf.Tn3host) {
p := &coinparam.TestNet3Params
err = node.LinkBaseWallet(
key, 1256000, conf.ReSync, conf.Tower,
conf.Tn3host, p)
if err != nil {
return err
}
}
// try litecoin regtest
if !lnutil.NopeString(conf.Litereghost) {
p := &coinparam.LiteRegNetParams
err = node.LinkBaseWallet(key, 120, conf.ReSync, conf.Tower, conf.Litereghost, p)
if err != nil {
return err
}
}
// try litecoin testnet4
if !lnutil.NopeString(conf.Lt4host) {
p := &coinparam.LiteCoinTestNet4Params
err = node.LinkBaseWallet(
key, p.StartHeight, conf.ReSync, conf.Tower,
conf.Lt4host, p)
if err != nil {
return err
}
}
// try vertcoin testnet
if !lnutil.NopeString(conf.Tvtchost) {
p := &coinparam.VertcoinTestNetParams
err = node.LinkBaseWallet(
key, 25000, conf.ReSync, conf.Tower,
conf.Tvtchost, p)
if err != nil {
return err
}
}
// try vertcoin mainnet
if !lnutil.NopeString(conf.Vtchost) {
p := &coinparam.VertcoinParams
err = node.LinkBaseWallet(
key, p.StartHeight, conf.ReSync, conf.Tower,
conf.Vtchost, p)
if err != nil {
return err
}
}
return nil
}
func main() {
conf := config{
LitHomeDir: defaultLitHomeDirName,
Rpcport: defaultRpcport,
Rpchost: defaultRpchost,
TrackerURL: defaultTrackerURL,
}
key := litSetup(&conf)
// Setup LN node. Activate Tower if in hard mode.
	// give the node (and the calls below) the file path of the lit home directory
node, err := qln.NewLitNode(key, conf.LitHomeDir, conf.TrackerURL)
if err != nil {
log.Fatal(err)
}
// node is up; link wallets based on args
err = linkWallets(node, key, &conf)
if err != nil {
log.Fatal(err)
}
rpcl := new(litrpc.LitRPC)
rpcl.Node = node
rpcl.OffButton = make(chan bool, 1)
go litrpc.RPCListen(rpcl, conf.Rpchost, conf.Rpcport)
litbamf.BamfListen(conf.Rpcport, conf.LitHomeDir)
<-rpcl.OffButton
fmt.Printf("Got stop request\n")
time.Sleep(time.Second)
return
// New directory being created over at PWD
// conf file being created at /
}
| [
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
flask_qa/flask_qa/settings.py | import os
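# Values come from the environment; a hypothetical local setup might export
#   DB_HOST="sqlite:///qa.db"  SECRET_KEY="change-me"
# before starting the Flask app.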
SQLALCHEMY_DATABASE_URI = os.environ.get('DB_HOST')
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False | []
| []
| [
"SECRET_KEY",
"DB_HOST"
]
| [] | ["SECRET_KEY", "DB_HOST"] | python | 2 | 0 | |
bot_test.go | package telebot
import (
"net/http"
"os"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
photoID = "AgACAgIAAxkDAAIBV16Ybpg7l2jPgMUiiLJ3WaQOUqTrAAJorjEbh2TBSPSOinaCHfydQO_pki4AAwEAAwIAA3kAA_NQAAIYBA"
)
var (
// required to test send and edit methods
token = os.Getenv("TELEBOT_SECRET")
chatID, _ = strconv.ParseInt(os.Getenv("CHAT_ID"), 10, 64)
userID, _ = strconv.Atoi(os.Getenv("USER_ID"))
b, _ = newTestBot() // cached bot instance to avoid getMe method flooding
to = &Chat{ID: chatID} // to chat recipient for send and edit methods
user = &User{ID: userID} // to user recipient for some special cases
)
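// TELEBOT_SECRET, CHAT_ID and USER_ID must be set in the environment for the
// network-bound tests; when they are missing, those tests are skipped.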
func defaultSettings() Settings {
return Settings{Token: token}
}
func newTestBot() (*Bot, error) {
return NewBot(defaultSettings())
}
func TestNewBot(t *testing.T) {
var pref Settings
_, err := NewBot(pref)
assert.Error(t, err)
pref.Token = "BAD TOKEN"
_, err = NewBot(pref)
assert.Error(t, err)
pref.URL = "BAD URL"
_, err = NewBot(pref)
assert.Error(t, err)
b, err := NewBot(Settings{offline: true})
if err != nil {
t.Fatal(err)
}
assert.NotNil(t, b.Me)
assert.Equal(t, DefaultApiURL, b.URL)
assert.Equal(t, http.DefaultClient, b.client)
assert.Equal(t, 100, cap(b.Updates))
pref = defaultSettings()
client := &http.Client{Timeout: time.Minute}
pref.URL = "http://api.telegram.org" // not https
pref.Client = client
pref.Poller = &LongPoller{Timeout: time.Second}
pref.Updates = 50
pref.ParseMode = ModeHTML
pref.offline = true
b, err = NewBot(pref)
require.NoError(t, err)
assert.Equal(t, client, b.client)
assert.Equal(t, pref.URL, b.URL)
assert.Equal(t, pref.Poller, b.Poller)
assert.Equal(t, 50, cap(b.Updates))
assert.Equal(t, ModeHTML, b.parseMode)
}
func TestBotHandle(t *testing.T) {
if b == nil {
t.Skip("Cached bot instance is bad (probably wrong or empty TELEBOT_SECRET)")
}
b.Handle("/start", func(m *Message) {})
assert.Contains(t, b.handlers, "/start")
reply := ReplyButton{Text: "reply"}
b.Handle(&reply, func(m *Message) {})
inline := InlineButton{Unique: "inline"}
b.Handle(&inline, func(c *Callback) {})
btnReply := (&ReplyMarkup{}).Text("btnReply")
b.Handle(&btnReply, func(m *Message) {})
btnInline := (&ReplyMarkup{}).Data("", "btnInline")
b.Handle(&btnInline, func(c *Callback) {})
assert.Contains(t, b.handlers, btnReply.CallbackUnique())
assert.Contains(t, b.handlers, btnInline.CallbackUnique())
assert.Contains(t, b.handlers, reply.CallbackUnique())
assert.Contains(t, b.handlers, inline.CallbackUnique())
assert.Panics(t, func() { b.Handle(1, func() {}) })
}
func TestBotStart(t *testing.T) {
if token == "" {
t.Skip("TELEBOT_SECRET is required")
}
// cached bot has no poller
assert.Panics(t, func() { b.Start() })
pref := defaultSettings()
pref.Poller = &LongPoller{}
b, err := NewBot(pref)
if err != nil {
t.Fatal(err)
}
// remove webhook to be sure that bot can poll
require.NoError(t, b.RemoveWebhook())
go b.Start()
b.Stop()
tp := newTestPoller()
go func() {
tp.updates <- Update{Message: &Message{Text: "/start"}}
}()
b, err = NewBot(pref)
require.NoError(t, err)
b.Poller = tp
var ok bool
b.Handle("/start", func(m *Message) {
assert.Equal(t, m.Text, "/start")
tp.done <- struct{}{}
ok = true
})
go b.Start()
<-tp.done
b.Stop()
assert.True(t, ok)
}
func TestBotProcessUpdate(t *testing.T) {
b, err := NewBot(Settings{Synchronous: true, offline: true})
if err != nil {
t.Fatal(err)
}
b.Handle("/start", func(m *Message) {
assert.Equal(t, "/start", m.Text)
})
b.Handle("hello", func(m *Message) {
assert.Equal(t, "hello", m.Text)
})
b.Handle(OnText, func(m *Message) {
assert.Equal(t, "text", m.Text)
})
b.Handle(OnPinned, func(m *Message) {
assert.NotNil(t, m.PinnedMessage)
})
b.Handle(OnPhoto, func(m *Message) {
assert.NotNil(t, m.Photo)
})
b.Handle(OnVoice, func(m *Message) {
assert.NotNil(t, m.Voice)
})
b.Handle(OnAudio, func(m *Message) {
assert.NotNil(t, m.Audio)
})
b.Handle(OnAnimation, func(m *Message) {
assert.NotNil(t, m.Animation)
})
b.Handle(OnDocument, func(m *Message) {
assert.NotNil(t, m.Document)
})
b.Handle(OnSticker, func(m *Message) {
assert.NotNil(t, m.Sticker)
})
b.Handle(OnVideo, func(m *Message) {
assert.NotNil(t, m.Video)
})
b.Handle(OnVideoNote, func(m *Message) {
assert.NotNil(t, m.VideoNote)
})
b.Handle(OnContact, func(m *Message) {
assert.NotNil(t, m.Contact)
})
b.Handle(OnLocation, func(m *Message) {
assert.NotNil(t, m.Location)
})
b.Handle(OnVenue, func(m *Message) {
assert.NotNil(t, m.Venue)
})
b.Handle(OnAddedToGroup, func(m *Message) {
assert.NotNil(t, m.GroupCreated)
})
b.Handle(OnUserJoined, func(m *Message) {
assert.NotNil(t, m.UserJoined)
})
b.Handle(OnUserLeft, func(m *Message) {
assert.NotNil(t, m.UserLeft)
})
b.Handle(OnNewGroupTitle, func(m *Message) {
assert.Equal(t, "title", m.NewGroupTitle)
})
b.Handle(OnNewGroupPhoto, func(m *Message) {
assert.NotNil(t, m.NewGroupPhoto)
})
b.Handle(OnGroupPhotoDeleted, func(m *Message) {
assert.True(t, m.GroupPhotoDeleted)
})
b.Handle(OnMigration, func(from, to int64) {
assert.Equal(t, int64(1), from)
assert.Equal(t, int64(2), to)
})
b.Handle(OnEdited, func(m *Message) {
assert.Equal(t, "edited", m.Text)
})
b.Handle(OnChannelPost, func(m *Message) {
assert.Equal(t, "post", m.Text)
})
b.Handle(OnEditedChannelPost, func(m *Message) {
assert.Equal(t, "edited post", m.Text)
})
b.Handle(OnCallback, func(c *Callback) {
if c.Data[0] != '\f' {
assert.Equal(t, "callback", c.Data)
}
})
b.Handle("\funique", func(c *Callback) {
assert.Equal(t, "callback", c.Data)
})
b.Handle(OnQuery, func(q *Query) {
assert.Equal(t, "query", q.Text)
})
b.Handle(OnChosenInlineResult, func(r *ChosenInlineResult) {
assert.Equal(t, "result", r.ResultID)
})
b.Handle(OnCheckout, func(pre *PreCheckoutQuery) {
assert.Equal(t, "checkout", pre.ID)
})
b.Handle(OnPoll, func(p *Poll) {
assert.Equal(t, "poll", p.ID)
})
b.Handle(OnPollAnswer, func(pa *PollAnswer) {
assert.Equal(t, "poll", pa.PollID)
})
b.ProcessUpdate(Update{Message: &Message{Text: "/start"}})
b.ProcessUpdate(Update{Message: &Message{Text: "/start@other_bot"}})
b.ProcessUpdate(Update{Message: &Message{Text: "hello"}})
b.ProcessUpdate(Update{Message: &Message{Text: "text"}})
b.ProcessUpdate(Update{Message: &Message{PinnedMessage: &Message{}}})
b.ProcessUpdate(Update{Message: &Message{Photo: &Photo{}}})
b.ProcessUpdate(Update{Message: &Message{Voice: &Voice{}}})
b.ProcessUpdate(Update{Message: &Message{Audio: &Audio{}}})
b.ProcessUpdate(Update{Message: &Message{Animation: &Animation{}}})
b.ProcessUpdate(Update{Message: &Message{Document: &Document{}}})
b.ProcessUpdate(Update{Message: &Message{Sticker: &Sticker{}}})
b.ProcessUpdate(Update{Message: &Message{Video: &Video{}}})
b.ProcessUpdate(Update{Message: &Message{VideoNote: &VideoNote{}}})
b.ProcessUpdate(Update{Message: &Message{Contact: &Contact{}}})
b.ProcessUpdate(Update{Message: &Message{Location: &Location{}}})
b.ProcessUpdate(Update{Message: &Message{Venue: &Venue{}}})
b.ProcessUpdate(Update{Message: &Message{Dice: &Dice{}}})
b.ProcessUpdate(Update{Message: &Message{GroupCreated: true}})
b.ProcessUpdate(Update{Message: &Message{UserJoined: &User{ID: 1}}})
b.ProcessUpdate(Update{Message: &Message{UsersJoined: []User{{ID: 1}}}})
b.ProcessUpdate(Update{Message: &Message{UserLeft: &User{}}})
b.ProcessUpdate(Update{Message: &Message{NewGroupTitle: "title"}})
b.ProcessUpdate(Update{Message: &Message{NewGroupPhoto: &Photo{}}})
b.ProcessUpdate(Update{Message: &Message{GroupPhotoDeleted: true}})
b.ProcessUpdate(Update{Message: &Message{Chat: &Chat{ID: 1}, MigrateTo: 2}})
b.ProcessUpdate(Update{EditedMessage: &Message{Text: "edited"}})
b.ProcessUpdate(Update{ChannelPost: &Message{Text: "post"}})
b.ProcessUpdate(Update{ChannelPost: &Message{PinnedMessage: &Message{}}})
b.ProcessUpdate(Update{EditedChannelPost: &Message{Text: "edited post"}})
b.ProcessUpdate(Update{Callback: &Callback{MessageID: "inline", Data: "callback"}})
b.ProcessUpdate(Update{Callback: &Callback{Data: "callback"}})
b.ProcessUpdate(Update{Callback: &Callback{Data: "\funique|callback"}})
b.ProcessUpdate(Update{Query: &Query{Text: "query"}})
b.ProcessUpdate(Update{ChosenInlineResult: &ChosenInlineResult{ResultID: "result"}})
b.ProcessUpdate(Update{PreCheckoutQuery: &PreCheckoutQuery{ID: "checkout"}})
b.ProcessUpdate(Update{Poll: &Poll{ID: "poll"}})
b.ProcessUpdate(Update{PollAnswer: &PollAnswer{PollID: "poll"}})
}
func TestBot(t *testing.T) {
if b == nil {
t.Skip("Cached bot instance is bad (probably wrong or empty TELEBOT_SECRET)")
}
if chatID == 0 {
t.Skip("CHAT_ID is required for Bot methods test")
}
_, err := b.Send(to, nil)
assert.Equal(t, ErrUnsupportedWhat, err)
_, err = b.Edit(&Message{Chat: &Chat{}}, nil)
assert.Equal(t, ErrUnsupportedWhat, err)
_, err = b.Send(nil, "")
assert.Equal(t, ErrBadRecipient, err)
_, err = b.Forward(nil, nil)
assert.Equal(t, ErrBadRecipient, err)
photo := &Photo{
File: File{FileID: photoID},
Caption: t.Name(),
}
var msg *Message
t.Run("Send(what=Sendable)", func(t *testing.T) {
msg, err = b.Send(to, photo)
require.NoError(t, err)
assert.NotNil(t, msg.Photo)
assert.Equal(t, photo.Caption, msg.Caption)
})
t.Run("SendAlbum()", func(t *testing.T) {
_, err = b.SendAlbum(nil, nil)
assert.Equal(t, ErrBadRecipient, err)
_, err = b.SendAlbum(to, nil)
assert.Error(t, err)
msgs, err := b.SendAlbum(to, Album{photo, photo})
require.NoError(t, err)
assert.Len(t, msgs, 2)
assert.NotEmpty(t, msgs[0].AlbumID)
})
t.Run("EditCaption()+ParseMode", func(t *testing.T) {
b.parseMode = ModeHTML
edited, err := b.EditCaption(msg, "<b>new caption with html</b>")
require.NoError(t, err)
assert.Equal(t, "new caption with html", edited.Caption)
assert.Equal(t, EntityBold, edited.CaptionEntities[0].Type)
edited, err = b.EditCaption(msg, "*new caption with markdown*", ModeMarkdown)
require.NoError(t, err)
assert.Equal(t, "new caption with markdown", edited.Caption)
assert.Equal(t, EntityBold, edited.CaptionEntities[0].Type)
b.parseMode = ModeDefault
})
t.Run("Edit(what=InputMedia)", func(t *testing.T) {
edited, err := b.Edit(msg, photo)
require.NoError(t, err)
assert.Equal(t, edited.Photo.UniqueID, photo.UniqueID)
})
t.Run("Send(what=string)", func(t *testing.T) {
msg, err = b.Send(to, t.Name())
require.NoError(t, err)
assert.Equal(t, t.Name(), msg.Text)
rpl, err := b.Reply(msg, t.Name())
require.NoError(t, err)
assert.Equal(t, rpl.Text, msg.Text)
assert.NotNil(t, rpl.ReplyTo)
assert.Equal(t, rpl.ReplyTo, msg)
assert.True(t, rpl.IsReply())
fwd, err := b.Forward(to, msg)
require.NoError(t, err)
assert.NotNil(t, msg, fwd)
assert.True(t, fwd.IsForwarded())
fwd.ID += 1 // nonexistent message
_, err = b.Forward(to, fwd)
assert.Equal(t, ErrToForwardNotFound, err)
})
t.Run("Edit(what=string)", func(t *testing.T) {
msg, err = b.Edit(msg, t.Name())
require.NoError(t, err)
assert.Equal(t, t.Name(), msg.Text)
_, err = b.Edit(msg, msg.Text)
assert.Error(t, err) // message is not modified
})
t.Run("Edit(what=ReplyMarkup)", func(t *testing.T) {
good := &ReplyMarkup{
InlineKeyboard: [][]InlineButton{
{{
Data: "btn",
Text: "Hi Telebot!",
}},
},
}
bad := &ReplyMarkup{
InlineKeyboard: [][]InlineButton{
{{
Data: strings.Repeat("*", 65),
Text: "Bad Button",
}},
},
}
edited, err := b.Edit(msg, good)
require.NoError(t, err)
assert.Equal(t, edited.ReplyMarkup.InlineKeyboard, good.InlineKeyboard)
edited, err = b.EditReplyMarkup(edited, nil)
require.NoError(t, err)
assert.Nil(t, edited.ReplyMarkup.InlineKeyboard)
_, err = b.Edit(edited, bad)
assert.Equal(t, ErrButtonDataInvalid, err)
})
t.Run("Edit(what=Location)", func(t *testing.T) {
loc := &Location{Lat: 42, Lng: 69, LivePeriod: 60}
edited, err := b.Send(to, loc)
require.NoError(t, err)
assert.NotNil(t, edited.Location)
loc = &Location{Lat: loc.Lng, Lng: loc.Lat}
edited, err = b.Edit(edited, *loc)
require.NoError(t, err)
assert.NotNil(t, edited.Location)
})
// should be the last
t.Run("Delete()", func(t *testing.T) {
require.NoError(t, b.Delete(msg))
})
t.Run("Notify()", func(t *testing.T) {
assert.Equal(t, ErrBadRecipient, b.Notify(nil, Typing))
require.NoError(t, b.Notify(to, Typing))
})
t.Run("Answer()", func(t *testing.T) {
assert.Error(t, b.Answer(&Query{}, &QueryResponse{
Results: Results{&ArticleResult{}},
}))
})
t.Run("Respond()", func(t *testing.T) {
assert.Error(t, b.Respond(&Callback{}, &CallbackResponse{}))
})
t.Run("Payments", func(t *testing.T) {
assert.NotPanics(t, func() {
b.Accept(&PreCheckoutQuery{})
b.Accept(&PreCheckoutQuery{}, "error")
})
assert.NotPanics(t, func() {
b.Ship(&ShippingQuery{})
b.Ship(&ShippingQuery{}, "error")
b.Ship(&ShippingQuery{}, ShippingOption{}, ShippingOption{})
assert.Equal(t, ErrUnsupportedWhat, b.Ship(&ShippingQuery{}, 0))
})
})
t.Run("Commands", func(t *testing.T) {
orig := []Command{{
Text: "test",
Description: "test command",
}}
require.NoError(t, b.SetCommands(orig))
cmds, err := b.GetCommands()
require.NoError(t, err)
assert.Equal(t, orig, cmds)
})
}
| [
"\"TELEBOT_SECRET\"",
"\"CHAT_ID\"",
"\"USER_ID\""
]
| []
| [
"TELEBOT_SECRET",
"USER_ID",
"CHAT_ID"
]
| [] | ["TELEBOT_SECRET", "USER_ID", "CHAT_ID"] | go | 3 | 0 | |
models/vgae.py | from __future__ import division
from __future__ import print_function
import time
import os
# Train on CPU (hide GPU) due to memory constraints
os.environ['CUDA_VISIBLE_DEVICES'] = ""
import tensorflow as tf
import numpy as np
import scipy.sparse as sp
from utility.model import clustering, pairwise_precision_recall_f1
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
from gae.optimizer import OptimizerAE, OptimizerVAE
from gae.model import GCNModelAE, GCNModelVAE
from gae.preprocessing import preprocess_graph, construct_feed_dict, sparse_to_tuple, gen_train_edges, normalize_vectors
tf.compat.v1.disable_eager_execution()
# Settings
flags = tf.compat.v1.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')
flags.DEFINE_integer('epochs', 20, 'Number of epochs to train.')
flags.DEFINE_integer('hidden1', 32, 'Number of units in hidden layer 1.')
flags.DEFINE_integer('hidden2', 16, 'Number of units in hidden layer 2.')
flags.DEFINE_float('weight_decay', 0., 'Weight for L2 loss on embedding matrix.')
flags.DEFINE_float('dropout', 0., 'Dropout rate (1 - keep probability).')
flags.DEFINE_string('dataset', 'cora', 'Dataset string.')
flags.DEFINE_integer('features', 1, 'Whether to use features (1) or not (0).')
class GraphAutoEncoders:
def __init__(self, model_type='gcn_ae'):
self.model_type = model_type
def fit(self, adj, features, labels):
adj_orig = adj
adj_orig = adj_orig - sp.dia_matrix((adj_orig.diagonal()[np.newaxis, :], [0]), shape=adj_orig.shape)
adj_orig.eliminate_zeros()
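        # adj_orig (with the diagonal removed above) is the reconstruction
        # target, so the model is only scored on genuine, non-self edges.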
adj_train = gen_train_edges(adj)
adj = adj_train
# Some preprocessing
adj_norm = preprocess_graph(adj)
num_nodes = adj.shape[0]
input_feature_dim = features.shape[1]
features = normalize_vectors(features)
# Define placeholders
self.placeholders = {
'features': tf.compat.v1.placeholder(tf.float32, shape=(None, input_feature_dim)),
# 'features': tf.compat.v1.sparse_placeholder(tf.float32),
'adj': tf.compat.v1.sparse_placeholder(tf.float32),
'adj_orig': tf.compat.v1.sparse_placeholder(tf.float32),
'dropout': tf.compat.v1.placeholder_with_default(0., shape=())
}
if self.model_type == 'gcn_ae':
self.model = GCNModelAE(self.placeholders, input_feature_dim)
elif self.model_type == 'gcn_vae':
self.model = GCNModelVAE(self.placeholders, input_feature_dim, num_nodes)
pos_weight = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() # negative edges/pos edges
# print('positive edge weight', pos_weight)
norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.nnz) * 2)
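        # norm rescales the loss so it averages over all N*N possible edges;
        # together with pos_weight it keeps the sparse positive (edge) class
        # from being drowned out by the dense negative class.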
# Optimizer
with tf.compat.v1.name_scope('optimizer'):
if self.model_type == 'gcn_ae':
opt = OptimizerAE(preds=self.model.reconstructions,
labels=tf.reshape(tf.sparse.to_dense(self.placeholders['adj_orig'],
validate_indices=False), [-1]),
pos_weight=pos_weight,
norm=norm)
elif self.model_type == 'gcn_vae':
opt = OptimizerVAE(preds=self.model.reconstructions,
labels=tf.reshape(tf.sparse.to_dense(self.placeholders['adj_orig'],
validate_indices=False), [-1]),
model=self.model, num_nodes=num_nodes,
pos_weight=pos_weight,
norm=norm)
# Initialize session
self.sess = tf.compat.v1.Session()
self.sess.run(tf.compat.v1.global_variables_initializer())
adj_label = adj_train + sp.eye(adj_train.shape[0])
adj_label = sparse_to_tuple(adj_label)
# Train model
for epoch in range(FLAGS.epochs):
t = time.time()
# Construct feed dictionary
self.feed_dict = construct_feed_dict(adj_norm, adj_label, features, self.placeholders)
self.feed_dict.update({self.placeholders['dropout']: FLAGS.dropout})
# Run single weight update
outs = self.sess.run([opt.opt_op, opt.cost, opt.accuracy], feed_dict=self.feed_dict)
# Compute average loss
avg_cost = outs[1]
avg_accuracy = outs[2]
# print("Epoch:", '%04d' % (epoch + 1), "train_loss=", "{:.5f}".format(avg_cost),
# "train_acc=", "{:.5f}".format(avg_accuracy),
# "time=", "{:.5f}".format(time.time() - t))
def get_embs(self):
self.feed_dict.update({self.placeholders['dropout']: 0})
return self.sess.run(self.model.z_mean, feed_dict=self.feed_dict) # z_mean is better
def predict(self, labels):
emb = self.get_embs()
n_clusters = len(set(labels))
emb_norm = normalize_vectors(emb)
return clustering(emb_norm, num_clusters=n_clusters)
def score(self, labels):
clusters_pred = self.predict(labels)
prec, rec, f1 = pairwise_precision_recall_f1(clusters_pred, labels)
# print('pairwise precision', '{:.5f}'.format(prec), 'recall', '{:.5f}'.format(rec), 'f1', '{:.5f}'.format(f1))
return prec, rec, f1
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pycalc/MAVProxy/modules/mavproxy_map/GAreader.py | #!/usr/bin/env python
'''
Module to read DTM files published by Geoscience Australia
Written by Stephen Dade ([email protected])
'''
import os
import sys
import numpy
class ERMap:
'''Class to read GA files'''
def __init__(self):
self.header = None
self.data = None
self.startlongitude = 0
self.startlatitude = 0
self.endlongitude = 0
self.endlatitude = 0
self.deltalongitude = 0
self.deltalatitude = 0
def read_ermapper(self, ifile):
'''Read in a DEM file and associated .ers file'''
ers_index = ifile.find('.ers')
if ers_index > 0:
data_file = ifile[0:ers_index]
header_file = ifile
else:
data_file = ifile
header_file = ifile + '.ers'
self.header = self.read_ermapper_header(header_file)
nroflines = int(self.header['nroflines'])
nrofcellsperlines = int(self.header['nrofcellsperline'])
self.data = self.read_ermapper_data(data_file, offset=int(self.header['headeroffset']))
self.data = numpy.reshape(self.data,(nroflines,nrofcellsperlines))
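        # The .ers header stores coordinates as degrees:minutes:seconds
        # strings (e.g. "149:7:30"); fold them into decimal degrees, keeping
        # the sign for southern latitudes and western longitudes.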
longy = numpy.fromstring(self.getHeaderParam('longitude'), sep=':')
latty = numpy.fromstring(self.getHeaderParam('latitude'), sep=':')
self.deltalatitude = float(self.header['ydimension'])
self.deltalongitude = float(self.header['xdimension'])
if longy[0] < 0:
            self.startlongitude = longy[0]-((longy[1]/60)+(longy[2]/3600))
self.endlongitude = self.startlongitude - int(self.header['nrofcellsperline'])*self.deltalongitude
else:
self.startlongitude = longy[0]+(longy[1]/60)+(longy[2]/3600)
self.endlongitude = self.startlongitude + int(self.header['nrofcellsperline'])*self.deltalongitude
if latty[0] < 0:
self.startlatitude = latty[0]-((latty[1]/60)+(latty[2]/3600))
self.endlatitude = self.startlatitude - int(self.header['nroflines'])*self.deltalatitude
else:
self.startlatitude = latty[0]+(latty[1]/60)+(latty[2]/3600)
self.endlatitude = self.startlatitude + int(self.header['nroflines'])*self.deltalatitude
def read_ermapper_header(self, ifile):
# function for reading an ERMapper header from file
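        # Each header line has the form "Key = Value"; keys are lowercased, so
        # e.g. "NrOfLines = 3601" is stored as header['nroflines'] = '3601'.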
header = {}
fid = open(ifile,'rt')
header_string = fid.readlines()
fid.close()
for line in header_string:
if line.find('=') > 0:
tmp_string = line.strip().split('=')
header[tmp_string[0].strip().lower()]= tmp_string[1].strip()
return header
def read_ermapper_data(self, ifile, data_format = numpy.float32, offset=0):
# open input file in a binary format and read the input string
fid = open(ifile,'rb')
if offset != 0:
fid.seek(offset)
input_string = fid.read()
fid.close()
# convert input string to required format (Note default format is numpy.float32)
grid_as_float = numpy.fromstring(input_string,data_format)
return grid_as_float
def getHeaderParam(self, key):
'''Find a parameter in the associated .ers file'''
return self.header[key]
def printBoundingBox(self):
'''Print the bounding box that this DEM covers'''
print "Bounding Latitude: "
print self.startlatitude
print self.endlatitude
print "Bounding Longitude: "
print self.startlongitude
print self.endlongitude
def getPercentBlank(self):
'''Print how many null cells are in the DEM - Quality measure'''
blank = 0
nonblank = 0
for x in self.data.flat:
if x == -99999.0:
blank = blank + 1
else:
nonblank = nonblank + 1
print "Blank tiles = ", blank, "out of ", (nonblank+blank)
def getAltitudeAtPoint(self, latty, longy):
'''Return the altitude at a particular long/lat'''
#check the bounds
if self.startlongitude > 0 and (longy < self.startlongitude or longy > self.endlongitude):
return -1
if self.startlongitude < 0 and (longy > self.startlongitude or longy < self.endlongitude):
return -1
        if self.startlatitude > 0 and (latty < self.startlatitude or latty > self.endlatitude):
            return -1
        if self.startlatitude < 0 and (latty > self.startlatitude or latty < self.endlatitude):
            return -1
x = numpy.abs((latty - self.startlatitude)/self.deltalatitude)
y = numpy.abs((longy - self.startlongitude)/self.deltalongitude)
#do some interpolation
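        # Bilinear interpolation: blend the four surrounding grid cells by the
        # fractional offsets. E.g. x_frac=0.3, y_frac=0.6 weights the corners
        # value00/value10/value01/value11 as 0.28, 0.12, 0.42 and 0.18
        # respectively (the weights always sum to 1).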
# print "x,y", x, y
x_int = int(x)
x_frac = x - int(x)
y_int = int(y)
y_frac = y - int(y)
#print "frac", x_int, x_frac, y_int, y_frac
value00 = self.data[x_int, y_int]
value10 = self.data[x_int+1, y_int]
value01 = self.data[x_int, y_int+1]
value11 = self.data[x_int+1, y_int+1]
#print "values ", value00, value10, value01, value11
#check for null values
if value00 == -99999:
value00 = 0
if value10 == -99999:
value10 = 0
if value01 == -99999:
value01 = 0
if value11 == -99999:
value11 = 0
value1 = self._avg(value00, value10, x_frac)
value2 = self._avg(value01, value11, x_frac)
value = self._avg(value1, value2, y_frac)
return value
@staticmethod
def _avg(value1, value2, weight):
"""Returns the weighted average of two values and handles the case where
one value is None. If both values are None, None is returned.
"""
if value1 is None:
return value2
if value2 is None:
return value1
return value2 * weight + value1 * (1 - weight)
if __name__ == '__main__':
print "./Canberra/GSNSW_P756demg"
mappy = ERMap()
mappy.read_ermapper(os.path.join(os.environ['HOME'], './Documents/Elevation/Canberra/GSNSW_P756demg'))
#print some header data
mappy.printBoundingBox()
#get a measure of data quality
#mappy.getPercentBlank()
#test the altitude (around Canberra):
alt = mappy.getAltitudeAtPoint(-35.274411, 149.097504)
print "Alt at (-35.274411, 149.097504) is 807m (Google) or " + str(alt)
alt = mappy.getAltitudeAtPoint(-35.239648, 149.126118)
print "Alt at (-35.239648, 149.126118) is 577m (Google) or " + str(alt)
alt = mappy.getAltitudeAtPoint(-35.362751, 149.165361)
print "Alt at (-35.362751, 149.165361) is 584m (Google) or " + str(alt)
alt = mappy.getAltitudeAtPoint(-35.306992, 149.194274)
print "Alt at (-35.306992, 149.194274) is 570m (Google) or " + str(alt)
alt = mappy.getAltitudeAtPoint(-35.261612, 149.542091)
print "Alt at (-35.261612, 149.542091) is 766m (Google) or " + str(alt)
alt = mappy.getAltitudeAtPoint(-35.052544, 149.509165)
print "Alt at (-35.052544, 149.509165) is 700m (Google) or " + str(alt)
alt = mappy.getAltitudeAtPoint(-35.045126, 149.257482)
print "Alt at (-35.045126, 149.257482) is 577m (Google) or " + str(alt)
alt = mappy.getAltitudeAtPoint(-35.564044, 149.177657)
print "Alt at (-35.564044, 149.177657) is 1113m (Google) or " + str(alt)
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 | |
main.py | from docycle import DocomoCycle
from line import Line
from parking import TYO_PARKING_LIST
import time
import os
# docomo cycle
user = os.environ['CYCLE_USER']
password = os.environ['CYCLE_PASS']
my_area_id = DocomoCycle.TYO_AREA_ID_LIST['chiyoda']
my_parking_id = TYO_PARKING_LIST['A1-01.Chiyoda City Office']
my_user_id = 'TYO'
# line
line_token = os.environ['LINE_TOKEN']
if __name__ == '__main__':
    threshold = 3
dc = DocomoCycle(user, password, my_user_id, my_area_id)
li = Line(line_token)
while True:
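        # Poll the parking lot every few minutes; once availability drops
        # below the threshold, reserve a bike and push the details to LINE.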
time.sleep(60 * 3)
cycles = dc.get_cycle_list(my_parking_id)
        if cycles is None:
            li.send_message('nothing')
        elif len(cycles) < threshold:
if dc.reserve_cycle(my_parking_id) != None:
message = dc.reserve_info()
li.send_message(message)
exit()
| []
| []
| [
"CYCLE_PASS",
"LINE_TOKEN",
"CYCLE_USER"
]
| [] | ["CYCLE_PASS", "LINE_TOKEN", "CYCLE_USER"] | python | 3 | 0 | |
src/main/java/no/sysco/middleware/metrics/prometheus/jdbc/WebServer.java | package no.sysco.middleware.metrics.prometheus.jdbc;
import io.prometheus.client.exporter.MetricsServlet;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.servlet.ServletContextHandler;
import org.eclipse.jetty.servlet.ServletHolder;
import java.io.File;
import java.net.InetSocketAddress;
import java.util.Optional;
public class WebServer {
public static void main(String[] args) throws Exception {
if (args.length < 2) {
System.err.println("Usage: WebServer <[hostname:]port> <yaml configuration file>");
System.exit(1);
}
String[] hostnamePort = args[0].split(":");
int port;
InetSocketAddress socket;
if (hostnamePort.length == 2) {
port = Integer.parseInt(hostnamePort[1]);
socket = new InetSocketAddress(hostnamePort[0], port);
} else {
port = Integer.parseInt(hostnamePort[0]);
socket = new InetSocketAddress(port);
}
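        // METRIC_PREFIX is optional; exported metric names fall back to the
        // default "jdbc" prefix when the variable is unset.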
String prefix = Optional.ofNullable(System.getenv("METRIC_PREFIX")).orElse("jdbc");
new JdbcCollector(new File(args[1]), prefix).register();
Server server = new Server(socket);
ServletContextHandler context = new ServletContextHandler();
context.setContextPath("/");
server.setHandler(context);
context.addServlet(new ServletHolder(new MetricsServlet()), "/metrics");
server.start();
server.join();
}
}
| [
"\"METRIC_PREFIX\""
]
| []
| [
"METRIC_PREFIX"
]
| [] | ["METRIC_PREFIX"] | java | 1 | 0 | |
python/ray/tune/tune.py | from typing import Any, Callable, Dict, Mapping, Optional, Sequence, Type, \
Union
import datetime
import logging
import os
import signal
import sys
import time
import warnings
import ray
from ray.util.annotations import PublicAPI
from ray.util.ml_utils.node import force_on_current_node
from ray.util.queue import Queue, Empty
from ray.tune.analysis import ExperimentAnalysis
from ray.tune.callback import Callback
from ray.tune.error import TuneError
from ray.tune.experiment import Experiment, convert_to_experiment_list
from ray.tune.logger import Logger
from ray.tune.progress_reporter import (detect_reporter, ProgressReporter,
JupyterNotebookReporter)
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.registry import get_trainable_cls
from ray.tune.schedulers import (PopulationBasedTraining,
PopulationBasedTrainingReplay)
from ray.tune.stopper import Stopper
from ray.tune.suggest import BasicVariantGenerator, SearchAlgorithm, \
SearchGenerator
from ray.tune.suggest.suggestion import ConcurrencyLimiter, Searcher
from ray.tune.suggest.util import set_search_properties_backwards_compatible
from ray.tune.suggest.variant_generator import has_unresolved_values
from ray.tune.syncer import (SyncConfig, set_sync_periods, wait_for_sync)
from ray.tune.trainable import Trainable
from ray.tune.trial import Trial
from ray.tune.trial_runner import TrialRunner
from ray.tune.utils.callback import create_default_callbacks
from ray.tune.utils.log import Verbosity, has_verbosity, set_verbosity
# Must come last to avoid circular imports
from ray.tune.schedulers import FIFOScheduler, TrialScheduler
from ray.tune.utils.placement_groups import PlacementGroupFactory
logger = logging.getLogger(__name__)
def _check_default_resources_override(run_identifier):
if not isinstance(run_identifier, str):
# If obscure dtype, assume it is overridden.
return True
trainable_cls = get_trainable_cls(run_identifier)
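    # Comparing code objects is how an override is detected: a subclass that
    # redefines ``default_resource_request`` compiles to a different
    # ``__code__`` object than the inherited ``Trainable`` base method.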
return hasattr(trainable_cls, "default_resource_request") and (
trainable_cls.default_resource_request.__code__ !=
Trainable.default_resource_request.__code__)
def _report_progress(runner, reporter, done=False):
"""Reports experiment progress.
Args:
runner (TrialRunner): Trial runner to report on.
reporter (ProgressReporter): Progress reporter.
done (bool): Whether this is the last progress report attempt.
"""
trials = runner.get_trials()
if reporter.should_report(trials, done=done):
sched_debug_str = runner.scheduler_alg.debug_string()
executor_debug_str = runner.trial_executor.debug_string()
reporter.report(trials, done, sched_debug_str, executor_debug_str)
@PublicAPI
def run(
run_or_experiment: Union[str, Callable, Type],
name: Optional[str] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
stop: Union[None, Mapping, Stopper, Callable[[str, Mapping],
bool]] = None,
time_budget_s: Union[None, int, float, datetime.timedelta] = None,
config: Optional[Dict[str, Any]] = None,
resources_per_trial: Union[None, Mapping[str, Union[
float, int, Mapping]], PlacementGroupFactory] = None,
num_samples: int = 1,
local_dir: Optional[str] = None,
search_alg: Optional[Union[Searcher, SearchAlgorithm, str]] = None,
scheduler: Optional[Union[TrialScheduler, str]] = None,
keep_checkpoints_num: Optional[int] = None,
checkpoint_score_attr: Optional[str] = None,
checkpoint_freq: int = 0,
checkpoint_at_end: bool = False,
verbose: Union[int, Verbosity] = Verbosity.V3_TRIAL_DETAILS,
progress_reporter: Optional[ProgressReporter] = None,
log_to_file: bool = False,
trial_name_creator: Optional[Callable[[Trial], str]] = None,
trial_dirname_creator: Optional[Callable[[Trial], str]] = None,
sync_config: Optional[SyncConfig] = None,
export_formats: Optional[Sequence] = None,
max_failures: int = 0,
fail_fast: bool = False,
restore: Optional[str] = None,
server_port: Optional[int] = None,
resume: bool = False,
reuse_actors: bool = False,
trial_executor: Optional[RayTrialExecutor] = None,
raise_on_failed_trial: bool = True,
callbacks: Optional[Sequence[Callback]] = None,
max_concurrent_trials: Optional[int] = None,
# Deprecated args
queue_trials: Optional[bool] = None,
loggers: Optional[Sequence[Type[Logger]]] = None,
_remote: Optional[bool] = None,
) -> ExperimentAnalysis:
"""Executes training.
When a SIGINT signal is received (e.g. through Ctrl+C), the tuning run
will gracefully shut down and checkpoint the latest experiment state.
Sending SIGINT again (or SIGKILL/SIGTERM instead) will skip this step.
Many aspects of Tune, such as the frequency of global checkpointing,
maximum pending placement group trials and the path of the result
directory can be configured through environment variables. Refer to
:ref:`tune-env-vars` for a list of environment variables available.
Examples:
.. code-block:: python
# Run 10 trials (each trial is one instance of a Trainable). Tune runs
# in parallel and automatically determines concurrency.
tune.run(trainable, num_samples=10)
# Run 1 trial, stop when trial has reached 10 iterations
tune.run(my_trainable, stop={"training_iteration": 10})
# automatically retry failed trials up to 3 times
tune.run(my_trainable, stop={"training_iteration": 10}, max_failures=3)
# Run 1 trial, search over hyperparameters, stop after 10 iterations.
space = {"lr": tune.uniform(0, 1), "momentum": tune.uniform(0, 1)}
tune.run(my_trainable, config=space, stop={"training_iteration": 10})
# Resumes training if a previous machine crashed
tune.run(my_trainable, config=space,
local_dir=<path/to/dir>, resume=True)
# Rerun ONLY failed trials after an experiment is finished.
tune.run(my_trainable, config=space,
local_dir=<path/to/dir>, resume="ERRORED_ONLY")
Args:
run_or_experiment (function | class | str | :class:`Experiment`): If
function|class|str, this is the algorithm or model to train.
This may refer to the name of a built-in algorithm
(e.g. RLLib's DQN or PPO), a user-defined trainable
function or class, or the string identifier of a
trainable function or class registered in the tune registry.
If Experiment, then Tune will execute training based on
Experiment.spec. If you want to pass in a Python lambda, you
will need to first register the function:
``tune.register_trainable("lambda_id", lambda x: ...)``. You can
then use ``tune.run("lambda_id")``.
metric (str): Metric to optimize. This metric should be reported
with `tune.report()`. If set, will be passed to the search
algorithm and scheduler.
mode (str): Must be one of [min, max]. Determines whether objective is
minimizing or maximizing the metric attribute. If set, will be
passed to the search algorithm and scheduler.
name (str): Name of experiment.
stop (dict | callable | :class:`Stopper`): Stopping criteria. If dict,
the keys may be any field in the return result of 'train()',
whichever is reached first. If function, it must take (trial_id,
result) as arguments and return a boolean (True if trial should be
stopped, False otherwise). This can also be a subclass of
``ray.tune.Stopper``, which allows users to implement
custom experiment-wide stopping (i.e., stopping an entire Tune
run based on some time constraint).
time_budget_s (int|float|datetime.timedelta): Global time budget in
seconds after which all trials are stopped. Can also be a
``datetime.timedelta`` object.
config (dict): Algorithm-specific configuration for Tune variant
generation (e.g. env, hyperparams). Defaults to empty dict.
Custom search algorithms may ignore this.
resources_per_trial (dict|PlacementGroupFactory): Machine resources
to allocate per trial, e.g. ``{"cpu": 64, "gpu": 8}``.
Note that GPUs will not be assigned unless you specify them here.
Defaults to 1 CPU and 0 GPUs in
``Trainable.default_resource_request()``. This can also
be a PlacementGroupFactory object wrapping arguments to create a
per-trial placement group.
num_samples (int): Number of times to sample from the
hyperparameter space. Defaults to 1. If `grid_search` is
provided as an argument, the grid will be repeated
`num_samples` times. If this is -1, (virtually) infinite
samples are generated until a stopping condition is met.
local_dir (str): Local dir to save training results to.
Defaults to ``~/ray_results``.
search_alg (Searcher|SearchAlgorithm|str): Search algorithm for
optimization. You can also use the name of the algorithm.
scheduler (TrialScheduler|str): Scheduler for executing
the experiment. Choose among FIFO (default), MedianStopping,
AsyncHyperBand, HyperBand and PopulationBasedTraining. Refer to
ray.tune.schedulers for more options. You can also use the
name of the scheduler.
keep_checkpoints_num (int): Number of checkpoints to keep. A value of
`None` keeps all checkpoints. Defaults to `None`. If set, need
to provide `checkpoint_score_attr`.
checkpoint_score_attr (str): Specifies by which attribute to rank the
best checkpoint. Default is increasing order. If the attribute starts
with `min-`, it will be ranked in decreasing order, e.g.
`min-validation_loss`.
checkpoint_freq (int): How many training iterations between
checkpoints. A value of 0 (default) disables checkpointing.
This has no effect when using the Functional Training API.
checkpoint_at_end (bool): Whether to checkpoint at the end of the
experiment regardless of the checkpoint_freq. Default is False.
This has no effect when using the Functional Training API.
verbose (Union[int, Verbosity]): 0, 1, 2, or 3. Verbosity mode.
0 = silent, 1 = only status updates, 2 = status and brief trial
results, 3 = status and detailed trial results. Defaults to 3.
progress_reporter (ProgressReporter): Progress reporter for reporting
intermediate experiment progress. Defaults to CLIReporter if
running in command-line, or JupyterNotebookReporter if running in
a Jupyter notebook.
log_to_file (bool|str|Sequence): Log stdout and stderr to files in
Tune's trial directories. If this is `False` (default), no files
are written. If `True`, outputs are written to `trialdir/stdout`
and `trialdir/stderr`, respectively. If this is a single string,
this is interpreted as a file relative to the trialdir, to which
both streams are written. If this is a Sequence (e.g. a Tuple),
it has to have length 2 and the elements indicate the files to
which stdout and stderr are written, respectively.
trial_name_creator (Callable[[Trial], str]): Optional function
for generating the trial string representation.
trial_dirname_creator (Callable[[Trial], str]): Function
for generating the trial dirname. This function should take
in a Trial object and return a string representing the
name of the directory. The return value cannot be a path.
sync_config (SyncConfig): Configuration object for syncing. See
tune.SyncConfig.
export_formats (list): List of formats that are exported at the end of
the experiment. Default is None.
max_failures (int): Try to recover a trial at least this many times.
Ray will recover from the latest checkpoint if present.
Setting to -1 will lead to infinite recovery retries.
Setting to 0 will disable retries. Defaults to 0.
fail_fast (bool | str): Whether to fail upon the first error.
If fail_fast='raise' provided, Tune will automatically
raise the exception received by the Trainable. fail_fast='raise'
can easily leak resources and should be used with caution (it
is best used with `ray.init(local_mode=True)`).
restore (str): Path to checkpoint. Only makes sense to set if
running 1 trial. Defaults to None.
server_port (int): Port number for launching TuneServer.
resume (str|bool): One of "LOCAL", "REMOTE", "PROMPT", "ERRORED_ONLY",
or bool. LOCAL/True restores the checkpoint from the
local experiment directory, determined
by ``name`` and ``local_dir``. REMOTE restores the checkpoint
from ``upload_dir`` (as passed to ``sync_config``).
PROMPT provides CLI feedback.
False forces a new experiment. ERRORED_ONLY resets and reruns
ERRORED trials upon resume - previous trial artifacts will
be left untouched. If resume is set but checkpoint does not exist,
ValueError will be thrown.
reuse_actors (bool): Whether to reuse actors between different trials
when possible. This can drastically speed up experiments that start
and stop actors often (e.g., PBT in time-multiplexing mode). This
requires trials to have the same resource requirements.
trial_executor (TrialExecutor): Manage the execution of trials.
raise_on_failed_trial (bool): Raise TuneError if there exists failed
trial (of ERROR state) when the experiments complete.
callbacks (list): List of callbacks that will be called at different
times in the training loop. Must be instances of the
``ray.tune.callback.Callback`` class. If not passed,
`LoggerCallback` and `SyncerCallback` callbacks are automatically
added.
max_concurrent_trials (int): Maximum number of trials to run
concurrently. Must be non-negative. If None or 0, no limit will
be applied. This is achieved by wrapping the ``search_alg`` in
a :class:`ConcurrencyLimiter`, and thus setting this argument
will raise an exception if the ``search_alg`` is already a
:class:`ConcurrencyLimiter`. Defaults to None.
_remote (bool): Whether to run the Tune driver in a remote function.
This is disabled automatically if a custom trial executor is
passed in. This is enabled by default in Ray client mode.
Returns:
ExperimentAnalysis: Object for experiment analysis.
Raises:
TuneError: Any trials failed and `raise_on_failed_trial` is True.
"""
# To be removed in 1.9.
if queue_trials is not None:
raise DeprecationWarning(
"`queue_trials` has been deprecated and is replaced by "
"the `TUNE_MAX_PENDING_TRIALS_PG` environment variable. "
"Per default at least one Trial is queued at all times, "
"so you likely don't need to change anything other than "
"removing this argument from your call to `tune.run()`")
# NO CODE IS TO BE ADDED ABOVE THIS COMMENT
# remote_run_kwargs must be defined before any other
# code is run to ensure that at this point,
# `locals()` is equal to args and kwargs
remote_run_kwargs = locals().copy()
remote_run_kwargs.pop("_remote")
if _remote is None:
_remote = ray.util.client.ray.is_connected()
if _remote is True and trial_executor:
raise ValueError("cannot use custom trial executor")
if not trial_executor or isinstance(trial_executor, RayTrialExecutor):
_ray_auto_init()
if _remote:
remote_run = ray.remote(num_cpus=0)(run)
# Make sure tune.run is called on the server node.
remote_run = force_on_current_node(remote_run)
# JupyterNotebooks don't work with remote tune runs out of the box
# (e.g. via Ray client) as they don't have access to the main
# process stdout. So we introduce a queue here that accepts
# callables, which will then be executed on the driver side.
if isinstance(progress_reporter, JupyterNotebookReporter):
execute_queue = Queue(actor_options={
"num_cpus": 0,
**force_on_current_node(None)
})
progress_reporter.set_output_queue(execute_queue)
def get_next_queue_item():
try:
return execute_queue.get(block=False)
except Empty:
return None
else:
# If we don't need a queue, use this dummy get fn instead of
# scheduling an unneeded actor
def get_next_queue_item():
return None
def _handle_execute_queue():
execute_item = get_next_queue_item()
while execute_item:
if isinstance(execute_item, Callable):
execute_item()
execute_item = get_next_queue_item()
remote_future = remote_run.remote(_remote=False, **remote_run_kwargs)
# ray.wait(...)[1] returns futures that are not ready yet
while ray.wait([remote_future], timeout=0.2)[1]:
# Check if we have items to execute
_handle_execute_queue()
# Handle queue one last time
_handle_execute_queue()
return ray.get(remote_future)
del remote_run_kwargs
all_start = time.time()
if loggers:
# Raise DeprecationWarning in 1.9, remove in 1.10/1.11
warnings.warn(
"The `loggers` argument is deprecated. Please pass the respective "
"`LoggerCallback` classes to the `callbacks` argument instead. "
"See https://docs.ray.io/en/latest/tune/api_docs/logging.html")
if mode and mode not in ["min", "max"]:
raise ValueError(
"The `mode` parameter passed to `tune.run()` has to be one of "
"['min', 'max']")
set_verbosity(verbose)
config = config or {}
sync_config = sync_config or SyncConfig()
set_sync_periods(sync_config)
if num_samples == -1:
num_samples = sys.maxsize
result_buffer_length = None
# Create scheduler here as we need access to some of its properties
if isinstance(scheduler, str):
# importing at top level causes a recursive dependency
from ray.tune.schedulers import create_scheduler
scheduler = create_scheduler(scheduler)
scheduler = scheduler or FIFOScheduler()
if not scheduler.supports_buffered_results:
# Result buffering with e.g. a Hyperband scheduler is a bad idea, as
# hyperband tries to stop trials when processing brackets. With result
# buffering, we might trigger this multiple times when evaluating
# a single trial, which leads to unexpected behavior.
env_result_buffer_length = os.getenv("TUNE_RESULT_BUFFER_LENGTH", "")
if env_result_buffer_length:
warnings.warn(
f"You are using a {type(scheduler)} scheduler, but "
f"TUNE_RESULT_BUFFER_LENGTH is set "
f"({env_result_buffer_length}). This can lead to undesired "
f"and faulty behavior, so the buffer length was forcibly set "
f"to 1 instead.")
result_buffer_length = 1
if isinstance(scheduler,
(PopulationBasedTraining,
PopulationBasedTrainingReplay)) and not reuse_actors:
warnings.warn(
"Consider boosting PBT performance by enabling `reuse_actors` as "
"well as implementing `reset_config` for Trainable.")
trial_executor = trial_executor or RayTrialExecutor(
reuse_actors=reuse_actors, result_buffer_length=result_buffer_length)
if isinstance(run_or_experiment, list):
experiments = run_or_experiment
else:
experiments = [run_or_experiment]
for i, exp in enumerate(experiments):
if not isinstance(exp, Experiment):
experiments[i] = Experiment(
name=name,
run=exp,
stop=stop,
time_budget_s=time_budget_s,
config=config,
resources_per_trial=resources_per_trial,
num_samples=num_samples,
local_dir=local_dir,
sync_config=sync_config,
trial_name_creator=trial_name_creator,
trial_dirname_creator=trial_dirname_creator,
log_to_file=log_to_file,
checkpoint_freq=checkpoint_freq,
checkpoint_at_end=checkpoint_at_end,
keep_checkpoints_num=keep_checkpoints_num,
checkpoint_score_attr=checkpoint_score_attr,
export_formats=export_formats,
max_failures=max_failures,
restore=restore)
else:
logger.debug("Ignoring some parameters passed into tune.run.")
if fail_fast and max_failures != 0:
raise ValueError("max_failures must be 0 if fail_fast=True.")
if isinstance(search_alg, str):
# importing at top level causes a recursive dependency
from ray.tune.suggest import create_searcher
search_alg = create_searcher(search_alg)
# if local_mode=True is set during ray.init().
is_local_mode = ray.worker._mode() == ray.worker.LOCAL_MODE
if is_local_mode:
max_concurrent_trials = 1
if not search_alg:
search_alg = BasicVariantGenerator(
max_concurrent=max_concurrent_trials or 0)
elif max_concurrent_trials:
if isinstance(search_alg, ConcurrencyLimiter):
if search_alg.max_concurrent != max_concurrent_trials:
raise ValueError(
"You have specified `max_concurrent_trials="
f"{max_concurrent_trials}`, but the `search_alg` is "
"already a `ConcurrencyLimiter` with `max_concurrent="
f"{search_alg.max_concurrent}. FIX THIS by setting "
"`max_concurrent_trials=None`.")
else:
logger.warning(
"You have specified `max_concurrent_trials="
f"{max_concurrent_trials}`, but the `search_alg` is "
"already a `ConcurrencyLimiter`. `max_concurrent_trials` "
"will be ignored.")
else:
if max_concurrent_trials < 1:
raise ValueError(
"`max_concurrent_trials` must be greater or equal than 1, "
f"got {max_concurrent_trials}.")
if isinstance(search_alg, Searcher):
search_alg = ConcurrencyLimiter(
search_alg, max_concurrent=max_concurrent_trials)
elif not is_local_mode:
logger.warning(
"You have passed a `SearchGenerator` instance as the "
"`search_alg`, but `max_concurrent_trials` requires a "
"`Searcher` instance`. `max_concurrent_trials` "
"will be ignored.")
if isinstance(search_alg, Searcher):
search_alg = SearchGenerator(search_alg)
if config and not set_search_properties_backwards_compatible(
search_alg.set_search_properties, metric, mode, config, **
experiments[0].public_spec):
if has_unresolved_values(config):
raise ValueError(
"You passed a `config` parameter to `tune.run()` with "
"unresolved parameters, but the search algorithm was already "
"instantiated with a search space. Make sure that `config` "
"does not contain any more parameter definitions - include "
"them in the search algorithm's search space if necessary.")
if not scheduler.set_search_properties(metric, mode):
raise ValueError(
"You passed a `metric` or `mode` argument to `tune.run()`, but "
"the scheduler you are using was already instantiated with their "
"own `metric` and `mode` parameters. Either remove the arguments "
"from your scheduler or from your call to `tune.run()`")
# Create syncer callbacks
callbacks = create_default_callbacks(
callbacks, sync_config, metric=metric, loggers=loggers)
runner = TrialRunner(
search_alg=search_alg,
scheduler=scheduler,
local_checkpoint_dir=experiments[0].checkpoint_dir,
remote_checkpoint_dir=experiments[0].remote_checkpoint_dir,
sync_config=sync_config,
stopper=experiments[0].stopper,
resume=resume,
server_port=server_port,
fail_fast=fail_fast,
trial_executor=trial_executor,
callbacks=callbacks,
metric=metric,
# Driver should only sync trial checkpoints if
# checkpoints are not synced to cloud
driver_sync_trial_checkpoints=not bool(sync_config.upload_dir))
if not runner.resumed:
for exp in experiments:
search_alg.add_configurations([exp])
else:
logger.info("TrialRunner resumed, ignoring new add_experiment but "
"updating trial resources.")
if resources_per_trial:
runner.update_pending_trial_resources(resources_per_trial)
progress_reporter = progress_reporter or detect_reporter()
if not progress_reporter.set_search_properties(metric, mode):
raise ValueError(
"You passed a `metric` or `mode` argument to `tune.run()`, but "
"the reporter you are using was already instantiated with their "
"own `metric` and `mode` parameters. Either remove the arguments "
"from your reporter or from your call to `tune.run()`")
progress_reporter.set_total_samples(search_alg.total_samples)
# Calls setup on callbacks
runner.setup_experiments(
experiments=experiments, total_num_samples=search_alg.total_samples)
# User Warning for GPUs
if trial_executor.has_gpus():
if isinstance(resources_per_trial,
dict) and "gpu" in resources_per_trial:
# "gpu" is manually set.
pass
elif _check_default_resources_override(experiments[0].run_identifier):
# "default_resources" is manually overridden.
pass
else:
logger.warning("Tune detects GPUs, but no trials are using GPUs. "
"To enable trials to use GPUs, set "
"tune.run(resources_per_trial={'gpu': 1}...) "
"which allows Tune to expose 1 GPU to each trial. "
"You can also override "
"`Trainable.default_resource_request` if using the "
"Trainable API.")
original_handler = signal.getsignal(signal.SIGINT)
state = {signal.SIGINT: False}
def sigint_handler(sig, frame):
logger.warning(
"SIGINT received (e.g. via Ctrl+C), ending Ray Tune run. "
"This will try to checkpoint the experiment state one last time. "
"Press CTRL+C one more time (or send SIGINT/SIGKILL/SIGTERM) "
"to skip. ")
state[signal.SIGINT] = True
# Restore original signal handler to react to future SIGINT signals
signal.signal(signal.SIGINT, original_handler)
if not int(os.getenv("TUNE_DISABLE_SIGINT_HANDLER", "0")):
signal.signal(signal.SIGINT, sigint_handler)
tune_start = time.time()
progress_reporter.set_start_time(tune_start)
while not runner.is_finished() and not state[signal.SIGINT]:
runner.step()
if has_verbosity(Verbosity.V1_EXPERIMENT):
_report_progress(runner, progress_reporter)
tune_taken = time.time() - tune_start
try:
runner.checkpoint(force=True)
except Exception as e:
logger.warning(f"Trial Runner checkpointing failed: {str(e)}")
if has_verbosity(Verbosity.V1_EXPERIMENT):
_report_progress(runner, progress_reporter, done=True)
wait_for_sync()
runner.cleanup()
incomplete_trials = []
for trial in runner.get_trials():
if trial.status != Trial.TERMINATED:
incomplete_trials += [trial]
if incomplete_trials:
if raise_on_failed_trial and not state[signal.SIGINT]:
raise TuneError("Trials did not complete", incomplete_trials)
else:
logger.error("Trials did not complete: %s", incomplete_trials)
all_taken = time.time() - all_start
if has_verbosity(Verbosity.V1_EXPERIMENT):
logger.info(f"Total run time: {all_taken:.2f} seconds "
f"({tune_taken:.2f} seconds for the tuning loop).")
if state[signal.SIGINT]:
logger.warning(
"Experiment has been interrupted, but the most recent state was "
"saved. You can continue running this experiment by passing "
"`resume=True` to `tune.run()`")
trials = runner.get_trials()
return ExperimentAnalysis(
runner.checkpoint_file,
trials=trials,
default_metric=metric,
default_mode=mode,
sync_config=sync_config)
@PublicAPI
def run_experiments(
experiments: Union[Experiment, Mapping, Sequence[Union[Experiment,
Mapping]]],
scheduler: Optional[TrialScheduler] = None,
server_port: Optional[int] = None,
verbose: Union[int, Verbosity] = Verbosity.V3_TRIAL_DETAILS,
progress_reporter: Optional[ProgressReporter] = None,
resume: bool = False,
reuse_actors: bool = False,
trial_executor: Optional[RayTrialExecutor] = None,
raise_on_failed_trial: bool = True,
concurrent: bool = True,
# Deprecated args.
queue_trials: Optional[bool] = None,
callbacks: Optional[Sequence[Callback]] = None,
_remote: Optional[bool] = None):
"""Runs and blocks until all trials finish.
Examples:
>>> experiment_spec = Experiment("experiment", my_func)
>>> run_experiments(experiments=experiment_spec)
>>> experiment_spec = {"experiment": {"run": my_func}}
>>> run_experiments(experiments=experiment_spec)
Returns:
List of Trial objects, holding data for each executed trial.
"""
# To be removed in 1.9.
if queue_trials is not None:
raise DeprecationWarning(
"`queue_trials` has been deprecated and is replaced by "
"the `TUNE_MAX_PENDING_TRIALS_PG` environment variable. "
"Per default at least one Trial is queued at all times, "
"so you likely don't need to change anything other than "
"removing this argument from your call to `tune.run()`")
if _remote is None:
_remote = ray.util.client.ray.is_connected()
if _remote is True and trial_executor:
raise ValueError("cannot use custom trial executor")
if not trial_executor or isinstance(trial_executor, RayTrialExecutor):
_ray_auto_init()
if _remote:
remote_run = ray.remote(num_cpus=0)(run_experiments)
# Make sure tune.run_experiments is run on the server node.
remote_run = force_on_current_node(remote_run)
return ray.get(
remote_run.remote(
experiments,
scheduler,
server_port,
verbose,
progress_reporter,
resume,
reuse_actors,
trial_executor,
raise_on_failed_trial,
concurrent,
callbacks,
_remote=False))
# It is important to do this here,
# because it schematizes the experiments
# and performs the implicit registration.
experiments = convert_to_experiment_list(experiments)
if concurrent:
return run(
experiments,
server_port=server_port,
verbose=verbose,
progress_reporter=progress_reporter,
resume=resume,
reuse_actors=reuse_actors,
trial_executor=trial_executor,
raise_on_failed_trial=raise_on_failed_trial,
scheduler=scheduler,
callbacks=callbacks).trials
else:
trials = []
for exp in experiments:
trials += run(
exp,
server_port=server_port,
verbose=verbose,
progress_reporter=progress_reporter,
resume=resume,
reuse_actors=reuse_actors,
trial_executor=trial_executor,
raise_on_failed_trial=raise_on_failed_trial,
scheduler=scheduler,
callbacks=callbacks).trials
return trials
def _ray_auto_init():
"""Initialize Ray unless already configured."""
if os.environ.get("TUNE_DISABLE_AUTO_INIT") == "1":
logger.info("'TUNE_DISABLE_AUTO_INIT=1' detected.")
elif not ray.is_initialized():
logger.info("Initializing Ray automatically."
"For cluster usage or custom Ray initialization, "
"call `ray.init(...)` before `tune.run`.")
ray.init()
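# A minimal usage sketch (illustrative only; `my_trainable` is a hypothetical
# user-defined trainable that reports a "loss" metric). It shows the
# concurrency-limiting behavior implemented above: `max_concurrent_trials`
# wraps the searcher in a ConcurrencyLimiter unless one is already supplied.
#
#     from ray import tune
#
#     analysis = tune.run(
#         my_trainable,
#         metric="loss",
#         mode="min",
#         config={"lr": tune.uniform(1e-4, 1e-1)},
#         num_samples=20,
#         max_concurrent_trials=4,
#     )
#     print(analysis.best_config)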
| []
| []
| [
"TUNE_DISABLE_AUTO_INIT",
"TUNE_RESULT_BUFFER_LENGTH",
"TUNE_DISABLE_SIGINT_HANDLER"
]
| [] | ["TUNE_DISABLE_AUTO_INIT", "TUNE_RESULT_BUFFER_LENGTH", "TUNE_DISABLE_SIGINT_HANDLER"] | python | 3 | 0 | |
toolchain/riscv/MSYS/python/Lib/test/test_readline.py | """
Very minimal unittests for parts of the readline module.
"""
from contextlib import ExitStack
from errno import EIO
import locale
import os
import selectors
import subprocess
import sys
import tempfile
import unittest
from test.support import import_module, unlink, temp_dir, TESTFN, verbose
from test.support.script_helper import assert_python_ok
# Skip tests if there is no readline module
readline = import_module('readline')
if hasattr(readline, "_READLINE_LIBRARY_VERSION"):
is_editline = ("EditLine wrapper" in readline._READLINE_LIBRARY_VERSION)
else:
is_editline = (readline.__doc__ and "libedit" in readline.__doc__)
def setUpModule():
if verbose:
# Python implementations other than CPython may not have
# these private attributes
if hasattr(readline, "_READLINE_VERSION"):
print(f"readline version: {readline._READLINE_VERSION:#x}")
print(f"readline runtime version: {readline._READLINE_RUNTIME_VERSION:#x}")
if hasattr(readline, "_READLINE_LIBRARY_VERSION"):
print(f"readline library version: {readline._READLINE_LIBRARY_VERSION!r}")
print(f"use libedit emulation? {is_editline}")
@unittest.skipUnless(hasattr(readline, "clear_history"),
"The history update test cannot be run because the "
"clear_history method is not available.")
class TestHistoryManipulation (unittest.TestCase):
"""
These tests were added to check that the libedit emulation on OSX and the
"real" readline have the same interface for history manipulation. That's
why the tests cover only a small subset of the interface.
"""
def testHistoryUpdates(self):
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
readline.replace_history_item(0, "replaced line")
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "replaced line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_current_history_length(), 2)
readline.remove_history_item(0)
self.assertEqual(readline.get_history_item(0), None)
self.assertEqual(readline.get_history_item(1), "second line")
self.assertEqual(readline.get_current_history_length(), 1)
@unittest.skipUnless(hasattr(readline, "append_history_file"),
"append_history not available")
def test_write_read_append(self):
hfile = tempfile.NamedTemporaryFile(delete=False)
hfile.close()
hfilename = hfile.name
self.addCleanup(unlink, hfilename)
# test write-clear-read == nop
readline.clear_history()
readline.add_history("first line")
readline.add_history("second line")
readline.write_history_file(hfilename)
readline.clear_history()
self.assertEqual(readline.get_current_history_length(), 0)
readline.read_history_file(hfilename)
self.assertEqual(readline.get_current_history_length(), 2)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
# test append
readline.append_history_file(1, hfilename)
readline.clear_history()
readline.read_history_file(hfilename)
self.assertEqual(readline.get_current_history_length(), 3)
self.assertEqual(readline.get_history_item(1), "first line")
self.assertEqual(readline.get_history_item(2), "second line")
self.assertEqual(readline.get_history_item(3), "second line")
# test 'no such file' behaviour
os.unlink(hfilename)
with self.assertRaises(FileNotFoundError):
readline.append_history_file(1, hfilename)
# write_history_file can create the target
readline.write_history_file(hfilename)
def test_nonascii_history(self):
readline.clear_history()
try:
readline.add_history("entrée 1")
except UnicodeEncodeError as err:
self.skipTest("Locale cannot encode test data: " + format(err))
readline.add_history("entrée 2")
readline.replace_history_item(1, "entrée 22")
readline.write_history_file(TESTFN)
self.addCleanup(os.remove, TESTFN)
readline.clear_history()
readline.read_history_file(TESTFN)
if is_editline:
# An add_history() call seems to be required for
# get_history_item() to register items from the file
readline.add_history("dummy")
self.assertEqual(readline.get_history_item(1), "entrée 1")
self.assertEqual(readline.get_history_item(2), "entrée 22")
class TestReadline(unittest.TestCase):
@unittest.skipIf(readline._READLINE_VERSION < 0x0601 and not is_editline,
"not supported in this library version")
def test_init(self):
# Issue #19884: Ensure that the ANSI sequence "\033[1034h" is not
# written into stdout when the readline module is imported and stdout
# is redirected to a pipe.
rc, stdout, stderr = assert_python_ok('-c', 'import readline',
TERM='xterm-256color')
self.assertEqual(stdout, b'')
auto_history_script = """\
import readline
readline.set_auto_history({})
input()
print("History length:", readline.get_current_history_length())
"""
def test_auto_history_enabled(self):
output = run_pty(self.auto_history_script.format(True))
self.assertIn(b"History length: 1\r\n", output)
def test_auto_history_disabled(self):
output = run_pty(self.auto_history_script.format(False))
self.assertIn(b"History length: 0\r\n", output)
def test_nonascii(self):
loc = locale.setlocale(locale.LC_CTYPE, None)
if loc in ('C', 'POSIX'):
# bpo-29240: On FreeBSD, if the LC_CTYPE locale is C or POSIX,
# writing and reading non-ASCII bytes into/from a TTY works, but
# readline or ncurses ignores non-ASCII bytes on read.
self.skipTest(f"the LC_CTYPE locale is {loc!r}")
try:
readline.add_history("\xEB\xEF")
except UnicodeEncodeError as err:
self.skipTest("Locale cannot encode test data: " + format(err))
script = r"""import readline
is_editline = readline.__doc__ and "libedit" in readline.__doc__
inserted = "[\xEFnserted]"
macro = "|t\xEB[after]"
set_pre_input_hook = getattr(readline, "set_pre_input_hook", None)
if is_editline or not set_pre_input_hook:
# The insert_line() call via pre_input_hook() does nothing with Editline,
# so include the extra text that would have been inserted here
macro = inserted + macro
if is_editline:
readline.parse_and_bind(r'bind ^B ed-prev-char')
readline.parse_and_bind(r'bind "\t" rl_complete')
readline.parse_and_bind(r'bind -s ^A "{}"'.format(macro))
else:
readline.parse_and_bind(r'Control-b: backward-char')
readline.parse_and_bind(r'"\t": complete')
readline.parse_and_bind(r'set disable-completion off')
readline.parse_and_bind(r'set show-all-if-ambiguous off')
readline.parse_and_bind(r'set show-all-if-unmodified off')
readline.parse_and_bind(r'Control-a: "{}"'.format(macro))
def pre_input_hook():
readline.insert_text(inserted)
readline.redisplay()
if set_pre_input_hook:
set_pre_input_hook(pre_input_hook)
def completer(text, state):
if text == "t\xEB":
if state == 0:
print("text", ascii(text))
print("line", ascii(readline.get_line_buffer()))
print("indexes", readline.get_begidx(), readline.get_endidx())
return "t\xEBnt"
if state == 1:
return "t\xEBxt"
if text == "t\xEBx" and state == 0:
return "t\xEBxt"
return None
readline.set_completer(completer)
def display(substitution, matches, longest_match_length):
print("substitution", ascii(substitution))
print("matches", ascii(matches))
readline.set_completion_display_matches_hook(display)
print("result", ascii(input()))
print("history", ascii(readline.get_history_item(1)))
"""
input = b"\x01" # Ctrl-A, expands to "|t\xEB[after]"
input += b"\x02" * len("[after]") # Move cursor back
input += b"\t\t" # Display possible completions
input += b"x\t" # Complete "t\xEBx" -> "t\xEBxt"
input += b"\r"
output = run_pty(script, input)
self.assertIn(b"text 't\\xeb'\r\n", output)
self.assertIn(b"line '[\\xefnserted]|t\\xeb[after]'\r\n", output)
self.assertIn(b"indexes 11 13\r\n", output)
if not is_editline and hasattr(readline, "set_pre_input_hook"):
self.assertIn(b"substitution 't\\xeb'\r\n", output)
self.assertIn(b"matches ['t\\xebnt', 't\\xebxt']\r\n", output)
expected = br"'[\xefnserted]|t\xebxt[after]'"
self.assertIn(b"result " + expected + b"\r\n", output)
self.assertIn(b"history " + expected + b"\r\n", output)
# We have 2 reasons to skip this test:
# - readline: history size was added in 6.0
# See https://cnswww.cns.cwru.edu/php/chet/readline/CHANGES
# - editline: history size is broken on OS X 10.11.6.
# Newer versions were not tested yet.
@unittest.skipIf(readline._READLINE_VERSION < 0x600,
"this readline version does not support history-size")
@unittest.skipIf(is_editline,
"editline history size configuration is broken")
def test_history_size(self):
history_size = 10
with temp_dir() as test_dir:
inputrc = os.path.join(test_dir, "inputrc")
with open(inputrc, "wb") as f:
f.write(b"set history-size %d\n" % history_size)
history_file = os.path.join(test_dir, "history")
with open(history_file, "wb") as f:
# history_size * 2 items crashes readline
data = b"".join(b"item %d\n" % i
for i in range(history_size * 2))
f.write(data)
script = """
import os
import readline
history_file = os.environ["HISTORY_FILE"]
readline.read_history_file(history_file)
input()
readline.write_history_file(history_file)
"""
env = dict(os.environ)
env["INPUTRC"] = inputrc
env["HISTORY_FILE"] = history_file
run_pty(script, input=b"last input\r", env=env)
with open(history_file, "rb") as f:
lines = f.readlines()
self.assertEqual(len(lines), history_size)
self.assertEqual(lines[-1].strip(), b"last input")
def run_pty(script, input=b"dummy input\r", env=None):
pty = import_module('pty')
output = bytearray()
[master, slave] = pty.openpty()
args = (sys.executable, '-c', script)
proc = subprocess.Popen(args, stdin=slave, stdout=slave, stderr=slave, env=env)
os.close(slave)
with ExitStack() as cleanup:
cleanup.enter_context(proc)
def terminate(proc):
try:
proc.terminate()
except ProcessLookupError:
# Workaround for Open/Net BSD bug (Issue 16762)
pass
cleanup.callback(terminate, proc)
cleanup.callback(os.close, master)
# Avoid using DefaultSelector and PollSelector. Kqueue() does not
# work with pseudo-terminals on OS X < 10.9 (Issue 20365) and Open
# BSD (Issue 20667). Poll() does not work with OS X 10.6 or 10.4
# either (Issue 20472). Hopefully the file descriptor is low enough
# to use with select().
sel = cleanup.enter_context(selectors.SelectSelector())
sel.register(master, selectors.EVENT_READ | selectors.EVENT_WRITE)
os.set_blocking(master, False)
while True:
for [_, events] in sel.select():
if events & selectors.EVENT_READ:
try:
chunk = os.read(master, 0x10000)
except OSError as err:
# Linux raises EIO when slave is closed (Issue 5380)
if err.errno != EIO:
raise
chunk = b""
if not chunk:
return output
output.extend(chunk)
if events & selectors.EVENT_WRITE:
try:
input = input[os.write(master, input):]
except OSError as err:
# Apparently EIO means the slave was closed
if err.errno != EIO:
raise
input = b"" # Stop writing
if not input:
sel.modify(master, selectors.EVENT_READ)
if __name__ == "__main__":
unittest.main()
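# A minimal sketch of the pty-driving pattern used by run_pty() above
# (POSIX-only; the child command is illustrative):
#
#     import os, pty, subprocess, sys
#     master, slave = pty.openpty()
#     proc = subprocess.Popen([sys.executable, "-c", "print(input())"],
#                             stdin=slave, stdout=slave, stderr=slave)
#     os.close(slave)
#     os.write(master, b"hello\r")
#     print(os.read(master, 1024))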
| []
| []
| [
"HISTORY_FILE"
]
| [] | ["HISTORY_FILE"] | python | 1 | 0 | |
base/settings.py | # Django settings for openeats project.
import os, datetime
# We can't set the debug flag just by using the env var.
# Python will evaluate any non-empty string as a True bool.
DEBUG = False
if os.environ.get('DJANGO_DEBUG', 'False').lower() == 'true':
DEBUG = True
SERVE_MEDIA = True
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
PROJECT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'ChangeMe!')
# Force Django to use https headers if its behind a https proxy.
# See: https://docs.djangoproject.com/en/2.0/ref/settings/#secure-proxy-ssl-header
if os.environ.get('HTTP_X_FORWARDED_PROTO', 'False').lower() == 'true':
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': os.environ.get('DATABASE_ENGINE', 'django.db.backends.mysql'),
'NAME': os.environ.get('MYSQL_DATABASE', 'openeats'),
'USER': os.environ.get('MYSQL_USER', 'root'),
'PASSWORD': os.environ.get('MYSQL_ROOT_PASSWORD', ''),
'HOST': os.environ.get('MYSQL_HOST', 'db'),
'PORT': os.environ.get('MYSQL_PORT', '3306'),
'TEST': {
'NAME': os.environ.get('MYSQL_TEST_DATABASE', 'test_openeats')
}
}
}
if 'django.db.backends.mysql' in DATABASES['default']['ENGINE']:
DATABASES['default'].setdefault('OPTIONS', {})
DATABASES['default']['OPTIONS']['charset'] = 'utf8mb4'
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/2.0/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'node', 'web']
env_allowed_host = os.environ.get('ALLOWED_HOST', None)
if env_allowed_host is not None:
if ',' in env_allowed_host:
ALLOWED_HOSTS += [host.strip() for host in env_allowed_host.split(',')]
else:
ALLOWED_HOSTS.append(env_allowed_host)
# List of callables that know how to import templates from various sources.
TEMPLATES = [
{
'NAME': 'Core Templates',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_PATH, 'templates'), ],
'APP_DIRS': False,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
{
'NAME': '3rd Party Templates',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.locale.LocaleMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.staticfiles',
'django_filters',
'rest_framework',
'rest_framework.authtoken',
'coreapi',
'base',
'v1.recipe',
'v1.recipe_groups',
'v1.ingredient',
'v1.news',
'v1.list',
'v1.menu',
'v1.rating',
'imagekit',
'django_extensions',
'corsheaders'
)
# Password validation
# https://docs.djangoproject.com/en/2.0/topics/auth/passwords/#password-validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
ROOT_URLCONF = 'base.urls'
WSGI_APPLICATION = 'base.wsgi.application'
# Automatically find the correct time zone to use.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE': 100,
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'anon': '100/hour',
}
}
# http://getblimp.github.io/django-rest-framework-jwt/#additional-settings
JWT_AUTH = {
# We are returning custom data to our UI.
'JWT_RESPONSE_PAYLOAD_HANDLER': 'v1.accounts.jwt_handler.handler',
# Allow for token refresh and increase the timeout of the user token.
'JWT_ALLOW_REFRESH': True,
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=14),
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(weeks=30),
}
# We don't want the API to serve static in production.
# So we are forcing the renderer to be JSON only.
if not DEBUG:
REST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = (
'rest_framework.renderers.JSONRenderer',
)
CORS_ORIGIN_WHITELIST = (
os.environ.get('NODE_URL', 'localhost:8080'),  # trailing comma makes this a tuple, not a bare string
)
# Static and i18n settings
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
)
LOCALE_PATHS = (
os.path.join(PROJECT_PATH, 'locale', ),
)
FIXTURE_DIRS = [
os.path.join(PROJECT_PATH, 'v1', 'fixtures'),
]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'site-media')
STATIC_ROOT = os.path.join(PROJECT_PATH, 'static-files')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/site-media/'
STATIC_URL = '/static-files/'
ugettext = lambda s: s
LANGUAGES = (
('en', ugettext('English')),
('de', ugettext('German')),
)
try:
from local_settings import *
except ImportError:
pass
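# Example invocation (values are hypothetical); any setting above that reads
# os.environ can be overridden the same way:
#
#   DJANGO_DEBUG=true MYSQL_HOST=127.0.0.1 MYSQL_ROOT_PASSWORD=secret \
#       python manage.py runserver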
| []
| []
| [
"DJANGO_SECRET_KEY",
"MYSQL_TEST_DATABASE",
"MYSQL_USER",
"MYSQL_PORT",
"MYSQL_ROOT_PASSWORD",
"DJANGO_DEBUG",
"DATABASE_ENGINE",
"ALLOWED_HOST",
"HTTP_X_FORWARDED_PROTO",
"NODE_URL",
"MYSQL_DATABASE",
"MYSQL_HOST"
]
| [] | ["DJANGO_SECRET_KEY", "MYSQL_TEST_DATABASE", "MYSQL_USER", "MYSQL_PORT", "MYSQL_ROOT_PASSWORD", "DJANGO_DEBUG", "DATABASE_ENGINE", "ALLOWED_HOST", "HTTP_X_FORWARDED_PROTO", "NODE_URL", "MYSQL_DATABASE", "MYSQL_HOST"] | python | 12 | 0 | |
app/core/migrations/0013_remove_spaceobservatory_flight_duration.py | # Generated by Django 3.0.2 on 2020-01-06 01:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0012_auto_20200105_2214'),
]
operations = [
migrations.RemoveField(
model_name='spaceobservatory',
name='flight_duration',
),
]
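# Hypothetical usage: apply or roll back this migration with
#
#   python manage.py migrate core 0013
#   python manage.py migrate core 0012_auto_20200105_2214  # revert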
| []
| []
| []
| [] | [] | python | null | null | null |
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.crypto.key.kms.server;
import com.google.common.base.Supplier;
import com.google.common.cache.LoadingCache;
import org.apache.curator.test.TestingServer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
import org.apache.hadoop.crypto.key.KeyProvider.Options;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.CryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderDelegationTokenExtension;
import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
import org.apache.hadoop.crypto.key.kms.KMSDelegationToken;
import org.apache.hadoop.crypto.key.kms.LoadBalancingKMSClientProvider;
import org.apache.hadoop.crypto.key.kms.ValueQueue;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.delegation.web.DelegationTokenIdentifier;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.http.client.utils.URIBuilder;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.slf4j.event.Level;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.HttpsURLConnection;
import javax.security.auth.login.AppConfigurationEntry;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.Writer;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URL;
import java.net.URLConnection;
import java.security.GeneralSecurityException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.when;
public class TestKMS {
private static final Logger LOG = LoggerFactory.getLogger(TestKMS.class);
private static final String SSL_RELOADER_THREAD_NAME =
"Truststore reloader thread";
private SSLFactory sslFactory;
@Rule
public final Timeout testTimeout = new Timeout(180000);
@Before
public void setUp() throws Exception {
setUpMiniKdc();
// resetting kerberos security
Configuration conf = new Configuration();
UserGroupInformation.setConfiguration(conf);
}
public static File getTestDir() throws Exception {
File file = new File("dummy");
file = file.getAbsoluteFile();
file = file.getParentFile();
file = new File(file, "target");
file = new File(file, UUID.randomUUID().toString());
if (!file.mkdirs()) {
throw new RuntimeException("Could not create test directory: " + file);
}
return file;
}
public static abstract class KMSCallable<T> implements Callable<T> {
private URL kmsUrl;
protected URL getKMSUrl() {
return kmsUrl;
}
}
protected KeyProvider createProvider(URI uri, Configuration conf)
throws IOException {
return new LoadBalancingKMSClientProvider(
new KMSClientProvider[] { new KMSClientProvider(uri, conf) }, conf);
}
private KMSClientProvider createKMSClientProvider(URI uri, Configuration conf)
throws IOException {
return new KMSClientProvider(uri, conf);
}
protected <T> T runServer(String keystore, String password, File confDir,
KMSCallable<T> callable) throws Exception {
return runServer(-1, keystore, password, confDir, callable);
}
protected <T> T runServer(int port, String keystore, String password, File confDir,
KMSCallable<T> callable) throws Exception {
MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder().setKmsConfDir(confDir)
.setLog4jConfFile("log4j.properties");
if (keystore != null) {
miniKMSBuilder.setSslConf(new File(keystore), password);
}
if (port > 0) {
miniKMSBuilder.setPort(port);
}
MiniKMS miniKMS = miniKMSBuilder.build();
miniKMS.start();
try {
System.out.println("Test KMS running at: " + miniKMS.getKMSUrl());
callable.kmsUrl = miniKMS.getKMSUrl();
return callable.call();
} finally {
miniKMS.stop();
}
}
protected Configuration createBaseKMSConf(File keyStoreDir) throws Exception {
return createBaseKMSConf(keyStoreDir, null);
}
/**
* The Configuration object is shared by both KMS client and server in unit
* tests because UGI gets/sets it to a static variable.
* As a workaround, make sure the client configurations are copied to server
* so that client can read them.
* @param keyStoreDir where keystore is located.
* @param conf KMS client configuration
* @return KMS server configuration based on client.
* @throws Exception
*/
protected Configuration createBaseKMSConf(File keyStoreDir,
Configuration conf) throws Exception {
Configuration newConf;
if (conf == null) {
newConf = new Configuration(false);
} else {
newConf = new Configuration(conf);
}
newConf.set(KMSConfiguration.KEY_PROVIDER_URI,
"jceks://file@" + new Path(keyStoreDir.getAbsolutePath(), "kms.keystore").toUri());
newConf.set("hadoop.kms.authentication.type", "simple");
return newConf;
}
public static void writeConf(File confDir, Configuration conf)
throws Exception {
Writer writer = new FileWriter(new File(confDir,
KMSConfiguration.KMS_SITE_XML));
conf.writeXml(writer);
writer.close();
writer = new FileWriter(new File(confDir, KMSConfiguration.KMS_ACLS_XML));
conf.writeXml(writer);
writer.close();
//create empty core-site.xml
writer = new FileWriter(new File(confDir, "core-site.xml"));
new Configuration(false).writeXml(writer);
writer.close();
}
public static URI createKMSUri(URL kmsUrl) throws Exception {
String str = kmsUrl.toString();
str = str.replaceFirst("://", "@");
return new URI("kms://" + str);
}
private static class KerberosConfiguration
extends javax.security.auth.login.Configuration {
private String principal;
private String keytab;
private boolean isInitiator;
private KerberosConfiguration(String principal, File keytab,
boolean client) {
this.principal = principal;
this.keytab = keytab.getAbsolutePath();
this.isInitiator = client;
}
public static javax.security.auth.login.Configuration createClientConfig(
String principal,
File keytab) {
return new KerberosConfiguration(principal, keytab, true);
}
private static String getKrb5LoginModuleName() {
return System.getProperty("java.vendor").contains("IBM")
? "com.ibm.security.auth.module.Krb5LoginModule"
: "com.sun.security.auth.module.Krb5LoginModule";
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
Map<String, String> options = new HashMap<String, String>();
options.put("keyTab", keytab);
options.put("principal", principal);
options.put("useKeyTab", "true");
options.put("storeKey", "true");
options.put("doNotPrompt", "true");
options.put("useTicketCache", "true");
options.put("renewTGT", "true");
options.put("refreshKrb5Config", "true");
options.put("isInitiator", Boolean.toString(isInitiator));
String ticketCache = System.getenv("KRB5CCNAME");
if (ticketCache != null) {
options.put("ticketCache", ticketCache);
}
options.put("debug", "true");
return new AppConfigurationEntry[]{
new AppConfigurationEntry(getKrb5LoginModuleName(),
AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
options)};
}
}
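// A minimal sketch (not exercised by these tests) of how a JAAS
// Configuration like the one above is typically consumed; `keytabFile`
// is a hypothetical java.io.File pointing at a keytab:
//
//   LoginContext lc = new LoginContext("client", null, null,
//       KerberosConfiguration.createClientConfig("client", keytabFile));
//   lc.login();
//   Subject subject = lc.getSubject();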
private static MiniKdc kdc;
private static File keytab;
private static void setUpMiniKdc(Properties kdcConf) throws Exception {
File kdcDir = getTestDir();
kdc = new MiniKdc(kdcConf, kdcDir);
kdc.start();
keytab = new File(kdcDir, "keytab");
List<String> principals = new ArrayList<String>();
principals.add("HTTP/localhost");
principals.add("client");
principals.add("hdfs");
principals.add("otheradmin");
principals.add("client/host");
principals.add("client1");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
principals.add(type.toString());
}
principals.add("CREATE_MATERIAL");
principals.add("ROLLOVER_MATERIAL");
kdc.createPrincipal(keytab,
principals.toArray(new String[principals.size()]));
}
private void setUpMiniKdc() throws Exception {
Properties kdcConf = MiniKdc.createConf();
setUpMiniKdc(kdcConf);
}
@After
public void tearDownMiniKdc() throws Exception {
if (kdc != null) {
kdc.stop();
kdc = null;
}
UserGroupInformation.setShouldRenewImmediatelyForTests(false);
UserGroupInformation.reset();
}
private <T> T doAs(String user, final PrivilegedExceptionAction<T> action)
throws Exception {
UserGroupInformation.loginUserFromKeytab(user, keytab.getAbsolutePath());
UserGroupInformation ugi = UserGroupInformation.getLoginUser();
try {
return ugi.doAs(action);
} finally {
ugi.logoutUserFromKeytab();
}
}
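// Illustrative call pattern for the helper above (the action body is
// hypothetical; any PrivilegedExceptionAction can be passed):
//
//   String keys = doAs("client", new PrivilegedExceptionAction<String>() {
//     @Override
//     public String run() throws Exception {
//       return createProvider(uri, conf).getKeys().toString();
//     }
//   });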
/**
* Read in the content from a URL connection.
* @param conn URLConnection to read
* @return the text from the output
* @throws IOException if something went wrong
*/
private static String readOutput(URLConnection conn) throws IOException {
StringBuilder out = new StringBuilder();
InputStream in = conn.getInputStream();
byte[] buffer = new byte[64 * 1024];
int len = in.read(buffer);
while (len > 0) {
out.append(new String(buffer, 0, len));
len = in.read(buffer);
}
return out.toString();
}
private static void assertReFind(String re, String value) {
Pattern p = Pattern.compile(re);
Matcher m = p.matcher(value);
Assert.assertTrue("'" + p + "' does not match " + value, m.find());
}
private URLConnection openJMXConnection(URL baseUrl, boolean kerberos)
throws Exception {
URIBuilder b = new URIBuilder(baseUrl + "/jmx");
if (!kerberos) {
b.addParameter("user.name", "dr.who");
}
URL url = b.build().toURL();
LOG.info("JMX URL " + url);
URLConnection conn = url.openConnection();
if (sslFactory != null) {
HttpsURLConnection httpsConn = (HttpsURLConnection) conn;
try {
httpsConn.setSSLSocketFactory(sslFactory.createSSLSocketFactory());
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
httpsConn.setHostnameVerifier(sslFactory.getHostnameVerifier());
}
return conn;
}
private void testJMXQuery(URL baseUrl, boolean kerberos) throws Exception {
LOG.info("Testing JMX");
assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"",
readOutput(openJMXConnection(baseUrl, kerberos)));
}
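/**
 * Starts a KMS with the given transport (HTTP/HTTPS) and authentication
 * (pseudo/Kerberos) combination, then verifies JMX access, an empty key
 * set, and delegation token retrieval. With SSL it also checks that the
 * truststore reloader thread stops once the provider is closed.
 */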
public void testStartStop(final boolean ssl, final boolean kerberos)
throws Exception {
Configuration conf = new Configuration();
if (kerberos) {
conf.set("hadoop.security.authentication", "kerberos");
}
File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
final String keystore;
final String password;
if (ssl) {
String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestKMS.class);
KeyStoreTestUtil.setupSSLConfig(testDir.getAbsolutePath(), sslConfDir,
conf, false);
keystore = testDir.getAbsolutePath() + "/serverKS.jks";
password = "serverP";
} else {
keystore = null;
password = null;
}
conf.set("hadoop.kms.authentication.token.validity", "1");
if (kerberos) {
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
}
writeConf(testDir, conf);
if (ssl) {
sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
try {
sslFactory.init();
} catch (GeneralSecurityException ex) {
throw new IOException(ex);
}
}
runServer(keystore, password, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
URL url = getKMSUrl();
Assert.assertEquals(keystore != null,
url.getProtocol().equals("https"));
final URI uri = createKMSUri(getKMSUrl());
if (ssl) {
KeyProvider testKp = createProvider(uri, conf);
ThreadGroup threadGroup = Thread.currentThread().getThreadGroup();
while (threadGroup.getParent() != null) {
threadGroup = threadGroup.getParent();
}
Thread[] threads = new Thread[threadGroup.activeCount()];
threadGroup.enumerate(threads);
Thread reloaderThread = null;
for (Thread thread : threads) {
if ((thread.getName() != null)
&& (thread.getName().contains(SSL_RELOADER_THREAD_NAME))) {
reloaderThread = thread;
}
}
Assert.assertTrue("Reloader is not alive", reloaderThread.isAlive());
testKp.close();
boolean reloaderStillAlive = true;
for (int i = 0; i < 10; i++) {
reloaderStillAlive = reloaderThread.isAlive();
if (!reloaderStillAlive) break;
Thread.sleep(1000);
}
Assert.assertFalse("Reloader is still alive", reloaderStillAlive);
}
if (kerberos) {
for (String user : new String[]{"client", "client/host"}) {
doAs(user, new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
testJMXQuery(url, kerberos);
final KeyProvider kp = createProvider(uri, conf);
// getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty());
Thread.sleep(4000);
Token<?>[] tokens =
((KeyProviderDelegationTokenExtension.DelegationTokenExtension)kp)
.addDelegationTokens("myuser", new Credentials());
Assert.assertEquals(1, tokens.length);
Assert.assertEquals("kms-dt", tokens[0].getKind().toString());
kp.close();
return null;
}
});
}
} else {
testJMXQuery(url, kerberos);
KeyProvider kp = createProvider(uri, conf);
// getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty());
Thread.sleep(4000);
Token<?>[] tokens =
((KeyProviderDelegationTokenExtension.DelegationTokenExtension)kp)
.addDelegationTokens("myuser", new Credentials());
Assert.assertEquals(1, tokens.length);
Assert.assertEquals("kms-dt", tokens[0].getKind().toString());
kp.close();
}
return null;
}
});
if (sslFactory != null) {
sslFactory.destroy();
sslFactory = null;
}
}
@Test
public void testStartStopHttpPseudo() throws Exception {
testStartStop(false, false);
}
@Test
public void testStartStopHttpsPseudo() throws Exception {
testStartStop(true, false);
}
@Test
public void testStartStopHttpKerberos() throws Exception {
testStartStop(false, true);
}
@Test
public void testStartStopHttpsKerberos() throws Exception {
testStartStop(true, true);
}
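/**
 * Verifies that a key whose name contains whitespace, a newline, and
 * shell/JSON special characters can still be created and returned.
 */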
@Test(timeout = 30000)
public void testSpecialKeyNames() throws Exception {
final String specialKey = "key %^[\n{]}|\"<>\\";
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
File confDir = getTestDir();
conf = createBaseKMSConf(confDir, conf);
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + specialKey + ".ALL", "*");
writeConf(confDir, conf);
runServer(null, null, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
Configuration conf = new Configuration();
URI uri = createKMSUri(getKMSUrl());
KeyProvider kp = createProvider(uri, conf);
Assert.assertTrue(kp.getKeys().isEmpty());
Assert.assertEquals(0, kp.getKeysMetadata().length);
KeyProvider.Options options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("l1");
LOG.info("Creating key with name '{}'", specialKey);
KeyProvider.KeyVersion kv0 = kp.createKey(specialKey, options);
Assert.assertNotNull(kv0);
Assert.assertEquals(specialKey, kv0.getName());
Assert.assertNotNull(kv0.getVersionName());
Assert.assertNotNull(kv0.getMaterial());
return null;
}
});
}
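/**
 * End-to-end exercise of the KeyProvider API against a running KMS:
 * create/roll/delete keys, metadata and version listings, EEK generation,
 * decryption and re-encryption (single and batch), and rollover draining.
 */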
@Test
@SuppressWarnings("checkstyle:methodlength")
public void testKMSProvider() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
File confDir = getTestDir();
conf = createBaseKMSConf(confDir, conf);
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.MANAGEMENT", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.READ", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k4.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k5.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k6.ALL", "*");
writeConf(confDir, conf);
runServer(null, null, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
Date started = new Date();
Configuration conf = new Configuration();
URI uri = createKMSUri(getKMSUrl());
KeyProvider kp = createProvider(uri, conf);
// getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty());
// getKeysMetadata() empty
Assert.assertEquals(0, kp.getKeysMetadata().length);
// createKey()
KeyProvider.Options options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("l1");
KeyProvider.KeyVersion kv0 = kp.createKey("k1", options);
Assert.assertNotNull(kv0);
Assert.assertNotNull(kv0.getVersionName());
Assert.assertNotNull(kv0.getMaterial());
// getKeyVersion()
KeyProvider.KeyVersion kv1 = kp.getKeyVersion(kv0.getVersionName());
Assert.assertEquals(kv0.getVersionName(), kv1.getVersionName());
Assert.assertNotNull(kv1.getMaterial());
// getCurrent()
KeyProvider.KeyVersion cv1 = kp.getCurrentKey("k1");
Assert.assertEquals(kv0.getVersionName(), cv1.getVersionName());
Assert.assertNotNull(cv1.getMaterial());
// getKeyMetadata() 1 version
KeyProvider.Metadata m1 = kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding", m1.getCipher());
Assert.assertEquals("AES", m1.getAlgorithm());
Assert.assertEquals(128, m1.getBitLength());
Assert.assertEquals(1, m1.getVersions());
Assert.assertNotNull(m1.getCreated());
Assert.assertTrue(started.before(m1.getCreated()));
// getKeyVersions() 1 version
List<KeyProvider.KeyVersion> lkv1 = kp.getKeyVersions("k1");
Assert.assertEquals(1, lkv1.size());
Assert.assertEquals(kv0.getVersionName(), lkv1.get(0).getVersionName());
Assert.assertNotNull(kv1.getMaterial());
// rollNewVersion()
KeyProvider.KeyVersion kv2 = kp.rollNewVersion("k1");
Assert.assertNotSame(kv0.getVersionName(), kv2.getVersionName());
Assert.assertNotNull(kv2.getMaterial());
// getKeyVersion()
kv2 = kp.getKeyVersion(kv2.getVersionName());
boolean eq = true;
for (int i = 0; i < kv1.getMaterial().length; i++) {
eq = eq && kv1.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertFalse(eq);
// getCurrent()
KeyProvider.KeyVersion cv2 = kp.getCurrentKey("k1");
Assert.assertEquals(kv2.getVersionName(), cv2.getVersionName());
Assert.assertNotNull(cv2.getMaterial());
eq = true;
for (int i = 0; i < kv1.getMaterial().length; i++) {
eq = eq && cv2.getMaterial()[i] == kv2.getMaterial()[i];
}
Assert.assertTrue(eq);
// getKeyVersions() 2 versions
List<KeyProvider.KeyVersion> lkv2 = kp.getKeyVersions("k1");
Assert.assertEquals(2, lkv2.size());
Assert.assertEquals(kv1.getVersionName(), lkv2.get(0).getVersionName());
Assert.assertNotNull(lkv2.get(0).getMaterial());
Assert.assertEquals(kv2.getVersionName(), lkv2.get(1).getVersionName());
Assert.assertNotNull(lkv2.get(1).getMaterial());
// getKeyMetadata() 2 version
KeyProvider.Metadata m2 = kp.getMetadata("k1");
Assert.assertEquals("AES/CTR/NoPadding", m2.getCipher());
Assert.assertEquals("AES", m2.getAlgorithm());
Assert.assertEquals(128, m2.getBitLength());
Assert.assertEquals(2, m2.getVersions());
Assert.assertNotNull(m2.getCreated());
Assert.assertTrue(started.before(m2.getCreated()));
// getKeys() 1 key
List<String> ks1 = kp.getKeys();
Assert.assertEquals(1, ks1.size());
Assert.assertEquals("k1", ks1.get(0));
// getKeysMetadata() 1 key 2 versions
KeyProvider.Metadata[] kms1 = kp.getKeysMetadata("k1");
Assert.assertEquals(1, kms1.length);
Assert.assertEquals("AES/CTR/NoPadding", kms1[0].getCipher());
Assert.assertEquals("AES", kms1[0].getAlgorithm());
Assert.assertEquals(128, kms1[0].getBitLength());
Assert.assertEquals(2, kms1[0].getVersions());
Assert.assertNotNull(kms1[0].getCreated());
Assert.assertTrue(started.before(kms1[0].getCreated()));
// test generate and decryption of EEK
KeyProvider.KeyVersion kv = kp.getCurrentKey("k1");
KeyProviderCryptoExtension kpExt =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1 = kpExt.generateEncryptedKey(kv.getName());
Assert.assertEquals(KeyProviderCryptoExtension.EEK,
ek1.getEncryptedKeyVersion().getVersionName());
Assert.assertNotNull(ek1.getEncryptedKeyVersion().getMaterial());
Assert.assertEquals(kv.getMaterial().length,
ek1.getEncryptedKeyVersion().getMaterial().length);
KeyProvider.KeyVersion k1 = kpExt.decryptEncryptedKey(ek1);
Assert.assertEquals(KeyProviderCryptoExtension.EK, k1.getVersionName());
KeyProvider.KeyVersion k1a = kpExt.decryptEncryptedKey(ek1);
Assert.assertArrayEquals(k1.getMaterial(), k1a.getMaterial());
Assert.assertEquals(kv.getMaterial().length, k1.getMaterial().length);
EncryptedKeyVersion ek2 = kpExt.generateEncryptedKey(kv.getName());
KeyProvider.KeyVersion k2 = kpExt.decryptEncryptedKey(ek2);
boolean isEq = true;
for (int i = 0; isEq && i < ek2.getEncryptedKeyVersion()
.getMaterial().length; i++) {
isEq = k2.getMaterial()[i] == k1.getMaterial()[i];
}
Assert.assertFalse(isEq);
// test re-encrypt
kpExt.rollNewVersion(ek1.getEncryptionKeyName());
EncryptedKeyVersion ek1r = kpExt.reencryptEncryptedKey(ek1);
assertEquals(KeyProviderCryptoExtension.EEK,
ek1r.getEncryptedKeyVersion().getVersionName());
assertFalse(Arrays.equals(ek1.getEncryptedKeyVersion().getMaterial(),
ek1r.getEncryptedKeyVersion().getMaterial()));
assertEquals(kv.getMaterial().length,
ek1r.getEncryptedKeyVersion().getMaterial().length);
assertEquals(ek1.getEncryptionKeyName(), ek1r.getEncryptionKeyName());
assertArrayEquals(ek1.getEncryptedKeyIv(), ek1r.getEncryptedKeyIv());
assertNotEquals(ek1.getEncryptionKeyVersionName(),
ek1r.getEncryptionKeyVersionName());
KeyProvider.KeyVersion k1r = kpExt.decryptEncryptedKey(ek1r);
assertEquals(KeyProviderCryptoExtension.EK, k1r.getVersionName());
assertArrayEquals(k1.getMaterial(), k1r.getMaterial());
assertEquals(kv.getMaterial().length, k1r.getMaterial().length);
// test re-encrypt batch
EncryptedKeyVersion ek3 = kpExt.generateEncryptedKey(kv.getName());
KeyVersion latest = kpExt.rollNewVersion(kv.getName());
List<EncryptedKeyVersion> ekvs = new ArrayList<>(3);
ekvs.add(ek1);
ekvs.add(ek2);
ekvs.add(ek3);
ekvs.add(ek1);
ekvs.add(ek2);
ekvs.add(ek3);
kpExt.reencryptEncryptedKeys(ekvs);
for (EncryptedKeyVersion ekv: ekvs) {
assertEquals(latest.getVersionName(),
ekv.getEncryptionKeyVersionName());
}
// deleteKey()
kp.deleteKey("k1");
// Check decryption after Key deletion
try {
kpExt.decryptEncryptedKey(ek1);
Assert.fail("Should not be allowed !!");
} catch (Exception e) {
Assert.assertTrue(e.getMessage().contains("'k1@1' not found"));
}
// getKey()
Assert.assertNull(kp.getKeyVersion("k1"));
// getKeyVersions()
Assert.assertNull(kp.getKeyVersions("k1"));
// getMetadata()
Assert.assertNull(kp.getMetadata("k1"));
// getKeys() empty
Assert.assertTrue(kp.getKeys().isEmpty());
// getKeysMetadata() empty
Assert.assertEquals(0, kp.getKeysMetadata().length);
// createKey() no description, no tags
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
KeyVersion kVer2 = kp.createKey("k2", options);
KeyProvider.Metadata meta = kp.getMetadata("k2");
Assert.assertNull(meta.getDescription());
Assert.assertEquals("k2", meta.getAttributes().get("key.acl.name"));
// test key ACLs: k2 is granted only MANAGEMENT op access
try {
kpExt =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
kpExt.generateEncryptedKey(kVer2.getName());
Assert.fail("User should not be allowed to encrypt !!");
} catch (Exception ex) {
//
}
// createKey() description, no tags
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
kp.createKey("k3", options);
meta = kp.getMetadata("k3");
Assert.assertEquals("d", meta.getDescription());
Assert.assertEquals("k3", meta.getAttributes().get("key.acl.name"));
Map<String, String> attributes = new HashMap<String, String>();
attributes.put("a", "A");
// createKey() no description, tags
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
attributes.put("key.acl.name", "k4");
options.setAttributes(attributes);
kp.createKey("k4", options);
meta = kp.getMetadata("k4");
Assert.assertNull(meta.getDescription());
Assert.assertEquals(attributes, meta.getAttributes());
// createKey() description, tags
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("d");
attributes.put("key.acl.name", "k5");
options.setAttributes(attributes);
kp.createKey("k5", options);
meta = kp.getMetadata("k5");
Assert.assertEquals("d", meta.getDescription());
Assert.assertEquals(attributes, meta.getAttributes());
// test rollover draining
KeyProviderCryptoExtension kpce = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
kpce.createKey("k6", options);
EncryptedKeyVersion ekv1 = kpce.generateEncryptedKey("k6");
kpce.rollNewVersion("k6");
kpce.invalidateCache("k6");
EncryptedKeyVersion ekv2 = kpce.generateEncryptedKey("k6");
assertNotEquals("rollover did not generate a new key even after"
+ " queue is drained", ekv1.getEncryptionKeyVersionName(),
ekv2.getEncryptionKeyVersionName());
return null;
}
});
}
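/**
 * Verifies KMSClientProvider's client-side EEK cache invalidation by
 * injecting a mock entry into the internal ValueQueue and checking which
 * key version subsequent generateEncryptedKey() calls return.
 */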
@Test
public void testKMSProviderCaching() throws Exception {
Configuration conf = new Configuration();
File confDir = getTestDir();
conf = createBaseKMSConf(confDir, conf);
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
writeConf(confDir, conf);
runServer(null, null, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final String keyName = "k1";
final String mockVersionName = "mock";
final Configuration conf = new Configuration();
final URI uri = createKMSUri(getKMSUrl());
KMSClientProvider kmscp = createKMSClientProvider(uri, conf);
// get the reference to the internal cache, to test invalidation.
ValueQueue vq =
(ValueQueue) Whitebox.getInternalState(kmscp, "encKeyVersionQueue");
LoadingCache<String, LinkedBlockingQueue<EncryptedKeyVersion>> kq =
((LoadingCache<String, LinkedBlockingQueue<EncryptedKeyVersion>>)
Whitebox.getInternalState(vq, "keyQueues"));
EncryptedKeyVersion mockEKV = Mockito.mock(EncryptedKeyVersion.class);
when(mockEKV.getEncryptionKeyName()).thenReturn(keyName);
when(mockEKV.getEncryptionKeyVersionName()).thenReturn(mockVersionName);
// createKey()
KeyProvider.Options options = new KeyProvider.Options(conf);
options.setCipher("AES/CTR/NoPadding");
options.setBitLength(128);
options.setDescription("l1");
KeyProvider.KeyVersion kv0 = kmscp.createKey(keyName, options);
assertNotNull(kv0.getVersionName());
assertEquals("Default key version name is incorrect.", "k1@0",
kmscp.generateEncryptedKey(keyName).getEncryptionKeyVersionName());
kmscp.invalidateCache(keyName);
kq.get(keyName).put(mockEKV);
assertEquals("Key version incorrect after invalidating cache + putting"
+ " mock key.", mockVersionName,
kmscp.generateEncryptedKey(keyName).getEncryptionKeyVersionName());
// test that invalidation discards queued entries and a fresh version is returned.
for (int i = 0; i < 100; ++i) {
kq.get(keyName).put(mockEKV);
kmscp.invalidateCache(keyName);
assertEquals("Cache invalidation guarantee failed.", "k1@0",
kmscp.generateEncryptedKey(keyName)
.getEncryptionKeyVersionName());
}
return null;
}
});
}
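/**
 * Exercises per-key ACLs, whitelist key ACLs, and default key ACLs by
 * running key operations as users named after the ACL types granted
 * to them.
 */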
@Test
@SuppressWarnings("checkstyle:methodlength")
public void testKeyACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK,DECRYPT_EEK");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK,DECRYPT_EEK");
conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(),
"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK,DECRYPT_EEK");
conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(),
"CREATE,ROLLOVER,GET,SET_KEY_MATERIAL,GENERATE_EEK");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "test_key.MANAGEMENT", "CREATE");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "some_key.MANAGEMENT", "ROLLOVER");
conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "MANAGEMENT", "DECRYPT_EEK");
conf.set(KMSConfiguration.WHITELIST_KEY_ACL_PREFIX + "ALL", "DECRYPT_EEK");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "all_access.ALL", "GENERATE_EEK");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "all_access.DECRYPT_EEK", "ROLLOVER");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "ROLLOVER");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK", "SOMEBODY");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "ALL", "ROLLOVER");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("CREATE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key");
options.setAttributes(newAttribs);
KeyProvider.KeyVersion kv = kp.createKey("k0", options);
Assert.assertNull(kv.getMaterial());
KeyVersion rollVersion = kp.rollNewVersion("k0");
Assert.assertNull(rollVersion.getMaterial());
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("k0");
Assert.fail("User [CREATE] should not be allowed to generate_eek on k0");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [CREATE] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
// Test whitelist key access:
// DECRYPT_EEK is whitelisted for MANAGEMENT operations only
doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "some_key");
options.setAttributes(newAttribs);
KeyProvider.KeyVersion kv = kp.createKey("kk0", options);
Assert.assertNull(kv.getMaterial());
KeyVersion rollVersion = kp.rollNewVersion("kk0");
Assert.assertNull(rollVersion.getMaterial());
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("kk0");
Assert.fail("User [DECRYPT_EEK] should not be allowed to generate_eek on kk0");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
kp.createKey("kkx", options);
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key2");
options.setAttributes(newAttribs);
KeyProvider.KeyVersion kv = kp.createKey("k1", options);
Assert.assertNull(kv.getMaterial());
KeyVersion rollVersion = kp.rollNewVersion("k1");
Assert.assertNull(rollVersion.getMaterial());
try {
kp.rollNewVersion("k0");
Assert.fail("User [ROLLOVER] should not be allowed to rollover k0");
} catch (Exception e) {
// Ignore
}
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
kpce.generateEncryptedKey("k1");
Assert.fail("User [ROLLOVER] should not be allowed to generate_eek on k1");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [ROLLOVER] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("GET", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "test_key");
options.setAttributes(newAttribs);
try {
kp.createKey("k2", options);
Assert.fail("User [GET] should not be allowed to create key..");
} catch (Exception e) {
// Ignore
}
newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
try {
kp.createKey("kx", options);
Assert.fail("User [GET] should not be allowed to create kx");
} catch (Exception e) {
// Ignore
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
final EncryptedKeyVersion ekv = doAs("GENERATE_EEK", new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
Options options = new KeyProvider.Options(conf);
Map<String, String> attributes = options.getAttributes();
HashMap<String,String> newAttribs = new HashMap<String, String>(attributes);
newAttribs.put("key.acl.name", "all_access");
options.setAttributes(newAttribs);
kp.createKey("kx", options);
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
try {
return kpce.generateEncryptedKey("kx");
} catch (Exception e) {
Assert.fail("User [GENERATE_EEK] should be allowed to generate_eek on kx");
}
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
kpce.decryptEncryptedKey(ekv);
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
return null;
}
});
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "GENERATE_EEK", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
KeyProviderCryptoExtension kpce =
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ekv = kpce.generateEncryptedKey("k1");
kpce.reencryptEncryptedKey(ekv);
List<EncryptedKeyVersion> ekvs = new ArrayList<>(2);
ekvs.add(ekv);
ekvs.add(ekv);
kpce.reencryptEncryptedKeys(ekvs);
return null;
}
});
return null;
}
});
}
@Test
public void testKMSRestartKerberosAuth() throws Exception {
doKMSRestart(true);
}
@Test
public void testKMSRestartSimpleAuth() throws Exception {
doKMSRestart(false);
}
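/**
 * Creates a key, restarts the KMS on the same port, and verifies that the
 * provider obtained before the restart can still create keys afterwards.
 */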
public void doKMSRestart(boolean useKrb) throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
if (useKrb) {
conf.set("hadoop.kms.authentication.type", "kerberos");
}
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
writeConf(testDir, conf);
KMSCallable<KeyProvider> c =
new KMSCallable<KeyProvider>() {
@Override
public KeyProvider call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
final KeyProvider kp =
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<KeyProvider>() {
@Override
public KeyProvider run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
return kp;
}
});
return kp;
}
};
final KeyProvider retKp =
runServer(null, null, testDir, c);
// Restart server (using the same port)
runServer(c.getKMSUrl().getPort(), null, null, testDir,
new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
retKp.createKey("k2", new byte[16],
new KeyProvider.Options(conf));
retKp.createKey("k3", new byte[16],
new KeyProvider.Options(conf));
return null;
}
});
return null;
}
});
}
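/**
 * Verifies that the client transparently re-authenticates after the auth
 * token expires (1s validity, sleeping across rollovers), and that key
 * creation fails once retries are disabled via KMSClientProvider.AUTH_RETRY.
 */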
@Test
public void testKMSAuthFailureRetry() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.kms.authentication.token.validity", "1");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k4.ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir,
new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k0", new byte[16],
new KeyProvider.Options(conf));
// This happens before rollover
kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
// At least 2 rollovers, so this should induce a signer exception
Thread.sleep(3500);
kp.createKey("k2", new byte[16],
new KeyProvider.Options(conf));
return null;
}
});
return null;
}
});
// Test retry count
runServer(null, null, testDir,
new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
conf.setInt(KMSClientProvider.AUTH_RETRY, 0);
final URI uri = createKMSUri(getKMSUrl());
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k3", new byte[16],
new KeyProvider.Options(conf));
// At least 2 rollovers, so this should induce a signer exception
Thread.sleep(3500);
try {
kp.createKey("k4", new byte[16],
new KeyProvider.Options(conf));
Assert.fail("This should not succeed..");
} catch (IOException e) {
Assert.assertTrue(
"HTTP exception must be a 401 : " + e.getMessage(), e
.getMessage().contains("401"));
}
return null;
}
});
return null;
}
});
}
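/**
 * Verifies operation-level ACL enforcement for every KMS operation, then
 * rewrites the ACL file and forces a reload to check that revoked CREATE
 * and GENERATE_EEK permissions are enforced.
 */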
@Test
@SuppressWarnings("checkstyle:methodlength")
public void testACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
// nothing allowed: 'client' is not in any operation ACL
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.createKey("k", new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.createKey("k", new byte[16], new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.rollNewVersion("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.rollNewVersion("k", new byte[16]);
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeys();
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeysMetadata("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
// we are using JavaKeyStoreProvider for testing, so we know how
// the key version is created.
kp.getKeyVersion("k@0");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getCurrentKey("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getMetadata("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
try {
kp.getKeyVersions("k");
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("CREATE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProvider.KeyVersion kv = kp.createKey("k0",
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("DELETE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.deleteKey("k0");
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProvider.KeyVersion kv = kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("ROLLOVER", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProvider.KeyVersion kv = kp.rollNewVersion("k1");
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("SET_KEY_MATERIAL", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProvider.KeyVersion kv =
kp.rollNewVersion("k1", new byte[16]);
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
final KeyVersion currKv =
doAs("GET", new PrivilegedExceptionAction<KeyVersion>() {
@Override
public KeyVersion run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.getKeyVersion("k1@0");
KeyVersion kv = kp.getCurrentKey("k1");
return kv;
} catch (Exception ex) {
Assert.fail(ex.toString());
}
return null;
}
});
final EncryptedKeyVersion encKv =
doAs("GENERATE_EEK",
new PrivilegedExceptionAction<EncryptedKeyVersion>() {
@Override
public EncryptedKeyVersion run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
EncryptedKeyVersion ek1 =
kpCE.generateEncryptedKey(currKv.getName());
return ek1;
} catch (Exception ex) {
Assert.fail(ex.toString());
}
return null;
}
});
doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
kpCE.reencryptEncryptedKey(encKv);
List<EncryptedKeyVersion> ekvs = new ArrayList<>(2);
ekvs.add(encKv);
ekvs.add(encKv);
kpCE.reencryptEncryptedKeys(ekvs);
return null;
}
});
doAs("DECRYPT_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
kpCE.decryptEncryptedKey(encKv);
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("GET_KEYS", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.getKeys();
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("GET_METADATA", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
kp.getMetadata("k1");
kp.getKeysMetadata("k1");
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
// Stop the reloader to avoid it running while we write the new file.
KMSWebApp.getACLs().stopReloader();
GenericTestUtils.setLogLevel(KMSConfiguration.LOG, Level.TRACE);
// test ACL reloading
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "foo");
conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(), "foo");
writeConf(testDir, conf);
KMSWebApp.getACLs().forceNextReloadForTesting();
KMSWebApp.getACLs().run(); // forcing a reload by hand.
// should not be able to create a key now
doAs("CREATE", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("k2",
new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
//NOP
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
kpCE.generateEncryptedKey("k1");
} catch (IOException ex) {
// This isn't an AuthorizationException because generate goes
// through the ValueQueue. See KMSCP#generateEncryptedKey.
if (ex.getCause().getCause() instanceof AuthorizationException) {
LOG.info("Caught expected exception.", ex);
} else {
throw ex;
}
}
return null;
}
});
doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
kpCE.reencryptEncryptedKey(encKv);
fail("Should not have been able to reencryptEncryptedKey");
} catch (AuthorizationException ex) {
LOG.info("reencryptEncryptedKey caught expected exception.", ex);
}
return null;
}
});
doAs("GENERATE_EEK", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
try {
KeyProviderCryptoExtension kpCE = KeyProviderCryptoExtension.
createKeyProviderCryptoExtension(kp);
List<EncryptedKeyVersion> ekvs = new ArrayList<>(2);
ekvs.add(encKv);
ekvs.add(encKv);
kpCE.reencryptEncryptedKeys(ekvs);
fail("Should not have been able to reencryptEncryptedKeys");
} catch (AuthorizationException ex) {
LOG.info("reencryptEncryptedKeys caught expected exception.", ex);
}
return null;
}
});
return null;
}
});
}
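/**
 * Verifies that blacklisted users are denied DECRYPT_EEK even though the
 * corresponding ACL would otherwise allow them.
 */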
@Test
public void testKMSBlackList() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), " ");
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.GENERATE_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.DECRYPT_EEK.getAclConfigKey(), "client,hdfs,otheradmin");
conf.set(KMSACLs.Type.DECRYPT_EEK.getBlacklistConfigKey(), "hdfs,otheradmin");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "ck0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "ck1.ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck0",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
((CryptoExtension)kp).generateEncryptedKey("ck0");
((CryptoExtension)kp).decryptEncryptedKey(eek);
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("hdfs", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck1",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
((CryptoExtension)kp).generateEncryptedKey("ck1");
((CryptoExtension)kp).decryptEncryptedKey(eek);
Assert.fail("admin user must not be allowed to decrypt !!");
} catch (Exception ex) {
}
return null;
}
});
doAs("otheradmin", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck2",
new KeyProvider.Options(conf));
EncryptedKeyVersion eek =
((CryptoExtension)kp).generateEncryptedKey("ck2");
((CryptoExtension)kp).decryptEncryptedKey(eek);
Assert.fail("admin user must not be allowed to decrypt !!");
} catch (Exception ex) {
}
return null;
}
});
return null;
}
});
}
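/**
 * Verifies that both a simple principal (client) and a service principal
 * (client/host) are authorized by the default MANAGEMENT key ACL.
 */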
@Test
public void testServicePrincipalACLs() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), " ");
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(), "client");
conf.set(KMSConfiguration.DEFAULT_KEY_ACL_PREFIX + "MANAGEMENT", "client,client/host");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck0",
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
doAs("client/host", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
KeyProvider.KeyVersion kv = kp.createKey("ck1",
new KeyProvider.Options(conf));
Assert.assertNull(kv.getMaterial());
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
return null;
}
});
}
/**
* Test the configurable timeout in the KMSClientProvider. Open up a
* socket, but don't accept connections for it. This leads to a timeout
* when the KMS client attempts to connect.
* @throws Exception
*/
@Test
public void testKMSTimeout() throws Exception {
File confDir = getTestDir();
Configuration conf = createBaseKMSConf(confDir);
conf.setInt(CommonConfigurationKeysPublic.KMS_CLIENT_TIMEOUT_SECONDS, 1);
writeConf(confDir, conf);
ServerSocket sock;
int port;
try {
sock = new ServerSocket(0, 50, InetAddress.getByName("localhost"));
port = sock.getLocalPort();
} catch (Exception e) {
/* Problem creating the socket? Just bail out of the test. */
return;
}
URL url = new URL("http://localhost:" + port + "/kms");
URI uri = createKMSUri(url);
boolean caughtTimeout = false;
try {
KeyProvider kp = createProvider(uri, conf);
kp.getKeys();
} catch (SocketTimeoutException e) {
caughtTimeout = true;
} catch (IOException e) {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
Assert.assertTrue(caughtTimeout);
caughtTimeout = false;
try {
KeyProvider kp = createProvider(uri, conf);
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
.generateEncryptedKey("a");
} catch (SocketTimeoutException e) {
caughtTimeout = true;
} catch (IOException e) {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
Assert.assertTrue(caughtTimeout);
caughtTimeout = false;
try {
KeyProvider kp = createProvider(uri, conf);
KeyProviderCryptoExtension.createKeyProviderCryptoExtension(kp)
.decryptEncryptedKey(
new KMSClientProvider.KMSEncryptedKeyVersion("a",
"a", new byte[] {1, 2}, "EEK", new byte[] {1, 2}));
} catch (SocketTimeoutException e) {
caughtTimeout = true;
} catch (IOException e) {
Assert.assertTrue("Caught unexpected exception" + e.toString(), false);
}
Assert.assertTrue(caughtTimeout);
sock.close();
}
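/**
 * Verifies that a KMS delegation token obtained by a Kerberos user allows
 * a non-Kerberos UGI to perform key operations, while contexts without
 * the token fail.
 */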
@Test
public void testDelegationTokenAccess() throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
final String keyA = "key_a";
final String keyD = "key_d";
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + keyA + ".ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + keyD + ".ALL", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
final Credentials credentials = new Credentials();
final UserGroupInformation nonKerberosUgi =
UserGroupInformation.getCurrentUser();
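// Should fail: this context has neither Kerberos credentials nor a
// KMS delegation token yet.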
try {
KeyProvider kp = createProvider(uri, conf);
kp.createKey(keyA, new KeyProvider.Options(conf));
} catch (IOException ex) {
System.out.println(ex.getMessage());
}
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
KeyProviderDelegationTokenExtension kpdte =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(kp);
kpdte.addDelegationTokens("foo", credentials);
return null;
}
});
nonKerberosUgi.addCredentials(credentials);
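// Still outside nonKerberosUgi.doAs(), so this attempt should fail too.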
try {
KeyProvider kp = createProvider(uri, conf);
kp.createKey(keyA, new KeyProvider.Options(conf));
} catch (IOException ex) {
System.out.println(ex.getMessage());
}
nonKerberosUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey(keyD, new KeyProvider.Options(conf));
return null;
}
});
return null;
}
});
}
private Configuration setupConfForKerberos(File confDir) throws Exception {
final Configuration conf = createBaseKMSConf(confDir, null);
conf.set("hadoop.security.authentication", "kerberos");
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal",
"HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
return conf;
}
@Test
public void testDelegationTokensOpsHttpPseudo() throws Exception {
testDelegationTokensOps(false, false);
}
@Test
public void testDelegationTokensOpsHttpKerberized() throws Exception {
testDelegationTokensOps(false, true);
}
@Test
public void testDelegationTokensOpsHttpsPseudo() throws Exception {
testDelegationTokensOps(true, false);
}
@Test
public void testDelegationTokensOpsHttpsKerberized() throws Exception {
testDelegationTokensOps(true, true);
}
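/**
 * Verifies delegation token retrieval, renewal (allowed only for the
 * designated renewer, client1), and cancellation over the given transport
 * and authentication combination.
 */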
private void testDelegationTokensOps(final boolean ssl, final boolean kerb)
throws Exception {
final File confDir = getTestDir();
final Configuration conf;
if (kerb) {
conf = setupConfForKerberos(confDir);
} else {
conf = createBaseKMSConf(confDir, null);
}
final String keystore;
final String password;
if (ssl) {
final String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestKMS.class);
KeyStoreTestUtil.setupSSLConfig(confDir.getAbsolutePath(), sslConfDir,
conf, false);
keystore = confDir.getAbsolutePath() + "/serverKS.jks";
password = "serverP";
} else {
keystore = null;
password = null;
}
writeConf(confDir, conf);
runServer(keystore, password, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration clientConf = new Configuration();
final URI uri = createKMSUri(getKMSUrl());
clientConf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
createKMSUri(getKMSUrl()).toString());
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, clientConf);
// test delegation token retrieval
KeyProviderDelegationTokenExtension kpdte =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(kp);
final Credentials credentials = new Credentials();
final Token<?>[] tokens =
kpdte.addDelegationTokens("client1", credentials);
Assert.assertEquals(1, credentials.getAllTokens().size());
InetSocketAddress kmsAddr =
new InetSocketAddress(getKMSUrl().getHost(),
getKMSUrl().getPort());
Assert.assertEquals(KMSDelegationToken.TOKEN_KIND,
credentials.getToken(SecurityUtil.buildTokenService(kmsAddr)).
getKind());
// Test non-renewer user cannot renew.
for (Token<?> token : tokens) {
if (!(token.getKind().equals(KMSDelegationToken.TOKEN_KIND))) {
LOG.info("Skipping token {}", token);
continue;
}
LOG.info("Got dt for " + uri + "; " + token);
try {
token.renew(clientConf);
Assert.fail("client should not be allowed to renew token with"
+ "renewer=client1");
} catch (Exception e) {
final DelegationTokenIdentifier identifier =
(DelegationTokenIdentifier) token.decodeIdentifier();
GenericTestUtils.assertExceptionContains(
"tries to renew a token (" + identifier
+ ") with non-matching renewer", e);
}
}
final UserGroupInformation otherUgi;
if (kerb) {
UserGroupInformation
.loginUserFromKeytab("client1", keytab.getAbsolutePath());
otherUgi = UserGroupInformation.getLoginUser();
} else {
otherUgi = UserGroupInformation.createUserForTesting("client1",
new String[] {"other group"});
UserGroupInformation.setLoginUser(otherUgi);
}
try {
// test delegation token renewal via renewer
otherUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
boolean renewed = false;
for (Token<?> token : tokens) {
if (!(token.getKind()
.equals(KMSDelegationToken.TOKEN_KIND))) {
LOG.info("Skipping token {}", token);
continue;
}
LOG.info("Got dt for " + uri + "; " + token);
long tokenLife = token.renew(clientConf);
LOG.info("Renewed token of kind {}, new lifetime:{}",
token.getKind(), tokenLife);
Thread.sleep(100);
long newTokenLife = token.renew(clientConf);
LOG.info("Renewed token of kind {}, new lifetime:{}",
token.getKind(), newTokenLife);
Assert.assertTrue(newTokenLife > tokenLife);
renewed = true;
}
Assert.assertTrue(renewed);
// test delegation token cancellation
for (Token<?> token : tokens) {
if (!(token.getKind()
.equals(KMSDelegationToken.TOKEN_KIND))) {
LOG.info("Skipping token {}", token);
continue;
}
LOG.info("Got dt for " + uri + "; " + token);
token.cancel(clientConf);
LOG.info("Cancelled token of kind {}", token.getKind());
try {
token.renew(clientConf);
Assert
.fail("should not be able to renew a canceled token");
} catch (Exception e) {
LOG.info("Expected exception when renewing token", e);
}
}
return null;
}
});
// Close the client provider. We will verify all providers'
// Truststore reloader threads are closed later.
kp.close();
return null;
} finally {
otherUgi.logoutUserFromKeytab();
}
}
});
return null;
}
});
// verify that providers created by KMSTokenRenewer are closed.
if (ssl) {
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
final Set<Thread> threadSet = Thread.getAllStackTraces().keySet();
for (Thread t : threadSet) {
if (t.getName().contains(SSL_RELOADER_THREAD_NAME)) {
return false;
}
}
return true;
}
}, 1000, 10000);
}
}
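/**
 * With a 5s token lifetime, verifies that an expired delegation token in
 * the UGI makes operations fail and that adding a freshly issued token
 * makes them succeed again.
 */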
@Test
public void testDelegationTokensUpdatedInUGI() throws Exception {
Configuration conf = new Configuration();
File confDir = getTestDir();
conf = createBaseKMSConf(confDir, conf);
conf.set(
"hadoop.kms.authentication.delegation-token.max-lifetime.sec", "5");
conf.set(
"hadoop.kms.authentication.delegation-token.renew-interval.sec", "5");
writeConf(confDir, conf);
// Running as a service (e.g. YARN in practice).
runServer(null, null, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration clientConf = new Configuration();
final URI uri = createKMSUri(getKMSUrl());
clientConf.set(KeyProviderFactory.KEY_PROVIDER_PATH,
createKMSUri(getKMSUrl()).toString());
final KeyProvider kp = createProvider(uri, clientConf);
final KeyProviderDelegationTokenExtension kpdte =
KeyProviderDelegationTokenExtension.
createKeyProviderDelegationTokenExtension(kp);
final InetSocketAddress kmsAddr =
new InetSocketAddress(getKMSUrl().getHost(), getKMSUrl().getPort());
// Job 1 (e.g. YARN log aggregation job), with user DT.
final Collection<Token<?>> job1Token = new HashSet<>();
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
// Get a DT and use it.
final Credentials credentials = new Credentials();
kpdte.addDelegationTokens("client", credentials);
Assert.assertEquals(1, credentials.getAllTokens().size());
Assert.assertEquals(KMSDelegationToken.TOKEN_KIND, credentials.
getToken(SecurityUtil.buildTokenService(kmsAddr)).getKind());
UserGroupInformation.getCurrentUser().addCredentials(credentials);
LOG.info("Added kms dt to credentials: {}", UserGroupInformation.
getCurrentUser().getCredentials().getAllTokens());
Token<?> token =
UserGroupInformation.getCurrentUser().getCredentials()
.getToken(SecurityUtil.buildTokenService(kmsAddr));
Assert.assertNotNull(token);
job1Token.add(token);
// Decode the token to get max time.
ByteArrayInputStream buf =
new ByteArrayInputStream(token.getIdentifier());
DataInputStream dis = new DataInputStream(buf);
DelegationTokenIdentifier id =
new DelegationTokenIdentifier(token.getKind());
id.readFields(dis);
dis.close();
final long maxTime = id.getMaxDate();
// wait for token to expire.
Thread.sleep(5100);
Assert.assertTrue("maxTime " + maxTime + " is not less than now.",
maxTime > 0 && maxTime < Time.now());
try {
kp.getKeys();
Assert.fail("Operation should fail since dt is expired.");
} catch (Exception e) {
LOG.info("Expected error.", e);
}
return null;
}
});
Assert.assertFalse(job1Token.isEmpty());
        // Job 2 (e.g. another YARN log aggregation job), with user DT.
doAs("client", new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
// Get a new DT, but don't use it yet.
final Credentials newCreds = new Credentials();
kpdte.addDelegationTokens("client", newCreds);
Assert.assertEquals(1, newCreds.getAllTokens().size());
Assert.assertEquals(KMSDelegationToken.TOKEN_KIND,
newCreds.getToken(SecurityUtil.buildTokenService(kmsAddr)).
getKind());
// Using job 1's DT should fail.
final Credentials oldCreds = new Credentials();
for (Token<?> token : job1Token) {
if (token.getKind().equals(KMSDelegationToken.TOKEN_KIND)) {
oldCreds
.addToken(SecurityUtil.buildTokenService(kmsAddr), token);
}
}
UserGroupInformation.getCurrentUser().addCredentials(oldCreds);
LOG.info("Added old kms dt to credentials: {}", UserGroupInformation
.getCurrentUser().getCredentials().getAllTokens());
try {
kp.getKeys();
Assert.fail("Operation should fail since dt is expired.");
} catch (Exception e) {
LOG.info("Expected error.", e);
}
// Using the new DT should succeed.
Assert.assertEquals(1, newCreds.getAllTokens().size());
Assert.assertEquals(KMSDelegationToken.TOKEN_KIND,
newCreds.getToken(SecurityUtil.buildTokenService(kmsAddr)).
getKind());
UserGroupInformation.getCurrentUser().addCredentials(newCreds);
LOG.info("Credetials now are: {}", UserGroupInformation
.getCurrentUser().getCredentials().getAllTokens());
kp.getKeys();
return null;
}
});
return null;
}
});
}
@Test
public void testKMSWithZKSigner() throws Exception {
doKMSWithZK(true, false);
}
@Test
public void testKMSWithZKDTSM() throws Exception {
doKMSWithZK(false, true);
}
@Test
public void testKMSWithZKSignerAndDTSM() throws Exception {
doKMSWithZK(true, true);
}
  public void doKMSWithZK(boolean zkSigner, boolean zkDTSM) throws Exception {
TestingServer zkServer = null;
try {
zkServer = new TestingServer();
zkServer.start();
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab", keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
if (zkSigner) {
conf.set("hadoop.kms.authentication.signer.secret.provider", "zookeeper");
conf.set("hadoop.kms.authentication.signer.secret.provider.zookeeper.path","/testKMSWithZKDTSM");
conf.set("hadoop.kms.authentication.signer.secret.provider.zookeeper.connection.string",zkServer.getConnectString());
}
if (zkDTSM) {
conf.set("hadoop.kms.authentication.zk-dt-secret-manager.enable", "true");
}
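      // Explicit ZK connection settings for the secret manager are only
      // needed when the ZooKeeper signer is off (presumably it can otherwise
      // reuse the signer's ZooKeeper connection).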
if (zkDTSM && !zkSigner) {
conf.set("hadoop.kms.authentication.zk-dt-secret-manager.zkConnectionString", zkServer.getConnectString());
conf.set("hadoop.kms.authentication.zk-dt-secret-manager.znodeWorkingPath", "testZKPath");
conf.set("hadoop.kms.authentication.zk-dt-secret-manager.zkAuthType", "none");
}
for (KMSACLs.Type type : KMSACLs.Type.values()) {
conf.set(type.getAclConfigKey(), type.toString());
}
conf.set(KMSACLs.Type.CREATE.getAclConfigKey(),
KMSACLs.Type.CREATE.toString() + ",SET_KEY_MATERIAL");
conf.set(KMSACLs.Type.ROLLOVER.getAclConfigKey(),
KMSACLs.Type.ROLLOVER.toString() + ",SET_KEY_MATERIAL");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k0.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k1.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k2.ALL", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "k3.ALL", "*");
writeConf(testDir, conf);
KMSCallable<KeyProvider> c =
new KMSCallable<KeyProvider>() {
@Override
public KeyProvider call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
final KeyProvider kp =
doAs("SET_KEY_MATERIAL",
new PrivilegedExceptionAction<KeyProvider>() {
@Override
public KeyProvider run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("k1", new byte[16],
new KeyProvider.Options(conf));
kp.createKey("k2", new byte[16],
new KeyProvider.Options(conf));
kp.createKey("k3", new byte[16],
new KeyProvider.Options(conf));
return kp;
}
});
return kp;
}
};
runServer(null, null, testDir, c);
} finally {
if (zkServer != null) {
zkServer.stop();
zkServer.close();
}
}
}
@Test
public void testProxyUserKerb() throws Exception {
doProxyUserTest(true);
}
@Test
public void testProxyUserSimple() throws Exception {
doProxyUserTest(false);
}
public void doProxyUserTest(final boolean kerberos) throws Exception {
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
if (kerberos) {
conf.set("hadoop.kms.authentication.type", "kerberos");
}
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.kms.proxyuser.client.users", "foo,bar");
conf.set("hadoop.kms.proxyuser.client.hosts", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kaa.ALL", "client");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kbb.ALL", "foo");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kcc.ALL", "foo1");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kdd.ALL", "bar");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
UserGroupInformation proxyUgi = null;
if (kerberos) {
// proxyuser client using kerberos credentials
proxyUgi = UserGroupInformation.
loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath());
} else {
proxyUgi = UserGroupInformation.createRemoteUser("client");
UserGroupInformation.setLoginUser(proxyUgi);
}
final UserGroupInformation clientUgi = proxyUgi;
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
final KeyProvider kp = createProvider(uri, conf);
kp.createKey("kaa", new KeyProvider.Options(conf));
// authorized proxyuser
UserGroupInformation fooUgi =
UserGroupInformation.createProxyUser("foo", clientUgi);
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Assert.assertNotNull(kp.createKey("kbb",
new KeyProvider.Options(conf)));
return null;
}
});
// unauthorized proxyuser
UserGroupInformation foo1Ugi =
UserGroupInformation.createProxyUser("foo1", clientUgi);
foo1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
kp.createKey("kcc", new KeyProvider.Options(conf));
Assert.fail();
} catch (AuthorizationException ex) {
// OK
} catch (Exception ex) {
Assert.fail(ex.getMessage());
}
return null;
}
});
// authorized proxyuser
UserGroupInformation barUgi =
UserGroupInformation.createProxyUser("bar", clientUgi);
barUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Assert.assertNotNull(kp.createKey("kdd",
new KeyProvider.Options(conf)));
return null;
}
});
return null;
}
});
return null;
}
});
}
@Test
public void testWebHDFSProxyUserKerb() throws Exception {
doWebHDFSProxyUserTest(true);
}
@Test
public void testWebHDFSProxyUserSimple() throws Exception {
doWebHDFSProxyUserTest(false);
}
@Test
public void testTGTRenewal() throws Exception {
tearDownMiniKdc();
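    // Restart the KDC with a three second ticket lifetime so the TGT expires
    // mid-test and the code paths below are forced to re-login.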
Properties kdcConf = MiniKdc.createConf();
kdcConf.setProperty(MiniKdc.MAX_TICKET_LIFETIME, "3");
kdcConf.setProperty(MiniKdc.MIN_TICKET_LIFETIME, "3");
setUpMiniKdc(kdcConf);
Configuration conf = new Configuration();
conf.set("hadoop.security.authentication", "kerberos");
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
conf.set("hadoop.kms.authentication.type", "kerberos");
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.kms.proxyuser.client.users", "*");
conf.set("hadoop.kms.proxyuser.client.hosts", "*");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
final URI uri = createKMSUri(getKMSUrl());
UserGroupInformation.setShouldRenewImmediatelyForTests(true);
UserGroupInformation
.loginUserFromKeytab("client", keytab.getAbsolutePath());
final UserGroupInformation clientUgi =
UserGroupInformation.getCurrentUser();
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
// Verify getKeys can relogin
Thread.sleep(3100);
KeyProvider kp = createProvider(uri, conf);
kp.getKeys();
// Verify addDelegationTokens can relogin
// (different code path inside KMSClientProvider than getKeys)
Thread.sleep(3100);
kp = createProvider(uri, conf);
((KeyProviderDelegationTokenExtension.DelegationTokenExtension) kp)
.addDelegationTokens("myuser", new Credentials());
// Verify getKeys can relogin with proxy user
UserGroupInformation anotherUgi =
UserGroupInformation.createProxyUser("client1", clientUgi);
anotherUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
Thread.sleep(3100);
KeyProvider kp = createProvider(uri, conf);
kp.getKeys();
return null;
}
});
return null;
}
});
return null;
}
});
}
public void doWebHDFSProxyUserTest(final boolean kerberos) throws Exception {
Configuration conf = new Configuration();
if (kerberos) {
conf.set("hadoop.security.authentication", "kerberos");
}
UserGroupInformation.setConfiguration(conf);
final File testDir = getTestDir();
conf = createBaseKMSConf(testDir, conf);
if (kerberos) {
conf.set("hadoop.kms.authentication.type", "kerberos");
}
conf.set("hadoop.kms.authentication.kerberos.keytab",
keytab.getAbsolutePath());
conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
conf.set("hadoop.security.kms.client.timeout", "300");
conf.set("hadoop.kms.proxyuser.client.users", "foo,bar");
conf.set("hadoop.kms.proxyuser.client.hosts", "*");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kaa.ALL", "foo");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kbb.ALL", "foo1");
conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kcc.ALL", "bar");
writeConf(testDir, conf);
runServer(null, null, testDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
final Configuration conf = new Configuration();
conf.setInt(KeyProvider.DEFAULT_BITLENGTH_NAME, 128);
final URI uri = createKMSUri(getKMSUrl());
UserGroupInformation proxyUgi = null;
if (kerberos) {
// proxyuser client using kerberos credentials
proxyUgi = UserGroupInformation.
loginUserFromKeytabAndReturnUGI("client", keytab.getAbsolutePath());
} else {
proxyUgi = UserGroupInformation.createRemoteUser("client");
}
final UserGroupInformation clientUgi = proxyUgi;
clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
// authorized proxyuser
UserGroupInformation fooUgi =
UserGroupInformation.createProxyUser("foo", clientUgi);
fooUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
Assert.assertNotNull(kp.createKey("kaa",
new KeyProvider.Options(conf)));
return null;
}
});
// unauthorized proxyuser
UserGroupInformation foo1Ugi =
UserGroupInformation.createProxyUser("foo1", clientUgi);
foo1Ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
try {
KeyProvider kp = createProvider(uri, conf);
kp.createKey("kbb", new KeyProvider.Options(conf));
Assert.fail();
} catch (Exception ex) {
Assert.assertTrue(ex.getMessage(), ex.getMessage().contains("Forbidden"));
}
return null;
}
});
// authorized proxyuser
UserGroupInformation barUgi =
UserGroupInformation.createProxyUser("bar", clientUgi);
barUgi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
KeyProvider kp = createProvider(uri, conf);
Assert.assertNotNull(kp.createKey("kcc",
new KeyProvider.Options(conf)));
return null;
}
});
return null;
}
});
return null;
}
});
}
/*
* Test the jmx page can return, and contains the basic JvmMetrics. Only
* testing in simple mode since the page content is the same, kerberized
* or not.
*/
@Test
public void testKMSJMX() throws Exception {
Configuration conf = new Configuration();
final File confDir = getTestDir();
conf = createBaseKMSConf(confDir, conf);
final String processName = "testkmsjmx";
conf.set(KMSConfiguration.METRICS_PROCESS_NAME_KEY, processName);
writeConf(confDir, conf);
runServer(null, null, confDir, new KMSCallable<Void>() {
@Override
public Void call() throws Exception {
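        // The qry parameter restricts the JMX dump to the JvmMetrics bean
        // registered under the configured metrics process name.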
final URL jmxUrl = new URL(
getKMSUrl() + "/jmx?user.name=whatever&qry=Hadoop:service="
+ processName + ",name=JvmMetrics");
LOG.info("Requesting jmx from " + jmxUrl);
final StringBuilder sb = new StringBuilder();
final InputStream in = jmxUrl.openConnection().getInputStream();
final byte[] buffer = new byte[64 * 1024];
int len;
while ((len = in.read(buffer)) > 0) {
sb.append(new String(buffer, 0, len));
}
LOG.info("jmx returned: " + sb.toString());
assertTrue(sb.toString().contains("JvmMetrics"));
return null;
}
});
}
}
| [
"\"KRB5CCNAME\""
]
| []
| [
"KRB5CCNAME"
]
| [] | ["KRB5CCNAME"] | java | 1 | 0 | |
src/hark_lang/run/lambda_handlers.py | """Handle different kinds of events"""
import json
import logging
import os
from abc import ABC, abstractmethod
from ..machine import types as mt
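# Events produced by the Hark CLI carry this marker key; CliHandler.can_handle
# keys off it below.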
HARK_CLI_VERSION_KEY = "hark_ver"
LOG = logging.getLogger(__name__)
class HarkEventHandler(ABC):
"""An event handler.
Designed so new event handlers can be built without having to interface
    tightly with Hark.
"""
@classmethod
@abstractmethod
def can_handle(cls, event: dict) -> bool:
"""Return whether event can be handled by this class."""
@classmethod
@abstractmethod
def handle(cls, event: dict, new_session, UserResolvableError) -> dict:
"""Handle an event"""
## Concrete Implementations
class CliHandler(HarkEventHandler):
"""Handle invocations by the Hark CLI"""
@classmethod
def can_handle(cls, event: dict):
return HARK_CLI_VERSION_KEY in event
@classmethod
def handle(cls, event: dict, new_session, UserResolvableError) -> dict:
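        # Prefer a timeout from the event itself; otherwise fall back to the
        # FIXED_HARK_TIMEOUT environment variable (default: 5 seconds).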
try:
timeout = int(event["timeout"])
except KeyError:
timeout = int(os.getenv("FIXED_HARK_TIMEOUT", 5))
try:
controller = new_session(
function=event.get("function", "main"),
args=[mt.TlString(a) for a in event.get("args", [])],
check_period=event.get("check_period", 0.2),
wait_for_finish=event.get("wait_for_finish", True),
timeout=timeout,
code_override=event.get("code", None),
)
except UserResolvableError as exc:
return dict(hark_ok=False, message=exc.msg, suggested_fix=exc.suggested_fix)
# The "hark_ok" element is required (see in_own.py)
return dict(
hark_ok=True,
session_id=controller.session_id,
finished=controller.all_stopped(),
broken=controller.broken,
result=controller.get_top_level_result(),
)
class S3Handler(HarkEventHandler):
"""Handle S3 upload events"""
@classmethod
def can_handle(cls, event: dict):
return (
"Records" in event
and len(event["Records"]) > 0
and "eventSource" in event["Records"][0]
and event["Records"][0]["eventSource"] == "aws:s3"
and event["Records"][0]["s3"]["s3SchemaVersion"] == "1.0"
)
@classmethod
def handle(cls, event: dict, new_session, UserResolvableError) -> dict:
"""Get arguments to invoke the upload handler
NOTE: does not wait for the session to finish! All exceptions must end
up in the machine state.
"""
data = event["Records"][0]["s3"]
bucket = data["bucket"]["name"]
key = data["object"]["key"] # NOTE - could check size here
new_session(
function="on_upload", # constant
args=[mt.TlString(bucket), mt.TlString(key)],
wait_for_finish=False,
check_period=None,
timeout=None,
)
class HttpHandler(HarkEventHandler):
"""API Gateway (v1) HTTP endpoint handler"""
@classmethod
def can_handle(cls, event: dict):
return (
"httpMethod" in event and "path" in event and event.get("version") == "1.0"
)
@classmethod
def handle(cls, event: dict, new_session, UserResolvableError) -> dict:
method = event["httpMethod"]
path = event["path"]
controller = new_session(
function="on_http", # constant
args=[mt.to_hark_type(o) for o in (method, path, event)],
wait_for_finish=False,
check_period=0.1,
timeout=10.0, # TODO? make configurable
)
result = controller.get_top_level_result()
LOG.info(f"Finished HTTP handling. Result: {result}")
# Indicate an internal server error to the client
if controller.broken:
raise Exception("Controller broken")
# Try to DWIM: return a dict to do everything yourself, or return a
# string to have this handler do something sensible with it.
# TODO: HTML detection
if isinstance(result, dict) and "statusCode" in result:
return result
if isinstance(result, dict):
ct = "application/json"
body = json.dumps(result)
else:
ct = "text/plain"
body = str(result)
return {
"statusCode": 200,
"headers": {"Content-Type": ct},
"isBase64Encoded": False,
"body": body,
}
# List of all available handlers
ALL_HANDLERS = [CliHandler, S3Handler, HttpHandler]
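# A minimal dispatch sketch (an illustration, not part of the original
# module): a Lambda entry point might walk ALL_HANDLERS and delegate to the
# first handler that claims the event. `new_session` and
# `UserResolvableError` are assumed to be supplied by the caller, as in the
# handlers above; `_dispatch_event` itself is a hypothetical name.
def _dispatch_event(event: dict, new_session, UserResolvableError) -> dict:
    for handler in ALL_HANDLERS:
        if handler.can_handle(event):
            return handler.handle(event, new_session, UserResolvableError)
    raise ValueError("Unhandled event shape: %s" % sorted(event))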
| []
| []
| [
"FIXED_HARK_TIMEOUT"
]
| [] | ["FIXED_HARK_TIMEOUT"] | python | 1 | 0 | |
OA/wsgi.py | """
WSGI config for OA project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "OA.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
mne/conftest.py | # -*- coding: utf-8 -*-
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
from distutils.version import LooseVersion
import gc
import os
import os.path as op
import shutil
import sys
import warnings
import pytest
# For some unknown reason, on Travis-xenial there are segfaults caused on
# the line pytest -> pdb.Pdb.__init__ -> "import readline". Forcing an
# import here seems to prevent them (!?). This suggests a potential problem
# with some other library stepping on memory where it shouldn't. It only
# seems to happen on the Linux runs that install Mayavi. Anecdotally,
# @larsoner had problems a couple of years ago where a mayavi import
# seemed to corrupt SciPy linalg function results (!), likely due to the
# associated VTK import, so this could be another manifestation of that.
try:
import readline # noqa
except Exception:
pass
import numpy as np
import mne
from mne.datasets import testing
from mne.fixes import _fn35
test_path = testing.data_path(download=False)
s_path = op.join(test_path, 'MEG', 'sample')
fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
subjects_dir = op.join(test_path, 'subjects')
def pytest_configure(config):
"""Configure pytest options."""
# Markers
for marker in ('slowtest', 'ultraslowtest'):
config.addinivalue_line('markers', marker)
# Fixtures
for fixture in ('matplotlib_config', 'fix_pytest_tmpdir_35'):
config.addinivalue_line('usefixtures', fixture)
# Warnings
# - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0)
# we should remove them from here.
# - This list should also be considered alongside reset_warnings in
# doc/conf.py.
warning_lines = r"""
error::
ignore::ImportWarning
ignore:the matrix subclass:PendingDeprecationWarning
ignore:numpy.dtype size changed:RuntimeWarning
ignore:.*HasTraits.trait_.*:DeprecationWarning
ignore:.*takes no parameters:DeprecationWarning
ignore:joblib not installed:RuntimeWarning
ignore:Using a non-tuple sequence for multidimensional indexing:FutureWarning
ignore:using a non-integer number instead of an integer will result in an error:DeprecationWarning
ignore:Importing from numpy.testing.decorators is deprecated:DeprecationWarning
ignore:np.loads is deprecated, use pickle.loads instead:DeprecationWarning
ignore:The oldnumeric module will be dropped:DeprecationWarning
ignore:Collection picker None could not be converted to float:UserWarning
ignore:covariance is not positive-semidefinite:RuntimeWarning
ignore:Can only plot ICA components:RuntimeWarning
ignore:Matplotlib is building the font cache using fc-list:UserWarning
ignore:Using or importing the ABCs from 'collections':DeprecationWarning
ignore:`formatargspec` is deprecated:DeprecationWarning
# This is only necessary until sklearn updates their wheels for NumPy 1.16
ignore:numpy.ufunc size changed:RuntimeWarning
ignore:.*mne-realtime.*:DeprecationWarning
ignore:.*imp.*:DeprecationWarning
ignore:Exception creating Regex for oneOf.*:SyntaxWarning
ignore:scipy\.gradient is deprecated.*:DeprecationWarning
ignore:sklearn\.externals\.joblib is deprecated.*:FutureWarning
ignore:The sklearn.*module.*deprecated.*:FutureWarning
ignore:.*TraitTuple.*trait.*handler.*deprecated.*:DeprecationWarning
ignore:.*rich_compare.*metadata.*deprecated.*:DeprecationWarning
ignore:.*In future, it will be an error for 'np.bool_'.*:DeprecationWarning
ignore:.*Converting `np\.character` to a dtype is deprecated.*:DeprecationWarning
ignore:.*sphinx\.util\.smartypants is deprecated.*:
ignore:.*pandas\.util\.testing is deprecated.*:
ignore:.*tostring.*is deprecated.*:DeprecationWarning
always:.*get_data.* is deprecated in favor of.*:DeprecationWarning
""" # noqa: E501
for warning_line in warning_lines.split('\n'):
warning_line = warning_line.strip()
if warning_line and not warning_line.startswith('#'):
config.addinivalue_line('filterwarnings', warning_line)
@pytest.fixture(scope='session')
def matplotlib_config():
"""Configure matplotlib for viz tests."""
import matplotlib
from matplotlib import cbook
# "force" should not really be necessary but should not hurt
kwargs = dict()
with warnings.catch_warnings(record=True): # ignore warning
warnings.filterwarnings('ignore')
matplotlib.use('agg', force=True, **kwargs) # don't pop up windows
import matplotlib.pyplot as plt
assert plt.get_backend() == 'agg'
# overwrite some params that can horribly slow down tests that
# users might have changed locally (but should not otherwise affect
# functionality)
plt.ioff()
plt.rcParams['figure.dpi'] = 100
try:
from traits.etsconfig.api import ETSConfig
except Exception:
pass
else:
ETSConfig.toolkit = 'qt4'
# Make sure that we always reraise exceptions in handlers
orig = cbook.CallbackRegistry
class CallbackRegistryReraise(orig):
def __init__(self, exception_handler=None):
args = ()
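            # matplotlib >= 2.1 accepts an exception_handler argument;
            # forward it (None here) so exceptions raised inside callbacks
            # propagate instead of being swallowed, matching the class name.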
if LooseVersion(matplotlib.__version__) >= LooseVersion('2.1'):
args += (exception_handler,)
super(CallbackRegistryReraise, self).__init__(*args)
cbook.CallbackRegistry = CallbackRegistryReraise
@pytest.fixture()
def check_gui_ci():
"""Skip tests that are not reliable on CIs."""
osx = (os.getenv('TRAVIS', 'false').lower() == 'true' and
sys.platform == 'darwin')
win = os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true'
if win or osx:
pytest.skip('Skipping GUI tests on Travis OSX and Azure Windows')
def _replace(mod, key):
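    # Wrap the original function so its first argument goes through _fn35
    # first, coercing path-like objects (e.g. pytest's LocalPath) into a form
    # the Python 3.5 stdlib accepts.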
orig = getattr(mod, key)
def func(x, *args, **kwargs):
return orig(_fn35(x), *args, **kwargs)
setattr(mod, key, func)
@pytest.fixture(scope='session')
def fix_pytest_tmpdir_35():
"""Deal with tmpdir being a LocalPath, which bombs on 3.5."""
if sys.version_info >= (3, 6):
return
for key in ('stat', 'mkdir', 'makedirs', 'access'):
_replace(os, key)
for key in ('split', 'splitext', 'realpath', 'join', 'basename'):
_replace(op, key)
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def evoked():
"""Get evoked data."""
evoked = mne.read_evokeds(fname_evoked, condition='Left Auditory',
baseline=(None, 0))
evoked.crop(0, 0.2)
return evoked
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def noise_cov():
"""Get a noise cov from the testing dataset."""
return mne.read_cov(fname_cov)
@pytest.fixture(scope='function')
def bias_params_free(evoked, noise_cov):
"""Provide inputs for free bias functions."""
fwd = mne.read_forward_solution(fname_fwd)
return _bias_params(evoked, noise_cov, fwd)
@pytest.fixture(scope='function')
def bias_params_fixed(evoked, noise_cov):
"""Provide inputs for fixed bias functions."""
fwd = mne.read_forward_solution(fname_fwd)
fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True)
return _bias_params(evoked, noise_cov, fwd)
def _bias_params(evoked, noise_cov, fwd):
evoked.pick_types(meg=True, eeg=True, exclude=())
# restrict to limited set of verts (small src here) and one hemi for speed
vertices = [fwd['src'][0]['vertno'].copy(), []]
stc = mne.SourceEstimate(np.zeros((sum(len(v) for v in vertices), 1)),
vertices, 0., 1.)
fwd = mne.forward.restrict_forward_to_stc(fwd, stc)
assert fwd['sol']['row_names'] == noise_cov['names']
assert noise_cov['names'] == evoked.ch_names
evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info)
data_cov = noise_cov.copy()
data_cov['data'] = np.dot(fwd['sol']['data'], fwd['sol']['data'].T)
assert data_cov['data'].shape[0] == len(noise_cov['names'])
want = np.arange(fwd['sol']['data'].shape[1])
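    # A free-orientation forward has three columns (x, y, z) per source
    # location, so fold each column index back onto its source location.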
if not mne.forward.is_fixed_orient(fwd):
want //= 3
return evoked, fwd, noise_cov, data_cov, want
@pytest.fixture(scope="module", params=[
"mayavi",
"pyvista",
])
def backend_name(request):
"""Get the backend name."""
yield request.param
@pytest.yield_fixture
def renderer(backend_name, garbage_collect):
"""Yield the 3D backends."""
from mne.viz.backends.renderer import _use_test_3d_backend
_check_skip_backend(backend_name)
with _use_test_3d_backend(backend_name):
from mne.viz.backends import renderer
yield renderer
renderer.backend._close_all()
@pytest.yield_fixture
def garbage_collect():
"""Garbage collect on exit."""
yield
gc.collect()
@pytest.fixture(scope="module", params=[
"pyvista",
"mayavi",
])
def backend_name_interactive(request):
"""Get the backend name."""
yield request.param
@pytest.yield_fixture
def renderer_interactive(backend_name_interactive):
"""Yield the 3D backends."""
from mne.viz.backends.renderer import _use_test_3d_backend
_check_skip_backend(backend_name_interactive)
with _use_test_3d_backend(backend_name_interactive, interactive=True):
from mne.viz.backends import renderer
yield renderer
renderer.backend._close_all()
def _check_skip_backend(name):
from mne.viz.backends.tests._utils import has_mayavi, has_pyvista
if name == 'mayavi':
if not has_mayavi():
pytest.skip("Test skipped, requires mayavi.")
elif name == 'pyvista':
if not has_pyvista():
pytest.skip("Test skipped, requires pyvista.")
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def subjects_dir_tmp(tmpdir):
"""Copy MNE-testing-data subjects_dir to a temp dir for manipulation."""
for key in ('sample', 'fsaverage'):
shutil.copytree(op.join(subjects_dir, key), str(tmpdir.join(key)))
return str(tmpdir)
| []
| []
| [
"AZURE_CI_WINDOWS",
"TRAVIS"
]
| [] | ["AZURE_CI_WINDOWS", "TRAVIS"] | python | 2 | 0 |