filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129, ⌀) | variableargcount (float64, 0, ⌀) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
docs/source/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# nbconvert documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 9 17:11:30 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Automatically generate config_options.rst
with open(os.path.join(os.path.dirname(__file__), '..', 'autogen_config.py')) as f:
exec(compile(f.read(), 'autogen_config.py', 'exec'), {})
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.ipynb']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'nbconvert'
from datetime import date
year = date.today().year
copyright = '2015-%s, Jupyter Development Team' % year
author = 'Jupyter Development Team'
extlinks = {'ghpull': ('https://github.com/jupyter/nbconvert/pull/%s', 'PR #')}
linkcheck_ignore = [
'https://github.com/jupyter/nbconvert/pull/',
]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Get information from _version.py and use it to generate version and release
_version_py = '../../nbconvert/_version.py'
version_ns = {}
exec(compile(open(_version_py).read(), _version_py, 'exec'), version_ns)
# The short X.Y version.
version = '%i.%i' % version_ns['version_info'][:2]
# The full version, including alpha/beta/rc tags.
release = version_ns['__version__']
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.ipynb_checkpoints', 'example.ipynb']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# Set on_rtd to whether we are building on readthedocs.org. This line of code
# is taken from docs.readthedocs.org.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their default theme, so no need to specify it
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'nbconvertdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nbconvert.tex', 'nbconvert Documentation',
'Jupyter Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'nbconvert', 'nbconvert Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'nbconvert', 'nbconvert Documentation',
author, 'nbconvert', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3.6', None),
'jinja': ('http://jinja.pocoo.org/docs/dev', None),
'nbformat': ('https://nbformat.readthedocs.io/en/latest', None),
}
| [] | [] | ["READTHEDOCS"] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
client/client.go
|
package client
import (
"bytes"
"fmt"
"net/http"
"net/url"
"os"
)
type Client struct {
Server string // GITGUARDIAN_SERVER
ApiKey string // GITGUARDIAN_API_KEY
Client HttpRequest
}
type ClientOption func(*Client) error
type HttpRequest interface {
Do(req *http.Request) (*http.Response, error)
}
func New(opts ...ClientOption) (*Client, error) {
client := Client{}
// Add all the provided options to the client
for _, v := range opts {
if err := v(&client); err != nil {
return nil, err
}
}
if client.Client == nil {
client.Client = &http.Client{}
}
if client.Server == "" {
client.Server = os.Getenv("GITGUARDIAN_SERVER")
}
if client.Server == "" {
client.Server = "https://api.gitguardian.com"
}
if client.ApiKey == "" {
client.ApiKey = os.Getenv("GITGUARDIAN_API_KEY")
}
if client.ApiKey == "" {
return nil, fmt.Errorf("GITGUARDIAN_API_KEY is not set")
}
return &client, nil
}
func (c *Client) NewRequest(method string, path string, payload *bytes.Buffer) (*http.Request, error) {
// Convert server address to *url.URL
serverURL, err := url.Parse(c.Server)
if err != nil {
return nil, err
}
// Add path to server address
queryURL, err := serverURL.Parse(path)
if err != nil {
return nil, err
}
var req *http.Request
if payload == nil {
req, err = http.NewRequest(method, queryURL.String(), nil)
} else {
req, err = http.NewRequest(method, queryURL.String(), payload)
}
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Authorization", "Token "+c.ApiKey)
return req, nil
}
func WithHTTPClient(ht HttpRequest) ClientOption {
return func(c *Client) error {
c.Client = ht
return nil
}
}
func WithServer(server string) ClientOption {
return func(c *Client) error {
c.Server = server
return nil
}
}
func WithApiKey(apiKey string) ClientOption {
return func(c *Client) error {
c.ApiKey = apiKey
return nil
}
}
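// The sketch below is an editorial addition, not part of the original file: a
// hypothetical usage example showing how the functional options above might be
// combined. The server URL, the API key value and the "/v1/health" path are
// illustrative assumptions only.
func exampleUsage() {
	c, err := New(
		WithServer("https://api.gitguardian.com"),
		WithApiKey("example-api-key"),
	)
	if err != nil {
		fmt.Println("client setup failed:", err)
		return
	}
	req, err := c.NewRequest(http.MethodGet, "/v1/health", nil)
	if err != nil {
		fmt.Println("request setup failed:", err)
		return
	}
	// The request would then be executed with the injected HTTP client:
	// resp, err := c.Client.Do(req)
	_ = req
}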
| ["\"GITGUARDIAN_SERVER\"", "\"GITGUARDIAN_API_KEY\""] | [] | ["GITGUARDIAN_SERVER", "GITGUARDIAN_API_KEY"] | [] | ["GITGUARDIAN_SERVER", "GITGUARDIAN_API_KEY"] | go | 2 | 0 | |
test/e2e/e2e_suite_test.go
|
/*
Copyright The KubeDB Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_test
import (
"flag"
"os"
"path/filepath"
"testing"
"time"
cs "kubedb.dev/apimachinery/client/clientset/versioned"
"kubedb.dev/apimachinery/client/clientset/versioned/scheme"
"kubedb.dev/mongodb/test/e2e/framework"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
kext_cs "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/kubernetes"
clientSetScheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/util/homedir"
ka "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"kmodules.xyz/client-go/logs"
"kmodules.xyz/client-go/tools/clientcmd"
appcat_cs "kmodules.xyz/custom-resources/client/clientset/versioned/typed/appcatalog/v1alpha1"
scs "stash.appscode.dev/stash/client/clientset/versioned"
)
// To Run E2E tests:
// - For selfhosted Operator: ./hack/make.py test e2e --selfhosted-operator=true (--storageclass=standard) (--ginkgo.flakeAttempts=2)
// - For non selfhosted Operator: ./hack/make.py test e2e (--docker-registry=kubedb) (--storageclass=standard) (--ginkgo.flakeAttempts=2)
// () => Optional
var (
storageClass = "standard"
kubeconfigPath = func() string {
kubecfg := os.Getenv("KUBECONFIG")
if kubecfg != "" {
return kubecfg
}
return filepath.Join(homedir.HomeDir(), ".kube", "config")
}()
kubeContext = ""
)
func init() {
utilruntime.Must(scheme.AddToScheme(clientSetScheme.Scheme))
flag.StringVar(&kubeconfigPath, "kubeconfig", kubeconfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
flag.StringVar(&kubeContext, "kube-context", "", "Name of kube context")
flag.StringVar(&storageClass, "storageclass", storageClass, "Kubernetes StorageClass name")
flag.StringVar(&framework.DockerRegistry, "docker-registry", framework.DockerRegistry, "User provided docker repository")
flag.StringVar(&framework.DBCatalogName, "db-catalog", framework.DBCatalogName, "MongoDB version")
flag.BoolVar(&framework.SelfHostedOperator, "selfhosted-operator", framework.SelfHostedOperator, "Enable this for provided controller")
}
const (
TIMEOUT = 20 * time.Minute
)
var (
root *framework.Framework
)
func TestE2e(t *testing.T) {
logs.InitLogs()
defer logs.FlushLogs()
RegisterFailHandler(Fail)
SetDefaultEventuallyTimeout(TIMEOUT)
junitReporter := reporters.NewJUnitReporter("junit.xml")
RunSpecsWithDefaultAndCustomReporters(t, "e2e Suite", []Reporter{junitReporter})
}
var _ = BeforeSuite(func() {
By("Using kubeconfig from " + kubeconfigPath)
config, err := clientcmd.BuildConfigFromContext(kubeconfigPath, kubeContext)
Expect(err).NotTo(HaveOccurred())
// raise throttling time. ref: https://github.com/appscode/voyager/issues/640
config.Burst = 100
config.QPS = 100
// Clients
kubeClient := kubernetes.NewForConfigOrDie(config)
dbClient := cs.NewForConfigOrDie(config)
kaClient := ka.NewForConfigOrDie(config)
appCatalogClient := appcat_cs.NewForConfigOrDie(config)
aPIExtKubeClient := kext_cs.NewForConfigOrDie(config)
stashClient := scs.NewForConfigOrDie(config)
// Framework
root = framework.New(config, kubeClient, aPIExtKubeClient, dbClient, kaClient, appCatalogClient, stashClient, storageClass)
// Create namespace
By("Using namespace " + root.Namespace())
err = root.CreateNamespace()
Expect(err).NotTo(HaveOccurred())
if !framework.SelfHostedOperator {
stopCh := genericapiserver.SetupSignalHandler()
go root.RunOperatorAndServer(config, kubeconfigPath, stopCh)
}
root.EventuallyCRD().Should(Succeed())
root.EventuallyAPIServiceReady().Should(Succeed())
})
var _ = AfterSuite(func() {
By("Cleanup Left Overs")
if !framework.SelfHostedOperator {
By("Delete Admission Controller Configs")
root.CleanAdmissionConfigs()
}
By("Delete left over MongoDB objects")
root.CleanMongoDB()
By("Delete left over Dormant Database objects")
root.CleanDormantDatabase()
By("Delete left over Snapshot objects")
root.CleanSnapshot()
By("Delete left over workloads if exists any")
root.CleanWorkloadLeftOvers()
By("Delete Namespace")
err := root.DeleteNamespace()
Expect(err).NotTo(HaveOccurred())
})
| ["\"KUBECONFIG\""] | [] | ["KUBECONFIG"] | [] | ["KUBECONFIG"] | go | 1 | 0 | |
Experiment.py
|
# Class definition:
# Experiment
# This class is the main experiment class; ATLAS etc will inherit from this class
# Instances are generated with ExperimentFactory
# Subclasses should implement all needed methods prototyped in this class
# Note: not compatible with Singleton Design Pattern due to the subclassing
import os
import re
import time
import commands
from subprocess import Popen, PIPE
from glob import glob                 # Needed by removeRedundantFiles() below
from PilotErrors import PilotErrors
from pUtil import tolog # Dump to pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from pUtil import getCmtconfig # cmtconfig (move to subclass)
from pUtil import getDirectAccessDic # Get the direct access dictionary
from pUtil import isBuildJob # Is the current job a build job?
from pUtil import remove # Used to remove redundant file before log file creation
from pUtil import getPilotlogFilename # Used in the subprocess arguments method
from pUtil import extractHPCInfo # Used by getSubprocessName() to determine HPC plug-in if necessary
class Experiment(object):
# experiment = "generic" # String defining the experiment
# private data members
__experiment = "generic" # String defining the experiment
__instance = None # Boolean used by subclasses to become a Singleton
__error = PilotErrors() # PilotErrors object
__doFileLookups = False # True for LFC based file lookups (basically a dummy data member here since singleton object is static)
__cache = "" # Cache URL used e.g. by LSST
# Required methods
def __init__(self, *args, **kwargs):
""" Default initialization """
# e.g. self.__errorLabel = errorLabel
# self.experiment = kwargs.get('experiment')
pass
def getExperiment(self):
""" Return a string with the experiment name """
# return self.experiment
return self.__experiment
def getJobExecutionCommand(self):
""" Define and test the command(s) that will be used to execute the payload """
# E.g. cmd = "source <path>/setup.sh; <path>/python <script>"
cmd = ""
return cmd
def getFileLookups(self):
""" Return the file lookup boolean """
return self.__doFileLookups
def doFileLookups(self, doFileLookups):
""" Update the file lookups boolean """
# Only implement this method if class really wants to update the __doFileLookups boolean
# ATLAS wants to implement this, but not CMS
# Method is used by Mover
# self.__doFileLookups = doFileLookups
pass
def willDoAlternativeFileLookups(self):
""" Should file lookups be done using alternative methods? """
# E.g. in the migration period where LFC lookups are halted in favour of other methods in the Rucio API
# (for ATLAS), this method could be useful. See the usage in Mover::getReplicaDictionary() which is called
# after Experiment::willDoFileLookups() defined above. The motivation is that direct LFC calls are not to be
# used any longer by the pilot, and in the migration period the actual LFC calls will be done in the Rucio
# API. Eventually this API will switch to alternative file lookups.
return False
def willDoFileLookups(self):
""" Should (LFC) file lookups be done by the pilot or not? """
return self.__doFileLookups
def willDoFileRegistration(self):
""" Should (LFC) file registration be done by the pilot or not? """
return False
def getFileCatalog(self):
""" Return the default file catalog to use (e.g. for replica lookups) """
# See usage in Mover.py
# e.g. 'lfc://prod-lfc-atlas.cern.ch:/grid/atlas'
return ""
# Additional optional methods
# These methods are optional and can be left as they are here, or modified according to special needs
def verifyProxy(self, envsetup="", limit=None):
""" Check for a valid voms/grid proxy longer than N hours """
# Use 'limit' to set required length
tolog("(verifyProxy() is not implemented)")
exitcode = 0
pilotErrorDiag = ""
return exitcode, pilotErrorDiag
def removeRedundantFiles(self, workdir):
""" Remove redundant files and directories """
# List of files and directories to be removed from work directory prior to log file creation
# Make sure that any large files or directories that are not wanted in the log file are included in this list
dir_list = [
"buildJob*",
"external",
"fort.*",
"home",
"python",
"share",
"workdir",
"*.py",
"*.pyc",
"*.root*",
"JEM",
"tmp*",
"*.tmp",
"*.TMP",
"scratch",
]
for _dir in dir_list:
files = glob(os.path.join(workdir, _dir))
rc = remove(files)
if not rc:
tolog("IGNORE: Failed to remove redundant file(s): %s" % (files))
def getPayloadName(self, job):
""" Set a suitable name for the payload stdout """
# The payload <name> gets translated into <name>_stdout.txt
# which is the name of the stdout file produced by the payload execution
# (essentially commands.getoutput("<setup>; <payload executable> [options] > <name>_stdout.txt"))
# The job object can be used to create more precise stdout names (see e.g. the ATLASExperiment implementation)
return "payload"
def isOutOfMemory(self, **kwargs):
""" Try to identify out of memory errors in the stderr/out """
return False
def getNumberOfEvents(self, **kwargs):
""" Return the number of events """
return 0
def specialChecks(self, **kwargs):
""" Implement special checks here """
# Return False if fatal failure, otherwise return True
# The pilot will abort if this method returns a False
# On an HPC system, it might be good to skip certain checks (e.g. CVMFS, LFC, etc). Refer to schedconfig.resourcetype, set to 'hpc' on an HPC queue
status = False
tolog("No special checks for \'%s\'" % (self.experiment))
return True # obviously change this to 'status' once implemented
def checkSpecialEnvVars(self, sitename):
""" Check special environment variables """
ec = 0
tolog("No special env var checks for site %s" % (sitename))
return ec
def setINDS(self, realDatasetsIn):
""" Extract the dataset as set by pathena option --inDS and set the INDS environmental variable """
# Needed by pathena (move to ATLASExperiment later)
inDS = ""
for ds in realDatasetsIn:
if "DBRelease" not in ds and ".lib." not in ds:
inDS = ds
break
if inDS != "":
tolog("Setting INDS env variable to: %s" % (inDS))
os.environ['INDS'] = inDS
else:
tolog("INDS unknown")
def getValidBaseURLs(self, order=None):
""" Return list of valid base URLs """
# if order is defined, return given item first
# e.g. order=http://atlpan.web.cern.ch/atlpan -> ['http://atlpan.web.cern.ch/atlpan', ...]
validBaseURLs = []
_validBaseURLs = ["http://www.usatlas.bnl.gov",\
"https://www.usatlas.bnl.gov",\
"http://pandaserver.cern.ch",\
"http://atlpan.web.cern.ch/atlpan",\
"https://atlpan.web.cern.ch/atlpan",\
"http://classis01.roma1.infn.it",\
"http://atlas-install.roma1.infn.it"]
if order:
validBaseURLs.append(order)
for url in _validBaseURLs:
if url != order:
validBaseURLs.append(url)
else:
validBaseURLs = _validBaseURLs
tolog("getValidBaseURLs will return: %s" % str(validBaseURLs))
return validBaseURLs
def downloadTrf(self, wgetCommand, jobTrf):
""" Download the trf """
status = False
pilotErrorDiag = ""
cmd = "%s %s" % (wgetCommand, jobTrf)
trial = 1
max_trials = 3
# try to download the trf a maximum of 3 times
while trial <= max_trials:
tolog("Executing command [Trial %d/%d]: %s" % (trial, max_trials, cmd))
ec, rets = commands.getstatusoutput(cmd)
if not rets:
rets = "(None)"
if ec != 0:
# Analyze exit code / output
from futil import check_syserr
check_syserr(ec, rets)
pilotErrorDiag = "wget command failed: %d, %s" % (ec, rets)
tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
if trial == max_trials:
tolog("!!FAILED!!3000!! Could not download trf: %s" % (rets))
status = False
break
else:
tolog("Will try again after 60s..")
from time import sleep
sleep(60)
else:
pilotErrorDiag = ""
tolog("wget command returned: %s" % (rets))
status = True
break
trial += 1
return status, pilotErrorDiag
def getAnalysisTrf(self, wgetCommand, origTRF, pilot_initdir):
""" Get the trf to be used for analysis jobs """
pilotErrorDiag = ""
trfName = origTRF.split('/')[-1]
tolog("trfName = %s" % (trfName))
origBaseURL = ""
# Copy trf from pilot init dir if distributed with pilot code
fname = os.path.join(pilot_initdir, trfName)
status = False
if os.path.exists(fname):
from shutil import copy2
try:
copy2(fname, os.getcwd())
except Exception, e:
tolog("!!WARNING!!2999!! Could not copy trf from pilot init dir: %s" % str(e))
else:
tolog("Copied trf (%s) from pilot init dir" % (fname))
status = True
# Download trf
if not status:
# verify the base URL
for baseURL in self.getValidBaseURLs():
if origTRF.startswith(baseURL):
origBaseURL = baseURL
break
if origBaseURL == "":
pilotErrorDiag = "Invalid base URL: %s" % (origTRF)
return self.__error.ERR_TRFDOWNLOAD, pilotErrorDiag, ""
else:
tolog("Verified the trf base URL: %s" % (origBaseURL))
# try to download from the required location, if not - switch to backup
for baseURL in self.getValidBaseURLs(order=origBaseURL):
trf = re.sub(origBaseURL, baseURL, origTRF)
tolog("Attempting to download trf: %s" % (trf))
status, pilotErrorDiag = self.downloadTrf(wgetCommand, trf)
if status:
break
if not status:
return self.__error.ERR_TRFDOWNLOAD, pilotErrorDiag, ""
tolog("Successfully downloaded trf")
tolog("Changing permission of %s to 0755" % (trfName))
try:
os.chmod(trfName, 0755)
except Exception, e:
pilotErrorDiag = "Failed to chmod %s: %s" % (trfName, str(e))
return self.__error.ERR_CHMODTRF, pilotErrorDiag, ""
return 0, pilotErrorDiag, trfName
def getAnalysisRunCommand(self, job, jobSite, trfName):
""" Get the run command for analysis jobs """
# The run command is used to setup up the user job transform
ec = 0
pilotErrorDiag = ""
run_command = ""
return ec, pilotErrorDiag, run_command
def getFileTransferInfo(self, transferType, buildJob):
""" Get all relevant fields related to file transfer """
copysetup = readpar('copysetupin')
# create the direct access dictionary
fileTransferInfo = getDirectAccessDic(copysetup)
# if copysetupin did not contain direct access info, try the copysetup instead
if not fileTransferInfo:
copysetup = readpar('copysetup')
fileTransferInfo = getDirectAccessDic(copysetup)
# should the copytool be used?
useCopyTool = False
useFileStager = False
useDirectAccess = False
oldPrefix = ""
newPrefix = ""
dInfo = None
if fileTransferInfo:
dInfo = True
# no direct access / remote I/O, use standard copytool (copy-to-scratch)
if fileTransferInfo['useCopyTool']:
useCopyTool = True
# do not set the LFC host for file stager
if fileTransferInfo['useFileStager']:
useFileStager = True
if fileTransferInfo['directIn']:
useDirectAccess = True
oldPrefix = fileTransferInfo['oldPrefix']
newPrefix = fileTransferInfo['newPrefix']
# override settings for transferType direct
if transferType == 'direct':
useCopyTool = False
useFileStager = False
useDirectAccess = True
# should pilot create TURL based PFC? (not done here, but setup needs to be aware of it)
# if dInfo and useDirectAccess and oldPrefix == "" and newPrefix == "":
if (transferType == 'direct' or (useFileStager and useDirectAccess)) and (oldPrefix == "" and newPrefix == "") and not buildJob:
# if (transferType == 'direct' or (not useFileStager and useDirectAccess)) and (oldPrefix == "" and newPrefix == ""):
usePFCTurl = True
else:
usePFCTurl = False
# force usePFCTurl for all jobs
if not buildJob and useDirectAccess:
tolog("Forced usePFCTurl (reset old/newPrefix)")
usePFCTurl = True
oldPrefix = ""
newPrefix = ""
if os.environ.get("TestXRootD", 'False') == 'True':
import re
re.sub(r'\/xrootdsetup\.sh', '/xrootdsetup-dev.sh', copysetup)
return dInfo, useCopyTool, useDirectAccess, useFileStager, oldPrefix, newPrefix, copysetup, usePFCTurl
def getGuidsFromJobPars(self, jobPars, inputFiles, inFilesGuids):
""" Extract the correct guid from the input file list """
# the guids list is used for direct reading in an LFC environment
# 1. extract input file list for direct reading from jobPars
# 2. for each input file in this list, find the corresponding guid from the input file guid list
# since jobPars is entered by a human, the order of the input files might not be the same
guidList = []
jobPars = jobPars.replace("'","")
jobPars = jobPars.replace(", ",",")
pattern = re.compile(r'\-i \"\[([A-Za-z0-9.,_-]+)\]\"')
directReadingInputFiles = re.findall(pattern, jobPars)
inFiles = []
if directReadingInputFiles != []:
inFiles = directReadingInputFiles[0].split(",")
else:
match = re.search("-i ([A-Za-z0-9.\[\],_-]+) ", jobPars)
if match != None:
compactInFiles = match.group(1)
match = re.search('(.*)\[(.+)\](.*)\[(.+)\]', compactInFiles)
if match != None:
inputFiles = []
head = match.group(1)
tail = match.group(3)
body = match.group(2).split(',')
attr = match.group(4).split(',')
for idx in range(len(body)):
lfn = '%s%s%s%s' % (head, body[idx], tail, attr[idx])
inputFiles.append(lfn)
else:
inputFiles = [compactInFiles]
if inFiles != []:
for inFile in inFiles:
# get the corresponding index from the inputFiles list, which has the same order as inFilesGuids
try:
index = inputFiles.index(inFile)
except Exception, e:
tolog("!!WARNING!!2999!! Exception caught: %s (direct reading will fail)" % str(e))
else:
# add the corresponding guid to the list
guidList.append(inFilesGuids[index])
return guidList
def getMetadataForRegistration(self, guid):
""" Return metadata for [LFC] file registration """
# This method can insert special metadata into the metadata.xml file
# E.g. it can add preliminary XML tags for info that will only be known
# at a later time, such as "<metadata att_name="surl" att_value="%s-surltobeset"/>\n' % (guid)"
# The <guid>-surltobeset will be replaced by the pilot by the appropriate value once it is known
# Inputs:
# guid = file guid
# Returns:
# metadata string
# See e.g. the CMSExperiment implementation
# The method is called from pUtil::PFCxml() during metadata file creation
return ""
def getAttrForRegistration(self):
""" Return the attribute of the metadata XML to be updated with surl value """
# Used in combination with Experiment::getMetadataForRegistration()
# The attribute (default 'surl') will be copied into the metadata string used for pattern matching
# E.g. re.compile('\<metadata att\_name\=\"%s\" att\_value\=\"([a-zA-Z0-9-]+)\-surltobeset\"\/\>' % (attribute))
return 'surl'
def getExpSpecificMetadata(self, job, workdir):
""" Return experiment specific metadata """
# Inputs:
# job = PanDA pilot job object (see Job class)
# workdir = relevant work directory where the metadata is located
# Returns:
# metadata xml string
# See e.g. implementation in CMSExperiment
return ""
def getFileCatalogHosts(self):
""" Return a list of file catalog hosts """
# The method is used in combination with federated xrootd (FAX).
# In case FAX is allowed on a given site, the pilot might need to lookup
# replica information in more than one LFC catalog. Normally a site has only
# one LFC (as set in schedconfig.lfchost). Providing a list of hosts will increase
# the probability that FAX will succeed
# See e.g. ATLASExperiment implementation
return []
def verifySwbase(self, appdir):
""" Confirm existence of appdir/swbase """
# appdir/swbase is a queuedata parameter specifying the base location of physics analysis / release software
# This method will simply verify that the corresponding directory exists
#
# Input:
# appdir = application/software/release directory (e.g. /cvmfs/atlas.cern.ch/repo/sw)
# Return:
# error code (0 for success)
return 0
def interpretPayloadStdout(self, job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, failureCode):
""" Payload error interpretation and handling """
# NOTE: TODO, hide argument complexity with kwargs**
# This method can be used to interpret special errors that only occur in actual payload stdout, e.g. memory errors that have
# caused the payload to crash
#
# Inputs:
# job = PanDA pilot job object (see Job class)
# res =
# getstatusoutput_was_interrupted = True in case the payload execution command was aborted (e.g. keyboard CTRL-C)
# current_job_number = current job number, in case of multi-trf (ATLAS)
# runCommandList = list of payload execution commands (e.g. used by ATLAS to get to a setup file)
# failureCode = signal error code
# Returns:
# Updated PanDA pilot job object with proper payload error information, if needed
#
# The following Job attributes can be updated here
# result = tuple of size 3 that contain the standard error info: result[0] = current job status (e.g. failed, finished, holding),
# result[1] = payload error code, result[2] = PanDA pilot error code
# pilotErrorDiag = error diagnostics (string of up to 256 characters that will appear on the PanDA monitor job web page for a failed job)
# exeError
return job
def getSubprocessName(self, eventService):
""" Select which subprocess is to be run by the Monitor """
# The default subprocess is RunJob (name='Normal', which performs payload setup, stage-in, payload execution and stage-out).
# An alternative subprocess is the runEvent module which downloads events from an Event Server, executes a payload
# and stages out output files asynchronously as they are ready.
# Note: send the entire job object to this method since there might be other subprocesses created at a later time which
# will be identified by this method using some other job data member
# Default subprocess name
name = "RunJob"
# Select alternative subprocess names for HPCs
isHPC, _name = extractHPCInfo(readpar('catchall'))
if isHPC:
name = "RunJob" + _name # e.g. "RunJobTitan" is the proper subprocess name for the Titan plug-in
# for es merge jobs
if _name and _name.startswith("Hpc"):
name = "RunJob"
# Are we going to run an event service job?
if eventService:
tolog("Encountered an event service job")
if isHPC:
name = "RunJob%sEvent" % (_name)
else:
name = "RunJobPrefetcher"
tolog("Selected subprocess: %s" % (name))
return name
def getSubprocessArguments(self, env, port, subprocessName="RunJob"):
""" Argument list needed to launch the subprocess by the pilot/Monitor """
# The pilot/Monitor is forking a subprocess which will be monitored for work dir size, hanging processes etc
# This method returns the arguments needed to execute the subprocess (python <subprocess name> <arguments>)
# By default the pilot has implementations for RunJob.py (standard job) and RunJobEvent.py (event server job)
# If a new subprocess module is added, its startup arguments need to be specified here
jobargs = None
tolog("Will set up subprocess arguments for type: %s" % (subprocessName))
url = '%s:%s/server/panda' % (env['pshttpurl'], str(env['psport']))
if subprocessName == "RunJobPrefetcher":
jobargs = [env['pyexe'], "RunJobPrefetcher.py",
"-a", env['thisSite'].appdir,
"-b", env['queuename'],
"-d", env['jobDic']["prod"][1].workdir,
"-g", env['inputDir'],
"-i", env['jobDic']["prod"][1].tarFileGuid,
"-k", getPilotlogFilename(),
"-l", env['pilot_initdir'],
"-m", env['outputDir'],
"-o", env['thisSite'].workdir,
"-p", str(port),
"-s", env['thisSite'].sitename,
"-t", str(env['proxycheckFlag']),
"-x", str(env['stageinretry']),
"-E", str(env['stageoutretry']),
"-F", env['experiment'],
"-H", env['cache'],
"-W", url]
else:
jobargs = [env['pyexe'], "%s.py" % (subprocessName),
"-a", env['thisSite'].appdir,
"-b", env['queuename'],
"-d", env['jobDic']["prod"][1].workdir,
"-g", env['inputDir'],
"-i", env['jobDic']["prod"][1].tarFileGuid,
"-k", getPilotlogFilename(),
"-l", env['pilot_initdir'],
"-m", env['outputDir'],
"-o", env['thisSite'].workdir,
"-p", str(port),
"-s", env['thisSite'].sitename,
"-t", str(env['proxycheckFlag']),
"-x", str(env['stageinretry']),
"-E", str(env['stageoutretry']),
"-F", env['experiment'],
"-H", env['cache'],
"-W", url]
if 'yodaNodes' in env and subprocessName == "RunJobHpcEvent":
jobargs.append("-N")
jobargs.append(str(env['yodaNodes']))
if 'yodaQueue' in env and subprocessName == "RunJobHpcEvent":
jobargs.append("-Q")
jobargs.append(str(env['yodaQueue']))
tolog("Will use arguments: %s" % str(jobargs))
return jobargs
# Optional
def doSpecialLogFileTransfer(self, **argdict):
""" Should the log file be transfered to a special SE? """
# The log file can at the end of the job be stored in a special SE - in addition to the normal stage-out of the log file
# If this method returns True, the JobLog class will attempt to store the log file in a secondary SE after the transfer of
# the log to the primary/normal SE. Additional information about the secondary SE is required and can be specified in
# another optional method defined in the *Experiment classes
# eventService = argdict.get('eventService', False)
return False
# Optional
def getSchedconfigURL(self, protocol="http://"):
""" Define the URL for the schedconfig / PanDA server"""
# This method gets called from SiteInformation in case the URL is not set (by the wrapper)
return protocol + "pandaserver.cern.ch"
# Optional
def getSubprocess(self, cmd, stdout=None, stderr=None):
""" Execute and return a subprocess """
process = None
try:
tolog("Executing command: %s" % (cmd))
if stdout and stderr:
# use stdout/stdout file objects to redirect the stdout/stderr streams
process = Popen(cmd, shell=True, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)
else:
process = Popen(cmd, shell=True)
except Exception, e:
tolog("!!WARNING!!2344!! Caught exception: %s" % (e))
else:
tolog("Subprocess is running")
return process
# Optional
def getJobExecutionCommand4EventService(self):
""" Define and test the command(s) that will be used to execute the payload for the event service """
# E.g. cmd = ["source <path>/setup.sh; <path>/python <script>"]
# The command returned from this method is executed using subprocess.Popen() from the runEvent module
# Note: this optional method only needs to be defined in case the event service is to be used
# As of March 2014, this is not yet functional or documented.
# The actual command must be declared as a list since that is expected by Popen()
cmd = [""]
return cmd
# Optional
def postGetJobActions(self, job):
""" Perform any special post-job definition download actions here """
# This method is called after the getJob() method has successfully downloaded a new job (job definition) from
# the server. If the job definition e.g. contains information that contradicts WN specifics, this method can
# be used to fail the job
# Return any error code using ec, and any error message using pilotErrorDiag
ec = 0
pilotErrorDiag = ""
return ec, pilotErrorDiag
# Optional
def useTracingService(self):
return False
# Optional
def updateJobSetupScript(self, workdir, create=False, to_script=None):
""" Create or update the job setup script (used to recreate the job locally if needed) """
# If create=True, this step will only create the file with the script header (bash info)
if create:
filename = os.path.basename(self.getJobSetupScriptName(workdir))
tolog("Creating job setup script with stage-in and payload execution commands: %s" % (filename))
to_script = "#!/bin/bash\n# %s %s\n\n" % (filename, time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.time())))
# Add the string to the setup script
if to_script:
self.addToJobSetupScript(to_script, workdir)
# Optional
def getJobSetupScriptName(self, workdir):
""" return the name of the job setup file """
return os.path.join(workdir, "job_setup.sh")
# Optional
def addToJobSetupScript(self, cmd, workdir):
""" add/append command to job setup file """
filename = self.getJobSetupScriptName(workdir)
if not os.path.exists(filename):
try:
fp = open(filename, "w")
except OSError, e:
tolog("!!WARNING!!1880!! Could not open job setup file for writing: %s" % str(e))
else:
try:
fp = open(filename, "a")
except OSError, e:
tolog("!!WARNING!!1880!! Could not open job setup file for appending: %s" % str(e))
if fp:
fp.write(cmd)
fp.write("\n\n")
fp.close()
tolog("Updated %s: %s" % (filename, cmd))
# Optional
def getRelease(self, release):
""" Return a list of the software release id's """
# Assuming 'release' is a string that separates release id's with '\n'
# Used in the case of payload using multiple steps with different release versions
# E.g. release = "19.0.0\n19.1.0" -> ['19.0.0', '19.1.0']
return release.split("\n")
# Optional
def formatReleaseString(release):
""" Return a special formatted release string """
# E.g. release = "Atlas-19.0.0" -> "19.0.0"
# This method is required for ATLAS but is probably of no interest for any other PanDA user
return release
# Optional
def setCache(self, cache):
""" Cache URL """
# Used e.g. by LSST
self.__cache = cache
# Optional
def getCache(self):
""" Return the cache URL """
# Used e.g. by LSST
return self.__cache
# Optional
def useTracingService(self):
""" Use the Rucio Tracing Service """
# A service provided by the Rucio system that allows for file transfer tracking; all file transfers
# are reported by the pilot to the Rucio Tracing Service if this method returns True
return False
# Optional
def updateJobDefinition(self, job, filename):
""" Update the job definition file and object before using it in RunJob """
# This method is called from Monitor, before RunJob is launched, which allows to make changes to the job object after it was downloaded from the job dispatcher
# (used within Monitor) and the job definition file (which is used from RunJob to recreate the same job object as is used in Monitor).
# 'job' is the job object, defined in Job.py, while 'filename' is the name of the file containing the job definition information.
return job
# Optional
def shouldExecuteUtility(self):
""" Determine whether a special utility should be executed """
# The RunJob class has the possibility to execute a special utility, e.g. a memory monitor, that runs in parallel
# to the payload (launched after the main payload process).
# The utility is executed if this method returns True. The utility is currently expected to produce
# a summary JSON file whose name is defined by the getUtilityJSONFilename() method. The contents of
# this file (ie. the full JSON dictionary) will be added to the job update.
#
# Example of summary JSON file (ATLAS case):
# {"Max":{"maxVMEM":40058624,"maxPSS":10340177,"maxRSS":16342012,"maxSwap":16235568},
# "Avg":{"avgVMEM":19384236,"avgPSS":5023500,"avgRSS":6501489,"avgSwap":5964997}}
#
# While running, the MemoryMonitor also produces a regularly updated text file with the following format: (tab separated)
# Time VMEM PSS RSS Swap (first line in file)
# 1447960494 16099644 3971809 6578312 1978060
return False
# Optional
def getUtilityOutputFilename(self):
""" Return the filename of a utility output file """
# For explanation, see shouldExecuteUtility()
return "memory_monitor_output.txt"
# Optional
def getUtilityJSONFilename(self):
""" Return the filename of a utility JSON file """
# For explanation, see shouldExecuteUtility()
return "utility_summary.json"
# Optional
def getUtilityInfo(self, workdir, pilot_initdir, allowTxtFile=False):
""" Add the utility info to the node structure if available """
# Extract the relevant information from the utility tool output and add it to the dictionary
# returned by this method. The dictionary will be merged with the node dictionary in
# PandaServerClient::getNodeStructure() and sent to the PanDA server
return {}
# Optional
def getUtilityCommand(self, **argdict):
""" Prepare a utility command string """
# This method can be used to prepare a setup string for an optional utility tool, e.g. a memory monitor,
# that will be executed by the pilot in parallel with the payload.
# The pilot will look for an output JSON file (summary.json) and will extract pre-determined fields
# from it and report them with the job updates. Currently the pilot expects to find fields related
# to memory information.
# pid = argdict.get('pid', 0)
return ""
# Optional
def getGUIDSourceFilename(self):
""" Return the filename of the file containing the GUIDs for the output files """
# In the case of ATLAS, Athena produces an XML file containing the GUIDs of the output files. The name of this
# file is PoolFileCatalog.xml. If this method returns an empty string (ie the default), the GUID generation will
# be done by the pilot in RunJobUtilities::getOutFilesGuids()
return ""
# Optional
def buildFAXPath(self, **argdict):
""" Build a proper FAX path """
# This method builds proper FAX paths and is used in pure FAX mode (i.e. when FAX is used in forced mode),
# particularly when the PoolFileCatalog.xml is built prior to stage-in
# Only needed if FAX mechanism is used in forced mode (i.e. when copytoolin='fax')
lfn = argdict.get('lfn', 'default_lfn')
scope = argdict.get('scope', 'default_scope')
subpath = argdict.get('subpath', 'atlas/rucio/')
pandaID = argdict.get('pandaID', '')
sourceSite = argdict.get('sourceSite', 'default_sourcesite')
computingSite = argdict.get('computingSite', 'default_computingsite')
# Get the proper FAX redirector (default ATLAS implementation)
from FAXTools import getFAXRedirectors
# First get the global redirectors (several, since the lib file might not be at the same place for overflow jobs)
fax_redirectors_dictionary = getFAXRedirectors(computingSite, sourceSite, pandaID)
tolog("fax_redirectors_dictionary=%s"%str(fax_redirectors_dictionary))
# select the proper fax redirector
if ".lib." in lfn:
redirector = fax_redirectors_dictionary['computingsite']
else:
redirector = fax_redirectors_dictionary['sourcesite']
# Make sure the redirector ends with a double slash
if not redirector.endswith('//'):
if redirector.endswith('/'):
redirector += "/"
else:
redirector += "//"
# Make sure that the subpath does not begin with a slash
if subpath.startswith('/') and len(subpath) > 1:
subpath = subpath[1:]
tolog("redirector=%s"%(redirector))
tolog("subpath=%s"%(subpath))
tolog("scope=%s"%(scope))
tolog("lfn=%s"%(lfn))
return redirector + subpath + scope + ":" + lfn
if __name__ == "__main__":
a=Experiment()
print a.getSubprocessName(False)
| [] | [] | ["TestXRootD", "INDS"] | [] | ["TestXRootD", "INDS"] | python | 2 | 0 | |
firestore/client.go
|
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package firestore
import (
"context"
"errors"
"fmt"
"io"
"os"
"strings"
"time"
vkit "cloud.google.com/go/firestore/apiv1"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/internal/version"
"github.com/golang/protobuf/ptypes"
gax "github.com/googleapis/gax-go/v2"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/transport"
pb "google.golang.org/genproto/googleapis/firestore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
// DetectProjectID is a sentinel value that instructs NewClient to detect the
// project ID. It is given in place of the projectID argument. NewClient will
// use the project ID from the given credentials or the default credentials
// (https://developers.google.com/accounts/docs/application-default-credentials)
// if no credentials were provided. When providing credentials, not all
// options will allow NewClient to extract the project ID. Specifically a JWT
// does not have the project ID encoded.
const DetectProjectID = "*detect-project-id*"
// A Client provides access to the Firestore service.
type Client struct {
c *vkit.Client
projectID string
databaseID string // A client is tied to a single database.
}
// NewClient creates a new Firestore client that uses the given project.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
var o []option.ClientOption
// If this environment variable is defined, configure the client to talk to the emulator.
if addr := os.Getenv("FIRESTORE_EMULATOR_HOST"); addr != "" {
conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithPerRPCCredentials(emulatorCreds{}))
if err != nil {
return nil, fmt.Errorf("firestore: dialing address from env var FIRESTORE_EMULATOR_HOST: %s", err)
}
o = []option.ClientOption{option.WithGRPCConn(conn)}
}
o = append(o, opts...)
if projectID == DetectProjectID {
creds, err := transport.Creds(ctx, o...)
if err != nil {
return nil, fmt.Errorf("fetching creds: %v", err)
}
if creds.ProjectID == "" {
return nil, errors.New("firestore: see the docs on DetectProjectID")
}
projectID = creds.ProjectID
}
vc, err := vkit.NewClient(ctx, o...)
if err != nil {
return nil, err
}
vc.SetGoogleClientInfo("gccl", version.Repo)
c := &Client{
c: vc,
projectID: projectID,
databaseID: "(default)", // always "(default)", for now
}
return c, nil
}
// Close closes any resources held by the client.
//
// Close need not be called at program exit.
func (c *Client) Close() error {
return c.c.Close()
}
func (c *Client) path() string {
return fmt.Sprintf("projects/%s/databases/%s", c.projectID, c.databaseID)
}
func withResourceHeader(ctx context.Context, resource string) context.Context {
md, _ := metadata.FromOutgoingContext(ctx)
md = md.Copy()
md[resourcePrefixHeader] = []string{resource}
return metadata.NewOutgoingContext(ctx, md)
}
// Collection creates a reference to a collection with the given path.
// A path is a sequence of IDs separated by slashes.
//
// Collection returns nil if path contains an even number of IDs or any ID is empty.
func (c *Client) Collection(path string) *CollectionRef {
coll, _ := c.idsToRef(strings.Split(path, "/"), c.path())
return coll
}
// Doc creates a reference to a document with the given path.
// A path is a sequence of IDs separated by slashes.
//
// Doc returns nil if path contains an odd number of IDs or any ID is empty.
func (c *Client) Doc(path string) *DocumentRef {
_, doc := c.idsToRef(strings.Split(path, "/"), c.path())
return doc
}
func (c *Client) NewDocumentSnapshot(proto *pb.Document) (*DocumentSnapshot, error) {
docRef, err := pathToDoc(proto.Name, c)
if err != nil {
return nil, err
}
doc, err := newDocumentSnapshot(docRef, proto, c, proto.UpdateTime)
if err != nil {
return nil, err
}
return doc, nil
}
// CollectionGroup creates a reference to a group of collections that include
// the given ID, regardless of parent document.
//
// For example, consider:
// France/Cities/Paris = {population: 100}
// Canada/Cities/Montreal = {population: 90}
//
// CollectionGroup can be used to query across all "Cities" regardless of
// its parent "Countries". See ExampleCollectionGroup for a complete example.
func (c *Client) CollectionGroup(collectionID string) *CollectionGroupRef {
return newCollectionGroupRef(c, c.path(), collectionID)
}
func (c *Client) idsToRef(IDs []string, dbPath string) (*CollectionRef, *DocumentRef) {
if len(IDs) == 0 {
return nil, nil
}
for _, id := range IDs {
if id == "" {
return nil, nil
}
}
coll := newTopLevelCollRef(c, dbPath, IDs[0])
i := 1
for i < len(IDs) {
doc := newDocRef(coll, IDs[i])
i++
if i == len(IDs) {
return nil, doc
}
coll = newCollRefWithParent(c, doc, IDs[i])
i++
}
return coll, nil
}
// GetAll retrieves multiple documents with a single call. The
// DocumentSnapshots are returned in the order of the given DocumentRefs.
// The return value will always contain the same number of DocumentSnapshots
// as the number of DocumentRefs in the input.
//
// If the same DocumentRef is specified multiple times in the input, the return
// value will contain the same number of DocumentSnapshots referencing the same
// document.
//
// If a document is not present, the corresponding DocumentSnapshot's Exists
// method will return false.
func (c *Client) GetAll(ctx context.Context, docRefs []*DocumentRef) (_ []*DocumentSnapshot, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/firestore.GetAll")
defer func() { trace.EndSpan(ctx, err) }()
return c.getAll(ctx, docRefs, nil)
}
func (c *Client) getAll(ctx context.Context, docRefs []*DocumentRef, tid []byte) ([]*DocumentSnapshot, error) {
var docNames []string
docIndices := map[string][]int{} // doc name to positions in docRefs
for i, dr := range docRefs {
if dr == nil {
return nil, errNilDocRef
}
docNames = append(docNames, dr.Path)
docIndices[dr.Path] = append(docIndices[dr.Path], i)
}
req := &pb.BatchGetDocumentsRequest{
Database: c.path(),
Documents: docNames,
}
if tid != nil {
req.ConsistencySelector = &pb.BatchGetDocumentsRequest_Transaction{tid}
}
streamClient, err := c.c.BatchGetDocuments(withResourceHeader(ctx, req.Database), req)
if err != nil {
return nil, err
}
// Read and remember all results from the stream.
var resps []*pb.BatchGetDocumentsResponse
for {
resp, err := streamClient.Recv()
if err == io.EOF {
break
}
if err != nil {
return nil, err
}
resps = append(resps, resp)
}
// Results may arrive out of order. Put each at the right indices.
docs := make([]*DocumentSnapshot, len(docNames))
for _, resp := range resps {
var (
indices []int
doc *pb.Document
err error
)
switch r := resp.Result.(type) {
case *pb.BatchGetDocumentsResponse_Found:
indices = docIndices[r.Found.Name]
doc = r.Found
case *pb.BatchGetDocumentsResponse_Missing:
indices = docIndices[r.Missing]
doc = nil
default:
return nil, errors.New("firestore: unknown BatchGetDocumentsResponse result type")
}
for _, index := range indices {
if docs[index] != nil {
return nil, fmt.Errorf("firestore: %q seen twice", docRefs[index].Path)
}
docs[index], err = newDocumentSnapshot(docRefs[index], doc, c, resp.ReadTime)
if err != nil {
return nil, err
}
}
}
return docs, nil
}
// Collections returns an iterator over the top-level collections.
func (c *Client) Collections(ctx context.Context) *CollectionIterator {
it := &CollectionIterator{
client: c,
it: c.c.ListCollectionIds(
withResourceHeader(ctx, c.path()),
&pb.ListCollectionIdsRequest{Parent: c.path() + "/documents"}),
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.items) },
func() interface{} { b := it.items; it.items = nil; return b })
return it
}
// Batch returns a WriteBatch.
func (c *Client) Batch() *WriteBatch {
return &WriteBatch{c: c}
}
// commit calls the Commit RPC outside of a transaction.
func (c *Client) commit(ctx context.Context, ws []*pb.Write) ([]*WriteResult, error) {
req := &pb.CommitRequest{
Database: c.path(),
Writes: ws,
}
res, err := c.c.Commit(withResourceHeader(ctx, req.Database), req)
if err != nil {
return nil, err
}
if len(res.WriteResults) == 0 {
return nil, errors.New("firestore: missing WriteResult")
}
var wrs []*WriteResult
for _, pwr := range res.WriteResults {
wr, err := writeResultFromProto(pwr)
if err != nil {
return nil, err
}
wrs = append(wrs, wr)
}
return wrs, nil
}
func (c *Client) commit1(ctx context.Context, ws []*pb.Write) (*WriteResult, error) {
wrs, err := c.commit(ctx, ws)
if err != nil {
return nil, err
}
return wrs[0], nil
}
// A WriteResult is returned by methods that write documents.
type WriteResult struct {
// The time at which the document was updated, or created if it did not
// previously exist. Writes that do not actually change the document do
// not change the update time.
UpdateTime time.Time
}
func writeResultFromProto(wr *pb.WriteResult) (*WriteResult, error) {
t, err := ptypes.Timestamp(wr.UpdateTime)
if err != nil {
t = time.Time{}
// TODO(jba): Follow up if Delete is supposed to return a nil timestamp.
}
return &WriteResult{UpdateTime: t}, nil
}
func sleep(ctx context.Context, dur time.Duration) error {
switch err := gax.Sleep(ctx, dur); err {
case context.Canceled:
return status.Error(codes.Canceled, "context canceled")
case context.DeadlineExceeded:
return status.Error(codes.DeadlineExceeded, "context deadline exceeded")
default:
return err
}
}
// emulatorCreds is an instance of grpc.PerRPCCredentials that will configure a
// client to act as an admin for the Firestore emulator. It always hardcodes
// the "authorization" metadata field to contain "Bearer owner", which the
// Firestore emulator accepts as valid admin credentials.
type emulatorCreds struct{}
func (ec emulatorCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
return map[string]string{"authorization": "Bearer owner"}, nil
}
func (ec emulatorCreds) RequireTransportSecurity() bool {
return false
}
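// exampleEmulatorConn is a hedged sketch, not part of the original client: it
// shows how emulatorCreds could be attached to a gRPC connection when the
// Firestore emulator is in use. It assumes the os and google.golang.org/grpc
// imports used elsewhere in this package, and that FIRESTORE_EMULATOR_HOST
// holds an address such as "localhost:8080" (illustrative value).
func exampleEmulatorConn() (*grpc.ClientConn, error) {
	addr := os.Getenv("FIRESTORE_EMULATOR_HOST")
	return grpc.Dial(addr,
		grpc.WithInsecure(),
		grpc.WithPerRPCCredentials(emulatorCreds{}),
	)
}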
|
[
"\"FIRESTORE_EMULATOR_HOST\""
] |
[] |
[
"FIRESTORE_EMULATOR_HOST"
] |
[]
|
["FIRESTORE_EMULATOR_HOST"]
|
go
| 1 | 0 | |
env/env.go
|
package env
import (
"fmt"
"os"
"regexp"
"strconv"
)
var ErrNoSessionId = fmt.Errorf("the ITERM_SESSION_ID environment variable is not set")
var ErrNoCookie = fmt.Errorf("the ITERM2_COOKIE environment variable is not set")
var ErrNoKey = fmt.Errorf("the ITERM2_KEY environment variable is not set")
// Session contains session information as reported by the ITERM_SESSION_ID environment variable.
type Session struct {
Id string
WindowIndex int
TabIndex int
}
// CurrentSession() parses the ITERM_SESSION_ID environment variable and returns a Session or ErrNoSessionId if the
// env var is not set.
func CurrentSession() (Session, error) {
v := os.Getenv("ITERM_SESSION_ID")
if v == "" {
return Session{}, ErrNoSessionId
}
	re := regexp.MustCompile(`^w(\d+)t(\d+)p(\d+):(.*)$`)
	matches := re.FindStringSubmatch(v)
	if matches == nil {
		return Session{}, fmt.Errorf("get session: unexpected ITERM_SESSION_ID value %q", v)
	}
	var err error
	var w, t int
if w, err = strconv.Atoi(matches[1]); err != nil {
return Session{}, fmt.Errorf("get session: %w", err)
}
if t, err = strconv.Atoi(matches[2]); err != nil {
return Session{}, fmt.Errorf("get session: %w", err)
}
return Session{Id: matches[4], WindowIndex: w, TabIndex: t}, nil
}
// CookieAndKey retrieves the cookie and key from the environment.
func CookieAndKey() (string, string, error) {
cookie := os.Getenv("ITERM2_COOKIE")
if cookie == "" {
return "", "", ErrNoCookie
}
key := os.Getenv("ITERM2_KEY")
if key == "" {
return "", "", ErrNoKey
}
return cookie, key, nil
}
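// exampleCurrentSession is a hedged usage sketch, not part of the original
// package: ITERM_SESSION_ID is expected to look like "w0t1p2:<session-uuid>",
// so a value of that shape yields WindowIndex 0 and TabIndex 1 here. The
// printed format is illustrative only.
func exampleCurrentSession() {
	s, err := CurrentSession()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("window %d, tab %d, session %s\n", s.WindowIndex, s.TabIndex, s.Id)
}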
|
[
"\"ITERM_SESSION_ID\"",
"\"ITERM2_COOKIE\"",
"\"ITERM2_KEY\""
] |
[] |
[
"ITERM2_KEY",
"ITERM_SESSION_ID",
"ITERM2_COOKIE"
] |
[]
|
["ITERM2_KEY", "ITERM_SESSION_ID", "ITERM2_COOKIE"]
|
go
| 3 | 0 | |
misc/os.go
|
package misc
// OS specific code goes here
import (
"log"
"os"
"os/user"
"runtime"
)
func GetConfigDir() string {
var configDir string
switch osName := runtime.GOOS; osName {
case "windows":
configDir = os.Getenv("ProgramData") + "\\"
case "linux":
configDir = "/etc/"
}
configDir += "pascollector"
return configDir
}
func GetDatabaseDir() string {
dbDir := "pascollector"
switch osName := runtime.GOOS; osName {
case "windows":
dbDir = os.Getenv("ProgramData") + "\\" + dbDir + "\\"
case "linux":
dbDir = "/var/lib/" + dbDir + "/"
}
return dbDir
}
func GetConfigurationFilename() string {
return GetConfigDir() + string(os.PathSeparator) + ".pascollector.yaml"
}
func CheckUser() {
	currentUser, err := user.Current()
	if err != nil {
		log.Fatalf("could not determine the current user: %v", err)
	}
	if runtime.GOOS == "linux" && currentUser.Username != "root" {
		log.Fatal("setup should be run as root")
}
}
|
[
"\"ProgramData\"",
"\"ProgramData\""
] |
[] |
[
"ProgramData"
] |
[]
|
["ProgramData"]
|
go
| 1 | 0 | |
cmd/node.go
|
package cmd
import (
"encoding/hex"
"flag"
"fmt"
"github.com/DataDog/datadog-go/statsd"
"github.com/bitclout/core/lib"
"net"
"os"
"path/filepath"
"time"
"github.com/btcsuite/btcd/addrmgr"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/dgraph-io/badger/v3"
"github.com/golang/glog"
"github.com/sasha-s/go-deadlock"
"gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer"
"gopkg.in/DataDog/dd-trace-go.v1/profiler"
)
type Node struct {
Server *lib.Server
chainDB *badger.DB
Params *lib.BitCloutParams
Config *Config
}
func NewNode(config *Config) *Node {
result := Node{}
result.Config = config
result.Params = config.Params
return &result
}
func (node *Node) Start() {
// TODO: Replace glog with logrus so we can also get rid of flag library
flag.Parse()
flag.Set("log_dir", node.Config.LogDirectory)
flag.Set("v", fmt.Sprintf("%d", node.Config.GlogV))
flag.Set("vmodule", node.Config.GlogVmodule)
glog.Init()
glog.CopyStandardLogTo("INFO")
// Print config
node.Config.Print()
// Validate params
validateParams(node.Params)
// Setup Datadog span tracer and profiler
if node.Config.DatadogProfiler {
tracer.Start()
err := profiler.Start(profiler.WithProfileTypes(profiler.CPUProfile, profiler.BlockProfile, profiler.MutexProfile, profiler.GoroutineProfile, profiler.HeapProfile))
if err != nil {
glog.Fatal(err)
}
}
// Setup statsd
statsdClient, err := statsd.New(fmt.Sprintf("%s:%d", os.Getenv("DD_AGENT_HOST"), 8125))
if err != nil {
glog.Fatal(err)
}
// Setup listeners and peers
bitcloutAddrMgr := addrmgr.New(node.Config.DataDirectory, net.LookupIP)
bitcloutAddrMgr.Start()
listeningAddrs, listeners := getAddrsToListenOn(node.Config.ProtocolPort)
for _, addr := range listeningAddrs {
netAddr := wire.NewNetAddress(&addr, 0)
_ = bitcloutAddrMgr.AddLocalAddress(netAddr, addrmgr.BoundPrio)
}
if len(node.Config.ConnectIPs) == 0 {
for _, host := range node.Config.AddIPs {
addIPsForHost(bitcloutAddrMgr, host, node.Params)
}
for _, host := range node.Params.DNSSeeds {
addIPsForHost(bitcloutAddrMgr, host, node.Params)
}
if !node.Config.PrivateMode {
go addSeedAddrsFromPrefixes(bitcloutAddrMgr, node.Params)
}
}
bitcoinDataDir := filepath.Join(node.Config.DataDirectory, "bitcoin_manager")
if err := os.MkdirAll(bitcoinDataDir, os.ModePerm); err != nil {
fmt.Errorf("Could not create Bitcoin datadir (%s): %v", node.Config.DataDirectory, err)
panic(err)
}
// Setup chain database
dbDir := lib.GetBadgerDbPath(node.Config.DataDirectory)
opts := badger.DefaultOptions(dbDir)
opts.ValueDir = dbDir
opts.MemTableSize = 1024 << 20
node.chainDB, err = badger.Open(opts)
if err != nil {
panic(err)
}
// Setup snapshot logger
if node.Config.LogDBSummarySnapshots {
lib.StartDBSummarySnapshots(node.chainDB)
}
// Setup the server
node.Server, err = lib.NewServer(
node.Params,
listeners,
bitcloutAddrMgr,
node.Config.ConnectIPs,
node.chainDB,
node.Config.TargetOutboundPeers,
node.Config.MaxInboundPeers,
node.Config.MinerPublicKeys,
node.Config.NumMiningThreads,
node.Config.OneInboundPerIp,
node.Config.RateLimitFeerate,
node.Config.MinFeerate,
node.Config.StallTimeoutSeconds,
bitcoinDataDir,
node.Config.MaxBlockTemplatesCache,
node.Config.MinBlockUpdateInterval,
node.Config.BlockCypherAPIKey,
true,
node.Config.DataDirectory,
node.Config.MempoolDumpDirectory,
node.Config.DisableNetworking,
node.Config.ReadOnlyMode,
node.Config.IgnoreInboundInvs,
node.Config.BitcoinConnectPeer,
node.Config.IgnoreUnminedBitcoin,
statsdClient,
node.Config.BlockProducerSeed,
node.Config.TrustedBlockProducerPublicKeys,
node.Config.TrustedBlockProducerStartHeight,
)
if err != nil {
panic(err)
}
node.Server.Start()
}
func (node *Node) Stop() {
node.Server.Stop()
node.chainDB.Close()
}
func validateParams(params *lib.BitCloutParams) {
if params.BitcoinBurnAddress == "" {
glog.Fatalf("The BitCloutParams being used are missing the BitcoinBurnAddress field.")
}
// Check that TimeBetweenDifficultyRetargets is evenly divisible
// by TimeBetweenBlocks.
if params.TimeBetweenBlocks == 0 {
glog.Fatalf("The BitCloutParams being used have TimeBetweenBlocks=0")
}
numBlocks := params.TimeBetweenDifficultyRetargets / params.TimeBetweenBlocks
truncatedTime := params.TimeBetweenBlocks * numBlocks
if truncatedTime != params.TimeBetweenDifficultyRetargets {
glog.Fatalf("TimeBetweenDifficultyRetargets (%v) should be evenly divisible by "+
"TimeBetweenBlocks (%v)", params.TimeBetweenDifficultyRetargets,
params.TimeBetweenBlocks)
}
if params.GenesisBlock == nil || params.GenesisBlockHashHex == "" {
glog.Fatalf("The BitCloutParams are missing genesis block info.")
}
// Compute the merkle root for the genesis block and make sure it matches.
merkle, _, err := lib.ComputeMerkleRoot(params.GenesisBlock.Txns)
if err != nil {
glog.Fatalf("Could not compute a merkle root for the genesis block: %v", err)
}
if *merkle != *params.GenesisBlock.Header.TransactionMerkleRoot {
glog.Fatalf("Genesis block merkle root (%s) not equal to computed merkle root (%s)",
hex.EncodeToString(params.GenesisBlock.Header.TransactionMerkleRoot[:]),
hex.EncodeToString(merkle[:]))
}
genesisHash, err := params.GenesisBlock.Header.Hash()
if err != nil {
glog.Fatalf("Problem hashing header for the GenesisBlock in "+
"the BitCloutParams (%+v): %v", params.GenesisBlock.Header, err)
}
genesisHashHex := hex.EncodeToString(genesisHash[:])
if genesisHashHex != params.GenesisBlockHashHex {
glog.Fatalf("GenesisBlockHash in BitCloutParams (%s) does not match the block "+
"hash computed (%s) %d %d", params.GenesisBlockHashHex, genesisHashHex, len(params.GenesisBlockHashHex), len(genesisHashHex))
}
if params.MinDifficultyTargetHex == "" {
glog.Fatalf("The BitCloutParams MinDifficultyTargetHex (%s) should be non-empty",
params.MinDifficultyTargetHex)
}
// Check to ensure the genesis block hash meets the initial difficulty target.
hexBytes, err := hex.DecodeString(params.MinDifficultyTargetHex)
if err != nil || len(hexBytes) != 32 {
glog.Fatalf("The BitCloutParams MinDifficultyTargetHex (%s) with length (%d) is "+
"invalid: %v", params.MinDifficultyTargetHex, len(params.MinDifficultyTargetHex), err)
}
if params.MaxDifficultyRetargetFactor == 0 {
glog.Fatalf("The BitCloutParams MaxDifficultyRetargetFactor is unset")
}
if params.BlockRewardMaturity == 0 {
glog.Fatalf("The BitCloutParams BlockRewardMaturity is unset")
}
}
func getAddrsToListenOn(protocolPort uint16) ([]net.TCPAddr, []net.Listener) {
listeningAddrs := []net.TCPAddr{}
listeners := []net.Listener{}
ifaceAddrs, err := net.InterfaceAddrs()
if err != nil {
return nil, nil
}
for _, iAddr := range ifaceAddrs {
ifaceIP, _, err := net.ParseCIDR(iAddr.String())
if err != nil {
continue
}
if ifaceIP.IsLinkLocalUnicast() {
continue
}
netAddr := net.TCPAddr{
IP: ifaceIP,
Port: int(protocolPort),
}
listener, err := net.Listen(netAddr.Network(), netAddr.String())
if err != nil {
continue
}
listeners = append(listeners, listener)
listeningAddrs = append(listeningAddrs, netAddr)
}
return listeningAddrs, listeners
}
func addIPsForHost(bitcloutAddrMgr *addrmgr.AddrManager, host string, params *lib.BitCloutParams) {
ipAddrs, err := net.LookupIP(host)
if err != nil {
glog.Tracef("_addSeedAddrs: DNS discovery failed on seed host (continuing on): %s %v\n", host, err)
return
}
if len(ipAddrs) == 0 {
glog.Tracef("_addSeedAddrs: No IPs found for host: %s\n", host)
return
}
// Don't take more than 5 IPs per host.
ipsPerHost := 5
if len(ipAddrs) > ipsPerHost {
glog.Debugf("_addSeedAddrs: Truncating IPs found from %d to %d\n", len(ipAddrs), ipsPerHost)
ipAddrs = ipAddrs[:ipsPerHost]
}
glog.Debugf("_addSeedAddrs: Adding seed IPs from seed %s: %v\n", host, ipAddrs)
// Convert addresses to NetAddress'es.
netAddrs := make([]*wire.NetAddress, len(ipAddrs))
for ii, ip := range ipAddrs {
netAddrs[ii] = wire.NewNetAddressTimestamp(
// We initialize addresses with a
// randomly selected "last seen time" between 3
// and 7 days ago similar to what bitcoind does.
time.Now().Add(-1*time.Second*time.Duration(lib.SecondsIn3Days+
lib.RandInt32(lib.SecondsIn4Days))),
0,
ip,
params.DefaultSocketPort)
}
glog.Debugf("_addSeedAddrs: Computed the following wire.NetAddress'es: %s", spew.Sdump(netAddrs))
// Normally the second argument is the source who told us about the
// addresses we're adding. In this case since the source is a DNS seed
// just use the first address in the fetch as the source.
bitcloutAddrMgr.AddAddresses(netAddrs, netAddrs[0])
}
// Must be run in a goroutine. This function continuously adds IPs from a DNS seed
// prefix+suffix by iterating up through all of the possible numeric values, which are typically
// [0, 10]
func addSeedAddrsFromPrefixes(bitcloutAddrMgr *addrmgr.AddrManager, params *lib.BitCloutParams) {
MaxIterations := 20
go func() {
for dnsNumber := 0; dnsNumber < MaxIterations; dnsNumber++ {
var wg deadlock.WaitGroup
for _, dnsGeneratorOuter := range params.DNSSeedGenerators {
wg.Add(1)
go func(dnsGenerator []string) {
dnsString := fmt.Sprintf("%s%d%s", dnsGenerator[0], dnsNumber, dnsGenerator[1])
glog.Tracef("_addSeedAddrsFromPrefixes: Querying DNS seed: %s", dnsString)
addIPsForHost(bitcloutAddrMgr, dnsString, params)
wg.Done()
}(dnsGeneratorOuter)
}
wg.Wait()
}
}()
}
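// exampleSeedHosts is a hedged illustration, not part of the original source:
// it spells out the host names addSeedAddrsFromPrefixes queries. For a
// hypothetical generator pair {"seed-", ".example.com"} and n iterations it
// yields seed-0.example.com, seed-1.example.com, ..., seed-(n-1).example.com.
func exampleSeedHosts(prefix, suffix string, n int) []string {
	hosts := make([]string, 0, n)
	for i := 0; i < n; i++ {
		hosts = append(hosts, fmt.Sprintf("%s%d%s", prefix, i, suffix))
	}
	return hosts
}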
|
[
"\"DD_AGENT_HOST\""
] |
[] |
[
"DD_AGENT_HOST"
] |
[]
|
["DD_AGENT_HOST"]
|
go
| 1 | 0 | |
apitools/base/py/credentials_lib.py
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common credentials classes and constructors."""
from __future__ import print_function
import contextlib
import datetime
import json
import os
import threading
import warnings
import httplib2
import oauth2client
import oauth2client.client
from oauth2client import service_account
from oauth2client import tools # for gflags declarations
import six
from six.moves import http_client
from six.moves import urllib
from apitools.base.py import exceptions
from apitools.base.py import util
# App Engine does not support ctypes which are required for the
# monotonic time used in fasteners. Conversely, App Engine does
# not support colocated concurrent processes, so process locks
# are not needed.
try:
import fasteners
_FASTENERS_AVAILABLE = True
except ImportError as import_error:
server_env = os.environ.get('SERVER_SOFTWARE', '')
if not (server_env.startswith('Development') or
server_env.startswith('Google App Engine')):
raise import_error
_FASTENERS_AVAILABLE = False
# Note: we try the oauth2client imports two ways, to accommodate layout
# changes in oauth2client 2.0+. We can remove these once we no longer
# support oauth2client < 2.0.
#
# pylint: disable=wrong-import-order,ungrouped-imports
try:
from oauth2client.contrib import gce
except ImportError:
from oauth2client import gce
try:
from oauth2client.contrib import multiprocess_file_storage
_NEW_FILESTORE = True
except ImportError:
_NEW_FILESTORE = False
try:
from oauth2client.contrib import multistore_file
except ImportError:
from oauth2client import multistore_file
try:
import gflags
FLAGS = gflags.FLAGS
except ImportError:
FLAGS = None
__all__ = [
'CredentialsFromFile',
'GaeAssertionCredentials',
'GceAssertionCredentials',
'GetCredentials',
'GetUserinfo',
'ServiceAccountCredentialsFromFile',
]
# Lock when accessing the cache file to avoid resource contention.
cache_file_lock = threading.Lock()
def SetCredentialsCacheFileLock(lock):
global cache_file_lock # pylint: disable=global-statement
cache_file_lock = lock
# List of additional methods we use when attempting to construct
# credentials. Users can register their own methods here, which we try
# before the defaults.
_CREDENTIALS_METHODS = []
def _RegisterCredentialsMethod(method, position=None):
"""Register a new method for fetching credentials.
This new method should be a function with signature:
client_info, **kwds -> Credentials or None
This method can be used as a decorator, unless position needs to
be supplied.
Note that method must *always* accept arbitrary keyword arguments.
Args:
method: New credential-fetching method.
position: (default: None) Where in the list of methods to
add this; if None, we append. In all but rare cases,
this should be either 0 or None.
Returns:
method, for use as a decorator.
"""
if position is None:
position = len(_CREDENTIALS_METHODS)
else:
position = min(position, len(_CREDENTIALS_METHODS))
_CREDENTIALS_METHODS.insert(position, method)
return method
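def _ExampleCustomCredentialsMethod(client_info, **unused_kwds):
    """Hedged sketch of a custom credentials method (not in the original file).

    A real implementation would return an oauth2client Credentials object, or
    None to fall through to the next registered method. It is deliberately not
    decorated with @_RegisterCredentialsMethod here so module behavior stays
    unchanged; a user would add that decorator (or call
    _RegisterCredentialsMethod directly) to register it.
    """
    del client_info  # Unused in this sketch.
    return None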
def GetCredentials(package_name, scopes, client_id, client_secret, user_agent,
credentials_filename=None,
api_key=None, # pylint: disable=unused-argument
client=None, # pylint: disable=unused-argument
oauth2client_args=None,
**kwds):
"""Attempt to get credentials, using an oauth dance as the last resort."""
scopes = util.NormalizeScopes(scopes)
client_info = {
'client_id': client_id,
'client_secret': client_secret,
'scope': ' '.join(sorted(scopes)),
'user_agent': user_agent or '%s-generated/0.1' % package_name,
}
for method in _CREDENTIALS_METHODS:
credentials = method(client_info, **kwds)
if credentials is not None:
return credentials
credentials_filename = credentials_filename or os.path.expanduser(
'~/.apitools.token')
credentials = CredentialsFromFile(credentials_filename, client_info,
oauth2client_args=oauth2client_args)
if credentials is not None:
return credentials
raise exceptions.CredentialsError('Could not create valid credentials')
def ServiceAccountCredentialsFromFile(filename, scopes, user_agent=None):
"""Use the credentials in filename to create a token for scopes."""
filename = os.path.expanduser(filename)
# We have two options, based on our version of oauth2client.
if oauth2client.__version__ > '1.5.2':
# oauth2client >= 2.0.0
credentials = (
service_account.ServiceAccountCredentials.from_json_keyfile_name(
filename, scopes=scopes))
if credentials is not None:
if user_agent is not None:
credentials.user_agent = user_agent
return credentials
else:
# oauth2client < 2.0.0
with open(filename) as keyfile:
service_account_info = json.load(keyfile)
account_type = service_account_info.get('type')
if account_type != oauth2client.client.SERVICE_ACCOUNT:
raise exceptions.CredentialsError(
'Invalid service account credentials: %s' % (filename,))
# pylint: disable=protected-access
credentials = service_account._ServiceAccountCredentials(
service_account_id=service_account_info['client_id'],
service_account_email=service_account_info['client_email'],
private_key_id=service_account_info['private_key_id'],
private_key_pkcs8_text=service_account_info['private_key'],
scopes=scopes, user_agent=user_agent)
# pylint: enable=protected-access
return credentials
def ServiceAccountCredentialsFromP12File(
service_account_name, private_key_filename, scopes, user_agent):
"""Create a new credential from the named .p12 keyfile."""
private_key_filename = os.path.expanduser(private_key_filename)
scopes = util.NormalizeScopes(scopes)
if oauth2client.__version__ > '1.5.2':
# oauth2client >= 2.0.0
credentials = (
service_account.ServiceAccountCredentials.from_p12_keyfile(
service_account_name, private_key_filename, scopes=scopes))
if credentials is not None:
credentials.user_agent = user_agent
return credentials
else:
# oauth2client < 2.0.0
with open(private_key_filename, 'rb') as key_file:
return oauth2client.client.SignedJwtAssertionCredentials(
service_account_name, key_file.read(), scopes,
user_agent=user_agent)
def _GceMetadataRequest(relative_url, use_metadata_ip=False):
"""Request the given url from the GCE metadata service."""
if use_metadata_ip:
base_url = os.environ.get('GCE_METADATA_IP', '169.254.169.254')
else:
base_url = os.environ.get(
'GCE_METADATA_ROOT', 'metadata.google.internal')
url = 'http://' + base_url + '/computeMetadata/v1/' + relative_url
# Extra header requirement can be found here:
# https://developers.google.com/compute/docs/metadata
headers = {'Metadata-Flavor': 'Google'}
request = urllib.request.Request(url, headers=headers)
opener = urllib.request.build_opener(urllib.request.ProxyHandler({}))
try:
response = opener.open(request)
except urllib.error.URLError as e:
raise exceptions.CommunicationError(
'Could not reach metadata service: %s' % e.reason)
return response
class GceAssertionCredentials(gce.AppAssertionCredentials):
"""Assertion credentials for GCE instances."""
def __init__(self, scopes=None, service_account_name='default', **kwds):
"""Initializes the credentials instance.
Args:
scopes: The scopes to get. If None, whatever scopes that are
available to the instance are used.
service_account_name: The service account to retrieve the scopes
from.
**kwds: Additional keyword args.
"""
# If there is a connectivity issue with the metadata server,
# detection calls may fail even if we've already successfully
# identified these scopes in the same execution. However, the
# available scopes don't change once an instance is created,
# so there is no reason to perform more than one query.
self.__service_account_name = six.ensure_text(
service_account_name,
encoding='utf-8',)
cached_scopes = None
cache_filename = kwds.get('cache_filename')
if cache_filename:
cached_scopes = self._CheckCacheFileForMatch(
cache_filename, scopes)
scopes = cached_scopes or self._ScopesFromMetadataServer(scopes)
if cache_filename and not cached_scopes:
self._WriteCacheFile(cache_filename, scopes)
# We check the scopes above, but don't need them again after
# this point. Newer versions of oauth2client let us drop them
# here, but since we support older versions as well, we just
# catch and squelch the warning.
with warnings.catch_warnings():
warnings.simplefilter('ignore')
super(GceAssertionCredentials, self).__init__(scope=scopes, **kwds)
@classmethod
def Get(cls, *args, **kwds):
try:
return cls(*args, **kwds)
except exceptions.Error:
return None
def _CheckCacheFileForMatch(self, cache_filename, scopes):
"""Checks the cache file to see if it matches the given credentials.
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
Returns:
List of scopes (if cache matches) or None.
"""
creds = { # Credentials metadata dict.
'scopes': sorted(list(scopes)) if scopes else None,
'svc_acct_name': self.__service_account_name,
}
cache_file = _MultiProcessCacheFile(cache_filename)
try:
cached_creds_str = cache_file.LockedRead()
if not cached_creds_str:
return None
cached_creds = json.loads(cached_creds_str)
if creds['svc_acct_name'] == cached_creds['svc_acct_name']:
if creds['scopes'] in (None, cached_creds['scopes']):
return cached_creds['scopes']
except KeyboardInterrupt:
raise
except: # pylint: disable=bare-except
# Treat exceptions as a cache miss.
pass
def _WriteCacheFile(self, cache_filename, scopes):
"""Writes the credential metadata to the cache file.
This does not save the credentials themselves (CredentialStore class
optionally handles that after this class is initialized).
Args:
cache_filename: Cache filename to check.
scopes: Scopes for the desired credentials.
"""
# Credentials metadata dict.
scopes = sorted([six.ensure_text(scope) for scope in scopes])
creds = {'scopes': scopes,
'svc_acct_name': self.__service_account_name}
creds_str = json.dumps(creds)
cache_file = _MultiProcessCacheFile(cache_filename)
try:
cache_file.LockedWrite(creds_str)
except KeyboardInterrupt:
raise
except: # pylint: disable=bare-except
# Treat exceptions as a cache miss.
pass
def _ScopesFromMetadataServer(self, scopes):
"""Returns instance scopes based on GCE metadata server."""
if not util.DetectGce():
raise exceptions.ResourceUnavailableError(
'GCE credentials requested outside a GCE instance')
if not self.GetServiceAccount(self.__service_account_name):
raise exceptions.ResourceUnavailableError(
'GCE credentials requested but service account '
'%s does not exist.' % self.__service_account_name)
if scopes:
scope_ls = util.NormalizeScopes(scopes)
instance_scopes = self.GetInstanceScopes()
if scope_ls > instance_scopes:
raise exceptions.CredentialsError(
'Instance did not have access to scopes %s' % (
sorted(list(scope_ls - instance_scopes)),))
else:
scopes = self.GetInstanceScopes()
return scopes
def GetServiceAccount(self, account):
relative_url = 'instance/service-accounts'
response = _GceMetadataRequest(relative_url)
response_lines = [six.ensure_str(line).rstrip(u'/\n\r')
for line in response.readlines()]
return account in response_lines
def GetInstanceScopes(self):
relative_url = 'instance/service-accounts/{0}/scopes'.format(
self.__service_account_name)
response = _GceMetadataRequest(relative_url)
return util.NormalizeScopes(six.ensure_str(scope).strip()
for scope in response.readlines())
# pylint: disable=arguments-differ
def _refresh(self, do_request):
"""Refresh self.access_token.
This function replaces AppAssertionCredentials._refresh, which
does not use the credential store and is therefore poorly
suited for multi-threaded scenarios.
Args:
do_request: A function matching httplib2.Http.request's signature.
"""
# pylint: disable=protected-access
oauth2client.client.OAuth2Credentials._refresh(self, do_request)
# pylint: enable=protected-access
def _do_refresh_request(self, unused_http_request):
"""Refresh self.access_token by querying the metadata server.
If self.store is initialized, store acquired credentials there.
"""
relative_url = 'instance/service-accounts/{0}/token'.format(
self.__service_account_name)
try:
response = _GceMetadataRequest(relative_url)
except exceptions.CommunicationError:
self.invalid = True
if self.store:
self.store.locked_put(self)
raise
content = response.read()
try:
credential_info = json.loads(content)
except ValueError:
raise exceptions.CredentialsError(
'Could not parse response as JSON: %s' % content)
self.access_token = credential_info['access_token']
if 'expires_in' in credential_info:
expires_in = int(credential_info['expires_in'])
self.token_expiry = (
datetime.timedelta(seconds=expires_in) +
datetime.datetime.utcnow())
else:
self.token_expiry = None
self.invalid = False
if self.store:
self.store.locked_put(self)
def to_json(self):
# OAuth2Client made gce.AppAssertionCredentials unserializable as of
# v3.0, but we need those credentials to be serializable for use with
# this library, so we use AppAssertionCredentials' parent's to_json
# method.
# pylint: disable=bad-super-call
return super(gce.AppAssertionCredentials, self).to_json()
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
kwargs = {}
if 'cache_filename' in data.get('kwargs', []):
kwargs['cache_filename'] = data['kwargs']['cache_filename']
# Newer versions of GceAssertionCredentials don't have a "scope"
# attribute.
scope_list = None
if 'scope' in data:
scope_list = [data['scope']]
credentials = GceAssertionCredentials(scopes=scope_list, **kwargs)
if 'access_token' in data:
credentials.access_token = data['access_token']
if 'token_expiry' in data:
credentials.token_expiry = datetime.datetime.strptime(
data['token_expiry'], oauth2client.client.EXPIRY_FORMAT)
if 'invalid' in data:
credentials.invalid = data['invalid']
return credentials
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
# TODO(craigcitro): Currently, we can't even *load*
# `oauth2client.appengine` without being on appengine, because of how
# it handles imports. Fix that by splitting that module into
# GAE-specific and GAE-independent bits, and guarding imports.
class GaeAssertionCredentials(oauth2client.client.AssertionCredentials):
"""Assertion credentials for Google App Engine apps."""
def __init__(self, scopes, **kwds):
if not util.DetectGae():
raise exceptions.ResourceUnavailableError(
'GCE credentials requested outside a GCE instance')
self._scopes = list(util.NormalizeScopes(scopes))
super(GaeAssertionCredentials, self).__init__(None, **kwds)
@classmethod
def Get(cls, *args, **kwds):
try:
return cls(*args, **kwds)
except exceptions.Error:
return None
@classmethod
def from_json(cls, json_data):
data = json.loads(json_data)
return GaeAssertionCredentials(data['_scopes'])
def _refresh(self, _):
"""Refresh self.access_token.
Args:
_: (ignored) A function matching httplib2.Http.request's signature.
"""
# pylint: disable=import-error
from google.appengine.api import app_identity
try:
token, _ = app_identity.get_access_token(self._scopes)
except app_identity.Error as e:
raise exceptions.CredentialsError(str(e))
self.access_token = token
def sign_blob(self, blob):
"""Cryptographically sign a blob (of bytes).
This method is provided to support a common interface, but
the actual key used for a Google Compute Engine service account
is not available, so it can't be used to sign content.
Args:
blob: bytes, Message to be signed.
Raises:
NotImplementedError, always.
"""
raise NotImplementedError(
'Compute Engine service accounts cannot sign blobs')
def _GetRunFlowFlags(args=None):
"""Retrieves command line flags based on gflags module."""
# There's one rare situation where gsutil will not have argparse
# available, but doesn't need anything depending on argparse anyway,
# since they're bringing their own credentials. So we just allow this
# to fail with an ImportError in those cases.
#
# TODO(craigcitro): Move this import back to the top when we drop
# python 2.6 support (eg when gsutil does).
import argparse
parser = argparse.ArgumentParser(parents=[tools.argparser])
# Get command line argparse flags.
flags, _ = parser.parse_known_args(args=args)
# Allow `gflags` and `argparse` to be used side-by-side.
if hasattr(FLAGS, 'auth_host_name'):
flags.auth_host_name = FLAGS.auth_host_name
if hasattr(FLAGS, 'auth_host_port'):
flags.auth_host_port = FLAGS.auth_host_port
if hasattr(FLAGS, 'auth_local_webserver'):
flags.noauth_local_webserver = (not FLAGS.auth_local_webserver)
return flags
# TODO(craigcitro): Switch this from taking a path to taking a stream.
def CredentialsFromFile(path, client_info, oauth2client_args=None):
"""Read credentials from a file."""
user_agent = client_info['user_agent']
scope_key = client_info['scope']
if not isinstance(scope_key, six.string_types):
scope_key = ':'.join(scope_key)
storage_key = client_info['client_id'] + user_agent + scope_key
if _NEW_FILESTORE:
credential_store = multiprocess_file_storage.MultiprocessFileStorage(
path, storage_key)
else:
credential_store = multistore_file.get_credential_storage_custom_string_key( # noqa
path, storage_key)
if hasattr(FLAGS, 'auth_local_webserver'):
FLAGS.auth_local_webserver = False
credentials = credential_store.get()
if credentials is None or credentials.invalid:
print('Generating new OAuth credentials ...')
for _ in range(20):
# If authorization fails, we want to retry, rather than let this
# cascade up and get caught elsewhere. If users want out of the
# retry loop, they can ^C.
try:
flow = oauth2client.client.OAuth2WebServerFlow(**client_info)
flags = _GetRunFlowFlags(args=oauth2client_args)
credentials = tools.run_flow(flow, credential_store, flags)
break
except (oauth2client.client.FlowExchangeError, SystemExit) as e:
# Here SystemExit is "no credential at all", and the
# FlowExchangeError is "invalid" -- usually because
# you reused a token.
print('Invalid authorization: %s' % (e,))
except httplib2.HttpLib2Error as e:
print('Communication error: %s' % (e,))
raise exceptions.CredentialsError(
'Communication error creating credentials: %s' % e)
return credentials
class _MultiProcessCacheFile(object):
"""Simple multithreading and multiprocessing safe cache file.
Notes on behavior:
* the fasteners.InterProcessLock object cannot reliably prevent threads
from double-acquiring a lock. A threading lock is used in addition to
the InterProcessLock. The threading lock is always acquired first and
released last.
* The interprocess lock will not deadlock. If a process can not acquire
the interprocess lock within `_lock_timeout` the call will return as
a cache miss or unsuccessful cache write.
* App Engine environments cannot be process locked because (1) the runtime
does not provide monotonic time and (2) different processes may or may
not share the same machine. Because of this, process locks are disabled
and locking is only guaranteed to protect against multithreaded access.
"""
_lock_timeout = 1
_encoding = 'utf-8'
_thread_lock = threading.Lock()
def __init__(self, filename):
self._file = None
self._filename = filename
if _FASTENERS_AVAILABLE:
self._process_lock_getter = self._ProcessLockAcquired
self._process_lock = fasteners.InterProcessLock(
'{0}.lock'.format(filename))
else:
self._process_lock_getter = self._DummyLockAcquired
self._process_lock = None
@contextlib.contextmanager
def _ProcessLockAcquired(self):
"""Context manager for process locks with timeout."""
try:
is_locked = self._process_lock.acquire(timeout=self._lock_timeout)
yield is_locked
finally:
if is_locked:
self._process_lock.release()
@contextlib.contextmanager
def _DummyLockAcquired(self):
"""Lock context manager for environments without process locks."""
yield True
def LockedRead(self):
"""Acquire an interprocess lock and dump cache contents.
This method safely acquires the locks then reads a string
from the cache file. If the file does not exist and cannot
be created, it will return None. If the locks cannot be
acquired, this will also return None.
Returns:
cache data - string if present, None on failure.
"""
file_contents = None
with self._thread_lock:
if not self._EnsureFileExists():
return None
with self._process_lock_getter() as acquired_plock:
if not acquired_plock:
return None
with open(self._filename, 'rb') as f:
file_contents = f.read().decode(encoding=self._encoding)
return file_contents
def LockedWrite(self, cache_data):
"""Acquire an interprocess lock and write a string.
This method safely acquires the locks then writes a string
to the cache file. If the string is written successfully
the function will return True, if the write fails for any
reason it will return False.
Args:
cache_data: string or bytes to write.
Returns:
bool: success
"""
if isinstance(cache_data, six.text_type):
cache_data = cache_data.encode(encoding=self._encoding)
with self._thread_lock:
if not self._EnsureFileExists():
return False
with self._process_lock_getter() as acquired_plock:
if not acquired_plock:
return False
with open(self._filename, 'wb') as f:
f.write(cache_data)
return True
def _EnsureFileExists(self):
"""Touches a file; returns False on error, True on success."""
if not os.path.exists(self._filename):
old_umask = os.umask(0o177)
try:
open(self._filename, 'a+b').close()
except OSError:
return False
finally:
os.umask(old_umask)
return True
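def _ExampleCacheRoundTrip(cache_filename):
    """Hedged usage sketch for _MultiProcessCacheFile (not in the original file).

    Writes a small JSON payload through the thread- and process-safe cache and
    reads it back; returns None if either lock cannot be acquired. The
    cache_filename argument is illustrative; the credentials code above uses
    the caller-supplied cache filename instead.
    """
    cache_file = _MultiProcessCacheFile(cache_filename)
    if not cache_file.LockedWrite(json.dumps({'scopes': None})):
        return None
    return cache_file.LockedRead()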
# TODO(craigcitro): Push this into oauth2client.
def GetUserinfo(credentials, http=None): # pylint: disable=invalid-name
"""Get the userinfo associated with the given credentials.
This is dependent on the token having either the userinfo.email or
userinfo.profile scope for the given token.
Args:
credentials: (oauth2client.client.Credentials) incoming credentials
http: (httplib2.Http, optional) http instance to use
Returns:
The email address for this token, or None if the required scopes
aren't available.
"""
http = http or httplib2.Http()
url = _GetUserinfoUrl(credentials)
# We ignore communication woes here (i.e. SSL errors, socket
# timeout), as handling these should be done in a common location.
response, content = http.request(url)
if response.status == http_client.BAD_REQUEST:
credentials.refresh(http)
url = _GetUserinfoUrl(credentials)
response, content = http.request(url)
return json.loads(content or '{}') # Save ourselves from an empty reply.
def _GetUserinfoUrl(credentials):
url_root = 'https://oauth2.googleapis.com/tokeninfo'
query_args = {'access_token': credentials.access_token}
return '?'.join((url_root, urllib.parse.urlencode(query_args)))
@_RegisterCredentialsMethod
def _GetServiceAccountCredentials(
client_info, service_account_name=None, service_account_keyfile=None,
service_account_json_keyfile=None, **unused_kwds):
"""Returns ServiceAccountCredentials from give file."""
if ((service_account_name and not service_account_keyfile) or
(service_account_keyfile and not service_account_name)):
raise exceptions.CredentialsError(
'Service account name or keyfile provided without the other')
scopes = client_info['scope'].split()
user_agent = client_info['user_agent']
# Use the .json credentials, if provided.
if service_account_json_keyfile:
return ServiceAccountCredentialsFromFile(
service_account_json_keyfile, scopes, user_agent=user_agent)
# Fall back to .p12 if there's no .json credentials.
if service_account_name is not None:
return ServiceAccountCredentialsFromP12File(
service_account_name, service_account_keyfile, scopes, user_agent)
@_RegisterCredentialsMethod
def _GetGaeServiceAccount(client_info, **unused_kwds):
scopes = client_info['scope'].split(' ')
return GaeAssertionCredentials.Get(scopes=scopes)
@_RegisterCredentialsMethod
def _GetGceServiceAccount(client_info, **unused_kwds):
scopes = client_info['scope'].split(' ')
return GceAssertionCredentials.Get(scopes=scopes)
@_RegisterCredentialsMethod
def _GetApplicationDefaultCredentials(
client_info, skip_application_default_credentials=False,
**unused_kwds):
"""Returns ADC with right scopes."""
scopes = client_info['scope'].split()
if skip_application_default_credentials:
return None
gc = oauth2client.client.GoogleCredentials
with cache_file_lock:
try:
# pylint: disable=protected-access
# We've already done our own check for GAE/GCE
# credentials, we don't want to pay for checking again.
credentials = gc._implicit_credentials_from_files()
except oauth2client.client.ApplicationDefaultCredentialsError:
return None
# If we got back a non-service account credential, we need to use
# a heuristic to decide whether or not the application default
# credential will work for us. We assume that if we're requesting
# cloud-platform, our scopes are a subset of cloud scopes, and the
# ADC will work.
cp = 'https://www.googleapis.com/auth/cloud-platform'
if credentials is None:
return None
if not isinstance(credentials, gc) or cp in scopes:
return credentials.create_scoped(scopes)
return None
|
[] |
[] |
[
"SERVER_SOFTWARE",
"GCE_METADATA_ROOT",
"GCE_METADATA_IP"
] |
[]
|
["SERVER_SOFTWARE", "GCE_METADATA_ROOT", "GCE_METADATA_IP"]
|
python
| 3 | 0 | |
preprocess/video_generator.py
|
import os
import pickle
import shutil
import imageio
import pandas as pd
import subprocess
from PIL import Image
import face_recognition
import numpy as np
import skimage
import scipy
from keras.engine import Model
from keras.layers import Input
from keras_vggface.vggface import VGGFace
from keras_vggface import utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'
FNULL = open(os.devnull, 'w')
class VideoExtract():
def __init__(self, fps, duration, face_extraction_model, verbose):
self.destination_dir = "speech2face/preprocess/data/speaker_video_embeddings/"
self.videos = "speech2face/preprocess/data/videos/"
self.frames_dir = "speech2face/preprocess/data/frames/"
self.frame_cropped = "speech2face/preprocess/data/cropped_frames/"
self.model_dir = "speech2face/preprocess/data/pretrained_model/"
self.fps = fps
self.duration = duration
self.face_extraction_model = face_extraction_model
self.vgg = VGGFace(model='vgg16')
self.out = self.vgg.get_layer('fc7').output
self.vgg_model = Model(self.vgg.input, self.out)
self.verbose = verbose
if not os.path.isdir(self.destination_dir):
os.mkdir(self.destination_dir)
        if not os.path.isdir(self.frames_dir):
            os.mkdir(self.frames_dir)
        if not os.path.isdir(self.frame_cropped):
            os.mkdir(self.frame_cropped)
def extract_video(self, id, x, y):
embeddings = np.zeros((4096))
if not os.path.isfile(self.videos + id + ".mp4"):
if self.verbose:
print("--------Video {} not found-----------".format(self.videos + id + ".mp4"))
return 1
if (not os.path.isfile(self.destination_dir + id + ".pkl")):
if self.verbose:
print("Resampling video", id)
resample = "ffmpeg -nostats -loglevel 0 -y -i {1}{2}.mp4 -r {0} -t {3} '{4}{2}.mp4'".format(self.fps, self.videos, id, self.duration, self.destination_dir)
res2 = subprocess.Popen(resample, stdout = FNULL, shell=True).communicate()
if not os.path.isfile(self.destination_dir + id + ".mp4"):
if self.verbose:
print("--------Fault in video {}--------".format(id))
return 1
extract_frames = "ffmpeg -nostats -loglevel 0 -i '{0}{1}.mp4' {2}/%02d.jpg".format(self.destination_dir, id, self.frames_dir)
rs = subprocess.Popen(extract_frames, stdout = FNULL, shell = True).communicate()
for j in range(1, 7):
if not os.path.isfile(self.frames_dir + "%02d" % j + ".jpg"):
if self.verbose:
print("------MISSING FRAME DETECTED FOR {} FRAME NO {}----".format(id, j))
continue
if self.verbose:
print("reading frame - {0}".format(j))
frame = Image.open(self.frames_dir + "%02d" % j + ".jpg")
face_boxes = face_recognition.face_locations(np.array(frame), model= self.face_extraction_model)
if(len(face_boxes) > 1):
if self.verbose:
print("-----2 faces detected in {0} frame {1}-----".format(id, j))
return 1
elif len(face_boxes) == 0:
if self.verbose:
print("-----No face detected in {} frame {}-----".format(id, j))
return 1
top, right, bottom, left = np.squeeze(face_boxes)
frame_cropped = frame.crop(box = (left, top, right, bottom))
frame_resized = np.array(Image.fromarray(np.array(frame_cropped)).resize((224,224)))
Image.fromarray(frame_resized).save(self.frame_cropped + id + '.jpg')
frame_resized = np.expand_dims(np.array(frame_resized, dtype=np.float64), 0)
frame_resized = utils.preprocess_input(frame_resized, version=1)
embeddings = self.vgg_model.predict(frame_resized)
break
pickle.dump(embeddings, open(self.destination_dir + id + ".pkl", "wb"))
delete_frames = "rm {0}*".format(self.frames_dir)
delete_video = "rm '{0}'".format(self.destination_dir + id + ".mp4")
rs = subprocess.Popen(delete_frames, stdout = subprocess.PIPE, shell = True).communicate()
rs = subprocess.Popen(delete_video, stdout = subprocess.PIPE, shell = True).communicate()
return 0
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
iceoryx_integrationtest/iceoryx_integrationtest/test_complexdata_example.py
|
# Copyright (c) 2021 by Apex.AI Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import os
import unittest
import launch
from launch_ros.substitutions import ExecutableInPackage
import launch_testing
import launch_testing.actions
from launch_testing.asserts import assertSequentialStdout
import pytest
# @brief Test goal: "Integrationtest for the complexdata example of iceoryx"
# @pre setup ROS2 launch executables for RouDi (debug mode) and the example processes
# @post check if all applications return exitcode 0 (success) after test run
@pytest.mark.launch_test
def generate_test_description():
proc_env = os.environ.copy()
colcon_prefix_path = os.environ.get('COLCON_PREFIX_PATH', '')
executable_list = ['iox-cpp-publisher-vector', 'iox-cpp-subscriber-vector',
'iox-cpp-publisher-complexdata', 'iox-cpp-subscriber-complexdata']
process_list = []
for exec in executable_list:
tmp_exec = os.path.join(
colcon_prefix_path,
'example_complexdata/bin/',
exec)
tmp_process = launch.actions.ExecuteProcess(
cmd=[tmp_exec],
env=proc_env, output='screen')
process_list.append(tmp_process)
print("Process list:", process_list)
roudi_executable = os.path.join(
colcon_prefix_path,
'iceoryx_posh/bin/',
'iox-roudi'
)
roudi_process = launch.actions.ExecuteProcess(
cmd=[roudi_executable, '-l', 'debug'],
env=proc_env, output='screen',
sigterm_timeout='20')
return launch.LaunchDescription([
process_list[0],
process_list[1],
process_list[2],
process_list[3],
roudi_process,
launch_testing.actions.ReadyToTest()
]), {'iox-cpp-publisher-vector': process_list[0], 'iox-cpp-subscriber-vector': process_list[1],
'iox-cpp-publisher-complexdata': process_list[2], 'iox-cpp-subscriber-complexdata': process_list[3],
'roudi_process': roudi_process}
# These tests will run concurrently with the dut process. After this test is done,
# the launch system will shut down RouDi
class TestComplexDataExample(unittest.TestCase):
def test_roudi_ready(self, proc_output):
proc_output.assertWaitFor(
'RouDi is ready for clients', timeout=45, stream='stdout')
def test_publisher_subscriber_data_exchange(self, proc_output):
proc_output.assertWaitFor(
'iox-cpp-subscriber-vector got values: 15, 16, 17, 18, 19', timeout=45, stream='stdout')
def test_publisher_subscriber_untyped_data_exchange(self, proc_output):
proc_output.assertWaitFor(
'iox-cpp-subscriber-complexdata got values:\nstringForwardList: hello, world\nintegerList: 15, 22, 11\noptionalList: optional is empty, 42\nfloatStack: 44, 33, 22, 11, 0\nsomeString: hello iceoryx\ndoubleVector: 11, 12, 13, 14, 15\nvariantVector: seven, 8, nine',
timeout=45, stream='stdout')
# These tests run after shutdown and examine the stdout log
@launch_testing.post_shutdown_test()
class TestComplexdataExampleExitCodes(unittest.TestCase):
def test_exit_code(self, proc_info):
launch_testing.asserts.assertExitCodes(proc_info)
|
[] |
[] |
[
"COLCON_PREFIX_PATH"
] |
[]
|
["COLCON_PREFIX_PATH"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
	"database/sql"
	"log"
	"net/http"
	"os"

	// Blank import to register the "mysql" driver with database/sql. The
	// canonical go-sql-driver package is assumed here; it may be redundant if
	// the database package already registers a driver.
	_ "github.com/go-sql-driver/mysql"

	"github.com/sotoz/ferrytale/controller"
	"github.com/sotoz/ferrytale/database"
)
func main() {
	db, err := sql.Open("mysql", os.Getenv("DATABASE_URL"))
if err != nil {
log.Fatalf("Could not open database: %s", err)
}
database.DBCon = db
err = db.Ping()
if err != nil {
log.Fatalf("cannot connect to the database: %s", err)
}
defer db.Close()
log.Print("Ferrytale started...")
port := ":" + os.Getenv("PORT")
log.Fatal(http.ListenAndServe(port, controller.Router()))
}
|
[
"\"DATABASE_URL\"",
"\"PORT\""
] |
[] |
[
"PORT",
"DATABASE_URL"
] |
[]
|
["PORT", "DATABASE_URL"]
|
go
| 2 | 0 | |
cmd/captain/cmd.go
|
package main
import (
"fmt"
"os"
"github.com/fatih/color"
"github.com/indigobio/captain"
"github.com/spf13/cobra"
)
// Options that are passed by CLI are mapped here for consumption
type Options struct {
debug bool
force bool
long_sha bool
namespace string
config string
images []string
tag string
// Options to define the docker tags context
all_branches bool
branch_tags bool
commit_tags bool
}
var options Options
func handleCmd() {
var cmdBuild = &cobra.Command{
Use: "build [image]",
Short: "Builds the docker image(s) of your repository",
		Long:  `It will build the docker image(s) described in captain.yml, in the order they appear in the file.`,
Run: func(cmd *cobra.Command, args []string) {
config := captain.NewConfig(options.namespace, options.config, true)
if len(args) == 1 {
config.FilterConfig(args[0])
}
buildOpts := captain.BuildOptions{
Config: config,
Tag: options.tag,
Force: options.force,
All_branches: options.all_branches,
Long_sha: options.long_sha,
Branch_tags: options.branch_tags,
Commit_tags: options.commit_tags,
}
captain.Build(buildOpts)
},
}
var cmdTest = &cobra.Command{
Use: "test",
Short: "Runs the tests",
		Long:  `It will execute the commands described in the test section, in the order they appear in the file.`,
Run: func(cmd *cobra.Command, args []string) {
config := captain.NewConfig(options.namespace, options.config, true)
if len(args) == 1 {
config.FilterConfig(args[0])
}
buildOpts := captain.BuildOptions{
Config: config,
Tag: options.tag,
Force: options.force,
All_branches: options.all_branches,
Long_sha: options.long_sha,
Branch_tags: options.branch_tags,
Commit_tags: options.commit_tags,
}
// Build everything before testing
captain.Build(buildOpts)
captain.Test(buildOpts)
},
}
var cmdPush = &cobra.Command{
Use: "push",
Short: "Pushes the images to remote registry",
Long: `It will push the generated images to the remote registry.`,
Run: func(cmd *cobra.Command, args []string) {
config := captain.NewConfig(options.namespace, options.config, true)
if len(args) == 1 {
config.FilterConfig(args[0])
}
buildOpts := captain.BuildOptions{
Config: config,
Tag: options.tag,
Force: options.force,
All_branches: options.all_branches,
Long_sha: options.long_sha,
Branch_tags: options.branch_tags,
Commit_tags: options.commit_tags,
}
// Build everything before pushing
captain.Build(buildOpts)
captain.Push(buildOpts)
},
}
var cmdPull = &cobra.Command{
Use: "pull",
Short: "Pulls the images from remote registry",
Long: `It will pull the images from the remote registry.`,
Run: func(cmd *cobra.Command, args []string) {
config := captain.NewConfig(options.namespace, options.config, true)
if len(args) == 1 {
config.FilterConfig(args[0])
}
buildOpts := captain.BuildOptions{
Config: config,
Tag: options.tag,
Force: options.force,
All_branches: options.all_branches,
Long_sha: options.long_sha,
Branch_tags: options.branch_tags,
Commit_tags: options.commit_tags,
}
captain.Pull(buildOpts)
},
}
var cmdPurge = &cobra.Command{
Use: "purge",
Short: "Purges the stale images",
Long: `It will purge the stale images. Stale image is an image that is not the latest of at least one branch.`,
Run: func(cmd *cobra.Command, args []string) {
config := captain.NewConfig(options.namespace, options.config, true)
if len(args) == 1 {
config.FilterConfig(args[0])
}
buildOpts := captain.BuildOptions{
Config: config,
Force: options.force,
All_branches: options.all_branches,
Long_sha: options.long_sha,
}
captain.Purge(buildOpts)
},
}
var cmdSelfUpdate = &cobra.Command{
Use: "self-update",
Short: "Updates Captain to the last version",
Long: `Updates Captain to the last available version.`,
Run: func(cmd *cobra.Command, args []string) {
captain.SelfUpdate()
},
}
var cmdVersion = &cobra.Command{
Use: "version",
Short: "Display version",
Long: `Displays the version of Captain.`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Println("v2.0.0")
},
}
var captainCmd = &cobra.Command{
Use: "captain",
Short: "captain - build tool for Docker focused on CI/CD",
Long: `
Captain, the CLI build tool for Docker made for Continuous Integration / Continuous Delivery.
	It works by reading the captain.yaml file, which describes how to build, test, push and release the docker image(s) of your repository.`,
}
captainCmd.PersistentFlags().BoolVarP(&captain.Debug, "debug", "D", false, "Enable debug mode")
captainCmd.PersistentFlags().StringVarP(&options.namespace, "namespace", "N", getNamespace(), "Set default image namespace")
captainCmd.PersistentFlags().BoolVarP(&color.NoColor, "no-color", "n", false, "Disable color output")
captainCmd.PersistentFlags().BoolVarP(&options.long_sha, "long-sha", "l", false, "Use the long git commit SHA when referencing revisions")
cmdBuild.Flags().BoolVarP(&options.force, "force", "f", false, "Force build even if image is already built")
cmdBuild.Flags().BoolVarP(&options.all_branches, "all-branches", "B", false, "Build all branches on specific commit instead of just working branch")
cmdBuild.Flags().StringVarP(&options.tag, "tag", "t", "", "Tag version")
cmdPull.Flags().BoolVarP(&options.all_branches, "all-branches", "B", false, "Pull all branches on specific commit instead of just working branch")
cmdPull.Flags().BoolVarP(&options.branch_tags, "branch-tags", "b", true, "Pull the 'branch' docker tags")
cmdPull.Flags().BoolVarP(&options.commit_tags, "commit-tags", "c", false, "Pull the 'commit' docker tags")
cmdPull.Flags().StringVarP(&options.tag, "tag", "t", "", "Tag version")
cmdPush.Flags().BoolVarP(&options.all_branches, "all-branches", "B", false, "Push all branches on specific commit instead of just working branch")
cmdPush.Flags().BoolVarP(&options.branch_tags, "branch-tags", "b", true, "Push the 'branch' docker tags")
cmdPush.Flags().BoolVarP(&options.commit_tags, "commit-tags", "c", false, "Push the 'commit' docker tags")
cmdPush.Flags().StringVarP(&options.tag, "tag", "t", "", "Tag version")
cmdPurge.Flags().BoolVarP(&options.force, "dangling", "d", false, "Remove dangling images")
captainCmd.AddCommand(cmdBuild, cmdTest, cmdPush, cmdPull, cmdVersion, cmdPurge, cmdSelfUpdate)
captainCmd.Execute()
}
func getNamespace() string {
return os.Getenv("USER")
}
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
optimade/server/config.py
|
# pylint: disable=no-self-argument
from enum import Enum
from pathlib import Path
import warnings
from typing import Any, Dict, List, Optional, Tuple
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from pydantic import ( # pylint: disable=no-name-in-module
AnyHttpUrl,
BaseSettings,
Field,
root_validator,
validator,
)
from pydantic.env_settings import SettingsSourceCallable
from optimade import __version__
from optimade.models import Implementation, Provider
DEFAULT_CONFIG_FILE_PATH: str = str(Path.home().joinpath(".optimade.json"))
"""Default configuration file path.
This variable is used as the fallback value if the environment variable `OPTIMADE_CONFIG_FILE` is
not set.
!!! note
It is set to: `pathlib.Path.home()/.optimade.json`
For Unix-based systems (Linux) this will be equivalent to `~/.optimade.json`.
"""
class LogLevel(Enum):
"""Replication of logging LogLevels
- `notset`
- `debug`
- `info`
- `warning`
- `error`
- `critical`
"""
NOTSET = "notset"
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
CRITICAL = "critical"
class SupportedBackend(Enum):
"""Enumeration of supported database backends
- `elastic`: [Elasticsearch](https://www.elastic.co/).
- `mongodb`: [MongoDB](https://www.mongodb.com/).
- `mongomock`: Also MongoDB, but instead of using the
[`pymongo`](https://pymongo.readthedocs.io/) driver to connect to a live Mongo database
instance, this will use the [`mongomock`](https://github.com/mongomock/mongomock) driver,
creating an in-memory database, which is mainly used for testing.
"""
ELASTIC = "elastic"
MONGODB = "mongodb"
MONGOMOCK = "mongomock"
def config_file_settings(settings: BaseSettings) -> Dict[str, Any]:
"""Configuration file settings source.
Based on the example in the
[pydantic documentation](https://pydantic-docs.helpmanual.io/usage/settings/#adding-sources),
this function loads ServerConfig settings from a configuration file.
The file must be of either type JSON or YML/YAML.
Parameters:
settings: The `pydantic.BaseSettings` class using this function as a
`pydantic.SettingsSourceCallable`.
Returns:
Dictionary of settings as read from a file.
"""
import json
import os
import yaml
encoding = settings.__config__.env_file_encoding
config_file = Path(os.getenv("OPTIMADE_CONFIG_FILE", DEFAULT_CONFIG_FILE_PATH))
res = {}
if config_file.is_file():
config_file_content = config_file.read_text(encoding=encoding)
try:
res = json.loads(config_file_content)
except json.JSONDecodeError as json_exc:
try:
# This can essentially also load JSON files, as JSON is a subset of YAML v1,
# but I suspect it is not as rigorous
res = yaml.safe_load(config_file_content)
except yaml.YAMLError as yaml_exc:
warnings.warn(
f"Unable to parse config file {config_file} as JSON or YAML, using the "
"default settings instead..\n"
f"Errors:\n JSON:\n{json_exc}.\n\n YAML:\n{yaml_exc}"
)
else:
warnings.warn(
f"Unable to find config file at {config_file}, using the default settings instead."
)
if res is None:
# This can happen if the yaml loading doesn't succeed properly, e.g., if the file is empty.
warnings.warn(
"Unable to load any settings from {config_file}, using the default settings instead."
)
res = {}
return res
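# Hedged example (not part of the original module): a minimal JSON file that
# config_file_settings would pick up. The path, values, and run command are
# illustrative only.
#
#   $ cat my_config.json
#   {"debug": true, "database_backend": "mongomock", "page_limit": 10}
#   $ OPTIMADE_CONFIG_FILE=my_config.json uvicorn optimade.server.main:app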
class ServerConfig(BaseSettings):
"""This class stores server config parameters in a way that
can be easily extended for new config file types.
"""
debug: bool = Field(
False,
description="Turns on Debug Mode for the OPTIMADE Server implementation",
)
insert_test_data: bool = Field(
True,
description=(
"Insert test data into each collection on server initialisation. If true, the "
"configured backend will be populated with test data on server start. Should be "
"disabled for production usage."
),
)
use_real_mongo: Optional[bool] = Field(
None, description="DEPRECATED: force usage of MongoDB over any other backend."
)
database_backend: SupportedBackend = Field(
SupportedBackend.MONGOMOCK,
description="Which database backend to use out of the supported backends.",
)
elastic_hosts: Optional[List[Dict]] = Field(
None, description="Host settings to pass through to the `Elasticsearch` class."
)
mongo_database: str = Field(
"optimade", description="Mongo database for collection data"
)
mongo_uri: str = Field("localhost:27017", description="URI for the Mongo server")
links_collection: str = Field(
"links", description="Mongo collection name for /links endpoint resources"
)
references_collection: str = Field(
"references",
description="Mongo collection name for /references endpoint resources",
)
structures_collection: str = Field(
"structures",
description="Mongo collection name for /structures endpoint resources",
)
page_limit: int = Field(20, description="Default number of resources per page")
page_limit_max: int = Field(
500, description="Max allowed number of resources per page"
)
default_db: str = Field(
"test_server",
description=(
"ID of /links endpoint resource for the chosen default OPTIMADE implementation (only "
"relevant for the index meta-database)"
),
)
root_path: Optional[str] = Field(
None,
description=(
"Sets the FastAPI app `root_path` parameter. This can be used to serve the API under a"
" path prefix behind a proxy or as a sub-application of another FastAPI app. See "
"https://fastapi.tiangolo.com/advanced/sub-applications/#technical-details-root_path "
"for details."
),
)
base_url: Optional[str] = Field(
None, description="Base URL for this implementation"
)
implementation: Implementation = Field(
Implementation(
name="OPTIMADE Python Tools",
version=__version__,
source_url="https://github.com/Materials-Consortia/optimade-python-tools",
maintainer={"email": "[email protected]"},
),
description="Introspective information about this OPTIMADE implementation",
)
index_base_url: Optional[AnyHttpUrl] = Field(
None,
description="An optional link to the base URL for the index meta-database of the provider.",
)
provider: Provider = Field(
Provider(
prefix="exmpl",
name="Example provider",
description="Provider used for examples, not to be assigned to a real database",
homepage="https://example.com",
),
description="General information about the provider of this OPTIMADE implementation",
)
provider_fields: Dict[
Literal["links", "references", "structures"], List[str]
] = Field(
{},
description=(
"A list of additional fields to be served with the provider's prefix attached, "
"broken down by endpoint."
),
)
aliases: Dict[Literal["links", "references", "structures"], Dict[str, str]] = Field(
{},
description=(
"A mapping between field names in the database with their corresponding OPTIMADE field"
" names, broken down by endpoint."
),
)
length_aliases: Dict[
Literal["links", "references", "structures"], Dict[str, str]
] = Field(
{},
description=(
"A mapping between a list property (or otherwise) and an integer property that defines"
" the length of that list, for example elements -> nelements. The standard aliases are"
" applied first, so this dictionary must refer to the API fields, not the database "
"fields."
),
)
index_links_path: Path = Field(
Path(__file__).parent.joinpath("index_links.json"),
description=(
"Absolute path to a JSON file containing the MongoDB collection of links entries "
"(documents) to serve under the /links endpoint of the index meta-database. "
"NB! As suggested in the previous sentence, these will only be served when using a "
"MongoDB-based backend."
),
)
log_level: LogLevel = Field(
LogLevel.INFO, description="Logging level for the OPTIMADE server."
)
log_dir: Path = Field(
Path("/var/log/optimade/"),
description="Folder in which log files will be saved.",
)
@validator("implementation", pre=True)
def set_implementation_version(cls, v):
"""Set defaults and modify passed value(s)"""
res = {"version": __version__}
res.update(v)
return res
@root_validator(pre=True)
def use_real_mongo_override(cls, values):
"""Overrides the `database_backend` setting with MongoDB and
raises a deprecation warning.
"""
use_real_mongo = values.pop("use_real_mongo", None)
if use_real_mongo is not None:
warnings.warn(
"'use_real_mongo' is deprecated, please set the appropriate 'database_backend' "
"instead.",
DeprecationWarning,
)
if use_real_mongo:
values["database_backend"] = SupportedBackend.MONGODB
return values
class Config:
"""
This is a pydantic model Config object that modifies the behaviour of
ServerConfig by adding a prefix to the environment variables that
override config file values. It has nothing to do with the OPTIMADE
config.
"""
env_prefix = "optimade_"
extra = "allow"
env_file_encoding = "utf-8"
@classmethod
def customise_sources(
cls,
init_settings: SettingsSourceCallable,
env_settings: SettingsSourceCallable,
file_secret_settings: SettingsSourceCallable,
) -> Tuple[SettingsSourceCallable, ...]:
"""
**Priority of config settings sources**:
1. Passed arguments upon initialization of
[`ServerConfig`][optimade.server.config.ServerConfig].
2. Environment variables, matching the syntax: `"OPTIMADE_"` or `"optimade_"` +
`<config_name>`, e.g., `OPTIMADE_LOG_LEVEL=debug` or
`optimade_log_dir=~/logs_dir/optimade/`.
3. Configuration file (JSON/YAML) taken from:
1. Environment variable `OPTIMADE_CONFIG_FILE`.
2. Default location (see
[DEFAULT_CONFIG_FILE_PATH][optimade.server.config.DEFAULT_CONFIG_FILE_PATH]).
4. Settings from secret file (see
[pydantic documentation](https://pydantic-docs.helpmanual.io/usage/settings/#secret-support)
for more information).
"""
return (
init_settings,
env_settings,
config_file_settings,
file_secret_settings,
)
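# Illustrative note (not part of the original class): with this ordering, an
# environment variable such as OPTIMADE_DEBUG=true overrides a `debug` value read
# from the JSON/YAML config file, while an explicit ServerConfig(debug=False)
# argument overrides both.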
CONFIG = ServerConfig()
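# Illustrative usage sketch (not part of the original module): other server modules
# would typically import this singleton, e.g.
#     from optimade.server.config import CONFIG
#     assert CONFIG.page_limit <= CONFIG.page_limit_max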
| [] | [] | ["OPTIMADE_CONFIG_FILE"] | [] | ["OPTIMADE_CONFIG_FILE"] | python | 1 | 0 | |
bake/appdirs.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version_info__ = (1, 4, 1)
__version__ = '.'.join(map(str, __version_info__))
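# Illustrative usage sketch (not part of the original module), using the AppDirs
# convenience wrapper defined below:
#     dirs = AppDirs("MyApp", "MyCompany", version="1.0")
#     dirs.user_data_dir   # e.g. ~/.local/share/MyApp/1.0 on Linux
#     dirs.user_cache_dir  # e.g. ~/.cache/MyApp/1.0 on Linux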
import sys
import os
PY3 = sys.version_info[0] == 3
if PY3:
unicode = str
if sys.platform.startswith('java'):
import platform
os_name = platform.java_ver()[3][0]
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
system = 'win32'
elif os_name.startswith('Mac'): # "Mac OS X", etc.
system = 'darwin'
else: # "Linux", "SunOS", "FreeBSD", etc.
# Setting this to "linux2" is not ideal, but only Windows or Mac
# are actually checked for and the rest of the module expects
# *sys.platform* style strings.
system = 'linux2'
else:
system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
Mac OS X: ~/Library/Application Support/<AppName>
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if system == "win32":
if appauthor is None:
appauthor = appname
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('~/Library/Application Support/')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
r"""Return full path to the user-specific config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user config directories are:
Mac OS X: same as user_data_dir
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.config/<AppName>".
"""
if system in ["win32", "darwin"]:
path = user_data_dir(appname, appauthor, None, roaming)
else:
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared config dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of config dirs should be
returned. By default, the first item from XDG_CONFIG_DIRS is
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
Typical site config directories are:
Mac OS X: same as site_data_dir
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
$XDG_CONFIG_DIRS
Win *: same as site_data_dir
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system in ["win32", "darwin"]:
path = site_data_dir(appname, appauthor)
if appname and version:
path = os.path.join(path, version)
else:
# XDG default for $XDG_CONFIG_DIRS
# only first, if multipath is False
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific cache dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Cache" to the base app data dir for Windows. See
discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go in
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
app data dir (the default returned by `user_data_dir` above). Apps typically
put cache data somewhere *under* the given dir here. Some examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
This can be disabled with the `opinion=False` option.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
if opinion:
path = os.path.join(path, "Cache")
elif system == 'darwin':
path = os.path.expanduser('~/Library/Caches')
if appname:
path = os.path.join(path, appname)
else:
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
if appname:
path = os.path.join(path, appname)
if appname and version:
path = os.path.join(path, version)
return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
r"""Return full path to the user-specific log dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"opinion" (boolean) can be False to disable the appending of
"Logs" to the base app data dir for Windows, and "log" to the
base cache dir for Unix. See discussion below.
Typical user cache directories are:
Mac OS X: ~/Library/Logs/<AppName>
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
On Windows the only suggestion in the MSDN docs is that local settings
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
examples of what some windows apps use for a logs dir.)
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
value for Windows and appends "log" to the user cache dir for Unix.
This can be disabled with the `opinion=False` option.
"""
if system == "darwin":
path = os.path.join(
os.path.expanduser('~/Library/Logs'),
appname)
elif system == "win32":
path = user_data_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "Logs")
else:
path = user_cache_dir(appname, appauthor, version)
version = False
if opinion:
path = os.path.join(path, "log")
if appname and version:
path = os.path.join(path, version)
return path
class AppDirs(object):
"""Convenience wrapper for getting application dirs."""
def __init__(self, appname, appauthor=None, version=None, roaming=False,
multipath=False):
self.appname = appname
self.appauthor = appauthor
self.version = version
self.roaming = roaming
self.multipath = multipath
@property
def user_data_dir(self):
return user_data_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_data_dir(self):
return site_data_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_config_dir(self):
return user_config_dir(self.appname, self.appauthor,
version=self.version, roaming=self.roaming)
@property
def site_config_dir(self):
return site_config_dir(self.appname, self.appauthor,
version=self.version, multipath=self.multipath)
@property
def user_cache_dir(self):
return user_cache_dir(self.appname, self.appauthor,
version=self.version)
@property
def user_log_dir(self):
return user_log_dir(self.appname, self.appauthor,
version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
"""This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
if PY3:
import winreg as _winreg
else:
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
return dir
def _get_win_folder_with_pywin32(csidl_name):
from win32com.shell import shellcon, shell
dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
# Try to make this a unicode path because SHGetFolderPath does
# not return unicode strings when there is unicode data in the
# path.
try:
dir = unicode(dir)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
try:
import win32api
dir = win32api.GetShortPathName(dir)
except ImportError:
pass
except UnicodeError:
pass
return dir
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
def _get_win_folder_with_jna(csidl_name):
import array
from com.sun import jna
from com.sun.jna.platform import win32
buf_size = win32.WinDef.MAX_PATH * 2
buf = array.zeros('c', buf_size)
shell = win32.Shell32.INSTANCE
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in dir:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf = array.zeros('c', buf_size)
kernel = win32.Kernel32.INSTANCE
if kernel.GetShortPathName(dir, buf, buf_size):
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
return dir
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir", "site_data_dir",
"user_config_dir", "site_config_dir",
"user_cache_dir", "user_log_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| [] | [] | ["XDG_CONFIG_DIRS", "XDG_DATA_DIRS", "XDG_DATA_HOME", "XDG_CACHE_HOME", "XDG_CONFIG_HOME"] | [] | ["XDG_CONFIG_DIRS", "XDG_DATA_DIRS", "XDG_DATA_HOME", "XDG_CACHE_HOME", "XDG_CONFIG_HOME"] | python | 5 | 0 | |
pkg/startup/startup.go
|
// Copyright (c) 2016 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package startup
import (
"context"
cryptorand "crypto/rand"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
"regexp"
"strings"
"time"
"github.com/projectcalico/libcalico-go/lib/apiconfig"
api "github.com/projectcalico/libcalico-go/lib/apis/v3"
client "github.com/projectcalico/libcalico-go/lib/clientv3"
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/logutils"
"github.com/projectcalico/libcalico-go/lib/names"
cnet "github.com/projectcalico/libcalico-go/lib/net"
"github.com/projectcalico/libcalico-go/lib/numorstring"
"github.com/projectcalico/libcalico-go/lib/options"
"github.com/projectcalico/libcalico-go/lib/upgrade/migrator"
"github.com/projectcalico/libcalico-go/lib/upgrade/migrator/clients"
log "github.com/sirupsen/logrus"
kapiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/projectcalico/node/pkg/calicoclient"
"github.com/projectcalico/node/pkg/startup/autodetection"
)
const (
DEFAULT_IPV4_POOL_CIDR = "192.168.0.0/16"
DEFAULT_IPV4_POOL_NAME = "default-ipv4-ippool"
DEFAULT_IPV6_POOL_NAME = "default-ipv6-ippool"
AUTODETECTION_METHOD_FIRST = "first-found"
AUTODETECTION_METHOD_CAN_REACH = "can-reach="
AUTODETECTION_METHOD_INTERFACE = "interface="
AUTODETECTION_METHOD_SKIP_INTERFACE = "skip-interface="
)
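// Illustrative examples (not part of the original file) of how these method strings
// are typically supplied via the IP_AUTODETECTION_METHOD (or IP6_AUTODETECTION_METHOD)
// environment variable, parsed in autoDetectCIDR below:
//   IP_AUTODETECTION_METHOD=first-found
//   IP_AUTODETECTION_METHOD=can-reach=8.8.8.8
//   IP_AUTODETECTION_METHOD=interface=eth.*
//   IP_AUTODETECTION_METHOD=skip-interface=docker0,cali.*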
// Version string, set during build.
var VERSION string
// For testing purposes we define an exit function that we can override.
var exitFunction = os.Exit
var (
// Default values, names for different configs.
defaultLogSeverity = "Info"
globalFelixConfigName = "default"
felixNodeConfigNamePrefix = "node."
)
// This file contains the main startup processing for the calico/node. This
// includes:
// - Detecting IP address and Network to use for BGP
// - Configuring the node resource with IP/AS information provided in the
// environment, or autodetected.
// - Creating default IP Pools for quick-start use
func Run() {
// Check $CALICO_STARTUP_LOGLEVEL to capture early log statements
configureLogging()
// Determine the name for this node.
nodeName := determineNodeName()
// Create the Calico API cli.
cfg, cli := calicoclient.CreateClient()
ctx := context.Background()
// An explicit value of true is required to wait for the datastore.
if os.Getenv("WAIT_FOR_DATASTORE") == "true" {
waitForConnection(ctx, cli)
log.Info("Datastore is ready")
} else {
log.Info("Skipping datastore connection test")
}
if cfg.Spec.DatastoreType == apiconfig.Kubernetes {
if err := ensureKDDMigrated(cfg, cli); err != nil {
log.WithError(err).Errorf("Unable to ensure datastore is migrated.")
terminate()
}
}
// Query the current Node resources. We update our node resource with
// updated IP data and use the full list of nodes for validation.
node := getNode(ctx, cli, nodeName)
// If Calico is running in policy only mode we don't need to write
// BGP related details to the Node.
if os.Getenv("CALICO_NETWORKING_BACKEND") != "none" {
// Configure and verify the node IP addresses and subnets.
checkConflicts, err := configureIPsAndSubnets(node)
if err != nil {
clearv4 := os.Getenv("IP") == "autodetect"
clearv6 := os.Getenv("IP6") == "autodetect"
if node.ResourceVersion != "" {
// If we're auto-detecting an IP on an existing node and hit an error, clear the previous
// IP addresses from the node since they are no longer valid.
clearNodeIPs(ctx, cli, node, clearv4, clearv6)
}
terminate()
}
// If we report an IP change (v4 or v6) we should verify there are no
// conflicts between Nodes.
if checkConflicts && os.Getenv("DISABLE_NODE_IP_CHECK") != "true" {
v4conflict, v6conflict, err := checkConflictingNodes(ctx, cli, node)
if err != nil {
// If we've auto-detected a new IP address for an existing node that now conflicts, clear the old IP address(es)
// from the node in the datastore. This frees the address in case it needs to be used for another node.
clearv4 := (os.Getenv("IP") == "autodetect") && v4conflict
clearv6 := (os.Getenv("IP6") == "autodetect") && v6conflict
if node.ResourceVersion != "" {
clearNodeIPs(ctx, cli, node, clearv4, clearv6)
}
terminate()
}
}
// Configure the node AS number.
configureASNumber(node)
// If running under kubernetes with secrets to call k8s API
if config, err := rest.InClusterConfig(); err == nil {
log.Info("Setting NetworkUnavailable to False")
err = setNodeNetworkUnavailableFalse(config, nodeName)
if err != nil {
log.WithError(err).Errorf("Unable to set NetworkUnavailable to False")
}
}
}
configureNodeRef(node)
// Check expected filesystem
ensureFilesystemAsExpected()
// Apply the updated node resource.
if _, err := CreateOrUpdate(ctx, cli, node); err != nil {
log.WithError(err).Errorf("Unable to set node resource configuration")
terminate()
}
// Configure IP Pool configuration.
configureIPPools(ctx, cli)
// Set default configuration required for the cluster.
if err := ensureDefaultConfig(ctx, cfg, cli, node); err != nil {
log.WithError(err).Errorf("Unable to set global default configuration")
terminate()
}
// Write config files now that we are ready to start other components.
writeNodeConfig(nodeName)
// Tell the user what the name of the node is.
log.Infof("Using node name: %s", nodeName)
}
// configureNodeRef will attempt to discover the cluster type it is running on, check to ensure we
// have not already set it on this Node, and set it if need be.
func configureNodeRef(node *api.Node) {
orchestrator := "k8s"
nodeRef := ""
// Sort out what type of cluster we're running on.
if nodeRef = os.Getenv("CALICO_K8S_NODE_REF"); nodeRef == "" {
return
}
node.Spec.OrchRefs = []api.OrchRef{api.OrchRef{NodeName: nodeRef, Orchestrator: orchestrator}}
}
// CreateOrUpdate creates the Node if ResourceVersion is not specified,
// or Update if it's specified.
func CreateOrUpdate(ctx context.Context, client client.Interface, node *api.Node) (*api.Node, error) {
if node.ResourceVersion != "" {
return client.Nodes().Update(ctx, node, options.SetOptions{})
}
return client.Nodes().Create(ctx, node, options.SetOptions{})
}
func clearNodeIPs(ctx context.Context, client client.Interface, node *api.Node, clearv4, clearv6 bool) {
if clearv4 {
log.WithField("IP", node.Spec.BGP.IPv4Address).Info("Clearing out-of-date IPv4 address from this node")
node.Spec.BGP.IPv4Address = ""
}
if clearv6 {
log.WithField("IP", node.Spec.BGP.IPv6Address).Info("Clearing out-of-date IPv6 address from this node")
node.Spec.BGP.IPv6Address = ""
}
// If the BGP spec is empty, then set it to nil.
if node.Spec.BGP != nil && reflect.DeepEqual(*node.Spec.BGP, api.NodeBGPSpec{}) {
node.Spec.BGP = nil
}
if clearv4 || clearv6 {
_, err := client.Nodes().Update(ctx, node, options.SetOptions{})
if err != nil {
log.WithError(err).Warnf("Failed to clear node addresses")
}
}
}
func configureLogging() {
// Log to stdout. This prevents our logs from being interpreted as errors by, for example,
// fluentd's default configuration.
log.SetOutput(os.Stdout)
// Set log formatting.
log.SetFormatter(&logutils.Formatter{})
// Install a hook that adds file and line number information.
log.AddHook(&logutils.ContextHook{})
// Default to info level logging
logLevel := log.InfoLevel
rawLogLevel := os.Getenv("CALICO_STARTUP_LOGLEVEL")
if rawLogLevel != "" {
parsedLevel, err := log.ParseLevel(rawLogLevel)
if err == nil {
logLevel = parsedLevel
} else {
log.WithError(err).Error("Failed to parse log level, defaulting to info.")
}
}
log.SetLevel(logLevel)
log.Infof("Early log level set to %v", logLevel)
}
// determineNodeName is called to determine the node name to use for this instance
// of calico/node.
func determineNodeName() string {
var nodeName string
var err error
// Determine the name of this node. Precedence is:
// - NODENAME
// - Value stored in our nodename file.
// - HOSTNAME (lowercase)
// - os.Hostname (lowercase).
// We use the names.Hostname which lowercases and trims the name.
if nodeName = strings.TrimSpace(os.Getenv("NODENAME")); nodeName != "" {
log.Infof("Using NODENAME environment for node name")
} else if nodeName = nodenameFromFile(); nodeName != "" {
log.Info("Using stored node name from " + nodenameFileName())
} else if nodeName = strings.ToLower(strings.TrimSpace(os.Getenv("HOSTNAME"))); nodeName != "" {
log.Infof("Using HOSTNAME environment (lowercase) for node name")
} else if nodeName, err = names.Hostname(); err != nil {
log.WithError(err).Error("Unable to determine hostname")
terminate()
} else {
log.Warn("Using auto-detected node name. It is recommended that an explicit value is supplied using " +
"the NODENAME environment variable.")
}
log.Infof("Determined node name: %s", nodeName)
return nodeName
}
func nodenameFileName() string {
fn := os.Getenv("CALICO_NODENAME_FILE")
if fn == "" {
return defaultNodenameFile
}
return fn
}
// nodenameFromFile reads the nodename file if it exists and
// returns the nodename within.
func nodenameFromFile() string {
filename := nodenameFileName()
data, err := ioutil.ReadFile(filename)
if err != nil {
if os.IsNotExist(err) {
// File doesn't exist, return empty string.
log.Debug("File does not exist: " + filename)
return ""
}
log.WithError(err).Error("Failed to read " + filename)
terminate()
}
return string(data)
}
// waitForConnection waits for the datastore to become accessible.
func waitForConnection(ctx context.Context, c client.Interface) {
log.Info("Checking datastore connection")
for {
// Query some arbitrary configuration to see if the connection
// is working. Getting a specific Node is a good option, even
// if the Node does not exist.
_, err := c.Nodes().Get(ctx, "foo", options.GetOptions{})
// We only care about a couple of error cases, all others would
// suggest the datastore is accessible.
if err != nil {
switch err.(type) {
case cerrors.ErrorConnectionUnauthorized:
log.Warn("Connection to the datastore is unauthorized")
terminate()
case cerrors.ErrorDatastoreError:
log.WithError(err).Info("Hit error connecting to datastore - retry")
time.Sleep(1000 * time.Millisecond)
continue
}
}
// We've connected to the datastore - break out of the loop.
break
}
log.Info("Datastore connection verified")
}
// writeNodeConfig writes out this node's configuration to disk for use by other components.
// Specifically, it creates:
// - nodenameFileName() - used to persist the determined node name to disk for future use.
func writeNodeConfig(nodeName string) {
filename := nodenameFileName()
log.Debugf("Writing %s to "+filename, nodeName)
if err := ioutil.WriteFile(filename, []byte(nodeName), 0644); err != nil {
log.WithError(err).Error("Unable to write to " + filename)
terminate()
}
}
// getNode returns the current node configuration. If this node has not yet
// been created, it returns a blank node resource.
func getNode(ctx context.Context, client client.Interface, nodeName string) *api.Node {
node, err := client.Nodes().Get(ctx, nodeName, options.GetOptions{})
if err != nil {
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); !ok {
log.WithError(err).WithField("Name", nodeName).Info("Unable to query node configuration")
log.Warn("Unable to access datastore to query node configuration")
terminate()
}
log.WithField("Name", nodeName).Info("Building new node resource")
node = api.NewNode()
node.Name = nodeName
}
return node
}
// configureIPsAndSubnets updates the supplied node resource with IP and Subnet
// information to use for BGP. This returns true if we detect a change in Node IP address.
func configureIPsAndSubnets(node *api.Node) (bool, error) {
// If the node resource currently has no BGP configuration, add an empty
// set of configuration as it makes the processing below easier, and we
// must end up configuring some BGP fields before we complete.
if node.Spec.BGP == nil {
log.Info("Initialize BGP data")
node.Spec.BGP = &api.NodeBGPSpec{}
}
oldIpv4 := node.Spec.BGP.IPv4Address
oldIpv6 := node.Spec.BGP.IPv6Address
// Determine the autodetection type for IPv4 and IPv6. Note that we
// only autodetect IPv4 when it has not been specified. IPv6 must be
// explicitly requested using the "autodetect" value.
//
// If we aren't auto-detecting then we need to validate the configured
// value and possibly fix up missing subnet configuration.
ipv4Env := os.Getenv("IP")
if ipv4Env == "autodetect" || (ipv4Env == "" && node.Spec.BGP.IPv4Address == "") {
adm := os.Getenv("IP_AUTODETECTION_METHOD")
cidr := autoDetectCIDR(adm, 4)
if cidr != nil {
// We autodetected an IPv4 address so update the value in the node.
node.Spec.BGP.IPv4Address = cidr.String()
} else if node.Spec.BGP.IPv4Address == "" {
// No IPv4 address is configured, but we always require one, so exit.
log.Warn("Couldn't autodetect an IPv4 address. If auto-detecting, choose a different autodetection method. Otherwise provide an explicit address.")
return false, fmt.Errorf("Failed to autodetect an IPv4 address")
} else {
// No IPv4 autodetected, but a previous one was configured.
// Tell the user we are leaving the value unchanged. We
// will validate that the IP matches one on the interface.
log.Warnf("Autodetection of IPv4 address failed, keeping existing value: %s", node.Spec.BGP.IPv4Address)
validateIP(node.Spec.BGP.IPv4Address)
}
} else if ipv4Env != "none" {
if ipv4Env != "" {
node.Spec.BGP.IPv4Address = parseIPEnvironment("IP", ipv4Env, 4)
}
validateIP(node.Spec.BGP.IPv4Address)
}
ipv6Env := os.Getenv("IP6")
if ipv6Env == "autodetect" {
adm := os.Getenv("IP6_AUTODETECTION_METHOD")
cidr := autoDetectCIDR(adm, 6)
if cidr != nil {
// We autodetected an IPv6 address so update the value in the node.
node.Spec.BGP.IPv6Address = cidr.String()
} else if node.Spec.BGP.IPv6Address == "" {
// No IPv6 address is configured, but we have requested one, so exit.
log.Warn("Couldn't autodetect an IPv6 address. If auto-detecting, choose a different autodetection method. Otherwise provide an explicit address.")
return false, fmt.Errorf("Failed to autodetect an IPv6 address")
} else {
// No IPv6 autodetected, but a previous one was configured.
// Tell the user we are leaving the value unchanged. We
// will validate that the IP matches one on the interface.
log.Warnf("Autodetection of IPv6 address failed, keeping existing value: %s", node.Spec.BGP.IPv6Address)
validateIP(node.Spec.BGP.IPv6Address)
}
} else if ipv6Env != "none" {
if ipv6Env != "" {
node.Spec.BGP.IPv6Address = parseIPEnvironment("IP6", ipv6Env, 6)
}
validateIP(node.Spec.BGP.IPv6Address)
}
if ipv4Env == "none" && (ipv6Env == "" || ipv6Env == "none") {
log.Warn("No IP Addresses configured, and autodetection is not enabled")
terminate()
}
// Detect if we've seen the IP address change, and flag that we need to check for conflicting Nodes
if node.Spec.BGP.IPv4Address != oldIpv4 {
log.Info("Node IPv4 changed, will check for conflicts")
return true, nil
}
if node.Spec.BGP.IPv6Address != oldIpv6 {
log.Info("Node IPv6 changed, will check for conflicts")
return true, nil
}
return false, nil
}
// fetchAndValidateIPAndNetwork fetches and validates the IP configuration from
// either the environment variables or from the values already configured in the
// node.
func parseIPEnvironment(envName, envValue string, version int) string {
// To parse the environment (which could be an IP or a CIDR), convert
// to a JSON string and use the UnmarshalJSON method on the IPNet
// struct to parse the value.
ip := &cnet.IPNet{}
err := ip.UnmarshalJSON([]byte("\"" + envValue + "\""))
if err != nil || ip.Version() != version {
log.Warnf("Environment does not contain a valid IPv%d address: %s=%s", version, envName, envValue)
terminate()
}
log.Infof("Using IPv%d address from environment: %s=%s", ip.Version(), envName, envValue)
return ip.String()
}
// validateIP checks that the IP address is actually on one of the host
// interfaces and warns if not.
func validateIP(ipn string) {
// No validation required if no IP address is specified.
if ipn == "" {
return
}
ipAddr, _, err := cnet.ParseCIDROrIP(ipn)
if err != nil {
log.WithError(err).Errorf("Failed to parse autodetected CIDR '%s'", ipn)
terminate()
}
// Get a complete list of interfaces with their addresses and check if
// the IP address can be found.
ifaces, err := autodetection.GetInterfaces(nil, nil, ipAddr.Version())
if err != nil {
log.WithError(err).Error("Unable to query host interfaces")
terminate()
}
if len(ifaces) == 0 {
log.Info("No interfaces found for validating IP configuration")
}
for _, i := range ifaces {
for _, c := range i.Cidrs {
if ipAddr.Equal(c.IP) {
log.Infof("IPv%d address %s discovered on interface %s", ipAddr.Version(), ipAddr.String(), i.Name)
return
}
}
}
log.Warnf("Unable to confirm IPv%d address %s is assigned to this host", ipAddr.Version(), ipAddr)
}
// evaluateENVBool evaluates a passed environment variable
// Returns True if the envVar is defined and set to true.
// Returns False if the envVar is defined and set to false.
// Returns defaultValue if the envVar is not defined.
// A log entry will always be written.
func evaluateENVBool(envVar string, defaultValue bool) bool {
envValue, isSet := os.LookupEnv(envVar)
if isSet {
switch strings.ToLower(envValue) {
case "false", "0", "no", "n", "f":
log.Infof("%s is %t through environment variable", envVar, false)
return false
}
log.Infof("%s is %t through environment variable", envVar, true)
return true
}
log.Infof("%s is %t (defaulted) through environment variable", envVar, defaultValue)
return defaultValue
}
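// Illustrative note (not part of the original file): with CALICO_IPV4POOL_NAT_OUTGOING=no
// set in the environment, evaluateENVBool("CALICO_IPV4POOL_NAT_OUTGOING", true) returns
// false; if the variable is unset, the supplied default (true) is returned.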
// autoDetectCIDR auto-detects the IP and Network using the requested
// detection method.
func autoDetectCIDR(method string, version int) *cnet.IPNet {
if method == "" || method == AUTODETECTION_METHOD_FIRST {
// Autodetect the IP by enumerating all interfaces (excluding
// known internal interfaces).
return autoDetectCIDRFirstFound(version)
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_INTERFACE) {
// Autodetect the IP from the specified interface.
ifStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_INTERFACE)
// Regexes are passed in as a string separated by ","
ifRegexes := regexp.MustCompile("\\s*,\\s*").Split(ifStr, -1)
return autoDetectCIDRByInterface(ifRegexes, version)
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_CAN_REACH) {
// Autodetect the IP by connecting a UDP socket to a supplied address.
destStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_CAN_REACH)
return autoDetectCIDRByReach(destStr, version)
} else if strings.HasPrefix(method, AUTODETECTION_METHOD_SKIP_INTERFACE) {
// Autodetect the IP by enumerating all interfaces (excluding
// known internal interfaces and any interfaces whose name
// matches the given regexes).
ifStr := strings.TrimPrefix(method, AUTODETECTION_METHOD_SKIP_INTERFACE)
// Regexes are passed in as a string separated by ","
ifRegexes := regexp.MustCompile("\\s*,\\s*").Split(ifStr, -1)
return autoDetectCIDRBySkipInterface(ifRegexes, version)
}
// The autodetection method is required but was not recognised. Exit.
log.Errorf("Invalid IP autodetection method: %s", method)
terminate()
return nil
}
// autoDetectCIDRFirstFound auto-detects the first valid Network it finds across
// all interfaces (excluding common known internal interface names).
func autoDetectCIDRFirstFound(version int) *cnet.IPNet {
incl := []string{}
iface, cidr, err := autodetection.FilteredEnumeration(incl, DEFAULT_INTERFACES_TO_EXCLUDE, version)
if err != nil {
log.Warnf("Unable to auto-detect an IPv%d address: %s", version, err)
return nil
}
log.Infof("Using autodetected IPv%d address on interface %s: %s", version, iface.Name, cidr.String())
return cidr
}
// autoDetectCIDRByInterface auto-detects the first valid Network on the interfaces
// matching the supplied interface regex.
func autoDetectCIDRByInterface(ifaceRegexes []string, version int) *cnet.IPNet {
iface, cidr, err := autodetection.FilteredEnumeration(ifaceRegexes, nil, version)
if err != nil {
log.Warnf("Unable to auto-detect an IPv%d address using interface regexes %v: %s", version, ifaceRegexes, err)
return nil
}
log.Infof("Using autodetected IPv%d address %s on matching interface %s", version, cidr.String(), iface.Name)
return cidr
}
// autoDetectCIDRByReach auto-detects the IP and Network by setting up a UDP
// connection to a "reach" address.
func autoDetectCIDRByReach(dest string, version int) *cnet.IPNet {
if cidr, err := autodetection.ReachDestination(dest, version); err != nil {
log.Warnf("Unable to auto-detect IPv%d address by connecting to %s: %s", version, dest, err)
return nil
} else {
log.Infof("Using autodetected IPv%d address %s, detected by connecting to %s", version, cidr.String(), dest)
return cidr
}
}
// autoDetectCIDRBySkipInterface auto-detects the first valid Network on the interfaces
// matching the supplied interface regexes.
func autoDetectCIDRBySkipInterface(ifaceRegexes []string, version int) *cnet.IPNet {
incl := []string{}
excl := DEFAULT_INTERFACES_TO_EXCLUDE
excl = append(excl, ifaceRegexes...)
iface, cidr, err := autodetection.FilteredEnumeration(incl, excl, version)
if err != nil {
log.Warnf("Unable to auto-detect an IPv%d address while excluding %v: %s", version, ifaceRegexes, err)
return nil
}
log.Infof("Using autodetected IPv%d address on interface %s: %s while skipping matching interfaces", version, iface.Name, cidr.String())
return cidr
}
// configureASNumber configures the Node resource with the AS number specified
// in the environment, or is a no-op if not specified.
func configureASNumber(node *api.Node) {
// Extract the AS number from the environment
asStr := os.Getenv("AS")
if asStr != "" {
if asNum, err := numorstring.ASNumberFromString(asStr); err != nil {
log.WithError(err).Errorf("The AS number specified in the environment (AS=%s) is not valid", asStr)
terminate()
} else {
log.Infof("Using AS number specified in environment (AS=%s)", asNum)
node.Spec.BGP.ASNumber = &asNum
}
} else {
if node.Spec.BGP.ASNumber == nil {
log.Info("No AS number configured on node resource, using global value")
} else {
log.Infof("Using AS number %s configured in node resource", node.Spec.BGP.ASNumber)
}
}
}
// GenerateIPv6ULAPrefix returns a randomly generated ULA IPv6 prefix as per RFC 4193. The pool
// is generated from bytes pulled from a secure random source.
func GenerateIPv6ULAPrefix() (string, error) {
ulaAddr := []byte{0xfd, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
_, err := cryptorand.Read(ulaAddr[1:6])
if err != nil {
return "", err
}
ipNet := net.IPNet{
IP: net.IP(ulaAddr),
Mask: net.CIDRMask(48, 128),
}
return ipNet.String(), nil
}
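// Illustrative note (not part of the original file): the result is a /48 ULA prefix such
// as "fd12:3456:789a::/48", with the five bytes following the leading 0xfd drawn from
// crypto/rand.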
// configureIPPools ensures that default IP pools are created (unless explicitly
// requested otherwise).
func configureIPPools(ctx context.Context, client client.Interface) {
// Read in environment variables for use here and later.
ipv4Pool := os.Getenv("CALICO_IPV4POOL_CIDR")
ipv6Pool := os.Getenv("CALICO_IPV6POOL_CIDR")
if strings.ToLower(os.Getenv("NO_DEFAULT_POOLS")) == "true" {
if len(ipv4Pool) > 0 || len(ipv6Pool) > 0 {
log.Error("Invalid configuration with NO_DEFAULT_POOLS defined and CALICO_IPV4POOL_CIDR or CALICO_IPV6POOL_CIDR defined.")
terminate()
}
log.Info("Skipping IP pool configuration")
return
}
ipv4IpipModeEnvVar := strings.ToLower(os.Getenv("CALICO_IPV4POOL_IPIP"))
ipv4VXLANModeEnvVar := strings.ToLower(os.Getenv("CALICO_IPV4POOL_VXLAN"))
// Get a list of all IP Pools
poolList, err := client.IPPools().List(ctx, options.ListOptions{})
if err != nil {
log.WithError(err).Error("Unable to fetch IP pool list")
terminate()
return // not really needed but allows testing to function
}
// Check for IPv4 and IPv6 pools.
ipv4Present := false
ipv6Present := false
for _, p := range poolList.Items {
ip, _, err := cnet.ParseCIDR(p.Spec.CIDR)
if err != nil {
log.Warnf("Error parsing CIDR '%s'. Skipping the IPPool.", p.Spec.CIDR)
}
version := ip.Version()
ipv4Present = ipv4Present || (version == 4)
ipv6Present = ipv6Present || (version == 6)
if ipv4Present && ipv6Present {
break
}
}
// Read the IPv4 CIDR from the environment if set, then parse and validate it.
if ipv4Pool == "" {
ipv4Pool = DEFAULT_IPV4_POOL_CIDR
}
_, ipv4Cidr, err := cnet.ParseCIDR(ipv4Pool)
if err != nil || ipv4Cidr.Version() != 4 {
log.Errorf("Invalid CIDR specified in CALICO_IPV4POOL_CIDR '%s'", ipv4Pool)
terminate()
return // not really needed but allows testing to function
}
// If no IPv6 pool is specified, generate one.
if ipv6Pool == "" {
ipv6Pool, err = GenerateIPv6ULAPrefix()
if err != nil {
log.Errorf("Failed to generate an IPv6 default pool")
terminate()
}
}
_, ipv6Cidr, err := cnet.ParseCIDR(ipv6Pool)
if err != nil || ipv6Cidr.Version() != 6 {
log.Errorf("Invalid CIDR specified in CALICO_IPV6POOL_CIDR '%s'", ipv6Pool)
terminate()
return // not really needed but allows testing to function
}
// Ensure there are pools created for each IP version.
if !ipv4Present {
log.Debug("Create default IPv4 IP pool")
outgoingNATEnabled := evaluateENVBool("CALICO_IPV4POOL_NAT_OUTGOING", true)
createIPPool(ctx, client, ipv4Cidr, DEFAULT_IPV4_POOL_NAME, ipv4IpipModeEnvVar, ipv4VXLANModeEnvVar, outgoingNATEnabled)
}
if !ipv6Present && ipv6Supported() {
log.Debug("Create default IPv6 IP pool")
outgoingNATEnabled := evaluateENVBool("CALICO_IPV6POOL_NAT_OUTGOING", false)
createIPPool(ctx, client, ipv6Cidr, DEFAULT_IPV6_POOL_NAME, string(api.IPIPModeNever), string(api.VXLANModeNever), outgoingNATEnabled)
}
}
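// Illustrative note (not part of the original file): a typical deployment might set, for
// example, CALICO_IPV4POOL_CIDR=10.244.0.0/16 and CALICO_IPV4POOL_IPIP=CrossSubnet;
// setting NO_DEFAULT_POOLS=true (with no pool CIDRs defined) skips pool creation entirely.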
// createIPPool creates an IP pool using the specified CIDR. This
// method is a no-op if the pool already exists.
func createIPPool(ctx context.Context, client client.Interface, cidr *cnet.IPNet, poolName, ipipModeName, vxlanModeName string, isNATOutgoingEnabled bool) {
version := cidr.Version()
var ipipMode api.IPIPMode
var vxlanMode api.VXLANMode
// Parse the given IPIP mode.
switch strings.ToLower(ipipModeName) {
case "", "off", "never":
ipipMode = api.IPIPModeNever
case "crosssubnet", "cross-subnet":
ipipMode = api.IPIPModeCrossSubnet
case "always":
ipipMode = api.IPIPModeAlways
default:
log.Errorf("Unrecognized IPIP mode specified in CALICO_IPV4POOL_IPIP '%s'", ipipModeName)
terminate()
}
// Parse the given VXLAN mode.
switch strings.ToLower(vxlanModeName) {
case "", "off", "never":
vxlanMode = api.VXLANModeNever
case "always":
vxlanMode = api.VXLANModeAlways
default:
log.Errorf("Unrecognized VXLAN mode specified in CALICO_IPV4POOL_VXLAN '%s'", vxlanModeName)
terminate()
}
pool := &api.IPPool{
ObjectMeta: metav1.ObjectMeta{
Name: poolName,
},
Spec: api.IPPoolSpec{
CIDR: cidr.String(),
NATOutgoing: isNATOutgoingEnabled,
IPIPMode: ipipMode,
VXLANMode: vxlanMode,
},
}
log.Infof("Ensure default IPv%d pool is created. IPIP mode: %s, VXLAN mode: %s", version, ipipMode, vxlanMode)
// Create the pool. There is a small chance that another node may
// beat us to it, so handle the fact that the pool already exists.
if _, err := client.IPPools().Create(ctx, pool, options.SetOptions{}); err != nil {
if _, ok := err.(cerrors.ErrorResourceAlreadyExists); !ok {
log.WithError(err).Errorf("Failed to create default IPv%d IP pool: %s", version, cidr.String())
terminate()
}
} else {
log.Infof("Created default IPv%d pool (%s) with NAT outgoing %t. IPIP mode: %s, VXLAN mode: %s",
version, cidr, isNATOutgoingEnabled, ipipMode, vxlanMode)
}
}
// checkConflictingNodes checks whether any other nodes have been configured
// with the same IP addresses.
func checkConflictingNodes(ctx context.Context, client client.Interface, node *api.Node) (v4conflict, v6conflict bool, retErr error) {
// Get the full set of nodes.
var nodes []api.Node
if nodeList, err := client.Nodes().List(ctx, options.ListOptions{}); err != nil {
log.WithError(err).Errorf("Unable to query node configuration")
retErr = err
return
} else {
nodes = nodeList.Items
}
ourIPv4, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv4Address)
if err != nil && node.Spec.BGP.IPv4Address != "" {
log.WithError(err).Errorf("Error parsing IPv4 CIDR '%s' for node '%s'", node.Spec.BGP.IPv4Address, node.Name)
retErr = err
return
}
ourIPv6, _, err := cnet.ParseCIDROrIP(node.Spec.BGP.IPv6Address)
if err != nil && node.Spec.BGP.IPv6Address != "" {
log.WithError(err).Errorf("Error parsing IPv6 CIDR '%s' for node '%s'", node.Spec.BGP.IPv6Address, node.Name)
retErr = err
return
}
for _, theirNode := range nodes {
if theirNode.Spec.BGP == nil {
// Skip nodes that don't have BGP configured. We know
// that this node does have BGP since we only perform
// this check after configuring BGP.
continue
}
theirIPv4, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv4Address)
if err != nil && theirNode.Spec.BGP.IPv4Address != "" {
log.WithError(err).Errorf("Error parsing IPv4 CIDR '%s' for node '%s'", theirNode.Spec.BGP.IPv4Address, theirNode.Name)
retErr = err
return
}
theirIPv6, _, err := cnet.ParseCIDROrIP(theirNode.Spec.BGP.IPv6Address)
if err != nil && theirNode.Spec.BGP.IPv6Address != "" {
log.WithError(err).Errorf("Error parsing IPv6 CIDR '%s' for node '%s'", theirNode.Spec.BGP.IPv6Address, theirNode.Name)
retErr = err
return
}
// If this is our node (based on the name), check if the IP
// addresses have changed. If so warn the user as it could be
// an indication of multiple nodes using the same name. This
// is not an error condition as the IPs could actually change.
if theirNode.Name == node.Name {
if theirIPv4.IP != nil && ourIPv4.IP != nil && !theirIPv4.IP.Equal(ourIPv4.IP) {
fields := log.Fields{"node": theirNode.Name, "original": theirIPv4.String(), "updated": ourIPv4.String()}
log.WithFields(fields).Warnf("IPv4 address has changed. This could happen if there are multiple nodes with the same name.")
}
if theirIPv6.IP != nil && ourIPv6.IP != nil && !theirIPv6.IP.Equal(ourIPv6.IP) {
fields := log.Fields{"node": theirNode.Name, "original": theirIPv6.String(), "updated": ourIPv6.String()}
log.WithFields(fields).Warnf("IPv6 address has changed. This could happen if there are multiple nodes with the same name.")
}
continue
}
// Check that other nodes aren't using the same IP addresses.
// This is an error condition.
if theirIPv4.IP != nil && ourIPv4.IP != nil && theirIPv4.IP.Equal(ourIPv4.IP) {
log.Warnf("Calico node '%s' is already using the IPv4 address %s.", theirNode.Name, ourIPv4.String())
retErr = fmt.Errorf("IPv4 address conflict")
v4conflict = true
}
if theirIPv6.IP != nil && ourIPv6.IP != nil && theirIPv6.IP.Equal(ourIPv6.IP) {
log.Warnf("Calico node '%s' is already using the IPv6 address %s.", theirNode.Name, ourIPv6.String())
retErr = fmt.Errorf("IPv6 address conflict")
v6conflict = true
}
}
return
}
// ensureDefaultConfig ensures all of the required default settings are
// configured.
func ensureDefaultConfig(ctx context.Context, cfg *apiconfig.CalicoAPIConfig, c client.Interface, node *api.Node) error {
// Ensure the ClusterInformation is populated.
// Get the ClusterType from ENV var. This is set from the manifest.
clusterType := os.Getenv("CLUSTER_TYPE")
if err := c.EnsureInitialized(ctx, VERSION, clusterType); err != nil {
return err
}
// By default we set the global reporting interval to 0 - this is
// different from the defaults defined in Felix.
//
// Logging to file is disabled in the felix.cfg config file. This
// should always be disabled for calico/node. By default we log to
// screen - set the default logging value that we desire.
felixConf, err := c.FelixConfigurations().Get(ctx, globalFelixConfigName, options.GetOptions{})
if err != nil {
// Create the default config if it doesn't already exist.
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
newFelixConf := api.NewFelixConfiguration()
newFelixConf.Name = globalFelixConfigName
newFelixConf.Spec.ReportingInterval = &metav1.Duration{Duration: 0}
newFelixConf.Spec.LogSeverityScreen = defaultLogSeverity
_, err = c.FelixConfigurations().Create(ctx, newFelixConf, options.SetOptions{})
if err != nil {
if conflict, ok := err.(cerrors.ErrorResourceAlreadyExists); ok {
log.Infof("Ignoring conflict when setting value %s", conflict.Identifier)
} else {
log.WithError(err).WithField("FelixConfig", newFelixConf).Errorf("Error creating Felix global config")
return err
}
}
} else {
log.WithError(err).WithField("FelixConfig", globalFelixConfigName).Errorf("Error getting Felix global config")
return err
}
} else {
updateNeeded := false
if felixConf.Spec.ReportingInterval == nil {
felixConf.Spec.ReportingInterval = &metav1.Duration{Duration: 0}
updateNeeded = true
} else {
log.WithField("ReportingInterval", felixConf.Spec.ReportingInterval).Debug("Global Felix value already assigned")
}
if felixConf.Spec.LogSeverityScreen == "" {
felixConf.Spec.LogSeverityScreen = defaultLogSeverity
updateNeeded = true
} else {
log.WithField("LogSeverityScreen", felixConf.Spec.LogSeverityScreen).Debug("Global Felix value already assigned")
}
if updateNeeded {
_, err = c.FelixConfigurations().Update(ctx, felixConf, options.SetOptions{})
if err != nil {
if conflict, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
log.Infof("Ignoring conflict when setting value %s", conflict.Identifier)
} else {
log.WithError(err).WithField("FelixConfig", felixConf).Errorf("Error updating Felix global config")
return err
}
}
}
}
// Configure Felix to allow traffic from the containers to the host (if
// not otherwise firewalled by the host administrator or profiles).
// This is important for container deployments, where it is common
// for containers to speak to services running on the host (e.g. k8s
// pods speaking to k8s api-server, and mesos tasks registering with agent
// on startup). Note: KDD does not yet support per-node felix config.
if cfg.Spec.DatastoreType != apiconfig.Kubernetes {
felixNodeCfg, err := c.FelixConfigurations().Get(ctx, fmt.Sprintf("%s%s", felixNodeConfigNamePrefix, node.Name), options.GetOptions{})
if err != nil {
// Create the default config if it doesn't already exist.
if _, ok := err.(cerrors.ErrorResourceDoesNotExist); ok {
newFelixNodeCfg := api.NewFelixConfiguration()
newFelixNodeCfg.Name = fmt.Sprintf("%s%s", felixNodeConfigNamePrefix, node.Name)
newFelixNodeCfg.Spec.DefaultEndpointToHostAction = "Return"
_, err = c.FelixConfigurations().Create(ctx, newFelixNodeCfg, options.SetOptions{})
if err != nil {
if exists, ok := err.(cerrors.ErrorResourceAlreadyExists); ok {
log.Infof("Ignoring resource exists error when setting value %s", exists.Identifier)
} else {
log.WithError(err).WithField("FelixConfig", newFelixNodeCfg).Errorf("Error creating Felix node config")
return err
}
}
} else {
log.WithError(err).WithField("FelixConfig", felixNodeConfigNamePrefix).Errorf("Error getting Felix node config")
return err
}
} else {
if felixNodeCfg.Spec.DefaultEndpointToHostAction == "" {
felixNodeCfg.Spec.DefaultEndpointToHostAction = "Return"
_, err = c.FelixConfigurations().Update(ctx, felixNodeCfg, options.SetOptions{})
if err != nil {
if conflict, ok := err.(cerrors.ErrorResourceUpdateConflict); ok {
log.Infof("Ignoring conflict when setting value %s", conflict.Identifier)
} else {
log.WithError(err).WithField("FelixConfig", felixNodeCfg).Errorf("Error updating Felix node config")
return err
}
}
} else {
log.WithField("DefaultEndpointToHostAction", felixNodeCfg.Spec.DefaultEndpointToHostAction).Debug("Host Felix value already assigned")
}
}
}
return nil
}
// ensureKDDMigrated ensures any data migration needed is done.
func ensureKDDMigrated(cfg *apiconfig.CalicoAPIConfig, cv3 client.Interface) error {
cv1, err := clients.LoadKDDClientV1FromAPIConfigV3(cfg)
if err != nil {
return err
}
m := migrator.New(cv3, cv1, nil)
yes, err := m.ShouldMigrate()
if err != nil {
return err
} else if yes {
log.Infof("Running migration")
if _, err = m.Migrate(); err != nil {
return errors.New(fmt.Sprintf("Migration failed: %v", err))
}
log.Infof("Migration successful")
} else {
log.Debugf("Migration is not needed")
}
return nil
}
// Set Kubernetes NodeNetworkUnavailable to false when starting
// https://kubernetes.io/docs/concepts/architecture/nodes/#condition
func setNodeNetworkUnavailableFalse(config *rest.Config, nodeName string) error {
// creates the k8s clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return err
}
condition := kapiv1.NodeCondition{
Type: kapiv1.NodeNetworkUnavailable,
Status: kapiv1.ConditionFalse,
Reason: "CalicoIsUp",
Message: "Calico is running on this node",
LastTransitionTime: metav1.Now(),
LastHeartbeatTime: metav1.Now(),
}
raw, err := json.Marshal(&[]kapiv1.NodeCondition{condition})
if err != nil {
return err
}
patch := []byte(fmt.Sprintf(`{"status":{"conditions":%s}}`, raw))
_, err = clientset.CoreV1().Nodes().PatchStatus(nodeName, patch)
return err
}
// terminate prints a terminate message and exits with status 1.
func terminate() {
log.Warn("Terminating")
exitFunction(1)
}
|
[
"\"WAIT_FOR_DATASTORE\"",
"\"CALICO_NETWORKING_BACKEND\"",
"\"IP\"",
"\"IP6\"",
"\"DISABLE_NODE_IP_CHECK\"",
"\"IP\"",
"\"IP6\"",
"\"CALICO_K8S_NODE_REF\"",
"\"CALICO_STARTUP_LOGLEVEL\"",
"\"NODENAME\"",
"\"HOSTNAME\"",
"\"CALICO_NODENAME_FILE\"",
"\"IP\"",
"\"IP_AUTODETECTION_METHOD\"",
"\"IP6\"",
"\"IP6_AUTODETECTION_METHOD\"",
"\"AS\"",
"\"CALICO_IPV4POOL_CIDR\"",
"\"CALICO_IPV6POOL_CIDR\"",
"\"NO_DEFAULT_POOLS\"",
"\"CALICO_IPV4POOL_IPIP\"",
"\"CALICO_IPV4POOL_VXLAN\"",
"\"CLUSTER_TYPE\""
] |
[] |
[
"WAIT_FOR_DATASTORE",
"CALICO_IPV6POOL_CIDR",
"IP6",
"CALICO_K8S_NODE_REF",
"HOSTNAME",
"CLUSTER_TYPE",
"CALICO_IPV4POOL_CIDR",
"CALICO_NETWORKING_BACKEND",
"NODENAME",
"IP_AUTODETECTION_METHOD",
"IP6_AUTODETECTION_METHOD",
"CALICO_NODENAME_FILE",
"CALICO_STARTUP_LOGLEVEL",
"NO_DEFAULT_POOLS",
"IP",
"CALICO_IPV4POOL_VXLAN",
"CALICO_IPV4POOL_IPIP",
"AS",
"DISABLE_NODE_IP_CHECK"
] |
[]
|
["WAIT_FOR_DATASTORE", "CALICO_IPV6POOL_CIDR", "IP6", "CALICO_K8S_NODE_REF", "HOSTNAME", "CLUSTER_TYPE", "CALICO_IPV4POOL_CIDR", "CALICO_NETWORKING_BACKEND", "NODENAME", "IP_AUTODETECTION_METHOD", "IP6_AUTODETECTION_METHOD", "CALICO_NODENAME_FILE", "CALICO_STARTUP_LOGLEVEL", "NO_DEFAULT_POOLS", "IP", "CALICO_IPV4POOL_VXLAN", "CALICO_IPV4POOL_IPIP", "AS", "DISABLE_NODE_IP_CHECK"]
|
go
| 19 | 0 | |
halaws/service.go
|
package halaws
import (
"crypto/tls"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/connect"
"github.com/kyokomi/emoji"
"github.com/weAutomateEverything/go2hal/alert"
"golang.org/x/net/context"
"net/http"
"os"
"time"
)
type Service interface {
SendAlert(ctx context.Context, chatId uint32, destination string, name string, variables map[string]string) error
ResetLastCall(chat uint32)
}
type service struct {
alert alert.Service
lastcall map[uint32]time.Time
}
func (s *service) ResetLastCall(chat uint32) {
delete(s.lastcall, chat)
}
func NewService(alert alert.Service) Service {
s := &service{alert: alert}
s.lastcall = make(map[uint32]time.Time)
return s
}
func (s *service) SendAlert(ctx context.Context, chatId uint32, destination string, name string, variables map[string]string) error {
if !s.checkCallout(ctx, chatId) {
return fmt.Errorf("not invoking callout")
}
c := credentials.NewEnvCredentials()
client := http.DefaultClient
transport := http.DefaultTransport
transport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
client.Transport = transport
config := aws.Config{Credentials: c, Region: aws.String(os.Getenv("AWS_REGION")), LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody), HTTPClient: client}
sess, _ := session.NewSession(&config)
outbound := connect.New(sess, &config)
v := map[string]*string{}
if variables != nil {
for key, val := range variables {
v[key] = aws.String(val)
}
}
req := connect.StartOutboundVoiceContactInput{
InstanceId: aws.String(getInstanceID()),
ContactFlowId: aws.String(getContactFlowID()),
DestinationPhoneNumber: aws.String(destination),
SourcePhoneNumber: aws.String(getSourcePhoneNumber()),
Attributes: v,
}
output, err := outbound.StartOutboundVoiceContactWithContext(ctx, &req)
if err != nil {
s.alert.SendError(ctx, fmt.Errorf("error invoking alexa to call %v on %v. Error: %v", name, destination, err.Error()))
return err
}
s.lastcall[chatId] = time.Now()
s.alert.SendAlert(ctx, chatId, emoji.Sprintf(":phone: HAL has phoned %v on %v. Reference %v ", name, destination, output.ContactId))
return nil
}
func (s service) checkCallout(ctx context.Context, chatid uint32) bool {
t, ok := s.lastcall[chatid]
if ok {
if time.Since(t) < 30*time.Minute {
s.alert.SendAlert(ctx, chatid, emoji.Sprintf(":phone: :negative_squared_cross_mark: Not invoking callout since it's been less than 30 minutes since the last phone call"))
return false
}
}
return true
}
func getInstanceID() string {
return os.Getenv("AWS_CONNECT_INSTANCE")
}
func getContactFlowID() string {
return os.Getenv("AWS_CONNECT_FLOW_ID")
}
func getSourcePhoneNumber() string {
return os.Getenv("AWS_CONNECT_SOURCE_PHONE")
}
|
[
"\"AWS_REGION\"",
"\"AWS_CONNECT_INSTANCE\"",
"\"AWS_CONNECT_FLOW_ID\"",
"\"AWS_CONNECT_SOURCE_PHONE\""
] |
[] |
[
"AWS_CONNECT_INSTANCE",
"AWS_CONNECT_FLOW_ID",
"AWS_CONNECT_SOURCE_PHONE",
"AWS_REGION"
] |
[]
|
["AWS_CONNECT_INSTANCE", "AWS_CONNECT_FLOW_ID", "AWS_CONNECT_SOURCE_PHONE", "AWS_REGION"]
|
go
| 4 | 0 | |
credhub/request.go
|
package credhub
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"os"
)
// Request sends an authenticated request to the CredHub server.
//
// The pathStr should include the full path (eg. /api/v1/data).
// The request body should be marshallable to JSON, but can be left nil for GET requests.
//
// Request() is used by other CredHub client methods to send authenticated requests to the CredHub server.
//
// Use Request() directly to send authenticated requests to the CredHub server.
// For unauthenticated requests (eg. /health), use Config.Client() instead.
func (ch *CredHub) Request(method string, pathStr string, query url.Values, body interface{}, checkServerErr bool) (*http.Response, error) {
return ch.request(ch.Auth, method, pathStr, query, body, checkServerErr)
}
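// A minimal usage sketch (illustrative only; the query value shown here is an
// assumption and not defined by this package, while /api/v1/data is the example
// path from the doc comment above):
//
//	query := url.Values{}
//	query.Set("name", "/example-credential")
//	resp, err := ch.Request(http.MethodGet, "/api/v1/data", query, nil, true)
//	if err != nil {
//		// handle the error
//	}
//	defer resp.Body.Close()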
type requester interface {
Do(req *http.Request) (*http.Response, error)
}
func (ch *CredHub) request(client requester, method string, pathStr string, query url.Values, body interface{}, checkServerErr bool) (*http.Response, error) {
u := *ch.baseURL // clone
u.Path = pathStr
u.RawQuery = query.Encode()
var req *http.Request
jsonBody, err := json.Marshal(body)
if err != nil {
return nil, err
}
if body != nil {
req, err = http.NewRequest(method, u.String(), bytes.NewReader(jsonBody))
} else {
req, err = http.NewRequest(method, u.String(), nil)
}
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
if os.Getenv("CREDHUB_DEBUG") == "true" {
dumpRequest(req)
}
resp, err := client.Do(req)
if os.Getenv("CREDHUB_DEBUG") == "true" {
dumpResponse(resp)
}
if err != nil {
return resp, err
}
if checkServerErr {
if err := ch.checkForServerError(resp); err != nil {
return nil, err
}
}
return resp, err
}
func (ch *CredHub) checkForServerError(resp *http.Response) error {
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
defer resp.Body.Close()
defer io.Copy(ioutil.Discard, resp.Body)
dec := json.NewDecoder(resp.Body)
respErr := &Error{}
if err := dec.Decode(&respErr); err != nil {
return errors.New("The response body could not be decoded: " + err.Error())
}
return respErr
}
return nil
}
func dumpRequest(req *http.Request) {
dump, err := httputil.DumpRequestOut(req, true)
if err != nil {
fmt.Println("[DEBUG] An error occurred during request dump.", err.Error())
}
fmt.Println("[DEBUG]", string(dump))
}
func dumpResponse(resp *http.Response) {
dump, err := httputil.DumpResponse(resp, true)
if err != nil {
fmt.Println("[DEBUG] An error occurred during response dump.", err.Error())
}
fmt.Println("[DEBUG]", string(dump))
}
|
[
"\"CREDHUB_DEBUG\"",
"\"CREDHUB_DEBUG\""
] |
[] |
[
"CREDHUB_DEBUG"
] |
[]
|
["CREDHUB_DEBUG"]
|
go
| 1 | 0 | |
main.py
|
# -*- coding: utf-8 -*-
import os, sys
import argparse
import numpy as np
import torch
from torchvision import datasets, transforms
from torch import optim
from torch.optim import lr_scheduler
from model import Net
from train_eng import net_train
from test_eng import net_test
def parse_args():
parser = argparse.ArgumentParser(description="PyTorch MNIST Classification")
parser.add_argument("--gpu", type=str, default="0", help="the gpu device to use (default: 0)")
parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)")
parser.add_argument("--test_batch_size", type=int, default=1000, help="input batch size for testing (default: 1000)")
parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)")
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)")
parser.add_argument("--log_interval", type=int, default=10, help="how many batches to wait before logging training status")
parser.add_argument("--seed", type=int, default=1234, help="random seed (default: 1234)")
args = parser.parse_args()
return args
def set_random_seed(seed):
# PyTorch
torch.manual_seed(seed)
# CuDNN
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Numpy
np.random.seed(seed)
if __name__ == "__main__":
args = parse_args()
# select the GPU before any CUDA call, otherwise CUDA_VISIBLE_DEVICES may not take effect
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
assert torch.cuda.is_available(), "...No available GPU..."
# ensure reproducible training
set_random_seed(args.seed)
# create model object
net = Net()
net = net.cuda()
# prepare train & test dataset
transform=transforms.Compose([
transforms.ToTensor(),
# transforms.Normalize((0.1307,), (0.3081,))
])
train_dset = datasets.MNIST("/Data", train=True, download=True, transform=transform)
test_dset = datasets.MNIST("/Data", train=False, transform=transform)
train_kwargs = {"batch_size": args.batch_size, "num_workers": 4, "pin_memory": True, "shuffle": True}
test_kwargs = {"batch_size": args.test_batch_size, "num_workers": 4, "pin_memory": True, "shuffle": False}
train_loader = torch.utils.data.DataLoader(train_dset,**train_kwargs)
test_loader = torch.utils.data.DataLoader(test_dset, **test_kwargs)
# optimizer & scheduling
optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, nesterov=True)
scheduler = lr_scheduler.StepLR(optimizer, gamma=0.7, step_size=1)
# model training & test
for epoch in range(1, args.epochs + 1):
net_train(net, train_loader, optimizer, epoch, args)
net_test(net, test_loader)
scheduler.step()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
utils/src/main/java/org/spongepowered/downloads/utils/AuthUtils.java
|
/*
* This file is part of SystemOfADownload, licensed under the MIT License (MIT).
*
* Copyright (c) SpongePowered <https://spongepowered.org/>
* Copyright (c) contributors
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.spongepowered.downloads.utils;
import io.vavr.collection.List;
import org.pac4j.core.authorization.authorizer.Authorizer;
import org.pac4j.core.client.Client;
import org.pac4j.core.client.DirectClient;
import org.pac4j.core.config.Config;
import org.pac4j.core.context.HttpConstants;
import org.pac4j.core.credentials.TokenCredentials;
import org.pac4j.core.profile.CommonProfile;
import org.pac4j.http.client.direct.HeaderClient;
import org.pac4j.jwt.config.encryption.EncryptionConfiguration;
import org.pac4j.jwt.config.encryption.SecretEncryptionConfiguration;
import org.pac4j.jwt.config.signature.SecretSignatureConfiguration;
import org.pac4j.jwt.config.signature.SignatureConfiguration;
import org.pac4j.jwt.credentials.authenticator.JwtAuthenticator;
import org.pac4j.jwt.profile.JwtGenerator;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Date;
public final class AuthUtils {
private static final String ENCRYPTION_SECRET = System.getenv("JWT-ENCRYPTION-SECRET");
private static final String SIGNATURE_SECRET = System.getenv("JWT-SIGNATURE-SECRET");
private static final String NEXUS_WEBHOOK_SECRET = System.getenv("NEXUS_WEBHOOK_SECRET");
public static final String INTERNAL_HEADER_SECRET = System.getenv("INTERNAL_KEY");
public static final String INTERNAL_HEADER_KEY = System.getenv("INTERNAL_HEADER");
@SuppressWarnings("rawtypes")
public static Config createConfig(final Client... additionalClients) {
final var jwtClient = AuthUtils.createJwtClient();
final var config = new Config(List.<Client>of(jwtClient).appendAll(List.of(additionalClients)).asJava());
config.getClients().setDefaultSecurityClients(jwtClient.getName());
AuthUtils.setAuthorizers(config);
return config;
}
public static DirectClient<TokenCredentials, CommonProfile> createJwtClient() {
final var headerClient = new HeaderClient();
headerClient.setName(AuthUtils.Types.JWT);
headerClient.setHeaderName(HttpConstants.AUTHORIZATION_HEADER);
headerClient.setPrefixHeader(HttpConstants.BEARER_HEADER_PREFIX);
final var jwtAuthenticator = new JwtAuthenticator();
if (AuthUtils.SIGNATURE_SECRET != null) {
jwtAuthenticator.addSignatureConfiguration(AuthUtils.getSignatureConfiguration());
}
if (AuthUtils.ENCRYPTION_SECRET != null) {
jwtAuthenticator.addEncryptionConfiguration(AuthUtils.getEncryptionConfiguration());
}
headerClient.setAuthenticator(jwtAuthenticator); // this should provide the correct profile automagically.
headerClient.setName(AuthUtils.Types.JWT);
return headerClient;
}
public static JwtGenerator<CommonProfile> createJwtGenerator() {
final var generator = new JwtGenerator<>();
if (AuthUtils.SIGNATURE_SECRET != null) {
generator.setSignatureConfiguration(AuthUtils.getSignatureConfiguration());
}
if (AuthUtils.ENCRYPTION_SECRET != null) {
generator.setEncryptionConfiguration(AuthUtils.getEncryptionConfiguration());
}
generator.setExpirationTime(Date.from(Instant.now().plus(10, ChronoUnit.MINUTES)));
return generator;
}
public static void setAuthorizers(final Config config) {
config.addAuthorizer(Roles.ADMIN, Authorizers.ADMIN);
config.addAuthorizer(Roles.WEBHOOK, Authorizers.WEBHOOK);
}
private static EncryptionConfiguration getEncryptionConfiguration() {
return new SecretEncryptionConfiguration(AuthUtils.ENCRYPTION_SECRET);
}
private static SignatureConfiguration getSignatureConfiguration() {
return new SecretSignatureConfiguration(AuthUtils.SIGNATURE_SECRET);
}
private static SignatureConfiguration getSonatypeSignatureConfiguration() {
return new SecretSignatureConfiguration(AuthUtils.NEXUS_WEBHOOK_SECRET);
}
private AuthUtils() {}
static final class Authorizers {
static final Authorizer<CommonProfile> ADMIN =
(webContext, list) -> list.stream().anyMatch(x -> !x.isExpired() && x.getRoles().contains(AuthUtils.Roles.ADMIN));
static final Authorizer<CommonProfile> WEBHOOK =
(webContext, list) -> list.stream().anyMatch(x -> !x.isExpired() && x.getRoles().contains(AuthUtils.Roles.WEBHOOK));
}
public static final class Types {
public static final String JWT = "jwt";
public static final String WEBHOOK = "internal";
}
public static final class Roles {
public static final String ADMIN = "soad_admin";
public static final String WEBHOOK = "soad_webhook";
}
}
|
[
"\"JWT-ENCRYPTION-SECRET\"",
"\"JWT-SIGNATURE-SECRET\"",
"\"NEXUS_WEBHOOK_SECRET\"",
"\"INTERNAL_KEY\"",
"\"INTERNAL_HEADER\""
] |
[] |
[
"INTERNAL_KEY",
"NEXUS_WEBHOOK_SECRET",
"JWT-SIGNATURE-SECRET",
"INTERNAL_HEADER",
"JWT-ENCRYPTION-SECRET"
] |
[]
|
["INTERNAL_KEY", "NEXUS_WEBHOOK_SECRET", "JWT-SIGNATURE-SECRET", "INTERNAL_HEADER", "JWT-ENCRYPTION-SECRET"]
|
java
| 5 | 0 | |
hloc/pairs_from_retrieval.py
|
import argparse
import logging
from pathlib import Path
import h5py
import numpy as np
import torch
from .utils.parsers import parse_image_lists_with_intrinsics
def main(descriptors, output, num_matched,
db_prefix=None, query_prefix=None, db_list=None, query_list=None):
logging.info('Extracting image pairs from a retrieval database.')
hfile = h5py.File(str(descriptors), 'r')
if db_prefix and query_prefix:
names = []
hfile.visititems(
lambda _, obj: names.append(obj.parent.name.strip('/'))
if isinstance(obj, h5py.Dataset) else None)
names = list(set(names))
db_names = [n for n in names if n.startswith(db_prefix)]
query_names = [n for n in names if n.startswith(query_prefix)]
assert len(db_names)
assert len(query_names)
elif db_list and query_list:
db_names = [
n for n, _ in parse_image_lists_with_intrinsics(db_list)]
query_names = [
n for n, _ in parse_image_lists_with_intrinsics(query_list)]
else:
raise ValueError('Provide either prefixes of DB and query names, '
'or paths to lists of DB and query images.')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def tensor_from_names(names):
desc = [hfile[i]['global_descriptor'].__array__() for i in names]
desc = torch.from_numpy(np.stack(desc, 0)).to(device).float()
return desc
db_desc = tensor_from_names(db_names)
query_desc = tensor_from_names(query_names)
sim = torch.einsum('id,jd->ij', query_desc, db_desc)
topk = torch.topk(sim, num_matched, dim=1).indices.cpu().numpy()
pairs = []
for query, indices in zip(query_names, topk):
for i in indices:
pair = (query, db_names[i])
pairs.append(pair)
logging.info(f'Found {len(pairs)} pairs.')
with open(output, 'w') as f:
f.write('\n'.join(' '.join([i, j]) for i, j in pairs))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--descriptors', type=Path, required=True)
parser.add_argument('--output', type=Path, required=True)
parser.add_argument('--num_matched', type=int, required=True)
parser.add_argument('--db_prefix', type=str)
parser.add_argument('--query_prefix', type=str)
parser.add_argument('--db_list', type=Path)
parser.add_argument('--query_list', type=Path)
args = parser.parse_args()
main(**args.__dict__)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
site-packages/pskf/tools/run/pythonmodule.py
|
"""
pythonmodule
============
Provides utility functions for working with SHEMAT-Suite output in Python.
"""
import os
import shutil
###############################################################################
# Directories #
###############################################################################
python_dir = os.environ['HOME']+'/pyshemkf'
python_scripts_dir = python_dir+'/scripts'
python_output_dir = python_dir+'/output'
###############################################################################
# Output #
###############################################################################
def py_output_dir(tag, ending):
"""
Generate Python output directory according to tag and ending.
Parameters
----------
tag : string
Subdirectory name in ~/pyshemkf/output
ending : string
File ending of output.
Examples: npy, png, jpg, eps, pdf
Returns
----------
py_output_dir : string
Designated output directory.
"""
py_output_dir = (python_output_dir + "/"
+ tag + "/"
+ ending)
return py_output_dir
def py_output_filename(tag, filename, spec, ending):
"""
Generate Python output filename (with specifier)
according to filename (without ending), tag, ending and spec.
Parameters
----------
tag : string
Subdirectory name in ~/pyshemkf/output
filename : string
Filename body, without ending.
spec : string
Output identifier that will be added to the filename.
ending : string
File ending of output.
Examples: npy, png, jpg, eps, pdf
Returns
----------
py_output_filename : string
Absolute filename for output file.
Notes
----------
spec is added to filename to form the filename body.
The format-convention for spec is of the following form
(see pskf.tools.plot.spec.specl()):
'model_2018_01_01_b'
In principle any string can be used as spec.
"""
py_output_filename = (py_output_dir(tag, ending) + "/"
+ filename + "_" + spec + "."
+ ending)
return py_output_filename
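# A minimal usage sketch (the tag, filename and spec values are illustrative):
#
#   py_output_filename("errorplots", "meanerror", "model_2018_01_01_b", "png")
#   returns "<HOME>/pyshemkf/output/errorplots/png/meanerror_model_2018_01_01_b.png"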
def py_simple_output_filename(filename, tag, ending):
"""
Generate Python simple output filename (without specifier)
according to filename (without ending), tag, ending.
Parameters
----------
filename : string
Filename body, without ending.
tag : string
Subdirectory name in ~/pyshemkf/output
ending : string
File ending of output.
Examples: npy, png, jpg, eps, pdf
Returns
----------
py_simple_output_filename : string
Absolute filename for output file.
Notes
----------
filename is used as the body of the output filename; nothing is added.
"""
py_simple_output_filename = (py_output_dir(tag, ending) + "/"
+ filename + "."
+ ending)
return py_simple_output_filename
###############################################################################
# Script Backup #
###############################################################################
def py_backup(python_sub_dir, tag, filename, ending, spec):
"""
Copy a python script to backup directory and add specifier
Parameters
----------
python_sub_dir : string
Absolute subdirectory of the script, mostly
~/PythonExecDir and ~/pyshemkf/scripts
tag : string
Subdirectory name in ~/PythonExecDir or
~/pyshemkf/scripts
filename : string
Filename body, without ending.
ending : string
File ending of output.
Examples: npy, png, jpg, eps, pdf
spec : string
Output identifier that will be added to the filename.
Notes
----------
Returns nothing, but copies the file to a possibly generated backup
directory.
spec is added to filename to form the filename body.
The format-convention for spec is of the following form
(see pskf.tools.plot.spec.specl()):
'model_2018_01_01_b'
In principle any string can be used as spec.
"""
# Script Name
py_file_name = (python_sub_dir + "/"
+ tag + "/"
+ filename + "."
+ ending)
# Possibly create backup directory
if not os.path.exists(python_sub_dir+"/"+tag+"/backup"):
os.mkdir(python_sub_dir+"/"+tag+"/backup")
# Backup Script Name
py_backup_file_name = (python_sub_dir + "/"
+ tag + "/"
+ "backup" + "/"
+ filename + "_"
+ spec + "."
+ ending)
# Exception if file already exists
if os.path.isfile(py_backup_file_name):
os.remove(py_backup_file_name)
print('Removed old file: '+py_backup_file_name)
shutil.copyfile(py_file_name, py_backup_file_name)
print('Backup as '+py_backup_file_name)
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
commands/tools.go
|
package commands
import (
"os"
"path/filepath"
"strings"
"github.com/toolkits/file"
"github.com/fatih/color"
)
type Tool struct {
Name string `json:"name"`
Alias string `json:"alias"`
Install string `json:"install"`
Summary string `json:"summary"`
Requirements []string `json:"requirements"`
Platform []string `json:"platform"`
requires []*Tool
}
var tools = []*Tool{
{
Name: "wire",
Alias: "wire",
Install: "go install github.com/google/wire/cmd/wire@latest",
Summary: "",
Requirements: []string{},
Platform: []string{"darwin", "linux", "windows"},
},
{
Name: "swag",
Alias: "swag",
Install: "go install github.com/swaggo/swag/cmd/[email protected]",
Summary: "",
Requirements: []string{},
Platform: []string{"darwin", "linux", "windows"},
},
}
func installTools() (err error) {
for _, t := range tools {
if err = t.install(); err != nil {
return
}
}
return
}
func findTool(name string) *Tool {
for i, tool := range tools {
if tool.Name == name {
return tools[i]
}
}
return nil
}
func (t *Tool) check() (ok bool) {
gopath := os.Getenv("GOPATH")
if gopath == "" {
gopath = filepath.Join(os.Getenv("HOME"), "go")
}
if !file.IsExist(filepath.Join(gopath, "bin", t.Name)) {
return false
}
return true
}
func (t *Tool) install() (err error) {
if t == nil {
return
}
if t.check() {
return
}
for _, r := range t.Requirements {
if err = findTool(r).install(); err != nil {
return
}
}
cmds := strings.Split(t.Install, " ")
if len(cmds) > 0 {
err = execCmd(cmds[0], cmds[1:]...)
if err != nil {
color.Red("%s: install failed!", t.Name)
} else {
color.Green("%s: install success!", t.Name)
}
}
return
}
|
[
"\"GOPATH\"",
"\"HOME\""
] |
[] |
[
"GOPATH",
"HOME"
] |
[]
|
["GOPATH", "HOME"]
|
go
| 2 | 0 | |
certs/acme_client.go
|
package certs
import (
"os"
"github.com/xenolf/lego/lego"
)
// NewAcmeClient returns a new AcmeClient
func NewAcmeClient(u *User) (*lego.Client, error) {
var (
c *lego.Config
cl *lego.Client
)
c = lego.NewConfig(u)
c.CADirURL = acmeURL()
cl, err := lego.NewClient(c)
if err != nil {
return nil, err
}
return cl, nil
}
func acmeURL() string {
if u := os.Getenv("CONCOURSE_UP_ACME_URL"); u != "" {
return u
}
return lego.LEDirectoryProduction
}
|
[
"\"CONCOURSE_UP_ACME_URL\""
] |
[] |
[
"CONCOURSE_UP_ACME_URL"
] |
[]
|
["CONCOURSE_UP_ACME_URL"]
|
go
| 1 | 0 | |
tests/test_utils/system_tests_class.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from contextlib import ContextDecorator
from shutil import move
from tempfile import mkdtemp
from unittest import SkipTest, TestCase
from airflow import AirflowException, models
from airflow.configuration import AIRFLOW_HOME, AirflowConfigParser, get_airflow_config
from airflow.utils import db
from airflow.utils.log.logging_mixin import LoggingMixin
AIRFLOW_MAIN_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir)
)
DEFAULT_DAG_FOLDER = os.path.join(AIRFLOW_MAIN_FOLDER, "airflow", "example_dags")
SKIP_SYSTEM_TEST_WARNING = """Skipping system test.
To allow system test set ENABLE_SYSTEM_TESTS=true.
"""
def resolve_dags_folder() -> str:
"""
Returns DAG folder specified in current Airflow config.
"""
config_file = get_airflow_config(AIRFLOW_HOME)
conf = AirflowConfigParser()
conf.read(config_file)
try:
dags = conf.get("core", "dags_folder")
except AirflowException:
dags = os.path.join(AIRFLOW_HOME, 'dags')
return dags
class empty_dags_directory( # pylint: disable=invalid-name
ContextDecorator, LoggingMixin
):
"""
Context manager that temporally removes DAGs from provided directory.
"""
def __init__(self, dag_directory: str) -> None:
super().__init__()
self.dag_directory = dag_directory
self.temp_dir = mkdtemp()
def __enter__(self) -> str:
self._store_dags_to_temporary_directory(self.dag_directory, self.temp_dir)
return self.temp_dir
def __exit__(self, *args, **kwargs) -> None:
self._restore_dags_from_temporary_directory(self.dag_directory, self.temp_dir)
def _store_dags_to_temporary_directory(
self, dag_folder: str, temp_dir: str
) -> None:
self.log.info(
"Storing DAGS from %s to temporary directory %s", dag_folder, temp_dir
)
try:
os.mkdir(dag_folder)
except OSError:
pass
for file in os.listdir(dag_folder):
move(os.path.join(dag_folder, file), os.path.join(temp_dir, file))
def _restore_dags_from_temporary_directory(
self, dag_folder: str, temp_dir: str
) -> None:
self.log.info(
"Restoring DAGS to %s from temporary directory %s", dag_folder, temp_dir
)
for file in os.listdir(temp_dir):
move(os.path.join(temp_dir, file), os.path.join(dag_folder, file))
class SystemTest(TestCase, LoggingMixin):
def run(self, result=None):
if os.environ.get('ENABLE_SYSTEM_TESTS') != 'true':
raise SkipTest(SKIP_SYSTEM_TEST_WARNING)
return super().run(result)
def setUp(self) -> None:
"""
We want to avoid random errors while the database gets reset - those
are apparently triggered by the parser trying to parse DAGs while
the tables are dropped. We move the DAGs temporarily out of the dags folder
and move them back after the reset.
"""
dag_folder = resolve_dags_folder()
with empty_dags_directory(dag_folder):
db.resetdb()
super().setUp()
def run_dag(self, dag_id: str, dag_folder: str = DEFAULT_DAG_FOLDER) -> None:
"""
Runs an example DAG by its ID.
:param dag_id: id of a DAG to be run
:type dag_id: str
:param dag_folder: directory where to look for the specific DAG. Relative to AIRFLOW_HOME.
:type dag_folder: str
"""
self.log.info("Looking for DAG: %s in %s", dag_id, dag_folder)
dag_bag = models.DagBag(dag_folder=dag_folder, include_examples=False)
dag = dag_bag.get_dag(dag_id)
if dag is None:
raise AirflowException(
"The Dag {dag_id} could not be found. It's either an import problem,"
"wrong dag_id or DAG is not in provided dag_folder."
"The content of the {dag_folder} folder is {content}".format(
dag_id=dag_id,
dag_folder=dag_folder,
content=os.listdir(dag_folder),
)
)
self.log.info("Attempting to run DAG: %s", dag_id)
dag.clear(reset_dag_runs=True)
dag.run(ignore_first_depends_on_past=True, verbose=True)
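# A minimal usage sketch (the subclass name and DAG id are illustrative):
#
#   class ExampleSystemTest(SystemTest):
#       def test_run_example_dag(self):
#           self.run_dag("example_bash_operator", DEFAULT_DAG_FOLDER)
#
# The test body only runs when ENABLE_SYSTEM_TESTS=true is set in the environment.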
|
[] |
[] |
[
"ENABLE_SYSTEM_TESTS"
] |
[]
|
["ENABLE_SYSTEM_TESTS"]
|
python
| 1 | 0 | |
plugin/aws/root.go
|
// Package aws presents a filesystem hierarchy for AWS resources.
//
// It uses the AWS_SHARED_CREDENTIALS_FILE environment variable or
// $HOME/.aws/credentials to configure AWS access.
package aws
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/puppetlabs/wash/activity"
"github.com/puppetlabs/wash/plugin"
"gopkg.in/go-ini/ini.v1"
)
// Root of the AWS plugin
type Root struct {
plugin.EntryBase
profs []string
}
func awsCredentialsFile() (string, error) {
if filename := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(filename) != 0 {
return filename, nil
}
homedir, err := os.UserHomeDir()
if err != nil {
return "", fmt.Errorf("could not determine the location of the AWS credentials file: %v", err)
}
return filepath.Join(homedir, ".aws", "credentials"), nil
}
func awsConfigFile() (string, error) {
if filename := os.Getenv("AWS_CONFIG_FILE"); len(filename) != 0 {
return filename, nil
}
homedir, err := os.UserHomeDir()
if err != nil {
return "", fmt.Errorf("could not determine the location of the AWS config file: %v", err)
}
return filepath.Join(homedir, ".aws", "config"), nil
}
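// For reference, a typical credentials file that this plugin reads looks like
// the following (the profile name and values are placeholders):
//
//	[default]
//	aws_access_key_id = AKIAEXAMPLE
//	aws_secret_access_key = example-secret-key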
func exists(path string) error {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return fmt.Errorf("could not load any profiles: the %v file does not exist", path)
}
return err
}
return nil
}
// Init for root
func (r *Root) Init(cfg map[string]interface{}) error {
r.EntryBase = plugin.NewEntry("aws")
r.SetTTLOf(plugin.ListOp, 1*time.Minute)
if profsI, ok := cfg["profiles"]; ok {
profs, ok := profsI.([]interface{})
if !ok {
return fmt.Errorf("aws.profiles config must be an array of strings, not %s", profs)
}
r.profs = make([]string, len(profs))
for i, elem := range profs {
prof, ok := elem.(string)
if !ok {
return fmt.Errorf("aws.profiles config must be an array of strings, not %s", profs)
}
r.profs[i] = prof
}
}
// Force authorizing profiles on startup
_, err := r.List(context.Background())
return err
}
// ChildSchemas returns the root's child schema
func (r *Root) ChildSchemas() []*plugin.EntrySchema {
return []*plugin.EntrySchema{
(&profile{}).Schema(),
}
}
// Schema returns the root's schema
func (r *Root) Schema() *plugin.EntrySchema {
return plugin.NewEntrySchema(r, "aws").IsSingleton()
}
// List the available AWS profiles
func (r *Root) List(ctx context.Context) ([]plugin.Entry, error) {
awsCredentials, err := awsCredentialsFile()
if err != nil {
return nil, err
}
if err := exists(awsCredentials); err != nil {
return nil, err
}
awsConfig, err := awsConfigFile()
if err != nil {
return nil, err
}
if err := exists(awsConfig); err != nil {
return nil, err
}
activity.Record(ctx, "Loading profiles from %v and %v", awsConfig, awsCredentials)
cred, err := ini.Load(awsCredentials)
if err != nil {
return nil, fmt.Errorf("failed to read %v: %v", awsCredentials, err)
}
config, err := ini.Load(awsConfig)
if err != nil {
return nil, fmt.Errorf("failed to read %v: %v", awsConfig, err)
}
names := make(map[string]struct{})
for _, section := range cred.Sections() {
names[section.Name()] = struct{}{}
}
for _, section := range config.Sections() {
// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html
// Named profiles in config begin with 'profile '. Trim that so config and credentials
// entries match up.
names[strings.TrimPrefix(section.Name(), "profile ")] = struct{}{}
}
profs := make(map[string]struct{})
for _, p := range r.profs {
profs[p] = struct{}{}
}
var profiles []plugin.Entry
for name := range names {
if name == "DEFAULT" {
continue
}
if _, ok := profs[name]; len(profs) > 0 && !ok {
continue
}
profile, err := newProfile(ctx, name)
if err != nil {
activity.Record(ctx, err.Error())
continue
}
profiles = append(profiles, profile)
}
return profiles, nil
}
|
[
"\"AWS_SHARED_CREDENTIALS_FILE\"",
"\"AWS_CONFIG_FILE\""
] |
[] |
[
"AWS_CONFIG_FILE",
"AWS_SHARED_CREDENTIALS_FILE"
] |
[]
|
["AWS_CONFIG_FILE", "AWS_SHARED_CREDENTIALS_FILE"]
|
go
| 2 | 0 | |
cmd/istioctl/mixer.go
|
// Copyright 2017 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"github.com/ghodss/yaml"
rpc "github.com/googleapis/googleapis/google/rpc"
"github.com/spf13/cobra"
"k8s.io/client-go/rest"
"istio.io/pilot/adapter/config/crd"
)
// TODO This should come from something like istio.io/api instead of
// being hand copied from istio.io/mixer.
type mixerAPIResponse struct {
Data interface{} `json:"data,omitempty"`
Status rpc.Status `json:"status,omitempty"`
}
const (
scopesPath = "api/v1/scopes/"
)
type k8sRESTRequester struct {
namespace string
service string
client *rest.RESTClient
}
// Request sends requests through the Kubernetes apiserver proxy to
// the a Kubernetes service.
// (see https://kubernetes.io/docs/concepts/cluster-administration/access-cluster/#discovering-builtin-services)
func (rr *k8sRESTRequester) Request(method, path string, inBody []byte) (int, []byte, error) {
// Kubernetes apiserver proxy prefix for the specified namespace and service.
absPath := fmt.Sprintf("api/v1/namespaces/%s/services/%s/proxy", rr.namespace, rr.service)
// API server resource path.
absPath += "/" + path
var status int
outBody, err := rr.client.Verb(method).
AbsPath(absPath).
SetHeader("Content-Type", "application/json").
Body(inBody).
Do().
StatusCode(&status).
Raw()
return status, outBody, err
}
var (
mixerFile string
mixerFileContent []byte
mixerAPIServerAddr string // deprecated
istioMixerAPIService string
mixerRESTRequester *k8sRESTRequester
mixerCmd = &cobra.Command{
Use: "mixer",
Short: "Istio Mixer configuration",
Long: `
The Mixer configuration API allows users to configure all facets of the
Mixer.
See https://istio.io/docs/concepts/policy-and-control/mixer-config.html
for a description of Mixer configuration's scope, subject, and rules.
`,
SilenceUsage: true,
PersistentPreRunE: func(c *cobra.Command, args []string) error {
restconfig, err := crd.CreateRESTConfig(kubeconfig)
if err != nil {
return err
}
client, err := rest.RESTClientFor(restconfig)
if err != nil {
return err
}
mixerRESTRequester = &k8sRESTRequester{
client: client,
namespace: namespace,
service: istioMixerAPIService,
}
if c.Name() == "create" {
if mixerFile == "" {
return errors.New(c.UsageString())
}
data, err := ioutil.ReadFile(mixerFile)
if err != nil {
return fmt.Errorf("failed opening %s: %v", mixerFile, err)
}
mixerFileContent = data
}
return nil
},
}
mixerRuleCmd = &cobra.Command{
Use: "rule",
Short: "Istio Mixer Rule configuration",
Long: `
Create and list Mixer rules in the configuration server.
`,
SilenceUsage: true,
}
mixerRuleCreateCmd = &cobra.Command{
Use: "create <scope> <subject>",
Short: "Create Istio Mixer rules",
Example: `
# Create a new Mixer rule for the given scope and subject.
istioctl mixer rule create global myservice.ns.svc.cluster.local -f mixer-rule.yml
`,
RunE: func(c *cobra.Command, args []string) error {
if len(args) != 2 {
return errors.New(c.UsageString())
}
return mixerRuleCreate(args[0], args[1], mixerFileContent)
},
}
mixerRuleGetCmd = &cobra.Command{
Use: "get <scope> <subject>",
Short: "Get Istio Mixer rules",
Long: `
Get Mixer rules for a given scope and subject.
`,
Example: `
# Get the Mixer rule with scope='global' and subject='myservice.ns.svc.cluster.local'
istioctl mixer rule get global myservice.ns.svc.cluster.local
`,
RunE: func(c *cobra.Command, args []string) error {
if len(args) != 2 {
return errors.New(c.UsageString())
}
out, err := mixerRuleGet(args[0], args[1])
if err != nil {
return err
}
fmt.Println(out)
return nil
},
}
mixerRuleDeleteCmd = &cobra.Command{
Use: "delete <scope> <subject>",
Short: "Delete Istio Mixer rules",
Long: `
Delete Mixer rules for a given scope and subject.
`,
Example: `
# Delete Mixer rules with scope='global' and subject='myservice.ns.svc.cluster.local'
istioctl mixer rule delete global myservice.ns.svc.cluster.local
`,
RunE: func(c *cobra.Command, args []string) error {
if len(args) != 2 {
return errors.New(c.UsageString())
}
return mixerRuleDelete(args[0], args[1])
},
}
mixerAdapterCmd = &cobra.Command{
Use: "adapter",
Short: "Istio Mixer Adapter configuration",
Long: "Create and list Mixer adapters in the configuration server.",
SilenceUsage: true,
}
mixerAdapterCreateCmd = &cobra.Command{
Use: "create <scope>",
Short: "Create Istio Mixer adapters",
Example: `
# Create new Mixer adapter configs for the given scope.
istioctl mixer adapter create global -f adapters.yml
`,
RunE: mixerAdapterOrDescriptorCreateRunE,
}
mixerAdapterGetCmd = &cobra.Command{
Use: "get <scope>",
Short: "Get Istio Mixer adapters",
Example: `
# Get the Mixer adapter configs for the given scope.
istioctl mixer adapter get global
`,
RunE: mixerAdapterOrDescriptorGetRunE,
}
mixerDescriptorCmd = &cobra.Command{
Use: "descriptor",
Short: "Istio Mixer Descriptor configuration",
Long: "Create and list Mixer descriptors in the configuration server.",
SilenceUsage: true,
}
mixerDescriptorCreateCmd = &cobra.Command{
Use: "create <scope>",
Short: "Create Istio Mixer descriptors",
Example: `
# Create new Mixer descriptor configs for the given scope.
istioctl mixer descriptor create global -f adapters.yml
`,
RunE: mixerAdapterOrDescriptorCreateRunE,
}
mixerDescriptorGetCmd = &cobra.Command{
Use: "get <scope>",
Short: "Get Istio Mixer descriptors",
Example: `
# Get the Mixer descriptor configs for the given scope.
istioctl mixer descriptor get global
`,
RunE: mixerAdapterOrDescriptorGetRunE,
}
)
func mixerGet(path string) (string, error) {
status, body, err := mixerRESTRequester.Request(http.MethodGet, path, nil)
if err != nil {
return "", err
}
if status != http.StatusOK {
return "", errors.New(http.StatusText(status))
}
var response mixerAPIResponse
if err = json.Unmarshal(body, &response); err != nil {
return "", fmt.Errorf("failed processing response: %v", err)
}
data, err := yaml.Marshal(response.Data)
if err != nil {
return "", fmt.Errorf("failed formatting response: %v", err)
}
return string(data), nil
}
func mixerRequest(method, path string, reqBody []byte) error {
status, respBody, err := mixerRESTRequester.Request(method, path, reqBody)
// If we got output, let's look at it, even if we got an error. The output might include the reason for the error.
if respBody != nil {
var response mixerAPIResponse
message := "unknown"
if errJSON := json.Unmarshal(respBody, &response); errJSON == nil {
message = response.Status.Message
}
if status != http.StatusOK {
return fmt.Errorf("failed to %s %s with status %v: %s", method, path, status, message)
}
fmt.Printf("%s\n", message)
}
return err
}
func mixerRulePath(scope, subject string) string {
return scopesPath + fmt.Sprintf("%s/subjects/%s/rules", url.PathEscape(scope), url.PathEscape(subject))
}
func mixerRuleCreate(scope, subject string, rule []byte) error {
return mixerRequest(http.MethodPut, mixerRulePath(scope, subject), rule)
}
func mixerRuleGet(scope, subject string) (string, error) {
return mixerGet(mixerRulePath(scope, subject))
}
func mixerRuleDelete(scope, subject string) error {
return mixerRequest(http.MethodDelete, mixerRulePath(scope, subject), nil)
}
func mixerAdapterOrDescriptorPath(scope, name string) string {
return scopesPath + fmt.Sprintf("%s/%s", url.PathEscape(scope), url.PathEscape(name))
}
func mixerAdapterOrDescriptorCreate(scope, name string, config []byte) error {
path := mixerAdapterOrDescriptorPath(scope, name)
return mixerRequest(http.MethodPut, path, config)
}
func mixerAdapterOrDescriptorGet(scope, name string) (string, error) {
path := mixerAdapterOrDescriptorPath(scope, name)
return mixerGet(path)
}
func mixerAdapterOrDescriptorCreateRunE(c *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New(c.UsageString())
}
return mixerAdapterOrDescriptorCreate(args[0], c.Parent().Name()+"s", mixerFileContent)
}
func mixerAdapterOrDescriptorGetRunE(c *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New(c.UsageString())
}
out, err := mixerAdapterOrDescriptorGet(args[0], c.Parent().Name()+"s")
if err != nil {
return err
}
fmt.Println(out)
return nil
}
func init() {
mixerRuleCreateCmd.PersistentFlags().StringVarP(&mixerFile, "file", "f", "",
"Input file with contents of the Mixer rule")
mixerAdapterCreateCmd.PersistentFlags().StringVarP(&mixerFile, "file", "f", "",
"Input file with contents of the adapters config")
mixerDescriptorCmd.PersistentFlags().StringVarP(&mixerFile, "file", "f", "",
"Input file with contents of the descriptors config")
mixerCmd.PersistentFlags().StringVar(&istioMixerAPIService,
"mixerAPIService", "istio-mixer:9094",
"Name of istio-mixer service. When --kube=false this sets the address of the mixer service")
// TODO remove this flag once istio/istio integration tests are
// updated to use mixer service
mixerCmd.PersistentFlags().StringVar(&mixerAPIServerAddr, "mixer", os.Getenv("ISTIO_MIXER_API_SERVER"),
"(deprecated) Address of the Mixer configuration server as <host>:<port>")
mixerRuleCmd.AddCommand(mixerRuleCreateCmd)
mixerRuleCmd.AddCommand(mixerRuleGetCmd)
mixerRuleCmd.AddCommand(mixerRuleDeleteCmd)
mixerCmd.AddCommand(mixerRuleCmd)
mixerAdapterCmd.AddCommand(mixerAdapterCreateCmd)
mixerAdapterCmd.AddCommand(mixerAdapterGetCmd)
mixerCmd.AddCommand(mixerAdapterCmd)
mixerDescriptorCmd.AddCommand(mixerDescriptorCreateCmd)
mixerDescriptorCmd.AddCommand(mixerDescriptorGetCmd)
mixerCmd.AddCommand(mixerDescriptorCmd)
rootCmd.AddCommand(mixerCmd)
}
|
[
"\"ISTIO_MIXER_API_SERVER\""
] |
[] |
[
"ISTIO_MIXER_API_SERVER"
] |
[]
|
["ISTIO_MIXER_API_SERVER"]
|
go
| 1 | 0 | |
pkg/asset/installconfig/aws/aws.go
|
// Package aws collects AWS-specific configuration.
package aws
import (
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/openshift/installer/pkg/types/aws"
"github.com/openshift/installer/pkg/types/aws/validation"
"github.com/openshift/installer/pkg/version"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
survey "gopkg.in/AlecAivazis/survey.v1"
ini "gopkg.in/ini.v1"
)
// Platform collects AWS-specific configuration.
func Platform() (*aws.Platform, error) {
longRegions := make([]string, 0, len(validation.Regions))
shortRegions := make([]string, 0, len(validation.Regions))
for id, location := range validation.Regions {
longRegions = append(longRegions, fmt.Sprintf("%s (%s)", id, location))
shortRegions = append(shortRegions, id)
}
regionTransform := survey.TransformString(func(s string) string {
return strings.SplitN(s, " ", 2)[0]
})
defaultRegion := "us-east-1"
_, ok := validation.Regions[defaultRegion]
if !ok {
panic(fmt.Sprintf("installer bug: invalid default AWS region %q", defaultRegion))
}
ssn, err := GetSession()
if err != nil {
return nil, err
}
defaultRegionPointer := ssn.Config.Region
if defaultRegionPointer != nil && *defaultRegionPointer != "" {
_, ok := validation.Regions[*defaultRegionPointer]
if ok {
defaultRegion = *defaultRegionPointer
} else {
logrus.Warnf("Unrecognized AWS region %q, defaulting to %s", *defaultRegionPointer, defaultRegion)
}
}
sort.Strings(longRegions)
sort.Strings(shortRegions)
var region string
err = survey.Ask([]*survey.Question{
{
Prompt: &survey.Select{
Message: "Region",
Help: "The AWS region to be used for installation.",
Default: fmt.Sprintf("%s (%s)", defaultRegion, validation.Regions[defaultRegion]),
Options: longRegions,
},
Validate: survey.ComposeValidators(survey.Required, func(ans interface{}) error {
choice := regionTransform(ans).(string)
i := sort.SearchStrings(shortRegions, choice)
if i == len(shortRegions) || shortRegions[i] != choice {
return errors.Errorf("invalid region %q", choice)
}
return nil
}),
Transform: regionTransform,
},
}, ®ion)
if err != nil {
return nil, err
}
return &aws.Platform{
Region: region,
}, nil
}
// GetSession returns an AWS session by checking credentials
// and, if no creds are found, asks for them and stores them on disk in a config file
func GetSession() (*session.Session, error) {
ssn := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
}))
ssn.Config.Credentials = credentials.NewChainCredentials([]credentials.Provider{
&credentials.EnvProvider{},
&credentials.SharedCredentialsProvider{},
})
_, err := ssn.Config.Credentials.Get()
if err == credentials.ErrNoValidProvidersFoundInChain {
err = getCredentials()
if err != nil {
return nil, err
}
}
ssn.Handlers.Build.PushBackNamed(request.NamedHandler{
Name: "openshiftInstaller.OpenshiftInstallerUserAgentHandler",
Fn: request.MakeAddToUserAgentHandler("OpenShift/4.x Installer", version.Raw),
})
return ssn, nil
}
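// A minimal usage sketch (illustrative only; the EC2 client is an assumption,
// any AWS SDK service client can be constructed from the returned session):
//
//	ssn, err := GetSession()
//	if err != nil {
//		return err
//	}
//	ec2Client := ec2.New(ssn)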
func getCredentials() error {
var keyID string
err := survey.Ask([]*survey.Question{
{
Prompt: &survey.Input{
Message: "AWS Access Key ID",
Help: "The AWS access key ID to use for installation (this is not your username).\nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html",
},
},
}, &keyID)
if err != nil {
return err
}
var secretKey string
err = survey.Ask([]*survey.Question{
{
Prompt: &survey.Password{
Message: "AWS Secret Access Key",
Help: "The AWS secret access key corresponding to your access key ID (this is not your password).",
},
},
}, &secretKey)
if err != nil {
return err
}
path := defaults.SharedCredentialsFilename()
logrus.Infof("Writing AWS credentials to %q (https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html)", path)
err = os.MkdirAll(filepath.Dir(path), 0700)
if err != nil {
return err
}
creds, err := ini.Load(path)
if err != nil {
if os.IsNotExist(err) {
creds = ini.Empty()
creds.Section("").Comment = "https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html"
} else {
return err
}
}
profile := os.Getenv("AWS_PROFILE")
if profile == "" {
profile = "default"
}
creds.Section(profile).Key("aws_access_key_id").SetValue(keyID)
creds.Section(profile).Key("aws_secret_access_key").SetValue(secretKey)
tempPath := path + ".tmp"
file, err := os.OpenFile(tempPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600)
if err != nil {
return err
}
defer file.Close()
_, err = creds.WriteTo(file)
if err != nil {
err2 := os.Remove(tempPath)
if err2 != nil {
logrus.Error(errors.Wrap(err2, "failed to remove partially-written credentials file"))
}
return err
}
return os.Rename(tempPath, path)
}
|
[
"\"AWS_PROFILE\""
] |
[] |
[
"AWS_PROFILE"
] |
[]
|
["AWS_PROFILE"]
|
go
| 1 | 0 | |
dependency/JThread-1.3.3/sphinxdoc/source/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# JThread documentation build configuration file, created by
# sphinx-quickstart on Fri May 13 17:36:32 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'test'
# General information about the project.
project = 'JThread'
copyright = '2004-now'
author = 'Jori Liesenborgs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
def checkDevel(version):
if os.getenv("READTHEDOCS") == 'True':
if os.getenv("READTHEDOCS_VERSION") == 'latest':
version += " (development version)"
return version
def getVersion():
curDir = os.path.dirname(os.path.realpath(__file__))
cmakePath = os.path.join(curDir,"../../CMakeLists.txt")
confPath = os.path.join(curDir,"../../configure")
if os.path.exists(cmakePath):
for l in open(cmakePath):
pref = "set(VERSION "
if l.startswith(pref):
l = l[len(pref):].strip()
idx = l.find(")")
version = l[:idx].strip()
return checkDevel(version)
elif os.path.exists(confPath):
for l in open(confPath):
l = l.strip()
pref = "VERSION="
if l.startswith(pref):
version = l[len(pref):].strip()
return checkDevel(version)
# Try to use README.TXT
readmePath = os.path.join(curDir, "../../README.TXT")
for l in open(readmePath):
l = l.strip()
pref = "Notes about JThread (v"
if l.startswith(pref):
l = l[len(pref):].strip()
idx = l.find(")")
version = l[:idx].strip()
return checkDevel(version)
raise Exception("No version number found")
version = getVersion()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = "classic"
html_theme_path = [ "." ]
#html_theme_options = {
# "rightsidebar": "true",
# "relbarbgcolor": "black"
#}
#import sphinx_rtd_theme
#html_theme = "sphinx_rtd_theme"
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'jthreaddoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'jthreaddoc.tex', 'JThread Documentation',
'Jori Liesenborgs', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jthreaddoc', 'JThread Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'jthreaddoc', 'JThread Documentation',
author, 'jthreaddoc', 'Thread Library',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def checkMarkdownSetting():
for l in open("Doxyfile"):
if "MARKDOWN_SUPPORT" in l: # Markdown was configured
return
# In older doxygen, there was no markdown so we'll set the default
# to NO to avoid strange effects
with open("Doxyfile", "at") as f:
f.write("\nMARKDOWN_SUPPORT = NO\n")
import subprocess
import os
import shutil
curpath = os.getcwd()
try:
if os.getenv("READTHEDOCS") == 'True':
dstdir = "_build/html"
else:
dstdir = "../build/html"
dstdir = os.path.abspath(dstdir)
os.chdir("../../doc")
subprocess.call("pdflatex manual.tex", shell=True)
subprocess.call("pdflatex manual.tex", shell=True)
subprocess.call("mv -f manual.pdf {}".format(dstdir), shell=True)
with open("{}/index.html".format(dstdir),"wt") as f:
f.write('''
<html>
<head>
<style>
body, html {
margin: 0px;
width: 100%;
height: 100%;
overflow: auto;
}
</style>
</head>
<body>
<embed src="manual.pdf" width="100%" height="100%" type="application/pdf">
</body>
</html>''')
finally:
os.chdir(curpath)
with open("test.rst", "wb") as f:
f.write("Test output\n")
f.write("===========\n\n")
f.write(".. code-block:: none\n\n")
output = subprocess.check_output("which doxygen ; pwd ; set ; ls ; ls _build ; ls _build/html ; ls ../ ; ", shell = True)
for l in output.splitlines():
f.write(" ")
f.write(l)
f.write("\n")
|
[] |
[] |
[
"READTHEDOCS_VERSION",
"READTHEDOCS"
] |
[]
|
["READTHEDOCS_VERSION", "READTHEDOCS"]
|
python
| 2 | 0 | |
integration-cli/docker_cli_build_test.go
|
package main
import (
"archive/tar"
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"strings"
"text/template"
"time"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/cli"
"github.com/docker/docker/integration-cli/cli/build"
"github.com/docker/docker/integration-cli/cli/build/fakecontext"
"github.com/docker/docker/integration-cli/cli/build/fakegit"
"github.com/docker/docker/integration-cli/cli/build/fakestorage"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stringutils"
"github.com/docker/docker/pkg/testutil"
icmd "github.com/docker/docker/pkg/testutil/cmd"
"github.com/go-check/check"
"github.com/opencontainers/go-digest"
)
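// TestBuildJSONEmptyRun checks that a build succeeds when the Dockerfile
// contains a JSON-form RUN instruction with an empty array ("RUN []").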
func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) {
cli.BuildCmd(c, "testbuildjsonemptyrun", build.WithDockerfile(`
FROM busybox
RUN []
`))
}
func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) {
name := "testbuildshcmdjsonentrypoint"
expected := "/bin/sh -c echo test"
if testEnv.DaemonPlatform() == "windows" {
expected = "cmd /S /C echo test"
}
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENTRYPOINT ["echo"]
CMD echo test
`))
out, _ := dockerCmd(c, "run", "--rm", name)
if strings.TrimSpace(out) != expected {
c.Fatalf("CMD did not contain %q : %q", expected, out)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) {
// Windows does not support FROM scratch or the USER command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM scratch
ENV user foo
USER ${user}
`))
res := inspectFieldJSON(c, name, "Config.User")
if res != `"foo"` {
c.Fatal("User foo from environment not in Config.User on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) {
name := "testbuildenvironmentreplacement"
var volumePath string
if testEnv.DaemonPlatform() == "windows" {
volumePath = "c:/quux"
} else {
volumePath = "/quux"
}
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
ENV volume `+volumePath+`
VOLUME ${volume}
`))
var volumes map[string]interface{}
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &volumes)
if _, ok := volumes[volumePath]; !ok {
c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image")
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) {
// Windows does not support FROM scratch or the EXPOSE command
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM scratch
ENV port 80
EXPOSE ${port}
ENV ports " 99 100 "
EXPOSE ${ports}
`))
var exposedPorts map[string]interface{}
inspectFieldAndUnmarshall(c, name, "Config.ExposedPorts", &exposedPorts)
exp := []int{80, 99, 100}
for _, p := range exp {
tmp := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[tmp]; !ok {
c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p)
}
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) {
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV MYWORKDIR /work
RUN mkdir ${MYWORKDIR}
WORKDIR ${MYWORKDIR}
`))
res := inspectFieldJSON(c, name, "Config.WorkingDir")
expected := `"/work"`
if testEnv.DaemonPlatform() == "windows" {
expected = `"C:\\work"`
}
if res != expected {
c.Fatalf("Workdir /workdir from environment not in Config.WorkingDir on image: %s", res)
}
}
func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) {
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM `+minimalBaseImage()+`
ENV baz foo
ENV quux bar
ENV dot .
ENV fee fff
ENV gee ggg
ADD ${baz} ${dot}
COPY ${quux} ${dot}
ADD ${zzz:-${fee}} ${dot}
COPY ${zzz:-${gee}} ${dot}
`),
build.WithFile("foo", "test1"),
build.WithFile("bar", "test2"),
build.WithFile("fff", "test3"),
build.WithFile("ggg", "test4"),
))
}
func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvironmentreplacement"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV foo zzz
ENV bar ${foo}
ENV abc1='$foo'
ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}"
RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo)
ENV abc2="\$foo"
RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo)
ENV abc3 '$foo'
RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo)
ENV abc4 "\$foo"
RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo)
ENV foo2="abc\def"
RUN [ "$foo2" = 'abc\def' ]
ENV foo3="abc\\def"
RUN [ "$foo3" = 'abc\def' ]
ENV foo4='abc\\def'
RUN [ "$foo4" = 'abc\\def' ]
ENV foo5='abc\def'
RUN [ "$foo5" = 'abc\def' ]
`))
envResult := []string{}
inspectFieldAndUnmarshall(c, name, "Config.Env", &envResult)
found := false
envCount := 0
for _, env := range envResult {
parts := strings.SplitN(env, "=", 2)
if parts[0] == "bar" {
found = true
if parts[1] != "zzz" {
c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
}
		} else if strings.HasPrefix(parts[0], "env") {
			envCount++
			if parts[1] != "zzz" {
				c.Fatalf("%s should be 'zzz' but instead it's %q", parts[0], parts[1])
			}
		}
}
if !found {
c.Fatal("Never found the `bar` env variable")
}
if envCount != 4 {
c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
}
}
func (s *DockerSuite) TestBuildHandleEscapesInVolume(c *check.C) {
// The volume paths used in this test are invalid on Windows
testRequires(c, DaemonIsLinux)
name := "testbuildhandleescapes"
testCases := []struct {
volumeValue string
expected string
}{
{
volumeValue: "${FOO}",
expected: "bar",
},
{
volumeValue: `\${FOO}`,
expected: "${FOO}",
},
// this test in particular provides *7* backslashes and expects 6 to come back.
// Like above, the first escape is swallowed and the rest are treated as
// literals, this one is just less obvious because of all the character noise.
{
volumeValue: `\\\\\\\${FOO}`,
			expected:    `\\\\\\${FOO}`,
},
}
for _, tc := range testCases {
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`
FROM scratch
ENV FOO bar
VOLUME %s
`, tc.volumeValue)))
var result map[string]map[string]struct{}
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result)
if _, ok := result[tc.expected]; !ok {
c.Fatalf("Could not find volume %s set from env foo in volumes table, got %q", tc.expected, result)
}
// Remove the image for the next iteration
dockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) {
name := "testbuildonbuildlowercase"
name2 := "testbuildonbuildlowercase2"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
onbuild run echo quux
`))
result := buildImage(name2, build.WithDockerfile(fmt.Sprintf(`
FROM %s
`, name)))
result.Assert(c, icmd.Success)
if !strings.Contains(result.Combined(), "quux") {
c.Fatalf("Did not receive the expected echo text, got %s", result.Combined())
}
if strings.Contains(result.Combined(), "ONBUILD ONBUILD") {
c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", result.Combined())
}
}
func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvescapes"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV TEST foo
CMD echo \$
`))
out, _ := dockerCmd(c, "run", "-t", name)
if strings.TrimSpace(out) != "$" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) {
// ENV expansions work differently in Windows
testRequires(c, DaemonIsLinux)
name := "testbuildenvoverwrite"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV TEST foo
CMD echo ${TEST}
`))
out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name)
if strings.TrimSpace(out) != "bar" {
c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
}
}
// FIXME(vdemeester) why do we disable the cache here?
func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
cli.BuildCmd(c, name1, build.WithDockerfile(`
FROM busybox
ONBUILD CMD ["hello world"]
ONBUILD ENTRYPOINT ["echo"]
ONBUILD RUN ["true"]`))
cli.BuildCmd(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s`, name1)))
result := cli.DockerCmd(c, "run", name2)
result.Assert(c, icmd.Expected{Out: "hello world"})
}
// FIXME(vdemeester) why do we disable the cache here?
func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) {
name1 := "onbuildcmd"
name2 := "onbuildgenerated"
buildImageSuccessfully(c, name1, build.WithDockerfile(`
FROM busybox
ONBUILD ENTRYPOINT ["echo"]`))
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1)))
out, _ := dockerCmd(c, "run", name2)
if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
c.Fatal("got malformed output from onbuild", out)
}
}
func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
name := "testbuildtwoimageswithadd"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"robots.txt": "hello",
"index.html": "world",
}))
defer server.Close()
cli.BuildCmd(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch
ADD %s/robots.txt /`, server.URL())))
result := cli.Docker(cli.Build(name), build.WithDockerfile(fmt.Sprintf(`FROM scratch
ADD %s/index.html /`, server.URL())))
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), "Using cache") {
c.Fatal("2nd build used cache on ADD, it shouldn't")
}
}
func (s *DockerSuite) TestBuildLastModified(c *check.C) {
// Temporary fix for #30890. TODO @jhowardmsft figure out what
// has changed in the master busybox image.
testRequires(c, DaemonIsLinux)
name := "testbuildlastmodified"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"file": "hello",
}))
defer server.Close()
var out, out2 string
dFmt := `FROM busybox
ADD %s/file /`
dockerfile := fmt.Sprintf(dFmt, server.URL())
cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined()
// Build it again and make sure the mtime of the file didn't change.
// Wait a few seconds to make sure the time changed enough to notice
time.Sleep(2 * time.Second)
cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined()
if out != out2 {
c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", out, out2)
}
// Now 'touch' the file and make sure the timestamp DID change this time
// Create a new fakeStorage instead of just using Add() to help windows
server = fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"file": "hello",
}))
defer server.Close()
dockerfile = fmt.Sprintf(dFmt, server.URL())
cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
out2 = cli.DockerCmd(c, "run", name, "ls", "-le", "/file").Combined()
if out == out2 {
c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", out, out2)
}
}
// Regression for https://github.com/docker/docker/pull/27805
// Makes sure that we don't use the cache if the contents of
// a file in a subfolder of the context are modified and we re-build.
func (s *DockerSuite) TestBuildModifyFileInFolder(c *check.C) {
name := "testbuildmodifyfileinfolder"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
RUN ["mkdir", "/test"]
ADD folder/file /test/changetarget`))
defer ctx.Close()
if err := ctx.Add("folder/file", "first"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
if err := ctx.Add("folder/file", "second"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("cache was used even though file contents in folder was changed")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddimg", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
build.WithFile("test_file", "test1")))
}
// Issue #3960: "ADD src ." hangs
func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) {
name := "testaddsinglefiletoworkdir"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(
`FROM busybox
ADD test_file .`),
fakecontext.WithFiles(map[string]string{
"test_file": "test1",
}))
defer ctx.Close()
errChan := make(chan error)
go func() {
errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
cli.BuildCmd(c, "testaddsinglefiletoexistdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"robots.txt": "hello",
}))
defer server.Close()
cli.BuildCmd(c, "testcopymultiplefilestofile", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file1 test_file2 /exists/
ADD test_file3 test_file4 %s/robots.txt /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
`, server.URL())),
build.WithFile("test_file1", "test1"),
build.WithFile("test_file2", "test2"),
build.WithFile("test_file3", "test3"),
build.WithFile("test_file3", "test3"),
build.WithFile("test_file4", "test4")))
}
// These tests are mainly for user namespaces to verify that new directories
// are created as the remapped root uid/gid pair
func (s *DockerSuite) TestBuildUsernamespaceValidateRemappedRoot(c *check.C) {
testRequires(c, DaemonIsLinux)
testCases := []string{
"ADD . /new_dir",
"COPY test_dir /new_dir",
"WORKDIR /new_dir",
}
name := "testbuildusernamespacevalidateremappedroot"
for _, tc := range testCases {
cli.BuildCmd(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
%s
RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, tc)),
build.WithFile("test_dir/test_file", "test file")))
cli.DockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildAddAndCopyFileWithWhitespace(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently passing on Windows
name := "testaddfilewithwhitespace"
for _, command := range []string{"ADD", "COPY"} {
cli.BuildCmd(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN mkdir "/test dir"
RUN mkdir "/test_dir"
%s [ "test file1", "/test_file1" ]
%s [ "test_file2", "/test file2" ]
%s [ "test file3", "/test file3" ]
%s [ "test dir/test_file4", "/test_dir/test_file4" ]
%s [ "test_dir/test_file5", "/test dir/test_file5" ]
%s [ "test dir/test_file6", "/test dir/test_file6" ]
RUN [ $(cat "/test_file1") = 'test1' ]
RUN [ $(cat "/test file2") = 'test2' ]
RUN [ $(cat "/test file3") = 'test3' ]
RUN [ $(cat "/test_dir/test_file4") = 'test4' ]
RUN [ $(cat "/test dir/test_file5") = 'test5' ]
RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, command, command, command, command, command, command)),
build.WithFile("test file1", "test1"),
build.WithFile("test_file2", "test2"),
build.WithFile("test file3", "test3"),
build.WithFile("test dir/test_file4", "test4"),
build.WithFile("test_dir/test_file5", "test5"),
build.WithFile("test dir/test_file6", "test6"),
))
cli.DockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildCopyFileWithWhitespaceOnWindows(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `FROM ` + testEnv.MinimalBaseImage() + `
RUN mkdir "C:/test dir"
RUN mkdir "C:/test_dir"
COPY [ "test file1", "/test_file1" ]
COPY [ "test_file2", "/test file2" ]
COPY [ "test file3", "/test file3" ]
COPY [ "test dir/test_file4", "/test_dir/test_file4" ]
COPY [ "test_dir/test_file5", "/test dir/test_file5" ]
COPY [ "test dir/test_file6", "/test dir/test_file6" ]
RUN find "test1" "C:/test_file1"
RUN find "test2" "C:/test file2"
RUN find "test3" "C:/test file3"
RUN find "test4" "C:/test_dir/test_file4"
RUN find "test5" "C:/test dir/test_file5"
RUN find "test6" "C:/test dir/test_file6"`
name := "testcopyfilewithwhitespace"
cli.BuildCmd(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("test file1", "test1"),
build.WithFile("test_file2", "test2"),
build.WithFile("test file3", "test3"),
build.WithFile("test dir/test_file4", "test4"),
build.WithFile("test_dir/test_file5", "test5"),
build.WithFile("test dir/test_file6", "test6"),
))
}
func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) {
name := "testcopywildcard"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"robots.txt": "hello",
"index.html": "world",
}))
defer server.Close()
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM busybox
COPY file*.txt /tmp/
RUN ls /tmp/file1.txt /tmp/file2.txt
RUN [ "mkdir", "/tmp1" ]
COPY dir* /tmp1/
RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file
RUN [ "mkdir", "/tmp2" ]
ADD dir/*dir %s/robots.txt /tmp2/
RUN ls /tmp2/nest_nest_file /tmp2/robots.txt
`, server.URL())),
fakecontext.WithFiles(map[string]string{
"file1.txt": "test1",
"file2.txt": "test2",
"dir/nested_file": "nested file",
"dir/nested_dir/nest_nest_file": "2 times nested",
"dirt": "dirty",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Now make sure we use a cache the 2nd time
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) {
// Run this only on Linux
// Below is the original comment (that I don't agree with — vdemeester)
// Normally we would do c.Fatal(err) here but given that
// the odds of this failing are so rare, it must be because
// the OS we're running the client on doesn't support * in
// filenames (like windows). So, instead of failing the test
// just let it pass. Then we don't need to explicitly
// say which OSs this works on or not.
testRequires(c, DaemonIsLinux, UnixCli)
buildImageSuccessfully(c, "testcopywildcardinname", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
COPY *.txt /tmp/
RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ]
`),
build.WithFile("*.txt", "hi there"),
))
}
func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) {
name := "testcopywildcardcache"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
COPY file1.txt /tmp/`),
fakecontext.WithFiles(map[string]string{
"file1.txt": "test1",
}))
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Now make sure we use a cache the 2nd time even with wild cards.
// Use the same context so the file is the same and the checksum will match
ctx.Add("Dockerfile", `FROM busybox
COPY file*.txt /tmp/`)
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("didn't use the cache")
}
}
func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddsinglefiletononexistingdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
ADD test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testadddircontenttoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testadddircontenttoexistingdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
ADD test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddwholedirtoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
ADD test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
build.WithFile("test_dir/test_file", "test1")))
}
// Testing #5941: having an etc directory in the context conflicts with /etc/mtab
func (s *DockerSuite) TestBuildAddOrCopyEtcToRootShouldNotConflict(c *check.C) {
buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+`
ADD . /`),
build.WithFile("etc/test_file", "test1")))
buildImageSuccessfully(c, "testcopyetctoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+`
COPY . /`),
build.WithFile("etc/test_file", "test1")))
}
// Testing #9401: losing the setuid flag after an ADD
func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testaddetctoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
ADD suidbin /usr/bin/suidbin
RUN chmod 4755 /usr/bin/suidbin
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]
ADD ./data/ /
RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`),
build.WithFile("suidbin", "suidbin"),
build.WithFile("/data/usr/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopysinglefiletoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
build.WithFile("test_file", "test1")))
}
// Issue #3960: "ADD src ." hangs - adapted for COPY
func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) {
name := "testcopysinglefiletoworkdir"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
COPY test_file .`),
fakecontext.WithFiles(map[string]string{
"test_file": "test1",
}))
defer ctx.Close()
errChan := make(chan error)
go func() {
errChan <- buildImage(name, build.WithExternalBuildContext(ctx)).Error
close(errChan)
}()
select {
case <-time.After(15 * time.Second):
c.Fatal("Build with adding to workdir timed out")
case err := <-errChan:
c.Assert(err, check.IsNil)
}
}
func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopysinglefiletoexistdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_file /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific
buildImageSuccessfully(c, "testcopysinglefiletononexistdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio /exists
COPY test_file /test_dir/
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopydircontenttoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /
RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopydircontenttoexistdir", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN mkdir /exists
RUN touch /exists/exists_file
RUN chown -R dockerio.dockerio /exists
COPY test_dir/ /exists/
RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]
RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) {
testRequires(c, DaemonIsLinux) // Linux specific test
buildImageSuccessfully(c, "testcopywholedirtoroot", build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
RUN echo 'dockerio:x:1001:' >> /etc/group
RUN touch /exists
RUN chown dockerio.dockerio exists
COPY test_dir /test_dir
RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ]
RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ]
RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod)),
build.WithFile("test_dir/test_file", "test1")))
}
func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) {
testRequires(c, DaemonIsLinux) // Not currently working on Windows
dockerfile := `
FROM scratch
ADD links.tar /
ADD foo.txt /symlink/
`
targetFile := "foo.txt"
var (
name = "test-link-absolute"
)
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
defer ctx.Close()
tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
var symlinkTarget string
if runtime.GOOS == "windows" {
var driveLetter string
if abs, err := filepath.Abs(tempDir); err != nil {
c.Fatal(err)
} else {
driveLetter = abs[:1]
}
tempDirWithoutDrive := tempDir[2:]
symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive)
} else {
symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir)
}
tarPath := filepath.Join(ctx.Dir, "links.tar")
nonExistingFile := filepath.Join(tempDir, targetFile)
fooPath := filepath.Join(ctx.Dir, targetFile)
tarOut, err := os.Create(tarPath)
if err != nil {
c.Fatal(err)
}
tarWriter := tar.NewWriter(tarOut)
header := &tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: symlinkTarget,
Mode: 0755,
Uid: 0,
Gid: 0,
}
err = tarWriter.WriteHeader(header)
if err != nil {
c.Fatal(err)
}
tarWriter.Close()
tarOut.Close()
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) {
testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox
const (
dockerfileTemplate = `
FROM busybox
RUN ln -s /../../../../../../../../%s /x
VOLUME /x
ADD foo.txt /x/`
targetFile = "foo.txt"
)
var (
name = "test-link-absolute-volume"
dockerfile = ""
)
tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir)
nonExistingFile := filepath.Join(tempDir, targetFile)
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
defer ctx.Close()
fooPath := filepath.Join(ctx.Dir, targetFile)
foo, err := os.Create(fooPath)
if err != nil {
c.Fatal(err)
}
defer foo.Close()
if _, err := foo.WriteString("test"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) {
c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile)
}
}
// Issue #5270 - ensure we throw a better error than "unexpected EOF"
// when we can't access files in the context.
func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) {
testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows
{
name := "testbuildinaccessiblefiles"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"),
fakecontext.WithFiles(map[string]string{"fileWithoutReadAccess": "foo"}),
)
defer ctx.Close()
// This is used to ensure we detect inaccessible files early during build in the cli client
pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess")
if err := os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown file to root: %s", err)
}
if err := os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
Dir: ctx.Dir,
})
if result.Error == nil {
c.Fatalf("build should have failed: %s %s", result.Error, result.Combined())
}
// check if we've detected the failure before we started building
if !strings.Contains(result.Combined(), "no permission to read from ") {
c.Fatalf("output should've contained the string: no permission to read from but contained: %s", result.Combined())
}
if !strings.Contains(result.Combined(), "error checking context") {
c.Fatalf("output should've contained the string: error checking context")
}
}
{
name := "testbuildinaccessibledirectory"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"),
fakecontext.WithFiles(map[string]string{"directoryWeCantStat/bar": "foo"}),
)
defer ctx.Close()
// This is used to ensure we detect inaccessible directories early during build in the cli client
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Command: []string{"su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
Dir: ctx.Dir,
})
if result.Error == nil {
c.Fatalf("build should have failed: %s %s", result.Error, result.Combined())
}
// check if we've detected the failure before we started building
if !strings.Contains(result.Combined(), "can't stat") {
c.Fatalf("output should've contained the string: can't access %s", result.Combined())
}
if !strings.Contains(result.Combined(), "error checking context") {
c.Fatalf("output should've contained the string: error checking context\ngot:%s", result.Combined())
}
}
{
name := "testlinksok"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"))
defer ctx.Close()
target := "../../../../../../../../../../../../../../../../../../../azA"
if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil {
c.Fatal(err)
}
defer os.Remove(target)
// This is used to ensure we don't follow links when checking if everything in the context is accessible
// This test doesn't require that we run commands as an unprivileged user
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
}
{
name := "testbuildignoredinaccessible"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile("FROM scratch\nADD . /foo/"),
fakecontext.WithFiles(map[string]string{
"directoryWeCantStat/bar": "foo",
".dockerignore": "directoryWeCantStat",
}),
)
defer ctx.Close()
// This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern
pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat")
pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar")
if err := os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil {
c.Fatalf("failed to chown directory to root: %s", err)
}
if err := os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil {
c.Fatalf("failed to chmod directory to 444: %s", err)
}
if err := os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil {
c.Fatalf("failed to chmod file to 700: %s", err)
}
result := icmd.RunCmd(icmd.Cmd{
Dir: ctx.Dir,
Command: []string{"su", "unprivilegeduser", "-c",
fmt.Sprintf("%s build -t %s .", dockerBinary, name)},
})
result.Assert(c, icmd.Expected{})
}
}
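// TestBuildForceRm checks that --force-rm removes intermediate containers
// even when the build fails.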
func (s *DockerSuite) TestBuildForceRm(c *check.C) {
containerCountBefore := getContainerCount(c)
name := "testbuildforcerm"
buildImage(name, cli.WithFlags("--force-rm"), build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+`
RUN true
RUN thiswillfail`))).Assert(c, icmd.Expected{
ExitCode: 1,
})
containerCountAfter := getContainerCount(c)
if containerCountBefore != containerCountAfter {
c.Fatalf("--force-rm shouldn't have left containers behind")
}
}
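// TestBuildRm checks that intermediate containers are removed by default and
// with --rm, but are left behind with --rm=false.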
func (s *DockerSuite) TestBuildRm(c *check.C) {
name := "testbuildrm"
testCases := []struct {
buildflags []string
shouldLeftContainerBehind bool
}{
// Default case (i.e. --rm=true)
{
buildflags: []string{},
shouldLeftContainerBehind: false,
},
{
buildflags: []string{"--rm"},
shouldLeftContainerBehind: false,
},
{
buildflags: []string{"--rm=false"},
shouldLeftContainerBehind: true,
},
}
for _, tc := range testCases {
containerCountBefore := getContainerCount(c)
buildImageSuccessfully(c, name, cli.WithFlags(tc.buildflags...), build.WithDockerfile(`FROM busybox
RUN echo hello world`))
containerCountAfter := getContainerCount(c)
if tc.shouldLeftContainerBehind {
if containerCountBefore == containerCountAfter {
c.Fatalf("flags %v should have left containers behind", tc.buildflags)
}
} else {
if containerCountBefore != containerCountAfter {
c.Fatalf("flags %v shouldn't have left containers behind", tc.buildflags)
}
}
dockerCmd(c, "rmi", name)
}
}
func (s *DockerSuite) TestBuildWithVolumes(c *check.C) {
testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows
var (
result map[string]map[string]struct{}
name = "testbuildvolumes"
emptyMap = make(map[string]struct{})
expected = map[string]map[string]struct{}{
"/test1": emptyMap,
"/test2": emptyMap,
"/test3": emptyMap,
"/test4": emptyMap,
"/test5": emptyMap,
"/test6": emptyMap,
"[/test7": emptyMap,
"/test8]": emptyMap,
}
)
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
VOLUME /test1
VOLUME /test2
VOLUME /test3 /test4
VOLUME ["/test5", "/test6"]
VOLUME [/test7 /test8]
`))
inspectFieldAndUnmarshall(c, name, "Config.Volumes", &result)
equal := reflect.DeepEqual(&result, &expected)
if !equal {
c.Fatalf("Volumes %s, expected %s", result, expected)
}
}
func (s *DockerSuite) TestBuildMaintainer(c *check.C) {
name := "testbuildmaintainer"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio`))
expected := "dockerio"
res := inspectField(c, name, "Author")
if res != expected {
c.Fatalf("Maintainer %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildUser(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuilduser"
expected := "dockerio"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd
USER dockerio
RUN [ $(whoami) = 'dockerio' ]`))
res := inspectField(c, name, "Config.User")
if res != expected {
c.Fatalf("User %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) {
name := "testbuildrelativeworkdir"
var (
expected1 string
expected2 string
expected3 string
expected4 string
expectedFinal string
)
if testEnv.DaemonPlatform() == "windows" {
expected1 = `C:/`
expected2 = `C:/test1`
expected3 = `C:/test2`
expected4 = `C:/test2/test3`
expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox
} else {
expected1 = `/`
expected2 = `/test1`
expected3 = `/test2`
expected4 = `/test2/test3`
expectedFinal = `/test2/test3`
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c "[ "$PWD" = "`+expected1+`" ]"
WORKDIR test1
RUN sh -c "[ "$PWD" = "`+expected2+`" ]"
WORKDIR /test2
RUN sh -c "[ "$PWD" = "`+expected3+`" ]"
WORKDIR test3
RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`))
res := inspectField(c, name, "Config.WorkingDir")
if res != expectedFinal {
c.Fatalf("Workdir %s, expected %s", res, expectedFinal)
}
}
// #22181 Regression test. Single end-to-end test of using
// Windows semantics. Most path handling verifications are in unit tests
func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
buildImageSuccessfully(c, "testbuildwindowsworkdirprocessing", build.WithDockerfile(`FROM busybox
WORKDIR C:\\foo
WORKDIR bar
RUN sh -c "[ "$PWD" = "C:/foo/bar" ]"
`))
}
// #22181 Regression test. Most paths handling verifications are in unit test.
// One functional test for end-to-end
func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) {
testRequires(c, DaemonIsWindows)
// TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to
// support backslash such as .\\ being equivalent to ./ and c:\\ being
// equivalent to c:/. This is not currently (nor ever has been) supported
// by docker on the Windows platform.
buildImageSuccessfully(c, "testbuildwindowsaddcopypathprocessing", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
# No trailing slash on COPY/ADD
# Results in dir being changed to a file
WORKDIR /wc1
COPY wc1 c:/wc1
WORKDIR /wc2
ADD wc2 c:/wc2
WORKDIR c:/
RUN sh -c "[ $(cat c:/wc1/wc1) = 'hellowc1' ]"
RUN sh -c "[ $(cat c:/wc2/wc2) = 'worldwc2' ]"
# Trailing slash on COPY/ADD, Windows-style path.
WORKDIR /wd1
COPY wd1 c:/wd1/
WORKDIR /wd2
ADD wd2 c:/wd2/
RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]"
RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]"
`),
build.WithFile("wc1", "hellowc1"),
build.WithFile("wc2", "worldwc2"),
build.WithFile("wd1", "hellowd1"),
build.WithFile("wd2", "worldwd2"),
))
}
func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) {
name := "testbuildworkdirwithenvvariables"
var expected string
if testEnv.DaemonPlatform() == "windows" {
expected = `C:\test1\test2`
} else {
expected = `/test1/test2`
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENV DIRPATH /test1
ENV SUBDIRNAME test2
WORKDIR $DIRPATH
WORKDIR $SUBDIRNAME/$MISSING_VAR`))
res := inspectField(c, name, "Config.WorkingDir")
if res != expected {
c.Fatalf("Workdir %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) {
// cat /test1/test2/foo gets permission denied for the user
testRequires(c, NotUserNamespace)
var expected string
if testEnv.DaemonPlatform() == "windows" {
expected = `C:/test1/test2`
} else {
expected = `/test1/test2`
}
buildImageSuccessfully(c, "testbuildrelativecopy", build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
WORKDIR /test1
WORKDIR test2
RUN sh -c "[ "$PWD" = '`+expected+`' ]"
COPY foo ./
RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]"
ADD foo ./bar/baz
RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]"
COPY foo ./bar/baz2
RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]"
WORKDIR ..
COPY foo ./
RUN sh -c "[ $(cat /test1/foo) = 'hello' ]"
COPY foo /test3/
RUN sh -c "[ $(cat /test3/foo) = 'hello' ]"
WORKDIR /test4
COPY . .
RUN sh -c "[ $(cat /test4/foo) = 'hello' ]"
WORKDIR /test5/test6
COPY foo ../
RUN sh -c "[ $(cat /test5/foo) = 'hello' ]"
`),
build.WithFile("foo", "hello"),
))
}
func (s *DockerSuite) TestBuildBlankName(c *check.C) {
name := "testbuildblankname"
testCases := []struct {
expression string
expectedStderr string
}{
{
expression: "ENV =",
expectedStderr: "ENV names can not be blank",
},
{
expression: "LABEL =",
expectedStderr: "LABEL names can not be blank",
},
{
expression: "ARG =foo",
expectedStderr: "ARG names can not be blank",
},
}
for _, tc := range testCases {
buildImage(name, build.WithDockerfile(fmt.Sprintf(`FROM busybox
%s`, tc.expression))).Assert(c, icmd.Expected{
ExitCode: 1,
Err: tc.expectedStderr,
})
}
}
func (s *DockerSuite) TestBuildEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
name := "testbuildenv"
expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENV PATH /test:$PATH
ENV PORT 2375
RUN [ $(env | grep PORT) = 'PORT=2375' ]`))
res := inspectField(c, name, "Config.Env")
if res != expected {
c.Fatalf("Env %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildPATH(c *check.C) {
testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows
defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
fn := func(dockerfile string, expected string) {
buildImageSuccessfully(c, "testbldpath", build.WithDockerfile(dockerfile))
res := inspectField(c, "testbldpath", "Config.Env")
if res != expected {
c.Fatalf("Env %q, expected %q for dockerfile:%q", res, expected, dockerfile)
}
}
tests := []struct{ dockerfile, exp string }{
{"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"},
{"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"},
{"FROM scratch\nENV PATH=/test", "[PATH=/test]"},
{"FROM busybox\nENV PATH=/test", "[PATH=/test]"},
{"FROM scratch\nENV PATH=''", "[PATH=]"},
{"FROM busybox\nENV PATH=''", "[PATH=]"},
}
for _, test := range tests {
fn(test.dockerfile, test.exp)
}
}
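// TestBuildContextCleanup checks that the uploaded build context is removed
// from the daemon's tmp directory after a successful build.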
func (s *DockerSuite) TestBuildContextCleanup(c *check.C) {
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`))
entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = testutil.CompareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) {
testRequires(c, SameHostDaemon)
name := "testbuildcontextcleanup"
entries, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
RUN /non/existing/command`)).Assert(c, icmd.Expected{
ExitCode: 1,
})
entriesFinal, err := ioutil.ReadDir(filepath.Join(testEnv.DockerBasePath(), "tmp"))
if err != nil {
c.Fatalf("failed to list contents of tmp dir: %s", err)
}
if err = testutil.CompareDirectoryEntries(entries, entriesFinal); err != nil {
c.Fatalf("context should have been deleted, but wasn't")
}
}
func (s *DockerSuite) TestBuildCmd(c *check.C) {
name := "testbuildcmd"
expected := "[/bin/echo Hello World]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
CMD ["/bin/echo", "Hello World"]`))
res := inspectField(c, name, "Config.Cmd")
if res != expected {
c.Fatalf("Cmd %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildExpose(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexpose"
expected := "map[2375/tcp:{}]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 2375`))
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
// start building a Dockerfile with a large number of ports
portList := make([]string, 50)
line := make([]string, 100)
expectedPorts := make([]int, len(portList)*len(line))
for i := 0; i < len(portList); i++ {
for j := 0; j < len(line); j++ {
p := i*len(line) + j + 1
line[j] = strconv.Itoa(p)
expectedPorts[p-1] = p
}
if i == len(portList)-1 {
portList[i] = strings.Join(line, " ")
} else {
portList[i] = strings.Join(line, " ") + ` \`
}
}
dockerfile := `FROM scratch
EXPOSE {{range .}} {{.}}
{{end}}`
tmpl := template.Must(template.New("dockerfile").Parse(dockerfile))
buf := bytes.NewBuffer(nil)
tmpl.Execute(buf, portList)
name := "testbuildexpose"
buildImageSuccessfully(c, name, build.WithDockerfile(buf.String()))
// check if all the ports are saved inside Config.ExposedPorts
res := inspectFieldJSON(c, name, "Config.ExposedPorts")
var exposedPorts map[string]interface{}
if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil {
c.Fatal(err)
}
for _, p := range expectedPorts {
ep := fmt.Sprintf("%d/tcp", p)
if _, ok := exposedPorts[ep]; !ok {
c.Errorf("Port(%s) is not exposed", ep)
} else {
delete(exposedPorts, ep)
}
}
if len(exposedPorts) != 0 {
c.Errorf("Unexpected extra exposed ports %v", exposedPorts)
}
}
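// TestBuildExposeOrder checks that reordering the ports of an EXPOSE instruction does not invalidate the build cache.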
func (s *DockerSuite) TestBuildExposeOrder(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
buildID := func(name, exposed string) string {
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM scratch
EXPOSE %s`, exposed)))
id := inspectField(c, name, "Id")
return id
}
id1 := buildID("testbuildexpose1", "80 2375")
id2 := buildID("testbuildexpose2", "2375 80")
if id1 != id2 {
c.Errorf("EXPOSE should invalidate the cache only when ports actually changed")
}
}
func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildexposeuppercaseproto"
expected := "map[5678/udp:{}]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 5678/UDP`))
res := inspectField(c, name, "Config.ExposedPorts")
if res != expected {
c.Fatalf("Exposed ports %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) {
name := "testbuildentrypointinheritance"
name2 := "testbuildentrypointinheritance2"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT ["/bin/echo"]`))
res := inspectField(c, name, "Config.Entrypoint")
expected := "[/bin/echo]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT []`, name)))
res = inspectField(c, name2, "Config.Entrypoint")
expected = "[]"
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT []`))
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildEntrypoint(c *check.C) {
name := "testbuildentrypoint"
expected := "[/bin/echo]"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/echo"]`))
res := inspectField(c, name, "Config.Entrypoint")
if res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
// #6445 ensure ONBUILD triggers aren't committed to grandchildren
func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) {
buildImageSuccessfully(c, "testonbuildtrigger1", build.WithDockerfile(`
FROM busybox
RUN echo "GRANDPARENT"
ONBUILD RUN echo "ONBUILD PARENT"
`))
// ONBUILD should be run in second build.
buildImage("testonbuildtrigger2", build.WithDockerfile("FROM testonbuildtrigger1")).Assert(c, icmd.Expected{
Out: "ONBUILD PARENT",
})
// ONBUILD should *not* be run in third build.
result := buildImage("testonbuildtrigger3", build.WithDockerfile("FROM testonbuildtrigger2"))
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), "ONBUILD PARENT") {
c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
}
}
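// TestBuildSameDockerfileWithAndWithoutCache checks that rebuilding the same Dockerfile reuses the cache, while building without the cache produces a new image.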
func (s *DockerSuite) TestBuildSameDockerfileWithAndWithoutCache(c *check.C) {
testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
name := "testbuildwithcache"
dockerfile := `FROM scratch
MAINTAINER dockerio
EXPOSE 5432
ENTRYPOINT ["/bin/echo"]`
buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile(dockerfile))
id2 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
// Make sure that ADD/COPY still populate the cache even if they don't use it
func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
name := "testbuildconditionalcache"
dockerfile := `
FROM busybox
ADD foo /tmp/`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "hello",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
if err := ctx.Add("foo", "bye"); err != nil {
c.Fatalf("Error modifying foo: %s", err)
}
// Updating a file should invalidate the cache
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id2 == id1 {
c.Fatal("Should not have used the cache")
}
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id3 != id2 {
c.Fatal("Should have used the cache")
}
}
func (s *DockerSuite) TestBuildAddMultipleLocalFileWithAndWithoutCache(c *check.C) {
name := "testbuildaddmultiplelocalfilewithcache"
baseName := name + "-base"
cli.BuildCmd(c, baseName, build.WithDockerfile(`
FROM busybox
ENTRYPOINT ["/bin/sh"]
`))
dockerfile := `
FROM testbuildaddmultiplelocalfilewithcache-base
MAINTAINER dockerio
ADD foo Dockerfile /usr/lib/bla/
RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
"foo": "hello",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
result2 := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
result3 := cli.BuildCmd(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatalf("The cache should have been used but hasn't: %s", result2.Stdout())
}
if id1 == id3 {
c.Fatalf("The cache should have been invalided but hasn't: %s", result3.Stdout())
}
}
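// TestBuildCopyDirButNotFile checks that adding an unrelated file whose name merely resembles a copied directory does not invalidate the cache.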
func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
name := "testbuildcopydirbutnotfile"
name2 := "testbuildcopydirbutnotfile2"
dockerfile := `
FROM ` + minimalBaseImage() + `
COPY dir /tmp/`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
"dir/foo": "hello",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Check that adding a file with a similar name doesn't mess with the cache
if err := ctx.Add("dir_file", "hello2"); err != nil {
c.Fatal(err)
}
cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't")
}
}
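// TestBuildAddCurrentDirWithCache checks how "ADD ." interacts with the cache: adding or changing a file invalidates it, while a same-content change with a different mtime does not.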
func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
name := "testbuildaddcurrentdirwithcache"
name2 := name + "2"
name3 := name + "3"
name4 := name + "4"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
"foo": "hello",
}))
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
// Check that adding a file invalidates the cache of "ADD ."
if err := ctx.Add("bar", "hello2"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name2, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
// Check that changing a file invalidates the cache of "ADD ."
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name3, build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, name3)
if id2 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
// Check that rewriting a file with the same content but a different mtime does not
// invalidate the cache of "ADD ."
time.Sleep(1 * time.Second) // wait a second because of mtime precision
if err := ctx.Add("foo", "hello1"); err != nil {
c.Fatal(err)
}
buildImageSuccessfully(c, name4, build.WithExternalBuildContext(ctx))
id4 := getIDByName(c, name4)
if id3 != id4 {
c.Fatal("The cache should have been used but hasn't.")
}
}
// FIXME(vdemeester) this really seems to test the same thing as before (TestBuildAddMultipleLocalFileWithAndWithoutCache)
func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
name := "testbuildaddcurrentdirwithoutcache"
dockerfile := `
FROM ` + minimalBaseImage() + `
MAINTAINER dockerio
ADD . /usr/lib/bla`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile), fakecontext.WithFiles(map[string]string{
"foo": "hello",
}))
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
func (s *DockerSuite) TestBuildAddRemoteFileWithAndWithoutCache(c *check.C) {
name := "testbuildaddremotefilewithcache"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"baz": "hello",
}))
defer server.Close()
dockerfile := fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL())
cli.BuildCmd(c, name, build.WithDockerfile(dockerfile))
id1 := getIDByName(c, name)
cli.BuildCmd(c, name, build.WithDockerfile(dockerfile))
id2 := getIDByName(c, name)
cli.BuildCmd(c, name, build.WithoutCache, build.WithDockerfile(dockerfile))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalided but hasn't.")
}
}
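// TestBuildAddRemoteFileMTime checks that a remote ADD source with identical content but a different mtime still hits the cache.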
func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
name := "testbuildaddremotefilemtime"
name2 := name + "2"
name3 := name + "3"
files := map[string]string{"baz": "hello"}
server := fakestorage.New(c, "", fakecontext.WithFiles(files))
defer server.Close()
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server.URL())))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
cli.BuildCmd(c, name2, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name2)
if id1 != id2 {
c.Fatal("The cache should have been used but wasn't - #1")
}
// Now create a different server with the same contents (causes a different mtime)
// The cache should still be used
// allow some time for clock to pass as mtime precision is only 1s
time.Sleep(2 * time.Second)
server2 := fakestorage.New(c, "", fakecontext.WithFiles(files))
defer server2.Close()
ctx2 := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD %s/baz /usr/lib/baz/quux`, server2.URL())))
defer ctx2.Close()
cli.BuildCmd(c, name3, build.WithExternalBuildContext(ctx2))
id3 := getIDByName(c, name3)
if id1 != id3 {
c.Fatal("The cache should have been used but wasn't")
}
}
// FIXME(vdemeester) this really seems to test the same thing as before (combined)
func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithAndWithoutCache(c *check.C) {
name := "testbuildaddlocalandremotefilewithcache"
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{
"baz": "hello",
}))
defer server.Close()
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(`FROM `+minimalBaseImage()+`
MAINTAINER dockerio
ADD foo /usr/lib/bla/bar
ADD %s/baz /usr/lib/baz/quux`, server.URL())),
fakecontext.WithFiles(map[string]string{
"foo": "hello world",
}))
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, name)
if id1 != id2 {
c.Fatal("The cache should have been used but hasn't.")
}
if id1 == id3 {
c.Fatal("The cache should have been invalidated but hasn't.")
}
}
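// testContextTar builds an image from a build context piped in on stdin as a tar archive with the given compression.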
func testContextTar(c *check.C, compression archive.Compression) {
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`FROM busybox
ADD foo /foo
CMD ["cat", "/foo"]`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}),
)
defer ctx.Close()
context, err := archive.Tar(ctx.Dir, compression)
if err != nil {
c.Fatalf("failed to build context tar: %v", err)
}
name := "contexttar"
cli.BuildCmd(c, name, build.WithStdinContext(context))
}
func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) {
testContextTar(c, archive.Gzip)
}
func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) {
testContextTar(c, archive.Uncompressed)
}
func (s *DockerSuite) TestBuildNoContext(c *check.C) {
name := "nocontext"
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-"},
Stdin: strings.NewReader(
`FROM busybox
CMD ["echo", "ok"]`),
}).Assert(c, icmd.Success)
if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" {
c.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
}
}
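// TestBuildDockerfileStdin checks that a Dockerfile can be read from stdin with "-f -" while the build context comes from a local directory.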
func (s *DockerSuite) TestBuildDockerfileStdin(c *check.C) {
name := "stdindockerfile"
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
err = ioutil.WriteFile(filepath.Join(tmpDir, "foo"), []byte("bar"), 0600)
c.Assert(err, check.IsNil)
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir},
Stdin: strings.NewReader(
`FROM busybox
ADD foo /foo
CMD ["cat", "/foo"]`),
}).Assert(c, icmd.Success)
res := inspectField(c, name, "Config.Cmd")
c.Assert(strings.TrimSpace(string(res)), checker.Equals, `[cat /foo]`)
}
func (s *DockerSuite) TestBuildDockerfileStdinConflict(c *check.C) {
name := "stdindockerfiletarcontext"
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-f", "-", "-"},
}).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "use stdin for both build context and dockerfile",
})
}
func (s *DockerSuite) TestBuildDockerfileStdinNoExtraFiles(c *check.C) {
s.testBuildDockerfileStdinNoExtraFiles(c, false, false)
}
func (s *DockerSuite) TestBuildDockerfileStdinDockerignore(c *check.C) {
s.testBuildDockerfileStdinNoExtraFiles(c, true, false)
}
func (s *DockerSuite) TestBuildDockerfileStdinDockerignoreIgnored(c *check.C) {
s.testBuildDockerfileStdinNoExtraFiles(c, true, true)
}
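// testBuildDockerfileStdinNoExtraFiles checks that, when the Dockerfile is read from stdin, no extra files leak into the image and any .dockerignore in the context is honored.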
func (s *DockerSuite) testBuildDockerfileStdinNoExtraFiles(c *check.C, hasDockerignore, ignoreDockerignore bool) {
name := "stdindockerfilenoextra"
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpDir)
writeFile := func(filename, content string) {
err = ioutil.WriteFile(filepath.Join(tmpDir, filename), []byte(content), 0600)
c.Assert(err, check.IsNil)
}
writeFile("foo", "bar")
if hasDockerignore {
// Add an empty Dockerfile to verify that it is not added to the image
writeFile("Dockerfile", "")
ignores := "Dockerfile\n"
if ignoreDockerignore {
ignores += ".dockerignore\n"
}
writeFile(".dockerignore", ignores)
}
result := icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "build", "-t", name, "-f", "-", tmpDir},
Stdin: strings.NewReader(
`FROM busybox
COPY . /baz`),
})
result.Assert(c, icmd.Success)
result = cli.DockerCmd(c, "run", "--rm", name, "ls", "-A", "/baz")
if hasDockerignore && !ignoreDockerignore {
c.Assert(result.Stdout(), checker.Equals, ".dockerignore\nfoo\n")
} else {
c.Assert(result.Stdout(), checker.Equals, "foo\n")
}
}
func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildimg"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox:latest
RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test
VOLUME /test`))
out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
if expected := "drw-------"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
if expected := "daemon daemon"; !strings.Contains(out, expected) {
c.Fatalf("expected %s received %s", expected, out)
}
}
// testing #1405 - config.Cmd does not get cleaned up when
// utilizing the cache
func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) {
name := "testbuildcmdcleanup"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo "hello"`))
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
RUN echo "hello"
ADD foo /foo
ENTRYPOINT ["/bin/echo"]`),
build.WithFile("foo", "hello")))
res := inspectField(c, name, "Config.Cmd")
// Cmd must be cleaned up
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
}
func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) {
name := "testbuildaddnotfound"
expected := "foo: no such file or directory"
if testEnv.DaemonPlatform() == "windows" {
expected = "foo: The system cannot find the file specified"
}
buildImage(name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+`
ADD foo /usr/local/bar`),
build.WithFile("bar", "hello"))).Assert(c, icmd.Expected{
ExitCode: 1,
Err: expected,
})
}
func (s *DockerSuite) TestBuildInheritance(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildinheritance"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM scratch
EXPOSE 2375`))
ports1 := inspectField(c, name, "Config.ExposedPorts")
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT ["/bin/echo"]`, name)))
res := inspectField(c, name, "Config.Entrypoint")
if expected := "[/bin/echo]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
ports2 := inspectField(c, name, "Config.ExposedPorts")
if ports1 != ports2 {
c.Fatalf("Ports must be same: %s != %s", ports1, ports2)
}
}
func (s *DockerSuite) TestBuildFails(c *check.C) {
name := "testbuildfails"
buildImage(name, build.WithDockerfile(`FROM busybox
RUN sh -c "exit 23"`)).Assert(c, icmd.Expected{
ExitCode: 23,
Err: "returned a non-zero code: 23",
})
}
func (s *DockerSuite) TestBuildOnBuild(c *check.C) {
name := "testbuildonbuild"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ONBUILD RUN touch foobar`))
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
RUN [ -f foobar ]`, name)))
}
// gh #2446
func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) {
makeLink := `ln -s /foo /bar`
if testEnv.DaemonPlatform() == "windows" {
makeLink = `mklink /D C:\bar C:\foo`
}
name := "testbuildaddtosymlinkdest"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
RUN sh -c "mkdir /foo"
RUN `+makeLink+`
ADD foo /bar/
RUN sh -c "[ -f /bar/foo ]"
RUN sh -c "[ -f /foo/foo ]"`),
build.WithFile("foo", "hello"),
))
}
func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) {
name := "testbuildescapewhitespace"
buildImageSuccessfully(c, name, build.WithDockerfile(`
# ESCAPE=\
FROM busybox
MAINTAINER "Docker \
IO <io@\
docker.com>"
`))
res := inspectField(c, name, "Author")
if res != "\"Docker IO <[email protected]>\"" {
c.Fatalf("Parsed string did not match the escaped string. Got: %q", res)
}
}
func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) {
// Verify that strings that look like ints are still passed as strings
name := "testbuildstringing"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
MAINTAINER 123`))
out, _ := dockerCmd(c, "inspect", name)
if !strings.Contains(out, "\"123\"") {
c.Fatalf("Output does not contain the int as a string:\n%s", out)
}
}
func (s *DockerSuite) TestBuildDockerignore(c *check.C) {
name := "testbuilddockerignore"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ ! -e /bla/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ ! -e v.cc ]]"
RUN sh -c "[[ ! -e src/v.cc ]]"
RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"`),
build.WithFile("Makefile", "all:"),
build.WithFile(".git/HEAD", "ref: foo"),
build.WithFile("src/x.go", "package main"),
build.WithFile("src/_vendor/v.go", "package main"),
build.WithFile("src/_vendor/v.cc", "package main"),
build.WithFile("src/v.cc", "package main"),
build.WithFile("v.cc", "package main"),
build.WithFile("dir/foo", ""),
build.WithFile(".gitignore", ""),
build.WithFile("README.md", "readme"),
build.WithFile(".dockerignore", `
.git
pkg
.gitignore
src/_vendor
*.md
**/*.cc
dir`),
))
}
func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) {
name := "testbuilddockerignorecleanpaths"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD . /tmp/
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"`),
build.WithFile("foo", "foo"),
build.WithFile("foo2", "foo2"),
build.WithFile("dir1/foo", "foo in dir1"),
build.WithFile(".dockerignore", "./foo\ndir1//foo\n./dir1/../foo2"),
))
}
func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) {
name := "testbuilddockerignoreexceptions"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD . /bla
RUN sh -c "[[ -f /bla/src/x.go ]]"
RUN sh -c "[[ -f /bla/Makefile ]]"
RUN sh -c "[[ ! -e /bla/src/_vendor ]]"
RUN sh -c "[[ ! -e /bla/.gitignore ]]"
RUN sh -c "[[ ! -e /bla/README.md ]]"
RUN sh -c "[[ -e /bla/dir/dir/foo ]]"
RUN sh -c "[[ ! -e /bla/dir/foo1 ]]"
RUN sh -c "[[ -f /bla/dir/e ]]"
RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]"
RUN sh -c "[[ ! -e /bla/foo ]]"
RUN sh -c "[[ ! -e /bla/.git ]]"
RUN sh -c "[[ -e /bla/dir/a.cc ]]"`),
build.WithFile("Makefile", "all:"),
build.WithFile(".git/HEAD", "ref: foo"),
build.WithFile("src/x.go", "package main"),
build.WithFile("src/_vendor/v.go", "package main"),
build.WithFile("dir/foo", ""),
build.WithFile("dir/foo1", ""),
build.WithFile("dir/dir/f1", ""),
build.WithFile("dir/dir/foo", ""),
build.WithFile("dir/e", ""),
build.WithFile("dir/e-dir/foo", ""),
build.WithFile(".gitignore", ""),
build.WithFile("README.md", "readme"),
build.WithFile("dir/a.cc", "hello"),
build.WithFile(".dockerignore", `
.git
pkg
.gitignore
src/_vendor
*.md
dir
!dir/e*
!dir/dir/foo
**/*.cc
!**/*.cc`),
))
}
func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", "Dockerfile\n"),
))
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", "./Dockerfile\n"),
))
}
func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) {
name := "testbuilddockerignoredockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls /tmp/Dockerfile
RUN sh -c "! ls /tmp/MyDockerfile"
RUN ls /tmp/.dockerignore`
buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c,
build.WithFile("Dockerfile", "Should not use me"),
build.WithFile("MyDockerfile", dockerfile),
build.WithFile(".dockerignore", "MyDockerfile\n"),
))
buildImageSuccessfully(c, name, cli.WithFlags("-f", "MyDockerfile"), build.WithBuildContext(c,
build.WithFile("Dockerfile", "Should not use me"),
build.WithFile("MyDockerfile", dockerfile),
build.WithFile(".dockerignore", "./MyDockerfile\n"),
))
}
func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) {
name := "testbuilddockerignoredockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "! ls /tmp/.dockerignore"
RUN ls /tmp/Dockerfile`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", ".dockerignore\n"),
))
}
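// TestBuildDockerignoreTouchDockerfile checks that touching a Dockerfile excluded by .dockerignore does not invalidate the cache of "ADD .".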
func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) {
name := "testbuilddockerignoretouchdockerfile"
dockerfile := `
FROM busybox
ADD . /tmp/`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
".dockerignore": "Dockerfile\n",
}))
defer ctx.Close()
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, name)
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 1")
}
// Now make sure touching Dockerfile doesn't invalidate the cache
if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 2")
}
// One more time but just 'touch' it instead of changing the content
if err := ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil {
c.Fatalf("Didn't add Dockerfile: %s", err)
}
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Didn't use the cache - 3")
}
}
func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) {
name := "testbuilddockerignorewholedir"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ ! -e /Makefile ]]"`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", "*\n"),
build.WithFile("Makefile", "all:"),
build.WithFile(".gitignore", ""),
))
}
func (s *DockerSuite) TestBuildDockerignoringOnlyDotfiles(c *check.C) {
name := "testbuilddockerignorewholedir"
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", ".*"),
build.WithFile("Makefile", "all:"),
build.WithFile(".gitignore", ""),
))
}
func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) {
name := "testbuilddockerignorebadexclusion"
buildImage(name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.gitignore ]]"
RUN sh -c "[[ -f /Makefile ]]"`),
build.WithFile("Makefile", "all:"),
build.WithFile(".gitignore", ""),
build.WithFile(".dockerignore", "!\n"),
)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "error checking context: 'illegal exclusion pattern: \"!\"",
})
}
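// TestBuildDockerignoringWildTopDir checks that top-level wildcard patterns in .dockerignore exclude the entire build context.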
func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
RUN sh -c "[[ ! -e /.dockerignore ]]"
RUN sh -c "[[ ! -e /Dockerfile ]]"
RUN sh -c "[[ ! -e /file1 ]]"
RUN sh -c "[[ ! -e /dir ]]"`
// All of these should result in ignoring all files
for _, variant := range []string{"**", "**/", "**/**", "*"} {
buildImageSuccessfully(c, "noname", build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("file1", ""),
build.WithFile("dir/file1", ""),
build.WithFile(".dockerignore", variant),
))
dockerCmd(c, "rmi", "noname")
}
}
func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) {
dockerfile := `
FROM busybox
COPY . /
#RUN sh -c "[[ -e /.dockerignore ]]"
RUN sh -c "[[ -e /Dockerfile ]] && \
[[ ! -e /file0 ]] && \
[[ ! -e /dir1/file0 ]] && \
[[ ! -e /dir2/file0 ]] && \
[[ ! -e /file1 ]] && \
[[ ! -e /dir1/file1 ]] && \
[[ ! -e /dir1/dir2/file1 ]] && \
[[ ! -e /dir1/file2 ]] && \
[[ -e /dir1/dir2/file2 ]] && \
[[ ! -e /dir1/dir2/file4 ]] && \
[[ ! -e /dir1/dir2/file5 ]] && \
[[ ! -e /dir1/dir2/file6 ]] && \
[[ ! -e /dir1/dir3/file7 ]] && \
[[ ! -e /dir1/dir3/file8 ]] && \
[[ -e /dir1/dir3 ]] && \
[[ -e /dir1/dir4 ]] && \
[[ ! -e 'dir1/dir5/fileAA' ]] && \
[[ -e 'dir1/dir5/fileAB' ]] && \
[[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing
RUN echo all done!`
dockerignore := `
**/file0
**/*file1
**/dir1/file2
dir1/**/file4
**/dir2/file5
**/dir1/dir2/file6
dir1/dir3/**
**/dir4/**
**/file?A
**/file\?B
**/dir5/file.
`
buildImageSuccessfully(c, "noname", build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", dockerignore),
build.WithFile("dir1/file0", ""),
build.WithFile("dir1/dir2/file0", ""),
build.WithFile("file1", ""),
build.WithFile("dir1/file1", ""),
build.WithFile("dir1/dir2/file1", ""),
build.WithFile("dir1/file2", ""),
build.WithFile("dir1/dir2/file2", ""), // remains
build.WithFile("dir1/dir2/file4", ""),
build.WithFile("dir1/dir2/file5", ""),
build.WithFile("dir1/dir2/file6", ""),
build.WithFile("dir1/dir3/file7", ""),
build.WithFile("dir1/dir3/file8", ""),
build.WithFile("dir1/dir4/file9", ""),
build.WithFile("dir1/dir5/fileAA", ""),
build.WithFile("dir1/dir5/fileAB", ""),
build.WithFile("dir1/dir5/fileB", ""),
))
}
func (s *DockerSuite) TestBuildLineBreak(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildlinebreak"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c 'echo root:testpass \
> /tmp/passwd'
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`))
}
func (s *DockerSuite) TestBuildEOLInLine(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildeolinline"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN sh -c 'echo root:testpass > /tmp/passwd'
RUN echo "foo \n bar"; echo "baz"
RUN mkdir -p /var/run/sshd
RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]"
RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`))
}
func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildcomments"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
# This is an ordinary comment.
RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh
RUN [ ! -x /hello.sh ]
# comment with line break \
RUN chmod +x /hello.sh
RUN [ -x /hello.sh ]
RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ]
RUN [ "$(/hello.sh)" = "hello world" ]`))
}
func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildusers"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
# Make sure our defaults work
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ]
# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0)
USER root
RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ]
# Setup dockerio user and group
RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \
echo 'dockerio:x:1001:' >> /etc/group
# Make sure we can switch to our user and all the information is exactly as we expect it to be
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
# Switch back to root and double check that worked exactly as we might expect it to
USER root
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \
# Add a "supplementary" group for our dockerio user
echo 'supplementary:x:1002:dockerio' >> /etc/group
# ... and then go verify that we get it like we expect
USER dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
USER 1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ]
# super test the new "user:group" syntax
USER dockerio:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:dockerio
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER 1001:1001
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ]
USER dockerio:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER dockerio:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:supplementary
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
USER 1001:1002
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ]
# make sure unknown uid/gid still works properly
USER 1042:1043
RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`))
}
// FIXME(vdemeester) rename this test (and probably "merge" it with the one below TestBuildEnvUsage2)
func (s *DockerSuite) TestBuildEnvUsage(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage"
dockerfile := `FROM busybox
ENV HOME /root
ENV PATH $HOME/bin:$PATH
ENV PATH /tmp:$PATH
RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ]
ENV FOO /foo/baz
ENV BAR /bar
ENV BAZ $BAR
ENV FOOPATH $PATH:$FOO
RUN [ "$BAR" = "$BAZ" ]
RUN [ "$FOOPATH" = "$PATH:/foo/baz" ]
ENV FROM hello/docker/world
ENV TO /docker/world/hello
ADD $FROM $TO
RUN [ "$(cat $TO)" = "hello" ]
ENV abc=def
ENV ghi=$abc
RUN [ "$ghi" = "def" ]
`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("hello/docker/world", "hello"),
))
}
// FIXME(vdemeester) rename this test (and probably "merge" it with the one above TestBuildEnvUsage)
func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) {
// /docker/world/hello is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildenvusage2"
dockerfile := `FROM busybox
ENV abc=def def="hello world"
RUN [ "$abc,$def" = "def,hello world" ]
ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too"
RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ]
ENV abc=zzz FROM=hello/docker/world
ENV abc=zzz TO=/docker/world/hello
ADD $FROM $TO
RUN [ "$abc,$(cat $TO)" = "zzz,hello" ]
ENV abc 'yyy'
RUN [ $abc = 'yyy' ]
ENV abc=
RUN [ "$abc" = "" ]
# use grep to make sure if the builder substitutes \$foo by mistake
# we don't get a false positive
ENV abc=\$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc \$foo
RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo)
ENV abc=\'foo\' abc2=\"foo\"
RUN [ "$abc,$abc2" = "'foo',\"foo\"" ]
ENV abc "foo"
RUN [ "$abc" = "foo" ]
ENV abc 'foo'
RUN [ "$abc" = 'foo' ]
ENV abc \'foo\'
RUN [ "$abc" = "'foo'" ]
ENV abc \"foo\"
RUN [ "$abc" = '"foo"' ]
ENV abc=ABC
RUN [ "$abc" = "ABC" ]
ENV def1=${abc:-DEF} def2=${ccc:-DEF}
ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:}
RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ]
ENV mypath=${mypath:+$mypath:}/home
ENV mypath=${mypath:+$mypath:}/away
RUN [ "$mypath" = '/home:/away' ]
ENV e1=bar
ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11
RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ]
ENV ee1 bar
ENV ee2 $ee1
ENV ee3 $ee11
ENV ee4 \$ee1
ENV ee5 \$ee11
RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ]
ENV eee1="foo" eee2='foo'
ENV eee3 "foo"
ENV eee4 'foo'
RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ]
`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("hello/docker/world", "hello"),
))
}
func (s *DockerSuite) TestBuildAddScript(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddscript"
dockerfile := `
FROM busybox
ADD test /test
RUN ["chmod","+x","/test"]
RUN ["/test"]
RUN [ "$(cat /testfile)" = 'test!' ]`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("test", "#!/bin/sh\necho 'test!' > /testfile"),
))
}
func (s *DockerSuite) TestBuildAddTar(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
name := "testbuildaddtar"
ctx := func() *fakecontext.Fake {
dockerfile := `
FROM busybox
ADD test.tar /
RUN cat /test/foo | grep Hi
ADD test.tar /test.tar
RUN cat /test.tar/test/foo | grep Hi
ADD test.tar /unlikely-to-exist
RUN cat /unlikely-to-exist/test/foo | grep Hi
ADD test.tar /unlikely-to-exist-trailing-slash/
RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi
RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir
ADD test.tar /existing-directory
RUN cat /existing-directory/test/foo | grep Hi
ADD test.tar /existing-directory-trailing-slash/
RUN cat /existing-directory-trailing-slash/test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakecontext.New(c, tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
}
func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) {
name := "testbuildaddbrokentar"
ctx := func() *fakecontext.Fake {
dockerfile := `
FROM busybox
ADD test.tar /`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
// Corrupt the tar by removing one byte off the end
stat, err := testTar.Stat()
if err != nil {
c.Fatalf("failed to stat tar archive: %v", err)
}
if err := testTar.Truncate(stat.Size() - 1); err != nil {
c.Fatalf("failed to truncate tar archive: %v", err)
}
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakecontext.New(c, tmpDir)
}()
defer ctx.Close()
buildImage(name, build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
func (s *DockerSuite) TestBuildAddNonTar(c *check.C) {
name := "testbuildaddnontar"
// Should not try to extract test.tar
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD test.tar /
RUN test -f /test.tar`),
build.WithFile("test.tar", "not_a_tar_file"),
))
}
func (s *DockerSuite) TestBuildAddTarXz(c *check.C) {
// /test/foo is not owned by the correct user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxz"
ctx := func() *fakecontext.Fake {
dockerfile := `
FROM busybox
ADD test.tar.xz /
RUN cat /test/foo | grep Hi`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
icmd.RunCmd(icmd.Cmd{
Command: []string{"xz", "-k", "test.tar"},
Dir: tmpDir,
}).Assert(c, icmd.Success)
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakecontext.New(c, tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
}
func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildaddtarxzgz"
ctx := func() *fakecontext.Fake {
dockerfile := `
FROM busybox
ADD test.tar.xz.gz /
RUN ls /test.tar.xz.gz`
tmpDir, err := ioutil.TempDir("", "fake-context")
c.Assert(err, check.IsNil)
testTar, err := os.Create(filepath.Join(tmpDir, "test.tar"))
if err != nil {
c.Fatalf("failed to create test.tar archive: %v", err)
}
defer testTar.Close()
tw := tar.NewWriter(testTar)
if err := tw.WriteHeader(&tar.Header{
Name: "test/foo",
Size: 2,
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write([]byte("Hi")); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
icmd.RunCmd(icmd.Cmd{
Command: []string{"xz", "-k", "test.tar"},
Dir: tmpDir,
}).Assert(c, icmd.Success)
icmd.RunCmd(icmd.Cmd{
Command: []string{"gzip", "test.tar.xz"},
Dir: tmpDir,
}).Assert(c, icmd.Success)
if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil {
c.Fatalf("failed to open destination dockerfile: %v", err)
}
return fakecontext.New(c, tmpDir)
}()
defer ctx.Close()
buildImageSuccessfully(c, name, build.WithExternalBuildContext(ctx))
}
func (s *DockerSuite) TestBuildFromGit(c *check.C) {
name := "testbuildfromgit"
git := fakegit.New(c, "repo", map[string]string{
"Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"first": "test git data",
}, true)
defer git.Close()
buildImageSuccessfully(c, name, build.WithContextPath(git.RepoURL))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGitWithContext(c *check.C) {
name := "testbuildfromgit"
git := fakegit.New(c, "repo", map[string]string{
"docker/Dockerfile": `FROM busybox
ADD first /first
RUN [ -f /first ]
MAINTAINER docker`,
"docker/first": "test git data",
}, true)
defer git.Close()
buildImageSuccessfully(c, name, build.WithContextPath(fmt.Sprintf("%s#master:docker", git.RepoURL)))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildFromGitwithF(c *check.C) {
name := "testbuildfromgitwithf"
git := fakegit.New(c, "repo", map[string]string{
"myApp/myDockerfile": `FROM busybox
RUN echo hi from Dockerfile`,
}, true)
defer git.Close()
buildImage(name, cli.WithFlags("-f", "myApp/myDockerfile"), build.WithContextPath(git.RepoURL)).Assert(c, icmd.Expected{
Out: "hi from Dockerfile",
})
}
func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) {
name := "testbuildfromremotetarball"
buffer := new(bytes.Buffer)
tw := tar.NewWriter(buffer)
defer tw.Close()
dockerfile := []byte(`FROM busybox
MAINTAINER docker`)
if err := tw.WriteHeader(&tar.Header{
Name: "Dockerfile",
Size: int64(len(dockerfile)),
}); err != nil {
c.Fatalf("failed to write tar file header: %v", err)
}
if _, err := tw.Write(dockerfile); err != nil {
c.Fatalf("failed to write tar file content: %v", err)
}
if err := tw.Close(); err != nil {
c.Fatalf("failed to close tar archive: %v", err)
}
server := fakestorage.New(c, "", fakecontext.WithBinaryFiles(map[string]*bytes.Buffer{
"testT.tar": buffer,
}))
defer server.Close()
cli.BuildCmd(c, name, build.WithContextPath(server.URL()+"/testT.tar"))
res := inspectField(c, name, "Author")
if res != "docker" {
c.Fatalf("Maintainer should be docker, got %s", res)
}
}
func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) {
name := "testbuildcmdcleanuponentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
CMD ["test"]
ENTRYPOINT ["echo"]`))
buildImageSuccessfully(c, name, build.WithDockerfile(fmt.Sprintf(`FROM %s
ENTRYPOINT ["cat"]`, name)))
res := inspectField(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected nil", res)
}
res = inspectField(c, name, "Config.Entrypoint")
if expected := "[cat]"; res != expected {
c.Fatalf("Entrypoint %s, expected %s", res, expected)
}
}
func (s *DockerSuite) TestBuildClearCmd(c *check.C) {
name := "testbuildclearcmd"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
ENTRYPOINT ["/bin/bash"]
CMD []`))
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "[]" {
c.Fatalf("Cmd %s, expected %s", res, "[]")
}
}
func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) {
// Skip on Windows. Base image on Windows has a CMD set in the image.
testRequires(c, DaemonIsLinux)
name := "testbuildemptycmd"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n"))
res := inspectFieldJSON(c, name, "Config.Cmd")
if res != "null" {
c.Fatalf("Cmd %s, expected %s", res, "null")
}
}
func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) {
name := "testbuildonbuildparent"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nONBUILD RUN echo foo\n"))
buildImage(name, build.WithDockerfile("FROM "+name+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{
Out: "# Executing 1 build trigger",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
buildImage(name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{
ExitCode: 125,
Err: "invalid reference format",
})
}
func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) {
name := "testbuildcmdshc"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD echo cmd\n"))
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["/bin/sh","-c","echo cmd"]`
if testEnv.DaemonPlatform() == "windows" {
expected = `["cmd","/S","/C","echo cmd"]`
}
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) {
// Test to make sure that when we concatenate arrays we take the arg
// separator into account so that ["echo","hi"] and ["echo hi"] don't
// look the same
name := "testbuildcmdspaces"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo hi\"]\n"))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"hi\"]\n"))
id2 := getIDByName(c, name)
if id1 == id2 {
c.Fatal("Should not have resulted in the same CMD")
}
// Now do the same with ENTRYPOINT
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo hi\"]\n"))
id1 = getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n"))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatal("Should not have resulted in the same ENTRYPOINT")
}
}
func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) {
name := "testbuildcmdjson"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nCMD [\"echo\", \"cmd\"]"))
res := inspectFieldJSON(c, name, "Config.Cmd")
expected := `["echo","cmd"]`
if res != expected {
c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res)
}
}
func (s *DockerSuite) TestBuildEntrypointCanBeOverridenByChild(c *check.C) {
buildImageSuccessfully(c, "parent", build.WithDockerfile(`
FROM busybox
ENTRYPOINT exit 130
`))
icmd.RunCommand(dockerBinary, "run", "parent").Assert(c, icmd.Expected{
ExitCode: 130,
})
buildImageSuccessfully(c, "child", build.WithDockerfile(`
FROM parent
ENTRYPOINT exit 5
`))
icmd.RunCommand(dockerBinary, "run", "child").Assert(c, icmd.Expected{
ExitCode: 5,
})
}
func (s *DockerSuite) TestBuildEntrypointCanBeOverridenByChildInspect(c *check.C) {
var (
name = "testbuildepinherit"
name2 = "testbuildepinherit2"
expected = `["/bin/sh","-c","echo quux"]`
)
if testEnv.DaemonPlatform() == "windows" {
expected = `["cmd","/S","/C","echo quux"]`
}
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nENTRYPOINT /foo/bar"))
buildImageSuccessfully(c, name2, build.WithDockerfile(fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name)))
res := inspectFieldJSON(c, name2, "Config.Entrypoint")
if res != expected {
c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res)
}
icmd.RunCommand(dockerBinary, "run", name2).Assert(c, icmd.Expected{
Out: "quux",
})
}
func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) {
name := "testbuildentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
ENTRYPOINT echo`))
dockerCmd(c, "run", "--rm", name)
}
func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildexoticshellinterpolation"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM busybox
ENV SOME_VAR a.b.c
RUN [ "$SOME_VAR" = 'a.b.c' ]
RUN [ "${SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR%.*}" = 'a.b' ]
RUN [ "${SOME_VAR%%.*}" = 'a' ]
RUN [ "${SOME_VAR#*.}" = 'b.c' ]
RUN [ "${SOME_VAR##*.}" = 'c' ]
RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ]
RUN [ "${#SOME_VAR}" = '5' ]
RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ]
RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ]
RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ]
RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ]
`))
}
func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) {
// This testcase is supposed to generate an error because the
// JSON array we're passing in on the CMD uses single quotes instead
// of double quotes (per the JSON spec). This means we interpret it
// as a "string" instead of "JSON array" and pass it on to "sh -c" and
// it should barf on it.
name := "testbuildsinglequotefails"
expectedExitCode := 2
if testEnv.DaemonPlatform() == "windows" {
expectedExitCode = 127
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
CMD [ '/bin/sh', '-c', 'echo hi' ]`))
icmd.RunCommand(dockerBinary, "run", "--rm", name).Assert(c, icmd.Expected{
ExitCode: expectedExitCode,
})
}
func (s *DockerSuite) TestBuildVerboseOut(c *check.C) {
name := "testbuildverboseout"
expected := "\n123\n"
if testEnv.DaemonPlatform() == "windows" {
expected = "\n123\r\n"
}
buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo 123`)).Assert(c, icmd.Expected{
Out: expected,
})
}
func (s *DockerSuite) TestBuildWithTabs(c *check.C) {
name := "testbuildwithtabs"
buildImageSuccessfully(c, name, build.WithDockerfile("FROM busybox\nRUN echo\tone\t\ttwo"))
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]`
expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
if testEnv.DaemonPlatform() == "windows" {
expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]`
expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates
}
if res != expected1 && res != expected2 {
c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2)
}
}
func (s *DockerSuite) TestBuildLabels(c *check.C) {
name := "testbuildlabel"
expected := `{"License":"GPL","Vendor":"Acme"}`
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme
LABEL License GPL`))
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
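// TestBuildLabelsCache checks that identical LABEL instructions hit the cache while changed labels invalidate it.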
func (s *DockerSuite) TestBuildLabelsCache(c *check.C) {
name := "testbuildlabelcache"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme`))
id1 := getIDByName(c, name)
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme`))
id2 := getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Build 2 should have worked & used cache(%s,%s)", id1, id2)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor=Acme1`))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s)", id1, id2)
}
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL Vendor Acme`))
id2 = getIDByName(c, name)
if id1 != id2 {
c.Fatalf("Build 4 should have worked & used cache(%s,%s)", id1, id2)
}
// Now make sure the cache isn't used by mistake
buildImageSuccessfully(c, name, build.WithoutCache, build.WithDockerfile(`FROM busybox
LABEL f1=b1 f2=b2`))
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
LABEL f1=b1 f2=b2`))
id2 = getIDByName(c, name)
if id1 == id2 {
c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s)", id1, id2)
}
}
func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) {
// This test makes sure that -q works correctly when build is successful:
// stdout has only the image ID (long image ID) and stderr is empty.
outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$")
buildFlags := cli.WithFlags("-q")
tt := []struct {
Name string
BuildFunc func(string) *icmd.Result
}{
{
Name: "quiet_build_stdin_success",
BuildFunc: func(name string) *icmd.Result {
return buildImage(name, buildFlags, build.WithDockerfile("FROM busybox"))
},
},
{
Name: "quiet_build_ctx_success",
BuildFunc: func(name string) *icmd.Result {
return buildImage(name, buildFlags, build.WithBuildContext(c,
build.WithFile("Dockerfile", "FROM busybox"),
build.WithFile("quiet_build_success_fctx", "test"),
))
},
},
{
Name: "quiet_build_git_success",
BuildFunc: func(name string) *icmd.Result {
git := fakegit.New(c, "repo", map[string]string{
"Dockerfile": "FROM busybox",
}, true)
return buildImage(name, buildFlags, build.WithContextPath(git.RepoURL))
},
},
}
for _, te := range tt {
result := te.BuildFunc(te.Name)
result.Assert(c, icmd.Success)
if outRegexp.Find([]byte(result.Stdout())) == nil {
c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, result.Stdout())
}
if result.Stderr() != "" {
c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, result.Stderr())
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) {
// This test makes sure that -q works correctly when a build fails, by
// comparing the stderr output in quiet mode with the combined stdout
// and stderr output in verbose mode.
testRequires(c, Network)
testName := "quiet_build_not_exists_image"
dockerfile := "FROM busybox11"
quietResult := buildImage(testName, cli.WithFlags("-q"), build.WithDockerfile(dockerfile))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(testName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if quietResult.Stderr() != result.Combined() {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, quietResult.Stderr(), result.Combined()))
}
}
func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) {
// This test makes sure that -q works correctly when a build fails, by
// comparing the stderr output in quiet mode with the combined stdout
// and stderr output in verbose mode.
testCases := []struct {
testName string
dockerfile string
}{
{"quiet_build_no_from_at_the_beginning", "RUN whoami"},
{"quiet_build_unknown_instr", "FROMD busybox"},
}
for _, tc := range testCases {
quietResult := buildImage(tc.testName, cli.WithFlags("-q"), build.WithDockerfile(tc.dockerfile))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(tc.testName, build.WithDockerfile(tc.dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if quietResult.Stderr() != result.Combined() {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", tc.testName, quietResult.Stderr(), result.Combined()))
}
}
}
func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) {
// This test ensures that when given a wrong URL, stderr in quiet mode and
// stderr in verbose mode are identical.
// TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout
URL := "http://something.invalid"
name := "quiet_build_wrong_remote"
quietResult := buildImage(name, cli.WithFlags("-q"), build.WithContextPath(URL))
quietResult.Assert(c, icmd.Expected{
ExitCode: 1,
})
result := buildImage(name, build.WithContextPath(URL))
result.Assert(c, icmd.Expected{
ExitCode: 1,
})
if strings.TrimSpace(quietResult.Stderr()) != strings.TrimSpace(result.Combined()) {
c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", name, quietResult.Stderr(), result.Combined()))
}
}
func (s *DockerSuite) TestBuildStderr(c *check.C) {
// This test just makes sure that no non-error output goes
// to stderr
name := "testbuildstderr"
result := buildImage(name, build.WithDockerfile("FROM busybox\nRUN echo one"))
result.Assert(c, icmd.Success)
// Windows to non-Windows should have a security warning
if runtime.GOOS == "windows" && testEnv.DaemonPlatform() != "windows" && !strings.Contains(result.Stdout(), "SECURITY WARNING:") {
c.Fatalf("Stdout contains unexpected output: %q", result.Stdout())
}
// Stderr should always be empty
if result.Stderr() != "" {
c.Fatalf("Stderr should have been empty, instead it's: %q", result.Stderr())
}
}
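// TestBuildChownSingleFile checks that a file copied into the image is owned
// by root:root regardless of its ownership in the build context.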
func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) {
testRequires(c, UnixCli, DaemonIsLinux) // test uses chown: not available on windows
name := "testbuildchownsinglefile"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY test /
RUN ls -l /test
RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ]
`),
fakecontext.WithFiles(map[string]string{
"test": "test",
}))
defer ctx.Close()
if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil {
c.Fatal(err)
}
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
}
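// TestBuildSymlinkBreakout checks that a crafted symlink extracted from an
// ADDed tar cannot be used to write files outside the build root.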
func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) {
name := "testbuildsymlinkbreakout"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(`
from busybox
add symlink.tar /
add inject /symlink/
`), 0644); err != nil {
c.Fatal(err)
}
inject := filepath.Join(ctx, "inject")
if err := ioutil.WriteFile(inject, nil, 0644); err != nil {
c.Fatal(err)
}
f, err := os.Create(filepath.Join(ctx, "symlink.tar"))
if err != nil {
c.Fatal(err)
}
w := tar.NewWriter(f)
w.WriteHeader(&tar.Header{
Name: "symlink2",
Typeflag: tar.TypeSymlink,
Linkname: "/../../../../../../../../../../../../../../",
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.WriteHeader(&tar.Header{
Name: "symlink",
Typeflag: tar.TypeSymlink,
Linkname: filepath.Join("symlink2", tmpdir),
Uid: os.Getuid(),
Gid: os.Getgid(),
})
w.Close()
f.Close()
buildImageSuccessfully(c, name, build.WithoutCache, build.WithExternalBuildContext(fakecontext.New(c, ctx)))
if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil {
c.Fatal("symlink breakout - inject")
} else if !os.IsNotExist(err) {
c.Fatalf("unexpected error: %v", err)
}
}
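// TestBuildXZHost checks that the daemon never executes an xz binary injected
// into the image when handling ADD of an .xz file.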
func (s *DockerSuite) TestBuildXZHost(c *check.C) {
// /usr/local/sbin/xz gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux)
name := "testbuildxzhost"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD xz /usr/local/sbin/
RUN chmod 755 /usr/local/sbin/xz
ADD test.xz /
RUN [ ! -e /injected ]`),
build.WithFile("test.xz", "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00"+"\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd"+"\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21"),
build.WithFile("xz", "#!/bin/sh\ntouch /injected"),
))
}
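// TestBuildVolumesRetainContents checks that a file copied into a directory
// that is later declared as a VOLUME is still present when a container runs.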
func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) {
// /foo/file gets permission denied for the user
testRequires(c, NotUserNamespace)
testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127
var (
name = "testbuildvolumescontent"
expected = "some text"
volName = "/foo"
)
if testEnv.DaemonPlatform() == "windows" {
volName = "C:/foo"
}
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
COPY content /foo/file
VOLUME `+volName+`
CMD cat /foo/file`),
build.WithFile("content", expected),
))
out, _ := dockerCmd(c, "run", "--rm", name)
if out != expected {
c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out)
}
}
// FIXME(vdemeester) part of this should be unit test, other part should be clearer
func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) {
ctx := fakecontext.New(c, "", fakecontext.WithFiles(map[string]string{
"Dockerfile": "FROM busybox\nRUN echo from Dockerfile",
"files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile",
"files/dFile": "FROM busybox\nRUN echo from files/dFile",
"dFile": "FROM busybox\nRUN echo from dFile",
"files/dFile2": "FROM busybox\nRUN echo from files/dFile2",
}))
defer ctx.Close()
cli.Docker(cli.Args("build", "-t", "test1", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
cli.Docker(cli.Args("build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
Out: "from files/Dockerfile",
})
cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
Out: "from files/dFile",
})
cli.Docker(cli.Args("build", "--file=dFile", "-t", "test4", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
Out: "from dFile",
})
dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5")
c.Assert(err, check.IsNil)
nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile")
if _, err = os.Create(nonDockerfileFile); err != nil {
c.Fatal(err)
}
cli.Docker(cli.Args("build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", "."), cli.InDir(ctx.Dir)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: fmt.Sprintf("unable to prepare context: the Dockerfile (%s) must be within the build context", nonDockerfileFile),
})
cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
cli.Docker(cli.Args("build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", ".."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{
Out: "from files/Dockerfile",
})
cli.Docker(cli.Args("build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", "."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "must be within the build context",
})
tmpDir := os.TempDir()
cli.Docker(cli.Args("build", "-t", "test9", ctx.Dir), cli.InDir(tmpDir)).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
cli.Docker(cli.Args("build", "-f", "dFile2", "-t", "test10", "."), cli.InDir(filepath.Join(ctx.Dir, "files"))).Assert(c, icmd.Expected{
Out: "from files/dFile2",
})
}
func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) {
testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows
testRequires(c, DaemonIsLinux)
// If Dockerfile is not present, use dockerfile
buildImage("test1", build.WithBuildContext(c,
build.WithFile("dockerfile", `FROM busybox
RUN echo from dockerfile`),
)).Assert(c, icmd.Expected{
Out: "from dockerfile",
})
// Prefer Dockerfile in place of dockerfile
buildImage("test1", build.WithBuildContext(c,
build.WithFile("dockerfile", `FROM busybox
RUN echo from dockerfile`),
build.WithFile("Dockerfile", `FROM busybox
RUN echo from Dockerfile`),
)).Assert(c, icmd.Expected{
Out: "from Dockerfile",
})
}
func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) {
server := fakestorage.New(c, "", fakecontext.WithFiles(map[string]string{"baz": `FROM busybox
RUN echo from baz
COPY * /tmp/
RUN find /tmp/`}))
defer server.Close()
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
RUN echo from Dockerfile`))
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", server.URL()+"/baz"), func(cmd *icmd.Cmd) func() {
cmd.Dir = ctx.Dir
return nil
})
if !strings.Contains(result.Combined(), "from baz") ||
strings.Contains(result.Combined(), "/tmp/baz") ||
!strings.Contains(result.Combined(), "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", result.Combined())
}
}
func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`FROM busybox
RUN echo "from Dockerfile"`))
defer ctx.Close()
// Make sure that -f is ignored and that we don't use the Dockerfile
// that's in the current dir
result := cli.BuildCmd(c, "test1", cli.WithFlags("-f", "baz", "-"), func(cmd *icmd.Cmd) func() {
cmd.Dir = ctx.Dir
cmd.Stdin = strings.NewReader(`FROM busybox
RUN echo "from baz"
COPY * /tmp/
RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`)
return nil
})
if !strings.Contains(result.Combined(), "from baz") ||
strings.Contains(result.Combined(), "/tmp/baz") ||
!strings.Contains(result.Combined(), "/tmp/Dockerfile") {
c.Fatalf("Missing proper output: %s", result.Combined())
}
}
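// TestBuildFromOfficialNames checks that every equivalent official-library
// name form of busybox can be used in FROM.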
func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) {
name := "testbuildfromofficial"
fromNames := []string{
"busybox",
"docker.io/busybox",
"index.docker.io/busybox",
"library/busybox",
"docker.io/library/busybox",
"index.docker.io/library/busybox",
}
for idx, fromName := range fromNames {
imgName := fmt.Sprintf("%s%d", name, idx)
buildImageSuccessfully(c, imgName, build.WithDockerfile("FROM "+fromName))
dockerCmd(c, "rmi", imgName)
}
}
func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) {
testRequires(c, UnixCli, DaemonIsLinux) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2)
name := "testbuilddockerfileoutsidecontext"
tmpdir, err := ioutil.TempDir("", name)
c.Assert(err, check.IsNil)
defer os.RemoveAll(tmpdir)
ctx := filepath.Join(tmpdir, "context")
if err := os.MkdirAll(ctx, 0755); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil {
c.Fatal(err)
}
wd, err := os.Getwd()
if err != nil {
c.Fatal(err)
}
defer os.Chdir(wd)
if err := os.Chdir(ctx); err != nil {
c.Fatal(err)
}
if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil {
c.Fatal(err)
}
if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil {
c.Fatal(err)
}
for _, dockerfilePath := range []string{
filepath.Join("..", "outsideDockerfile"),
filepath.Join(ctx, "dockerfile1"),
filepath.Join(ctx, "dockerfile2"),
} {
result := dockerCmdWithResult("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".")
c.Assert(result, icmd.Matches, icmd.Expected{
Err: "must be within the build context",
ExitCode: 1,
})
deleteImages(name)
}
os.Chdir(tmpdir)
// Path to Dockerfile should be resolved relative to working directory, not relative to context.
// There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail
out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx)
if err == nil {
c.Fatalf("Expected error. Out: %s", out)
}
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildSpaces(c *check.C) {
// Test to make sure that leading/trailing spaces on a command
// don't change the error msg we get
name := "testspaces"
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile("FROM busybox\nCOPY\n"))
defer ctx.Close()
result1 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx))
result1.Assert(c, icmd.Expected{
ExitCode: 1,
})
ctx.Add("Dockerfile", "FROM busybox\nCOPY ")
result2 := cli.Docker(cli.Build(name), build.WithExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
removeLogTimestamps := func(s string) string {
return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`)
}
// Skip over the times
e1 := removeLogTimestamps(result1.Error.Error())
e2 := removeLogTimestamps(result2.Error.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", result1.Error, result2.Error)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY")
result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
// Skip over the times
e1 = removeLogTimestamps(result1.Error.Error())
e2 = removeLogTimestamps(result2.Error.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", result1.Error, result2.Error)
}
ctx.Add("Dockerfile", "FROM busybox\n COPY ")
result2 = cli.Docker(cli.Build(name), build.WithoutCache, build.WithExternalBuildContext(ctx))
result2.Assert(c, icmd.Expected{
ExitCode: 1,
})
// Skip over the times
e1 = removeLogTimestamps(result1.Error.Error())
e2 = removeLogTimestamps(result2.Error.Error())
// Ignore whitespace since that's what we're verifying doesn't change stuff
if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) {
c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", result1.Error, result2.Error)
}
}
func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) {
// Test to make sure that spaces in quotes aren't lost
name := "testspacesquotes"
dockerfile := `FROM busybox
RUN echo " \
foo "`
expected := "\n foo \n"
// Windows uses the builtin echo, which preserves quotes
if testEnv.DaemonPlatform() == "windows" {
expected = "\" foo \""
}
buildImage(name, build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
Out: expected,
})
}
// #4393
func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) {
testRequires(c, DaemonIsLinux) // TODO Windows: This should error out
buildImage("docker-test-errcreatevolumewithfile", build.WithDockerfile(`
FROM busybox
RUN touch /foo
VOLUME /foo
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "file exists",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildMissingArgs(c *check.C) {
// Test to make sure that all Dockerfile commands (except the ones listed
// in skipCmds) generate an error if no args are provided.
// Note: INSERT is deprecated, so it is excluded as well.
skipCmds := map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
}
if testEnv.DaemonPlatform() == "windows" {
skipCmds = map[string]struct{}{
"CMD": {},
"RUN": {},
"ENTRYPOINT": {},
"INSERT": {},
"STOPSIGNAL": {},
"ARG": {},
"USER": {},
"EXPOSE": {},
}
}
for cmd := range command.Commands {
cmd = strings.ToUpper(cmd)
if _, ok := skipCmds[cmd]; ok {
continue
}
var dockerfile string
if cmd == "FROM" {
dockerfile = cmd
} else {
// Add FROM to make sure we don't complain about it missing
dockerfile = "FROM busybox\n" + cmd
}
buildImage("args", build.WithDockerfile(dockerfile)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: cmd + " requires",
})
}
}
func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) {
testRequires(c, DaemonIsLinux)
buildImage("sc", build.WithDockerfile("FROM scratch")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "No image was generated",
})
}
func (s *DockerSuite) TestBuildDotDotFile(c *check.C) {
buildImageSuccessfully(c, "sc", build.WithBuildContext(c,
build.WithFile("Dockerfile", "FROM busybox\n"),
build.WithFile("..gitme", ""),
))
}
func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) {
testRequires(c, DaemonIsLinux) // No hello-world Windows image
name := "testbuildrunonejson"
buildImage(name, build.WithDockerfile(`FROM hello-world:frozen
RUN [ "/hello" ]`)).Assert(c, icmd.Expected{
Out: "Hello from Docker",
})
}
func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) {
name := "testbuildemptystringvolume"
buildImage(name, build.WithDockerfile(`
FROM busybox
ENV foo=""
VOLUME $foo
`)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) {
testRequires(c, SameHostDaemon, DaemonIsLinux)
cgroupParent := "test"
data, err := ioutil.ReadFile("/proc/self/cgroup")
if err != nil {
c.Fatalf("failed to read '/proc/self/cgroup - %v", err)
}
selfCgroupPaths := testutil.ParseCgroupPaths(string(data))
_, found := selfCgroupPaths["memory"]
if !found {
c.Fatalf("unable to find self memory cgroup path. CgroupsPath: %v", selfCgroupPaths)
}
result := buildImage("buildcgroupparent",
cli.WithFlags("--cgroup-parent", cgroupParent),
build.WithDockerfile(`
FROM busybox
RUN cat /proc/self/cgroup
`))
result.Assert(c, icmd.Success)
m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), result.Combined())
c.Assert(err, check.IsNil)
if !m {
c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, result.Combined())
}
}
// FIXME(vdemeester) could be a unit test
func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) {
// Check to make sure our build output prints the Dockerfile cmd
// property - there was a bug that caused it to be duplicated on the
// Step X line
name := "testbuildnodupoutput"
result := buildImage(name, build.WithDockerfile(`
FROM busybox
RUN env`))
result.Assert(c, icmd.Success)
exp := "\nStep 2/2 : RUN env\n"
if !strings.Contains(result.Combined(), exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
}
}
// GH15826
// FIXME(vdemeester) could be a unit test
func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) {
// Explicit check to ensure that build starts from step 1 rather than 0
name := "testbuildstartsfromone"
result := buildImage(name, build.WithDockerfile(`FROM busybox`))
result.Assert(c, icmd.Success)
exp := "\nStep 1/1 : FROM busybox\n"
if !strings.Contains(result.Combined(), exp) {
c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", result.Combined(), exp)
}
}
func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) {
// Test to make sure the bad command is quoted with just "s and
// not as a Go []string
name := "testbuildbadrunerrmsg"
shell := "/bin/sh -c"
exitCode := 127
if testEnv.DaemonPlatform() == "windows" {
shell = "cmd /S /C"
// architectural - Windows has to start the container to determine the exe is bad, Linux does not
exitCode = 1
}
exp := fmt.Sprintf(`The command '%s badEXE a1 \& a2 a3' returned a non-zero code: %d`, shell, exitCode)
buildImage(name, build.WithDockerfile(`
FROM busybox
RUN badEXE a1 \& a2 a3`)).Assert(c, icmd.Expected{
ExitCode: exitCode,
Err: exp,
})
}
func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) {
repoName := s.setupTrustedImage(c, "trusted-build")
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuild"
buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7]),
})
// We should also have a tag reference for the image.
dockerCmd(c, "inspect", repoName)
// We should now be able to remove the tag reference.
dockerCmd(c, "rmi", repoName)
}
func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, repoName)
name := "testtrustedbuilduntrustedtag"
buildImage(name, trustedBuild, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "does not have trust data for",
})
}
func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) {
testRequires(c, DaemonIsLinux)
tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-")
c.Assert(err, check.IsNil)
defer os.RemoveAll(tempDir)
// Make a real context directory in this temp directory with a simple
// Dockerfile.
realContextDirname := filepath.Join(tempDir, "context")
if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil {
c.Fatal(err)
}
if err = ioutil.WriteFile(
filepath.Join(realContextDirname, "Dockerfile"),
[]byte(`
FROM busybox
RUN echo hello world
`),
os.FileMode(0644),
); err != nil {
c.Fatal(err)
}
// Make a symlink to the real context directory.
contextSymlinkName := filepath.Join(tempDir, "context_link")
if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil {
c.Fatal(err)
}
// Executing the build with the symlink as the specified context should
// *not* fail.
dockerCmd(c, "build", contextSymlinkName)
}
func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) {
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create the releases role
s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the releases role
otherTag := fmt.Sprintf("%s:other", repoName)
cli.DockerCmd(c, "tag", "busybox", otherTag)
cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success)
s.assertTargetInRoles(c, repoName, "other", "targets/releases")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
cli.DockerCmd(c, "rmi", otherTag)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildreleasesrole"
cli.BuildCmd(c, name, trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", repoName),
})
}
func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) {
testRequires(c, NotaryHosting)
latestTag := s.setupTrustedImage(c, "trusted-build-releases-role")
repoName := strings.TrimSuffix(latestTag, ":latest")
// Now create a non-releases delegation role
s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public)
s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private)
s.notaryPublish(c, repoName)
// push a different tag to the other role
otherTag := fmt.Sprintf("%s:other", repoName)
cli.DockerCmd(c, "tag", "busybox", otherTag)
cli.Docker(cli.Args("push", otherTag), trustedCmd).Assert(c, icmd.Success)
s.assertTargetInRoles(c, repoName, "other", "targets/other")
s.assertTargetNotInRoles(c, repoName, "other", "targets")
cli.DockerCmd(c, "rmi", otherTag)
dockerFile := fmt.Sprintf(`
FROM %s
RUN []
`, otherTag)
name := "testtrustedbuildotherrole"
cli.Docker(cli.Build(name), trustedCmd, build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
// Issue #15634: COPY fails when path starts with "null"
func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) {
name := "testbuildnullstringinaddcopyvolume"
volName := "nullvolume"
if testEnv.DaemonPlatform() == "windows" {
volName = `C:\\nullvolume`
}
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `
FROM busybox
ADD null /
COPY nullfile /
VOLUME `+volName+`
`),
build.WithFile("null", "test1"),
build.WithFile("nullfile", "test2"),
))
}
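// TestBuildStopSignal checks that STOPSIGNAL is recorded in the image config
// and inherited by containers created from the image.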
func (s *DockerSuite) TestBuildStopSignal(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet
imgName := "test_build_stop_signal"
buildImageSuccessfully(c, imgName, build.WithDockerfile(`FROM busybox
STOPSIGNAL SIGKILL`))
res := inspectFieldJSON(c, imgName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
containerName := "test-container-stop-signal"
dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top")
res = inspectFieldJSON(c, containerName, "Config.StopSignal")
if res != `"SIGKILL"` {
c.Fatalf("Signal %s, expected SIGKILL", res)
}
}
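// TestBuildBuildTimeArg checks that a --build-arg value is visible to RUN
// during the build but is not persisted into the resulting container's environment.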
func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
var dockerfile string
if testEnv.DaemonPlatform() == "windows" {
// Bugs in Windows busybox port - use the default base image and native cmd stuff
dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+`
ARG %s
RUN echo %%%s%%
CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey)
} else {
dockerfile = fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s
CMD echo $%s`, envKey, envKey, envKey)
}
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: envVal,
})
containerName := "bldargCont"
out, _ := dockerCmd(c, "run", "--name", containerName, imgName)
out = strings.Trim(out, " \r\n'")
if out != "" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
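// TestBuildBuildTimeArgHistory checks that an ARG default value is recorded
// in the image history.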
func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envDef := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s`, envKey, envDef)
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: envVal,
})
out, _ := dockerCmd(c, "history", "--no-trunc", imgName)
outputTabs := strings.Split(out, "\n")[1]
if !strings.Contains(outputTabs, envDef) {
c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef)
}
}
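// TestBuildTimeArgHistoryExclusions checks that proxy-related build args are
// excluded from the image history while ordinary build args still appear,
// and that the exclusion does not break build caching.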
func (s *DockerSuite) TestBuildTimeArgHistoryExclusions(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
proxy := "HTTP_PROXY=http://user:[email protected]"
explicitProxyKey := "http_proxy"
explicitProxyVal := "http://user:[email protected]"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ARG %s
RUN echo "Testing Build Args!"`, envKey, explicitProxyKey)
buildImage := func(imgName string) string {
cli.BuildCmd(c, imgName,
cli.WithFlags("--build-arg", "https_proxy=https://proxy.example.com",
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
"--build-arg", fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal),
"--build-arg", proxy),
build.WithDockerfile(dockerfile),
)
return getIDByName(c, imgName)
}
origID := buildImage(imgName)
result := cli.DockerCmd(c, "history", "--no-trunc", imgName)
out := result.Stdout()
if strings.Contains(out, proxy) {
c.Fatalf("failed to exclude proxy settings from history!")
}
if strings.Contains(out, "https_proxy") {
c.Fatalf("failed to exclude proxy settings from history!")
}
result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", envKey, envVal)})
result.Assert(c, icmd.Expected{Out: fmt.Sprintf("%s=%s", explicitProxyKey, explicitProxyVal)})
cacheID := buildImage(imgName + "-two")
c.Assert(origID, checker.Equals, cacheID)
}
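// TestBuildBuildTimeArgCacheHit checks that rebuilding with an identical
// --build-arg value hits the build cache.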
func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachehit"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
newImgID := getIDByName(c, imgNameCache)
if newImgID != origImgID {
c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
extraEnvKey := "foo1"
extraEnvVal := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ARG %s
RUN echo $%s`, envKey, extraEnvKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachemiss"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags(
"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal),
"--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal),
),
build.WithDockerfile(dockerfile),
)
newImgID := getIDByName(c, imgNameCache)
if newImgID == origImgID {
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
newEnvVal := "bar1"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN echo $%s`, envKey, envKey)
buildImageSuccessfully(c, imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
origImgID := getIDByName(c, imgName)
imgNameCache := "bldargtestcachemiss"
buildImageSuccessfully(c, imgNameCache,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal)),
build.WithDockerfile(dockerfile),
)
newImgID := getIDByName(c, imgNameCache)
if newImgID == origImgID {
c.Fatalf("build used cache, expected a miss!")
}
}
func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
RUN echo $%s
CMD echo $%s
`, envKey, envKey, envValOveride, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOveride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
// FIXME(vdemeester) might be useful to merge with the one above ?
func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ENV %s %s
ARG %s
RUN echo $%s
CMD echo $%s
`, envKey, envValOveride, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOveride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
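// TestBuildBuildTimeArgExpansion checks that ARG values are expanded in
// WORKDIR, ADD, COPY, ENV, EXPOSE, USER and VOLUME instructions.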
func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
wdVar := "WDIR"
wdVal := "/tmp/"
addVar := "AFILE"
addVal := "addFile"
copyVar := "CFILE"
copyVal := "copyFile"
envVar := "foo"
envVal := "bar"
exposeVar := "EPORT"
exposeVal := "9999"
userVar := "USER"
userVal := "testUser"
volVar := "VOL"
volVal := "/testVol/"
buildImageSuccessfully(c, imgName,
cli.WithFlags(
"--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal),
"--build-arg", fmt.Sprintf("%s=%s", addVar, addVal),
"--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal),
"--build-arg", fmt.Sprintf("%s=%s", envVar, envVal),
"--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal),
"--build-arg", fmt.Sprintf("%s=%s", userVar, userVal),
"--build-arg", fmt.Sprintf("%s=%s", volVar, volVal),
),
build.WithBuildContext(c,
build.WithFile("Dockerfile", fmt.Sprintf(`FROM busybox
ARG %s
WORKDIR ${%s}
ARG %s
ADD ${%s} testDir/
ARG %s
COPY $%s testDir/
ARG %s
ENV %s=${%s}
ARG %s
EXPOSE $%s
ARG %s
USER $%s
ARG %s
VOLUME ${%s}`,
wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar,
envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar)),
build.WithFile(addVal, "some stuff"),
build.WithFile(copyVal, "some stuff"),
),
)
res := inspectField(c, imgName, "Config.WorkingDir")
c.Check(res, check.Equals, filepath.ToSlash(wdVal))
var resArr []string
inspectFieldAndUnmarshall(c, imgName, "Config.Env", &resArr)
found := false
for _, v := range resArr {
if fmt.Sprintf("%s=%s", envVar, envVal) == v {
found = true
break
}
}
if !found {
c.Fatalf("Config.Env value mismatch. Expected <key=value> to exist: %s=%s, got: %v",
envVar, envVal, resArr)
}
var resMap map[string]interface{}
inspectFieldAndUnmarshall(c, imgName, "Config.ExposedPorts", &resMap)
if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok {
c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap)
}
res = inspectField(c, imgName, "Config.User")
if res != userVal {
c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res)
}
inspectFieldAndUnmarshall(c, imgName, "Config.Volumes", &resMap)
if _, ok := resMap[volVal]; !ok {
c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldvarstest"
envKey := "foo"
envVal := "bar"
envKey1 := "foo1"
envValOveride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
ENV %s %s
ENV %s ${%s}
RUN echo $%s
CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOveride) != 2 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
ARG %s
CMD echo $%s`, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Contains(result.Combined(), envVal) {
c.Fatalf("able to access environment variable in output: %q expected to be missing", result.Combined())
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support --build-arg
imgName := "bldargtest"
envKey := "HTTP_PROXY"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if !strings.Contains(result.Combined(), envVal) {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envVal)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" {
c.Fatalf("run produced invalid output: %q, expected empty string", out)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
envValOveride := "barOverride"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=%s
ENV %s $%s
RUN echo $%s
CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey)
result := buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride)),
build.WithDockerfile(dockerfile),
)
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envValOveride) != 1 {
c.Fatalf("failed to access environment variable in output: %q expected: %q", result.Combined(), envValOveride)
}
containerName := "bldargCont"
if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) {
c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride)
}
}
func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envVal := "bar"
dockerfile := fmt.Sprintf(`FROM busybox
RUN echo $%s
CMD echo $%s`, envKey, envKey)
warnStr := "[Warning] One or more build-args"
buildImage(imgName,
cli.WithFlags("--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)),
build.WithDockerfile(dockerfile),
).Assert(c, icmd.Expected{
Out: warnStr,
})
}
func (s *DockerSuite) TestBuildBuildTimeArgEnv(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
dockerfile := `FROM busybox
ARG FOO1=fromfile
ARG FOO2=fromfile
ARG FOO3=fromfile
ARG FOO4=fromfile
ARG FOO5
ARG FOO6
ARG FO10
RUN env
RUN [ "$FOO1" == "fromcmd" ]
RUN [ "$FOO2" == "" ]
RUN [ "$FOO3" == "fromenv" ]
RUN [ "$FOO4" == "fromfile" ]
RUN [ "$FOO5" == "fromcmd" ]
# The following should not exist at all in the env
RUN [ "$(env | grep FOO6)" == "" ]
RUN [ "$(env | grep FOO7)" == "" ]
RUN [ "$(env | grep FOO8)" == "" ]
RUN [ "$(env | grep FOO9)" == "" ]
RUN [ "$FO10" == "" ]
`
result := buildImage("testbuildtimeargenv",
cli.WithFlags(
"--build-arg", fmt.Sprintf("FOO1=fromcmd"),
"--build-arg", fmt.Sprintf("FOO2="),
"--build-arg", fmt.Sprintf("FOO3"), // set in env
"--build-arg", fmt.Sprintf("FOO4"), // not set in env
"--build-arg", fmt.Sprintf("FOO5=fromcmd"),
// FOO6 is not set at all
"--build-arg", fmt.Sprintf("FOO7=fromcmd"), // should produce a warning
"--build-arg", fmt.Sprintf("FOO8="), // should produce a warning
"--build-arg", fmt.Sprintf("FOO9"), // should produce a warning
"--build-arg", fmt.Sprintf("FO10"), // not set in env, empty value
),
cli.WithEnvironmentVariables(append(os.Environ(),
"FOO1=fromenv",
"FOO2=fromenv",
"FOO3=fromenv")...),
build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
),
)
result.Assert(c, icmd.Success)
// Now check to make sure we got a warning msg about unused build-args
i := strings.Index(result.Combined(), "[Warning]")
if i < 0 {
c.Fatalf("Missing the build-arg warning in %q", result.Combined())
}
out := result.Combined()[i:] // "out" should contain just the warning message now
// These were specified on a --build-arg but no ARG was in the Dockerfile
c.Assert(out, checker.Contains, "FOO7")
c.Assert(out, checker.Contains, "FOO8")
c.Assert(out, checker.Contains, "FOO9")
}
func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
envKey3 := "foo3"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=""
ARG %s=''
ARG %s="''"
ARG %s='""'
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]
RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3,
envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3,
envKey2, envKey3)
buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile))
}
func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) {
testRequires(c, DaemonIsLinux) // Windows does not support ARG
imgName := "bldargtest"
envKey := "foo"
envKey1 := "foo1"
envKey2 := "foo2"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s=
ARG %s=""
ARG %s=''
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]
RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2)
buildImageSuccessfully(c, imgName, build.WithDockerfile(dockerfile))
}
func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) {
imgName := "bldargtest"
envKey := "foo"
dockerfile := fmt.Sprintf(`FROM busybox
ARG %s
RUN env`, envKey)
result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Success)
if strings.Count(result.Combined(), envKey) != 1 {
c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", result.Combined())
}
}
func (s *DockerSuite) TestBuildBuildTimeArgMultipleFrom(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `FROM busybox
ARG foo=abc
LABEL multifromtest=1
RUN env > /out
FROM busybox
ARG bar=def
RUN env > /out`
result := cli.BuildCmd(c, imgName, build.WithDockerfile(dockerfile))
result.Assert(c, icmd.Success)
result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1")
parentID := strings.TrimSpace(result.Stdout())
result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out")
c.Assert(result.Stdout(), checker.Contains, "foo=abc")
result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "foo")
c.Assert(result.Stdout(), checker.Contains, "bar=def")
}
func (s *DockerSuite) TestBuildBuildTimeFromArgMultipleFrom(c *check.C) {
imgName := "multifrombldargtest"
dockerfile := `ARG tag=nosuchtag
FROM busybox:${tag}
LABEL multifromtest=1
RUN env > /out
FROM busybox:${tag}
ARG tag
RUN env > /out`
result := cli.BuildCmd(c, imgName,
build.WithDockerfile(dockerfile),
cli.WithFlags("--build-arg", fmt.Sprintf("tag=latest")))
result.Assert(c, icmd.Success)
result = cli.DockerCmd(c, "images", "-q", "-f", "label=multifromtest=1")
parentID := strings.TrimSpace(result.Stdout())
result = cli.DockerCmd(c, "run", "--rm", parentID, "cat", "/out")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "tag")
result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out")
c.Assert(result.Stdout(), checker.Contains, "tag=latest")
}
func (s *DockerSuite) TestBuildBuildTimeUnusedArgMultipleFrom(c *check.C) {
imgName := "multifromunusedarg"
dockerfile := `FROM busybox
ARG foo
FROM busybox
ARG bar
RUN env > /out`
result := cli.BuildCmd(c, imgName,
build.WithDockerfile(dockerfile),
cli.WithFlags("--build-arg", fmt.Sprintf("baz=abc")))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "[Warning]")
c.Assert(result.Combined(), checker.Contains, "[baz] were not consumed")
result = cli.DockerCmd(c, "run", "--rm", imgName, "cat", "/out")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "bar")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "baz")
}
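// TestBuildNoNamedVolume checks that the contents of a named volume populated
// by `docker run` are not visible to RUN during a build that declares the same volume.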
func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) {
volName := "testname:/foo"
if testEnv.DaemonPlatform() == "windows" {
volName = "testname:C:\\foo"
}
dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops")
dockerFile := `FROM busybox
VOLUME ` + volName + `
RUN ls /foo/oops
`
buildImage("test", build.WithDockerfile(dockerFile)).Assert(c, icmd.Expected{
ExitCode: 1,
})
}
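// TestBuildTagEvent checks that a successful build emits a "tag" event for
// the resulting image.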
func (s *DockerSuite) TestBuildTagEvent(c *check.C) {
since := daemonUnixTime(c)
dockerFile := `FROM busybox
RUN echo events
`
buildImageSuccessfully(c, "test", build.WithDockerfile(dockerFile))
until := daemonUnixTime(c)
out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image")
events := strings.Split(strings.TrimSpace(out), "\n")
actions := eventActionsByIDAndType(c, events, "test:latest", "image")
var foundTag bool
for _, a := range actions {
if a == "tag" {
foundTag = true
break
}
}
c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out))
}
// #15780
func (s *DockerSuite) TestBuildMultipleTags(c *check.C) {
dockerfile := `
FROM busybox
MAINTAINER test-15780
`
buildImageSuccessfully(c, "tag1", cli.WithFlags("-t", "tag2:v2", "-t", "tag1:latest", "-t", "tag1"), build.WithDockerfile(dockerfile))
id1 := getIDByName(c, "tag1")
id2 := getIDByName(c, "tag2:v2")
c.Assert(id1, check.Equals, id2)
}
// #17290
func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY . ./`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
err := os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
// warm up cache
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
// add new file to context, should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644)
c.Assert(err, checker.IsNil)
result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
if strings.Contains(result.Combined(), "Using cache") {
c.Fatal("2nd build used cache on ADD, it shouldn't")
}
}
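// TestBuildFollowSymlinkToFile checks that COPY of a symlink copies the
// target file's content and that changing the target invalidates the cache.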
func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY asymlink target`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined()
c.Assert(out, checker.Matches, "bar")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
out = cli.DockerCmd(c, "run", "--rm", name, "cat", "target").Combined()
c.Assert(out, checker.Matches, "baz")
}
func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY asymlink /`),
fakecontext.WithFiles(map[string]string{
"foo/abc": "bar",
"foo/def": "baz",
}))
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined()
c.Assert(out, checker.Matches, "barbaz")
// change target file should invalidate cache
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644)
c.Assert(err, checker.IsNil)
result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
out = cli.DockerCmd(c, "run", "--rm", name, "cat", "abc", "def").Combined()
c.Assert(out, checker.Matches, "barbax")
}
// TestBuildSymlinkBasename tests that the copied file takes its basename
// from the symlink, not from the symlink's target.
func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) {
name := "testbuildbrokensymlink"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY asymlink /`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
err := os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink"))
c.Assert(err, checker.IsNil)
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "--rm", name, "cat", "asymlink").Combined()
c.Assert(out, checker.Matches, "bar")
}
// #17827
func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) {
name := "testbuildrootsource"
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(`
FROM busybox
COPY / /data`),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
// warm up cache
cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
// change file, should invalidate cache
err := ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644)
c.Assert(err, checker.IsNil)
result := cli.BuildCmd(c, name, build.WithExternalBuildContext(ctx))
c.Assert(result.Combined(), checker.Not(checker.Contains), "Using cache")
}
// #19375
func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) {
buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="),
build.WithContextPath("github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "unable to prepare context: unable to find 'git': ",
})
buildImage("gitnotcallable", cli.WithEnvironmentVariables("PATH="),
build.WithContextPath("https://github.com/docker/v1.10-migrator.git")).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "unable to prepare context: unable to find 'git': ",
})
}
// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir
func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildworkdirwindowspath"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+testEnv.MinimalBaseImage()+`
RUN mkdir C:\\work
WORKDIR C:\\work
RUN if "%CD%" NEQ "C:\work" exit -1
`))
}
func (s *DockerSuite) TestBuildLabel(c *check.C) {
name := "testbuildlabel"
testLabel := "foo"
buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) {
name := "testbuildlabel"
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=bar"),
build.WithDockerfile("FROM busybox"))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
v, ok := labels["foo"]
if !ok {
c.Fatal("label `foo` not found in image")
}
c.Assert(v, checker.Equals, "bar")
}
func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) {
name := "testbuildlabelcachecommit"
testLabel := "foo"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
buildImageSuccessfully(c, name, cli.WithFlags("--label", testLabel),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
if _, ok := labels[testLabel]; !ok {
c.Fatal("label not found in image")
}
}
func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) {
name := "testbuildlabelmultiple"
testLabels := map[string]string{
"foo": "bar",
"123": "456",
}
labelArgs := []string{}
for k, v := range testLabels {
labelArgs = append(labelArgs, "--label", k+"="+v)
}
buildImageSuccessfully(c, name, cli.WithFlags(labelArgs...),
build.WithDockerfile(`
FROM `+minimalBaseImage()+`
LABEL default foo
`))
var labels map[string]string
inspectFieldAndUnmarshall(c, name, "Config.Labels", &labels)
for k, v := range testLabels {
if x, ok := labels[k]; !ok || x != v {
c.Fatalf("label %s=%s not found in image", k, v)
}
}
}
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) {
dockerCmd(c, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
baseImage := privateRegistryURL + "/baseimage"
buildImageSuccessfully(c, baseImage, build.WithDockerfile(`
FROM busybox
ENV env1 val1
`))
dockerCmd(c, "push", baseImage)
dockerCmd(c, "rmi", baseImage)
buildImageSuccessfully(c, baseImage, build.WithDockerfile(fmt.Sprintf(`
FROM %s
ENV env2 val2
`, baseImage)))
}
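// TestBuildWithExternalAuth checks that a build can pull its base image from
// an authenticated registry using credentials stored in an external credential helper.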
func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) {
osPath := os.Getenv("PATH")
defer os.Setenv("PATH", osPath)
workingDir, err := os.Getwd()
c.Assert(err, checker.IsNil)
absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth"))
c.Assert(err, checker.IsNil)
testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute)
os.Setenv("PATH", testPath)
repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL)
tmp, err := ioutil.TempDir("", "integration-cli-")
c.Assert(err, checker.IsNil)
externalAuthConfig := `{ "credsStore": "shell-test" }`
configPath := filepath.Join(tmp, "config.json")
err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644)
c.Assert(err, checker.IsNil)
dockerCmd(c, "--config", tmp, "login", "-u", s.reg.Username(), "-p", s.reg.Password(), privateRegistryURL)
b, err := ioutil.ReadFile(configPath)
c.Assert(err, checker.IsNil)
c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":")
dockerCmd(c, "--config", tmp, "tag", "busybox", repoName)
dockerCmd(c, "--config", tmp, "push", repoName)
// make sure the image is pulled when building
dockerCmd(c, "rmi", repoName)
icmd.RunCmd(icmd.Cmd{
Command: []string{dockerBinary, "--config", tmp, "build", "-"},
Stdin: strings.NewReader(fmt.Sprintf("FROM %s", repoName)),
}).Assert(c, icmd.Success)
}
// Test cases in #22036
func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) {
// Command line option labels will always override
name := "scratchy"
expected := `{"bar":"from-flag","foo":"from-flag"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"),
build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo=from-dockerfile`))
res := inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
name = "from"
expected = `{"foo":"from-dockerfile"}`
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo from-dockerfile`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option label will override even via `FROM`
name = "new"
expected = `{"bar":"from-dockerfile2","foo":"new"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=new"),
build.WithDockerfile(`FROM from
LABEL bar from-dockerfile2`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
name = "scratchy2"
expected = `{"bar":"","foo":""}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo", "--label", "bar="),
build.WithDockerfile(`FROM `+minimalBaseImage()+`
LABEL foo=from-dockerfile`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option without a value set (--label foo, --label bar=)
// will be treated as --label foo="", --label bar=""
// This time is for inherited images
name = "new2"
expected = `{"bar":"","foo":""}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=", "--label", "bar"),
build.WithDockerfile(`FROM from
LABEL bar from-dockerfile2`))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with only `FROM`
name = "scratchy"
expected = `{"bar":"from-flag","foo":"from-flag"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "foo=from-flag", "--label", "bar=from-flag"),
build.WithDockerfile(`FROM `+minimalBaseImage()))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
// Command line option labels with env var
name = "scratchz"
expected = `{"bar":"$PATH"}`
buildImageSuccessfully(c, name, cli.WithFlags("--label", "bar=$PATH"),
build.WithDockerfile(`FROM `+minimalBaseImage()))
res = inspectFieldJSON(c, name, "Config.Labels")
if res != expected {
c.Fatalf("Labels %s, expected %s", res, expected)
}
}
// Test case for #22855
func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) {
name := "test-delete-committed-file"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
RUN echo test > file
RUN test -e file
RUN rm file
RUN sh -c "! test -e file"`))
}
// #20083
func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) {
// TODO Windows: Figure out why this test is flakey on TP5. If you add
// something like RUN sleep 5, or even RUN ls /tmp after the ADD line,
// it is more reliable, but that's not a good fix.
testRequires(c, DaemonIsLinux)
name := "testbuilddockerignorecleanpaths"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN sh -c "(ls -la /tmp/#1)"
RUN sh -c "(! ls -la /tmp/#2)"
RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"`
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("foo", "foo"),
build.WithFile("foo2", "foo2"),
build.WithFile("dir1/foo", "foo in dir1"),
build.WithFile("#1", "# file 1"),
build.WithFile("#2", "# file 2"),
build.WithFile(".dockerignore", `# Visual C++ cache files
# because we have git ;-)
# The above comment is from #20083
foo
#dir1/foo
foo2
# The following is considered as comment as # is at the beginning
#1
# The following is not considered as comment as # is not at the beginning
#2
`)))
}
// Test case for #23221
func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) {
name := "test-with-utf8-bom"
dockerfile := []byte(`FROM busybox`)
bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...)
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", string(bomDockerfile)),
))
}
// Test case for UTF-8 BOM in .dockerignore, related to #23221
func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) {
name := "test-with-utf8-bom-dockerignore"
dockerfile := `
FROM busybox
ADD . /tmp/
RUN ls -la /tmp
RUN sh -c "! ls /tmp/Dockerfile"
RUN ls /tmp/.dockerignore`
dockerignore := []byte("./Dockerfile\n")
bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...)
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile(".dockerignore", string(bomDockerignore)),
))
}
// #22489 Shell test to confirm config gets updated correctly
func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) {
name := "testbuildshellupdatesconfig"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
SHELL ["foo", "-bar"]`))
expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]`
res := inspectFieldJSON(c, name, "ContainerConfig.Cmd")
if res != expected {
c.Fatalf("%s, expected %s", res, expected)
}
res = inspectFieldJSON(c, name, "ContainerConfig.Shell")
if res != `["foo","-bar"]` {
c.Fatalf(`%s, expected ["foo","-bar"]`, res)
}
}
// #22489 Changing the shell multiple times and CMD after.
func (s *DockerSuite) TestBuildShellMultiple(c *check.C) {
name := "testbuildshellmultiple"
result := buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo defaultshell
SHELL ["echo"]
RUN echoshell
SHELL ["ls"]
RUN -l
CMD -l`))
result.Assert(c, icmd.Success)
// Must contain 'defaultshell' twice
if len(strings.Split(result.Combined(), "defaultshell")) != 3 {
c.Fatalf("defaultshell should have appeared twice in %s", result.Combined())
}
// Must contain 'echoshell' twice
if len(strings.Split(result.Combined(), "echoshell")) != 3 {
c.Fatalf("echoshell should have appeared twice in %s", result.Combined())
}
// Must contain "total " (part of ls -l)
if !strings.Contains(result.Combined(), "total ") {
c.Fatalf("%s should have contained 'total '", result.Combined())
}
// A container started from the image uses the shell-form CMD.
// Last shell is ls. CMD is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489. Changed SHELL with ENTRYPOINT
func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) {
name := "testbuildshellentrypoint"
buildImageSuccessfully(c, name, build.WithDockerfile(`FROM busybox
SHELL ["ls"]
ENTRYPOINT -l`))
// A container started from the image uses the shell-form ENTRYPOINT.
// Shell is ls. ENTRYPOINT is -l. So should contain 'total '.
outrun, _ := dockerCmd(c, "run", "--rm", name)
if !strings.Contains(outrun, "total ") {
c.Fatalf("Expected started container to run ls -l. %s", outrun)
}
}
// #22489 Shell test to confirm shell is inherited in a subsequent build
func (s *DockerSuite) TestBuildShellInherited(c *check.C) {
name1 := "testbuildshellinherited1"
buildImageSuccessfully(c, name1, build.WithDockerfile(`FROM busybox
SHELL ["ls"]`))
name2 := "testbuildshellinherited2"
buildImage(name2, build.WithDockerfile(`FROM `+name1+`
RUN -l`)).Assert(c, icmd.Expected{
// ls -l has "total " followed by some number in it, ls without -l does not.
Out: "total ",
})
}
// #22489 Shell test to confirm non-JSON doesn't work
func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) {
name := "testbuildshellnotjson"
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
sHeLl exec -form`, // Casing explicit to ensure error is upper-cased.
)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "SHELL requires the arguments to be in JSON form",
})
}
// #22489 Windows shell test to confirm native is powershell if executing a PS command
// This would error if the default shell were still cmd.
func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildshellpowershell"
buildImage(name, build.WithDockerfile(`FROM `+minimalBaseImage()+`
SHELL ["powershell", "-command"]
RUN Write-Host John`)).Assert(c, icmd.Expected{
Out: "\nJohn\n",
})
}
// Verify that escape is being correctly applied to words when escape directive is not \.
// Tests WORKDIR, ADD
func (s *DockerSuite) TestBuildEscapeNotBackslashWordTest(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildescapenotbackslashwordtesta"
buildImage(name, build.WithDockerfile(`# escape= `+"`"+`
FROM `+minimalBaseImage()+`
WORKDIR c:\windows
RUN dir /w`)).Assert(c, icmd.Expected{
Out: "[System32]",
})
name = "testbuildescapenotbackslashwordtestb"
buildImage(name, build.WithDockerfile(`# escape= `+"`"+`
FROM `+minimalBaseImage()+`
SHELL ["powershell.exe"]
WORKDIR c:\foo
ADD Dockerfile c:\foo\
RUN dir Dockerfile`)).Assert(c, icmd.Expected{
Out: "-a----",
})
}
// #22868. Make sure shell-form CMD is marked as escaped in the config of the image
func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildcmdshellescaped"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+minimalBaseImage()+`
CMD "ipconfig"
`))
res := inspectFieldJSON(c, name, "Config.ArgsEscaped")
if res != "true" {
c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res)
}
dockerCmd(c, "run", "--name", "inspectme", name)
dockerCmd(c, "wait", "inspectme")
res = inspectFieldJSON(c, name, "Config.Cmd")
if res != `["cmd","/S","/C","\"ipconfig\""]` {
c.Fatalf("CMD was not escaped Config.Cmd: got %v", res)
}
}
// Test case for #24912.
func (s *DockerSuite) TestBuildStepsWithProgress(c *check.C) {
name := "testbuildstepswithprogress"
totalRun := 5
result := buildImage(name, build.WithDockerfile("FROM busybox\n"+strings.Repeat("RUN echo foo\n", totalRun)))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step 1/%d : FROM busybox", 1+totalRun))
for i := 2; i <= 1+totalRun; i++ {
c.Assert(result.Combined(), checker.Contains, fmt.Sprintf("Step %d/%d : RUN echo foo", i, 1+totalRun))
}
}
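// TestBuildWithFailure verifies that a runtime failure (RUN nobody) still prints the executed steps, while a parse failure (FFOM) aborts before any step is printed.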
func (s *DockerSuite) TestBuildWithFailure(c *check.C) {
name := "testbuildwithfailure"
// The first test case can only detect `nobody` at runtime, so all steps will show up
dockerfile := "FROM busybox\nRUN nobody"
result := buildImage(name, build.WithDockerfile(dockerfile))
c.Assert(result.Error, checker.NotNil)
c.Assert(result.Stdout(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Stdout(), checker.Contains, "Step 2/2 : RUN nobody")
// The second test case uses `FFOM`, which is detected before the build runs, so no steps are shown
dockerfile = "FFOM nobody\nRUN nobody"
result = buildImage(name, build.WithDockerfile(dockerfile))
c.Assert(result.Error, checker.NotNil)
c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 1/2 : FROM busybox")
c.Assert(result.Stdout(), checker.Not(checker.Contains), "Step 2/2 : RUN nobody")
}
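// TestBuildCacheFromEqualDiffIDsLength rebuilds the same context with --cache-from and expects an identical image ID, with the RUN and ENTRYPOINT steps taken from the cache.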
func (s *DockerSuite) TestBuildCacheFromEqualDiffIDsLength(c *check.C) {
dockerfile := `
FROM busybox
RUN echo "test"
ENTRYPOINT ["sh"]`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"Dockerfile": dockerfile,
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, "build1")
// rebuild with cache-from
result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
}
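// TestBuildCacheFrom exercises --cache-from: full cache hits against a matching source image, no hits for an unknown tag, cache reuse after the parent chain is dropped via save/load, and partial reuse after the Dockerfile changes.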
func (s *DockerSuite) TestBuildCacheFrom(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
ENV FOO=bar
ADD baz /
RUN touch bax`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"Dockerfile": dockerfile,
"baz": "baz",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
id1 := getIDByName(c, "build1")
// rebuild with cache-from
result := cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id2 := getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
cli.DockerCmd(c, "rmi", "build2")
// no cache match with unknown source
result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=nosuchtag"), build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Not(checker.Equals), id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 0)
cli.DockerCmd(c, "rmi", "build2")
// clear parent images
tempDir, err := ioutil.TempDir("", "test-build-cache-from-")
if err != nil {
c.Fatalf("failed to create temporary directory: %s", tempDir)
}
defer os.RemoveAll(tempDir)
tempFile := filepath.Join(tempDir, "img.tar")
cli.DockerCmd(c, "save", "-o", tempFile, "build1")
cli.DockerCmd(c, "rmi", "build1")
cli.DockerCmd(c, "load", "-i", tempFile)
parentID := cli.DockerCmd(c, "inspect", "-f", "{{.Parent}}", "build1").Combined()
c.Assert(strings.TrimSpace(parentID), checker.Equals, "")
// cache still applies without parents
result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Equals, id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
history1 := cli.DockerCmd(c, "history", "-q", "build2").Combined()
// Retry, no new intermediate images
result = cli.BuildCmd(c, "build3", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id3 := getIDByName(c, "build3")
c.Assert(id1, checker.Equals, id3)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 3)
history2 := cli.DockerCmd(c, "history", "-q", "build3").Combined()
c.Assert(history1, checker.Equals, history2)
cli.DockerCmd(c, "rmi", "build2")
cli.DockerCmd(c, "rmi", "build3")
cli.DockerCmd(c, "rmi", "build1")
cli.DockerCmd(c, "load", "-i", tempFile)
// Modify the Dockerfile: everything up to the last command, including its layers, is reused
dockerfile = `
FROM busybox
ENV FOO=bar
ADD baz /
RUN touch newfile`
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(dockerfile), 0644)
c.Assert(err, checker.IsNil)
result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
id2 = getIDByName(c, "build2")
c.Assert(id1, checker.Not(checker.Equals), id2)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
layers1Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build1").Combined()
layers2Str := cli.DockerCmd(c, "inspect", "-f", "{{json .RootFS.Layers}}", "build2").Combined()
var layers1 []string
var layers2 []string
c.Assert(json.Unmarshal([]byte(layers1Str), &layers1), checker.IsNil)
c.Assert(json.Unmarshal([]byte(layers2Str), &layers2), checker.IsNil)
c.Assert(len(layers1), checker.Equals, len(layers2))
for i := 0; i < len(layers1)-1; i++ {
c.Assert(layers1[i], checker.Equals, layers2[i])
}
c.Assert(layers1[len(layers1)-1], checker.Not(checker.Equals), layers2[len(layers1)-1])
}
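// TestBuildCacheMultipleFrom checks caching across multiple FROM stages: the repeated second stage is cached on the first build, and both stages hit the cache when rebuilt with --cache-from.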
func (s *DockerSuite) TestBuildCacheMultipleFrom(c *check.C) {
testRequires(c, DaemonIsLinux) // All tests that do save are skipped in windows
dockerfile := `
FROM busybox
ADD baz /
FROM busybox
ADD baz /`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"Dockerfile": dockerfile,
"baz": "baz",
}))
defer ctx.Close()
result := cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
// the second stage of the Dockerfile repeats the first, so it should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1)
result = cli.BuildCmd(c, "build2", cli.WithFlags("--cache-from=build1"), build.WithExternalBuildContext(ctx))
// now both parts of dockerfile should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 2)
}
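// TestBuildNetNone verifies that RUN steps have no network access when building with --network=none.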
func (s *DockerSuite) TestBuildNetNone(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildnetnone"
buildImage(name, cli.WithFlags("--network=none"), build.WithDockerfile(`
FROM busybox
RUN ping -c 1 8.8.8.8
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Out: "unreachable",
})
}
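// TestBuildNetContainer verifies that --network=container:<id> lets RUN steps reach a service listening in the target container's network namespace.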
func (s *DockerSuite) TestBuildNetContainer(c *check.C) {
testRequires(c, DaemonIsLinux)
id, _ := dockerCmd(c, "run", "--hostname", "foobar", "-d", "busybox", "nc", "-ll", "-p", "1234", "-e", "hostname")
name := "testbuildnetcontainer"
buildImageSuccessfully(c, name, cli.WithFlags("--network=container:"+strings.TrimSpace(id)),
build.WithDockerfile(`
FROM busybox
RUN nc localhost 1234 > /otherhost
`))
host, _ := dockerCmd(c, "run", "testbuildnetcontainer", "cat", "/otherhost")
c.Assert(strings.TrimSpace(host), check.Equals, "foobar")
}
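// TestBuildWithExtraHost verifies that hosts added with --add-host resolve during RUN steps.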
func (s *DockerSuite) TestBuildWithExtraHost(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildwithextrahost"
buildImageSuccessfully(c, name,
cli.WithFlags(
"--add-host", "foo:127.0.0.1",
"--add-host", "bar:127.0.0.1",
),
build.WithDockerfile(`
FROM busybox
RUN ping -c 1 foo
RUN ping -c 1 bar
`))
}
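// TestBuildWithExtraHostInvalidFormat verifies that malformed --add-host values are rejected by the CLI with exit code 125.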
func (s *DockerSuite) TestBuildWithExtraHostInvalidFormat(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerfile := `
FROM busybox
RUN ping -c 1 foo`
testCases := []struct {
testName string
dockerfile string
buildFlag string
}{
{"extra_host_missing_ip", dockerfile, "--add-host=foo"},
{"extra_host_missing_ip_with_delimeter", dockerfile, "--add-host=foo:"},
{"extra_host_missing_hostname", dockerfile, "--add-host=:127.0.0.1"},
{"extra_host_invalid_ipv4", dockerfile, "--add-host=foo:101.10.2"},
{"extra_host_invalid_ipv6", dockerfile, "--add-host=foo:2001::1::3F"},
}
for _, tc := range testCases {
result := buildImage(tc.testName, cli.WithFlags(tc.buildFlag), build.WithDockerfile(tc.dockerfile))
result.Assert(c, icmd.Expected{
ExitCode: 125,
})
}
}
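// TestBuildSquashParent builds with --squash on an experimental daemon and verifies that the squashed image keeps file contents, environment variables, and removals, adds one history entry, and ends up with two layers.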
func (s *DockerSuite) TestBuildSquashParent(c *check.C) {
testRequires(c, ExperimentalDaemon)
dockerFile := `
FROM busybox
RUN echo hello > /hello
RUN echo world >> /hello
RUN echo hello > /remove_me
ENV HELLO world
RUN rm /remove_me
`
// build and get the ID that we can use later for history comparison
name := "test"
buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile))
origID := getIDByName(c, name)
// build with squash
buildImageSuccessfully(c, name, cli.WithFlags("--squash"), build.WithDockerfile(dockerFile))
id := getIDByName(c, name)
out, _ := dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "cat /hello")
c.Assert(strings.TrimSpace(out), checker.Equals, "hello\nworld")
dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", "[ ! -f /remove_me ]")
dockerCmd(c, "run", "--rm", id, "/bin/sh", "-c", `[ "$(echo $HELLO)" == "world" ]`)
// make sure the ID produced is the ID of the tag we specified
inspectID := inspectImage(c, "test", ".ID")
c.Assert(inspectID, checker.Equals, id)
origHistory, _ := dockerCmd(c, "history", origID)
testHistory, _ := dockerCmd(c, "history", "test")
splitOrigHistory := strings.Split(strings.TrimSpace(origHistory), "\n")
splitTestHistory := strings.Split(strings.TrimSpace(testHistory), "\n")
c.Assert(len(splitTestHistory), checker.Equals, len(splitOrigHistory)+1)
out = inspectImage(c, id, "len .RootFS.Layers")
c.Assert(strings.TrimSpace(out), checker.Equals, "2")
}
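// TestBuildContChar checks how trailing line-continuation backslashes are handled at the end of a Dockerfile and of RUN commands.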
func (s *DockerSuite) TestBuildContChar(c *check.C) {
name := "testbuildcontchar"
buildImage(name, build.WithDockerfile(`FROM busybox\`)).Assert(c, icmd.Expected{
Out: "Step 1/1 : FROM busybox",
})
result := buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi\n")
result = buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \\`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\n")
result = buildImage(name, build.WithDockerfile(`FROM busybox
RUN echo hi \\\`))
result.Assert(c, icmd.Success)
c.Assert(result.Combined(), checker.Contains, "Step 1/2 : FROM busybox")
c.Assert(result.Combined(), checker.Contains, "Step 2/2 : RUN echo hi \\\\\n")
}
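// TestBuildCopyFromPreviousRootFS covers multi-stage COPY --from by stage index and by stage name, plus cache behaviour when files used by earlier stages change.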
func (s *DockerSuite) TestBuildCopyFromPreviousRootFS(c *check.C) {
dockerfile := `
FROM busybox AS first
COPY foo bar
FROM busybox
%s
COPY baz baz
RUN echo mno > baz/cc
FROM busybox
COPY bar /
COPY --from=1 baz sub/
COPY --from=0 bar baz
COPY --from=first bar bay`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, "")),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
"bar": "def",
"baz/aa": "ghi",
"baz/bb": "jkl",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "build1", "cat", "bar").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "def")
out = cli.DockerCmd(c, "run", "build1", "cat", "sub/aa").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "ghi")
out = cli.DockerCmd(c, "run", "build1", "cat", "sub/cc").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "mno")
out = cli.DockerCmd(c, "run", "build1", "cat", "baz").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
out = cli.DockerCmd(c, "run", "build1", "cat", "bay").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
result := cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx))
// all commands should be cached
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 7)
err := ioutil.WriteFile(filepath.Join(ctx.Dir, "Dockerfile"), []byte(fmt.Sprintf(dockerfile, "COPY baz/aa foo")), 0644)
c.Assert(err, checker.IsNil)
// changing file in parent block should not affect last block
result = cli.BuildCmd(c, "build3", build.WithExternalBuildContext(ctx))
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5)
c.Assert(getIDByName(c, "build1"), checker.Equals, getIDByName(c, "build2"))
err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("pqr"), 0644)
c.Assert(err, checker.IsNil)
// changing file in parent block should affect both first and last block
result = cli.BuildCmd(c, "build4", build.WithExternalBuildContext(ctx))
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 5)
out = cli.DockerCmd(c, "run", "build4", "cat", "bay").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "pqr")
out = cli.DockerCmd(c, "run", "build4", "cat", "baz").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "pqr")
}
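// TestBuildCopyFromPreviousRootFSErrors checks the error messages for invalid COPY --from values: unknown stage names, a stage referring to itself, and a numeric stage name.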
func (s *DockerSuite) TestBuildCopyFromPreviousRootFSErrors(c *check.C) {
testCases := []struct {
dockerfile string
expectedError string
}{
{
dockerfile: `
FROM busybox
COPY --from=foo foo bar`,
expectedError: "invalid from flag value foo",
},
{
dockerfile: `
FROM busybox
COPY --from=0 foo bar`,
expectedError: "invalid from flag value 0 refers current build block",
},
{
dockerfile: `
FROM busybox AS foo
COPY --from=bar foo bar`,
expectedError: "invalid from flag value bar",
},
{
dockerfile: `
FROM busybox AS 1
COPY --from=1 foo bar`,
expectedError: "invalid name for build stage",
},
}
for _, tc := range testCases {
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(tc.dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
}))
cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: tc.expectedError,
})
ctx.Close()
}
}
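// TestBuildCopyFromPreviousFrom verifies COPY --from a named stage that is itself based on an image built earlier.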
func (s *DockerSuite) TestBuildCopyFromPreviousFrom(c *check.C) {
dockerfile := `
FROM busybox
COPY foo bar`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
dockerfile = `
FROM build1:latest AS foo
FROM busybox
COPY --from=foo bar /
COPY foo /`
ctx = fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "def",
}))
defer ctx.Close()
cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "build2", "cat", "bar").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
out = cli.DockerCmd(c, "run", "build2", "cat", "foo").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "def")
}
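// TestBuildCopyFromImplicitFrom verifies COPY --from=<image> where the source is an image name rather than a build stage.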
func (s *DockerSuite) TestBuildCopyFromImplicitFrom(c *check.C) {
dockerfile := `
FROM busybox
COPY --from=busybox /etc/passwd /mypasswd
RUN cmp /etc/passwd /mypasswd`
if DaemonIsWindows() {
dockerfile = `
FROM busybox
COPY --from=busybox License.txt foo`
}
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
)
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
if DaemonIsWindows() {
out := cli.DockerCmd(c, "run", "build1", "cat", "License.txt").Combined()
c.Assert(len(out), checker.GreaterThan, 10)
out2 := cli.DockerCmd(c, "run", "build1", "cat", "foo").Combined()
c.Assert(out, check.Equals, out2)
}
}
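// TestBuildCopyFromImplicitPullingFrom verifies that COPY --from=<image> pulls the source image from the private registry when it is not present locally.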
func (s *DockerRegistrySuite) TestBuildCopyFromImplicitPullingFrom(c *check.C) {
repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL)
dockerfile := `
FROM busybox
COPY foo bar`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
}))
defer ctx.Close()
cli.BuildCmd(c, repoName, build.WithExternalBuildContext(ctx))
cli.DockerCmd(c, "push", repoName)
cli.DockerCmd(c, "rmi", repoName)
dockerfile = `
FROM busybox
COPY --from=%s bar baz`
ctx = fakecontext.New(c, "", fakecontext.WithDockerfile(fmt.Sprintf(dockerfile, repoName)))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
cli.Docker(cli.Args("run", "build1", "cat", "baz")).Assert(c, icmd.Expected{Out: "abc"})
}
func (s *DockerSuite) TestBuildFromPreviousBlock(c *check.C) {
dockerfile := `
FROM busybox as foo
COPY foo /
FROM foo as foo1
RUN echo 1 >> foo
FROM foo as foO2
RUN echo 2 >> foo
FROM foo
COPY --from=foo1 foo f1
COPY --from=FOo2 foo f2
` // foo2 case also tests that names are case insensitive
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "bar",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
cli.Docker(cli.Args("run", "build1", "cat", "foo")).Assert(c, icmd.Expected{Out: "bar"})
cli.Docker(cli.Args("run", "build1", "cat", "f1")).Assert(c, icmd.Expected{Out: "bar1"})
cli.Docker(cli.Args("run", "build1", "cat", "f2")).Assert(c, icmd.Expected{Out: "bar2"})
}
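// TestCopyFromTrustedBuild verifies that with content trust enabled both FROM lines are resolved to digests and COPY --from works across the trusted stages.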
func (s *DockerTrustSuite) TestCopyFromTrustedBuild(c *check.C) {
img1 := s.setupTrustedImage(c, "trusted-build1")
img2 := s.setupTrustedImage(c, "trusted-build2")
dockerFile := fmt.Sprintf(`
FROM %s AS build-base
RUN echo ok > /foo
FROM %s
COPY --from=build-base foo bar`, img1, img2)
name := "testcopyfromtrustedbuild"
r := buildImage(name, trustedBuild, build.WithDockerfile(dockerFile))
r.Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", img1[:len(img1)-7]),
})
r.Assert(c, icmd.Expected{
Out: fmt.Sprintf("FROM %s@sha", img2[:len(img2)-7]),
})
dockerCmdWithResult("run", name, "cat", "bar").Assert(c, icmd.Expected{Out: "ok"})
}
func (s *DockerSuite) TestBuildCopyFromPreviousFromWindows(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
COPY foo c:\\bar`
ctx := fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "abc",
}))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx))
dockerfile = `
FROM build1:latest
FROM ` + testEnv.MinimalBaseImage() + `
COPY --from=0 c:\\bar /
COPY foo /`
ctx = fakecontext.New(c, "",
fakecontext.WithDockerfile(dockerfile),
fakecontext.WithFiles(map[string]string{
"foo": "def",
}))
defer ctx.Close()
cli.BuildCmd(c, "build2", build.WithExternalBuildContext(ctx))
out := cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\bar").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "abc")
out = cli.DockerCmd(c, "run", "build2", "cmd.exe", "/s", "/c", "type", "c:\\foo").Combined()
c.Assert(strings.TrimSpace(out), check.Equals, "def")
}
func (s *DockerSuite) TestBuildCopyFromForbidWindowsSystemPaths(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
FROM ` + testEnv.MinimalBaseImage() + `
COPY --from=0 %s c:\\oscopy
`
exp := icmd.Expected{
ExitCode: 1,
Err: "copy from c:\\ or c:\\windows is not allowed on windows",
}
buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\"))).Assert(c, exp)
buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "C:\\\\"))).Assert(c, exp)
buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\windows"))).Assert(c, exp)
buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:\\\\wInDows"))).Assert(c, exp)
}
func (s *DockerSuite) TestBuildCopyFromForbidWindowsRelativePaths(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
FROM ` + testEnv.MinimalBaseImage() + `
COPY --from=0 %s c:\\oscopy
`
exp := icmd.Expected{
ExitCode: 1,
Err: "copy from c:\\ or c:\\windows is not allowed on windows",
}
buildImage("testforbidsystempaths1", build.WithDockerfile(fmt.Sprintf(dockerfile, "c:"))).Assert(c, exp)
buildImage("testforbidsystempaths2", build.WithDockerfile(fmt.Sprintf(dockerfile, "."))).Assert(c, exp)
buildImage("testforbidsystempaths3", build.WithDockerfile(fmt.Sprintf(dockerfile, "..\\\\"))).Assert(c, exp)
buildImage("testforbidsystempaths4", build.WithDockerfile(fmt.Sprintf(dockerfile, ".\\\\windows"))).Assert(c, exp)
buildImage("testforbidsystempaths5", build.WithDockerfile(fmt.Sprintf(dockerfile, "\\\\windows"))).Assert(c, exp)
}
func (s *DockerSuite) TestBuildCopyFromWindowsIsCaseInsensitive(c *check.C) {
testRequires(c, DaemonIsWindows)
dockerfile := `
FROM ` + testEnv.MinimalBaseImage() + `
COPY foo /
FROM ` + testEnv.MinimalBaseImage() + `
COPY --from=0 c:\\fOo c:\\copied
RUN type c:\\copied
`
cli.Docker(cli.Build("copyfrom-windows-insensitive"), build.WithBuildContext(c,
build.WithFile("Dockerfile", dockerfile),
build.WithFile("foo", "hello world"),
)).Assert(c, icmd.Expected{
ExitCode: 0,
Out: "hello world",
})
}
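// TestBuildIntermediateTarget verifies that --target builds only up to the named stage, and that an unknown target fails the build.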
func (s *DockerSuite) TestBuildIntermediateTarget(c *check.C) {
dockerfile := `
FROM busybox AS build-env
CMD ["/dev"]
FROM busybox
CMD ["/dist"]
`
ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(dockerfile))
defer ctx.Close()
cli.BuildCmd(c, "build1", build.WithExternalBuildContext(ctx),
cli.WithFlags("--target", "build-env"))
//res := inspectFieldJSON(c, "build1", "Config.Cmd")
res := cli.InspectCmd(c, "build1", cli.Format("json .Config.Cmd")).Combined()
c.Assert(strings.TrimSpace(res), checker.Equals, `["/dev"]`)
result := cli.Docker(cli.Build("build1"), build.WithExternalBuildContext(ctx),
cli.WithFlags("--target", "nosuchtarget"))
result.Assert(c, icmd.Expected{
ExitCode: 1,
Err: "failed to reach build target",
})
}
// TestBuildOpaqueDirectory tests that a build succeeds which
// creates opaque directories.
// See https://github.com/docker/docker/issues/25244
func (s *DockerSuite) TestBuildOpaqueDirectory(c *check.C) {
testRequires(c, DaemonIsLinux)
dockerFile := `
FROM busybox
RUN mkdir /dir1 && touch /dir1/f1
RUN rm -rf /dir1 && mkdir /dir1 && touch /dir1/f2
RUN touch /dir1/f3
RUN [ -f /dir1/f2 ]
`
// The build succeeds only if the opaque directory was handled correctly;
// otherwise the last RUN command fails
buildImageSuccessfully(c, "testopaquedirectory", build.WithDockerfile(dockerFile))
}
// Windows test for USER in dockerfile
func (s *DockerSuite) TestBuildWindowsUser(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsuser"
buildImage(name, build.WithDockerfile(`FROM `+testEnv.MinimalBaseImage()+`
RUN net user user /add
USER user
RUN set username
`)).Assert(c, icmd.Expected{
Out: "USERNAME=user",
})
}
// Verifies that when COPY file . is used while WORKDIR is set to a non-existing
// directory, the directory is created and the file is copied into it,
// as opposed to the file being copied as a file with the name of the
// directory. Fix for 27545 (found on Windows, but the regression test is valid for Linux too).
// Note 27545 was reverted in 28505, but a new fix was added subsequently in 28514.
func (s *DockerSuite) TestBuildCopyFileDotWithWorkdir(c *check.C) {
name := "testbuildcopyfiledotwithworkdir"
buildImageSuccessfully(c, name, build.WithBuildContext(c,
build.WithFile("Dockerfile", `FROM busybox
WORKDIR /foo
COPY file .
RUN ["cat", "/foo/file"]
`),
build.WithFile("file", "content"),
))
}
// Case-insensitive environment variables on Windows
func (s *DockerSuite) TestBuildWindowsEnvCaseInsensitive(c *check.C) {
testRequires(c, DaemonIsWindows)
name := "testbuildwindowsenvcaseinsensitive"
buildImageSuccessfully(c, name, build.WithDockerfile(`
FROM `+testEnv.MinimalBaseImage()+`
ENV FOO=bar foo=baz
`))
res := inspectFieldJSON(c, name, "Config.Env")
if res != `["foo=baz"]` { // Should not have FOO=bar in it - takes the last one processed. And only one entry as deduped.
c.Fatalf("Case insensitive environment variables on Windows failed. Got %s", res)
}
}
// Test case for 29667
func (s *DockerSuite) TestBuildWorkdirImageCmd(c *check.C) {
image := "testworkdirimagecmd"
buildImageSuccessfully(c, image, build.WithDockerfile(`
FROM busybox
WORKDIR /foo/bar
`))
out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image)
// The Windows busybox image has a blank `cmd`
lookingFor := `["sh"]`
if testEnv.DaemonPlatform() == "windows" {
lookingFor = "null"
}
c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor)
image = "testworkdirlabelimagecmd"
buildImageSuccessfully(c, image, build.WithDockerfile(`
FROM busybox
WORKDIR /foo/bar
LABEL a=b
`))
out, _ = dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", image)
c.Assert(strings.TrimSpace(out), checker.Equals, lookingFor)
}
// Test case for 28902/28909
func (s *DockerSuite) TestBuildWorkdirCmd(c *check.C) {
testRequires(c, DaemonIsLinux)
name := "testbuildworkdircmd"
dockerFile := `
FROM busybox
WORKDIR /
`
buildImageSuccessfully(c, name, build.WithDockerfile(dockerFile))
result := buildImage(name, build.WithDockerfile(dockerFile))
result.Assert(c, icmd.Success)
c.Assert(strings.Count(result.Combined(), "Using cache"), checker.Equals, 1)
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorOnBuild(c *check.C) {
name := "test_build_line_error_onbuild"
buildImage(name, build.WithDockerfile(`FROM busybox
ONBUILD
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 2: ONBUILD requires at least one argument",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *check.C) {
name := "test_build_line_error_unknown_instruction"
cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox
RUN echo hello world
NOINSTRUCTION echo ba
RUN echo hello
ERROR
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 3: unknown instruction: NOINSTRUCTION",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *check.C) {
name := "test_build_line_error_with_empty_lines"
cli.Docker(cli.Build(name), build.WithDockerfile(`
FROM busybox
RUN echo hello world
NOINSTRUCTION echo ba
CMD ["/bin/init"]
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 6: unknown instruction: NOINSTRUCTION",
})
}
// FIXME(vdemeester) should be a unit test
func (s *DockerSuite) TestBuildLineErrorWithComments(c *check.C) {
name := "test_build_line_error_with_comments"
cli.Docker(cli.Build(name), build.WithDockerfile(`FROM busybox
# This will print hello world
# and then ba
RUN echo hello world
NOINSTRUCTION echo ba
`)).Assert(c, icmd.Expected{
ExitCode: 1,
Err: "Dockerfile parse error line 5: unknown instruction: NOINSTRUCTION",
})
}
// #31957
func (s *DockerSuite) TestBuildSetCommandWithDefinedShell(c *check.C) {
buildImageSuccessfully(c, "build1", build.WithDockerfile(`
FROM busybox
SHELL ["/bin/sh", "-c"]
`))
buildImageSuccessfully(c, "build2", build.WithDockerfile(`
FROM build1
CMD echo foo
`))
out, _ := dockerCmd(c, "inspect", "--format", "{{ json .Config.Cmd }}", "build2")
c.Assert(strings.TrimSpace(out), checker.Equals, `["/bin/sh","-c","echo foo"]`)
}
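// TestBuildIidFile verifies that --iidfile records the ID of the final image of a multi-stage build.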
func (s *DockerSuite) TestBuildIidFile(c *check.C) {
tmpDir, err := ioutil.TempDir("", "TestBuildIidFile")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpIidFile := filepath.Join(tmpDir, "iid")
name := "testbuildiidfile"
// Use a Dockerfile with multiple stages to ensure we get the last one
cli.BuildCmd(c, name,
build.WithDockerfile(`FROM `+minimalBaseImage()+` AS stage1
ENV FOO FOO
FROM `+minimalBaseImage()+`
ENV BAR BAZ`),
cli.WithFlags("--iidfile", tmpIidFile))
id, err := ioutil.ReadFile(tmpIidFile)
c.Assert(err, check.IsNil)
d, err := digest.Parse(string(id))
c.Assert(err, check.IsNil)
c.Assert(d.String(), checker.Equals, getIDByName(c, name))
}
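// TestBuildIidFileCleanupOnFail verifies that a pre-existing --iidfile is removed when the build fails.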
func (s *DockerSuite) TestBuildIidFileCleanupOnFail(c *check.C) {
tmpDir, err := ioutil.TempDir("", "TestBuildIidFileCleanupOnFail")
if err != nil {
c.Fatal(err)
}
defer os.RemoveAll(tmpDir)
tmpIidFile := filepath.Join(tmpDir, "iid")
err = ioutil.WriteFile(tmpIidFile, []byte("Dummy"), 0666)
c.Assert(err, check.IsNil)
cli.Docker(cli.Build("testbuildiidfilecleanuponfail"),
build.WithDockerfile(`FROM `+minimalBaseImage()+`
RUN /non/existing/command`),
cli.WithFlags("--iidfile", tmpIidFile)).Assert(c, icmd.Expected{
ExitCode: 1,
})
_, err = os.Stat(tmpIidFile)
c.Assert(err, check.NotNil)
c.Assert(os.IsNotExist(err), check.Equals, true)
}
| ["\"PATH\""] | [] | ["PATH"] | [] | ["PATH"] | go | 1 | 0 |
clients/google-api-services-iap/v1/1.31.0/com/google/api/services/iap/v1/CloudIAP.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.iap.v1;
/**
* Service definition for CloudIAP (v1).
*
* <p>
* Controls access to cloud applications running on Google Cloud Platform.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://cloud.google.com/iap" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link CloudIAPRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class CloudIAP extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.32.1 of the Cloud Identity-Aware Proxy API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://iap.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://iap.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public CloudIAP(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
* @param builder builder
*/
CloudIAP(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Projects collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudIAP iap = new CloudIAP(...);}
* {@code CloudIAP.Projects.List request = iap.projects().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Projects projects() {
return new Projects();
}
/**
* The "projects" collection of methods.
*/
public class Projects {
/**
* An accessor for creating requests from the Brands collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudIAP iap = new CloudIAP(...);}
* {@code CloudIAP.Brands.List request = iap.brands().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Brands brands() {
return new Brands();
}
/**
* The "brands" collection of methods.
*/
public class Brands {
/**
* Constructs a new OAuth brand for the project if one does not exist. The created brand is
* "internal only", meaning that OAuth clients created under it only accept requests from users who
* belong to the same Google Workspace organization as the project. The brand is created in an un-
* reviewed status. NOTE: The "internal only" status can be manually changed in the Google Cloud
* Console. Requires that a brand does not already exist for the project, and that the specified
* support email is owned by the caller.
*
* Create a request for the method "brands.create".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param parent Required. GCP Project number/id under which the brand is to be created. In the following format:
* projects/{project_number/id}.
* @param content the {@link com.google.api.services.iap.v1.model.Brand}
* @return the request
*/
public Create create(java.lang.String parent, com.google.api.services.iap.v1.model.Brand content) throws java.io.IOException {
Create result = new Create(parent, content);
initialize(result);
return result;
}
public class Create extends CloudIAPRequest<com.google.api.services.iap.v1.model.Brand> {
private static final String REST_PATH = "v1/{+parent}/brands";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+$");
/**
* Constructs a new OAuth brand for the project if one does not exist. The created brand is
* "internal only", meaning that OAuth clients created under it only accept requests from users
* who belong to the same Google Workspace organization as the project. The brand is created in an
* un-reviewed status. NOTE: The "internal only" status can be manually changed in the Google
* Cloud Console. Requires that a brand does not already exist for the project, and that the
* specified support email is owned by the caller.
*
* Create a request for the method "brands.create".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation. <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. GCP Project number/id under which the brand is to be created. In the following format:
* projects/{project_number/id}.
* @param content the {@link com.google.api.services.iap.v1.model.Brand}
* @since 1.13
*/
protected Create(java.lang.String parent, com.google.api.services.iap.v1.model.Brand content) {
super(CloudIAP.this, "POST", REST_PATH, content, com.google.api.services.iap.v1.model.Brand.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. GCP Project number/id under which the brand is to be created. In the following
* format: projects/{project_number/id}.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. GCP Project number/id under which the brand is to be created. In the following format:
projects/{project_number/id}.
*/
public java.lang.String getParent() {
return parent;
}
/**
* Required. GCP Project number/id under which the brand is to be created. In the following
* format: projects/{project_number/id}.
*/
public Create setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+$");
}
this.parent = parent;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Retrieves the OAuth brand of the project.
*
* Create a request for the method "brands.get".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. Name of the brand to be fetched. In the following format:
* projects/{project_number/id}/brands/{brand}.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudIAPRequest<com.google.api.services.iap.v1.model.Brand> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/brands/[^/]+$");
/**
* Retrieves the OAuth brand of the project.
*
* Create a request for the method "brands.get".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. Name of the brand to be fetched. In the following format:
* projects/{project_number/id}/brands/{brand}.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudIAP.this, "GET", REST_PATH, null, com.google.api.services.iap.v1.model.Brand.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Name of the brand to be fetched. In the following format:
* projects/{project_number/id}/brands/{brand}.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Name of the brand to be fetched. In the following format:
projects/{project_number/id}/brands/{brand}.
*/
public java.lang.String getName() {
return name;
}
/**
* Required. Name of the brand to be fetched. In the following format:
* projects/{project_number/id}/brands/{brand}.
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists the existing brands for the project.
*
* Create a request for the method "brands.list".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. GCP Project number/id. In the following format: projects/{project_number/id}.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends CloudIAPRequest<com.google.api.services.iap.v1.model.ListBrandsResponse> {
private static final String REST_PATH = "v1/{+parent}/brands";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+$");
/**
* Lists the existing brands for the project.
*
* Create a request for the method "brands.list".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. GCP Project number/id. In the following format: projects/{project_number/id}.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(CloudIAP.this, "GET", REST_PATH, null, com.google.api.services.iap.v1.model.ListBrandsResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. GCP Project number/id. In the following format: projects/{project_number/id}.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. GCP Project number/id. In the following format: projects/{project_number/id}.
*/
public java.lang.String getParent() {
return parent;
}
/**
* Required. GCP Project number/id. In the following format: projects/{project_number/id}.
*/
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+$");
}
this.parent = parent;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the IdentityAwareProxyClients collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudIAP iap = new CloudIAP(...);}
* {@code CloudIAP.IdentityAwareProxyClients.List request = iap.identityAwareProxyClients().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public IdentityAwareProxyClients identityAwareProxyClients() {
return new IdentityAwareProxyClients();
}
/**
* The "identityAwareProxyClients" collection of methods.
*/
public class IdentityAwareProxyClients {
/**
* Creates an Identity Aware Proxy (IAP) OAuth client. The client is owned by IAP. Requires that the
* brand for the project exists and that it is set for internal-only use.
*
* Create a request for the method "identityAwareProxyClients.create".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param parent Required. Path to create the client in. In the following format:
* projects/{project_number/id}/brands/{brand}. The project must belong to a G Suite account.
* @param content the {@link com.google.api.services.iap.v1.model.IdentityAwareProxyClient}
* @return the request
*/
public Create create(java.lang.String parent, com.google.api.services.iap.v1.model.IdentityAwareProxyClient content) throws java.io.IOException {
Create result = new Create(parent, content);
initialize(result);
return result;
}
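      /*
       * Illustrative usage sketch (not part of the generated surface). It assumes `clients` is an
       * IdentityAwareProxyClients collection obtained from an initialized CloudIAP service; the
       * parent path "projects/123/brands/456" and the display name are hypothetical placeholders.
       *
       *   com.google.api.services.iap.v1.model.IdentityAwareProxyClient body =
       *       new com.google.api.services.iap.v1.model.IdentityAwareProxyClient()
       *           .setDisplayName("example-desktop-client");
       *   com.google.api.services.iap.v1.model.IdentityAwareProxyClient created =
       *       clients.create("projects/123/brands/456", body).execute();
       *   String clientName = created.getName();   // full resource name of the new client
       *   String secret = created.getSecret();     // secret is returned only to the creating caller
       */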
public class Create extends CloudIAPRequest<com.google.api.services.iap.v1.model.IdentityAwareProxyClient> {
private static final String REST_PATH = "v1/{+parent}/identityAwareProxyClients";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/brands/[^/]+$");
/**
* Creates an Identity Aware Proxy (IAP) OAuth client. The client is owned by IAP. Requires that
* the brand for the project exists and that it is set for internal-only use.
*
* Create a request for the method "identityAwareProxyClients.create".
*
       * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation. <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. Path to create the client in. In the following format:
* projects/{project_number/id}/brands/{brand}. The project must belong to a G Suite account.
* @param content the {@link com.google.api.services.iap.v1.model.IdentityAwareProxyClient}
* @since 1.13
*/
protected Create(java.lang.String parent, com.google.api.services.iap.v1.model.IdentityAwareProxyClient content) {
super(CloudIAP.this, "POST", REST_PATH, content, com.google.api.services.iap.v1.model.IdentityAwareProxyClient.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Path to create the client in. In the following format:
* projects/{project_number/id}/brands/{brand}. The project must belong to a G Suite
* account.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. Path to create the client in. In the following format:
projects/{project_number/id}/brands/{brand}. The project must belong to a G Suite account.
*/
public java.lang.String getParent() {
return parent;
}
/**
* Required. Path to create the client in. In the following format:
* projects/{project_number/id}/brands/{brand}. The project must belong to a G Suite
* account.
*/
public Create setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+$");
}
this.parent = parent;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Deletes an Identity Aware Proxy (IAP) OAuth client. Useful for removing obsolete clients,
* managing the number of clients in a given project, and cleaning up after tests. Requires that the
* client is owned by IAP.
*
* Create a request for the method "identityAwareProxyClients.delete".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param name Required. Name of the Identity Aware Proxy client to be deleted. In the following format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
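      /*
       * Illustrative usage sketch (hypothetical resource name, `clients` as above): deleting a
       * client returns an Empty response, so execute() is typically called for its side effect only.
       *
       *   clients.delete(
       *       "projects/123/brands/456/identityAwareProxyClients/789.apps.googleusercontent.com")
       *       .execute();
       */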
public class Delete extends CloudIAPRequest<com.google.api.services.iap.v1.model.Empty> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
/**
* Deletes an Identity Aware Proxy (IAP) OAuth client. Useful for removing obsolete clients,
* managing the number of clients in a given project, and cleaning up after tests. Requires that
* the client is owned by IAP.
*
* Create a request for the method "identityAwareProxyClients.delete".
*
       * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation. <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. Name of the Identity Aware Proxy client to be deleted. In the following format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(CloudIAP.this, "DELETE", REST_PATH, null, com.google.api.services.iap.v1.model.Empty.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Name of the Identity Aware Proxy client to be deleted. In the following
* format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Name of the Identity Aware Proxy client to be deleted. In the following format:
projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
public java.lang.String getName() {
return name;
}
/**
* Required. Name of the Identity Aware Proxy client to be deleted. In the following
* format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Retrieves an Identity Aware Proxy (IAP) OAuth client. Requires that the client is owned by IAP.
*
* Create a request for the method "identityAwareProxyClients.get".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. Name of the Identity Aware Proxy client to be fetched. In the following format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudIAPRequest<com.google.api.services.iap.v1.model.IdentityAwareProxyClient> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
/**
* Retrieves an Identity Aware Proxy (IAP) OAuth client. Requires that the client is owned by IAP.
*
* Create a request for the method "identityAwareProxyClients.get".
*
       * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. Name of the Identity Aware Proxy client to be fetched. In the following format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudIAP.this, "GET", REST_PATH, null, com.google.api.services.iap.v1.model.IdentityAwareProxyClient.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Name of the Identity Aware Proxy client to be fetched. In the following
* format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Name of the Identity Aware Proxy client to be fetched. In the following format:
projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
public java.lang.String getName() {
return name;
}
/**
* Required. Name of the Identity Aware Proxy client to be fetched. In the following
* format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists the existing clients for the brand.
*
* Create a request for the method "identityAwareProxyClients.list".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. Full brand path. In the following format: projects/{project_number/id}/brands/{brand}.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
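      /*
       * Illustrative pagination sketch (hypothetical brand path, `clients` as above): pages through
       * all clients of a brand by feeding each response's nextPageToken into the next request.
       *
       *   String pageToken = null;
       *   do {
       *     com.google.api.services.iap.v1.model.ListIdentityAwareProxyClientsResponse response =
       *         clients.list("projects/123/brands/456")
       *             .setPageSize(50)
       *             .setPageToken(pageToken)
       *             .execute();
       *     if (response.getIdentityAwareProxyClients() != null) {
       *       for (com.google.api.services.iap.v1.model.IdentityAwareProxyClient c
       *           : response.getIdentityAwareProxyClients()) {
       *         System.out.println(c.getName());
       *       }
       *     }
       *     pageToken = response.getNextPageToken();
       *   } while (pageToken != null && !pageToken.isEmpty());
       */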
public class List extends CloudIAPRequest<com.google.api.services.iap.v1.model.ListIdentityAwareProxyClientsResponse> {
private static final String REST_PATH = "v1/{+parent}/identityAwareProxyClients";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/brands/[^/]+$");
/**
* Lists the existing clients for the brand.
*
* Create a request for the method "identityAwareProxyClients.list".
*
       * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. Full brand path. In the following format: projects/{project_number/id}/brands/{brand}.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(CloudIAP.this, "GET", REST_PATH, null, com.google.api.services.iap.v1.model.ListIdentityAwareProxyClientsResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Full brand path. In the following format:
* projects/{project_number/id}/brands/{brand}.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. Full brand path. In the following format: projects/{project_number/id}/brands/{brand}.
*/
public java.lang.String getParent() {
return parent;
}
/**
* Required. Full brand path. In the following format:
* projects/{project_number/id}/brands/{brand}.
*/
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* The maximum number of clients to return. The service may return fewer than this value.
* If unspecified, at most 100 clients will be returned. The maximum value is 1000; values
* above 1000 will be coerced to 1000.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of clients to return. The service may return fewer than this value. If
unspecified, at most 100 clients will be returned. The maximum value is 1000; values above 1000
will be coerced to 1000.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* The maximum number of clients to return. The service may return fewer than this value.
* If unspecified, at most 100 clients will be returned. The maximum value is 1000; values
* above 1000 will be coerced to 1000.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A page token, received from a previous `ListIdentityAwareProxyClients` call. Provide
* this to retrieve the subsequent page. When paginating, all other parameters provided to
* `ListIdentityAwareProxyClients` must match the call that provided the page token.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A page token, received from a previous `ListIdentityAwareProxyClients` call. Provide this to
retrieve the subsequent page. When paginating, all other parameters provided to
`ListIdentityAwareProxyClients` must match the call that provided the page token.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A page token, received from a previous `ListIdentityAwareProxyClients` call. Provide
* this to retrieve the subsequent page. When paginating, all other parameters provided to
* `ListIdentityAwareProxyClients` must match the call that provided the page token.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Resets an Identity Aware Proxy (IAP) OAuth client secret. Useful if the secret was compromised.
* Requires that the client is owned by IAP.
*
* Create a request for the method "identityAwareProxyClients.resetSecret".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link ResetSecret#execute()} method to invoke the remote operation.
*
     * @param name Required. Name of the Identity Aware Proxy client that will have its secret reset. In the
* following format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
* @param content the {@link com.google.api.services.iap.v1.model.ResetIdentityAwareProxyClientSecretRequest}
* @return the request
*/
public ResetSecret resetSecret(java.lang.String name, com.google.api.services.iap.v1.model.ResetIdentityAwareProxyClientSecretRequest content) throws java.io.IOException {
ResetSecret result = new ResetSecret(name, content);
initialize(result);
return result;
}
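      /*
       * Illustrative usage sketch (hypothetical client name, `clients` as above): the request body
       * is an empty message, and the response carries the freshly generated secret.
       *
       *   com.google.api.services.iap.v1.model.IdentityAwareProxyClient rotated =
       *       clients.resetSecret(
       *           "projects/123/brands/456/identityAwareProxyClients/789.apps.googleusercontent.com",
       *           new com.google.api.services.iap.v1.model.ResetIdentityAwareProxyClientSecretRequest())
       *           .execute();
       *   String newSecret = rotated.getSecret();
       */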
public class ResetSecret extends CloudIAPRequest<com.google.api.services.iap.v1.model.IdentityAwareProxyClient> {
private static final String REST_PATH = "v1/{+name}:resetSecret";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
/**
* Resets an Identity Aware Proxy (IAP) OAuth client secret. Useful if the secret was compromised.
* Requires that the client is owned by IAP.
*
* Create a request for the method "identityAwareProxyClients.resetSecret".
*
       * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link ResetSecret#execute()} method to invoke the remote operation. <p>
* {@link
* ResetSecret#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
       * @param name Required. Name of the Identity Aware Proxy client that will have its secret reset. In the
* following format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
* @param content the {@link com.google.api.services.iap.v1.model.ResetIdentityAwareProxyClientSecretRequest}
* @since 1.13
*/
protected ResetSecret(java.lang.String name, com.google.api.services.iap.v1.model.ResetIdentityAwareProxyClientSecretRequest content) {
super(CloudIAP.this, "POST", REST_PATH, content, com.google.api.services.iap.v1.model.IdentityAwareProxyClient.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
}
}
@Override
public ResetSecret set$Xgafv(java.lang.String $Xgafv) {
return (ResetSecret) super.set$Xgafv($Xgafv);
}
@Override
public ResetSecret setAccessToken(java.lang.String accessToken) {
return (ResetSecret) super.setAccessToken(accessToken);
}
@Override
public ResetSecret setAlt(java.lang.String alt) {
return (ResetSecret) super.setAlt(alt);
}
@Override
public ResetSecret setCallback(java.lang.String callback) {
return (ResetSecret) super.setCallback(callback);
}
@Override
public ResetSecret setFields(java.lang.String fields) {
return (ResetSecret) super.setFields(fields);
}
@Override
public ResetSecret setKey(java.lang.String key) {
return (ResetSecret) super.setKey(key);
}
@Override
public ResetSecret setOauthToken(java.lang.String oauthToken) {
return (ResetSecret) super.setOauthToken(oauthToken);
}
@Override
public ResetSecret setPrettyPrint(java.lang.Boolean prettyPrint) {
return (ResetSecret) super.setPrettyPrint(prettyPrint);
}
@Override
public ResetSecret setQuotaUser(java.lang.String quotaUser) {
return (ResetSecret) super.setQuotaUser(quotaUser);
}
@Override
public ResetSecret setUploadType(java.lang.String uploadType) {
return (ResetSecret) super.setUploadType(uploadType);
}
@Override
public ResetSecret setUploadProtocol(java.lang.String uploadProtocol) {
return (ResetSecret) super.setUploadProtocol(uploadProtocol);
}
/**
       * Required. Name of the Identity Aware Proxy client that will have its secret reset.
* In the following format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
@com.google.api.client.util.Key
private java.lang.String name;
      /** Required. Name of the Identity Aware Proxy client that will have its secret reset. In the
following format:
projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
public java.lang.String getName() {
return name;
}
/**
       * Required. Name of the Identity Aware Proxy client that will have its secret reset.
* In the following format:
* projects/{project_number/id}/brands/{brand}/identityAwareProxyClients/{client_id}.
*/
public ResetSecret setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/brands/[^/]+/identityAwareProxyClients/[^/]+$");
}
this.name = name;
return this;
}
@Override
public ResetSecret set(String parameterName, Object value) {
return (ResetSecret) super.set(parameterName, value);
}
}
}
}
/**
* An accessor for creating requests from the IapTunnel collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudIAP iap = new CloudIAP(...);}
* {@code CloudIAP.IapTunnel.List request = iap.iapTunnel().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public IapTunnel iapTunnel() {
return new IapTunnel();
}
/**
* The "iap_tunnel" collection of methods.
*/
public class IapTunnel {
/**
* An accessor for creating requests from the Locations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudIAP iap = new CloudIAP(...);}
* {@code CloudIAP.Locations.List request = iap.locations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Locations locations() {
return new Locations();
}
/**
* The "locations" collection of methods.
*/
public class Locations {
/**
* An accessor for creating requests from the DestGroups collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudIAP iap = new CloudIAP(...);}
* {@code CloudIAP.DestGroups.List request = iap.destGroups().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public DestGroups destGroups() {
return new DestGroups();
}
/**
* The "destGroups" collection of methods.
*/
public class DestGroups {
/**
* Creates a new TunnelDestGroup.
*
* Create a request for the method "destGroups.create".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param parent Required. Google Cloud Project ID and location. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}`.
* @param content the {@link com.google.api.services.iap.v1.model.TunnelDestGroup}
* @return the request
*/
public Create create(java.lang.String parent, com.google.api.services.iap.v1.model.TunnelDestGroup content) throws java.io.IOException {
Create result = new Create(parent, content);
initialize(result);
return result;
}
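            /*
             * Illustrative usage sketch (hypothetical project, location, and group ID): it assumes
             * `destGroups` is the DestGroups collection from an initialized CloudIAP service. The
             * group ID is passed as a query parameter via setTunnelDestGroupId, not in the body.
             *
             *   com.google.api.services.iap.v1.model.TunnelDestGroup group =
             *       new com.google.api.services.iap.v1.model.TunnelDestGroup()
             *           .setCidrs(java.util.Arrays.asList("10.1.0.0/16"))
             *           .setFqdns(java.util.Arrays.asList("host.example.com"));
             *   com.google.api.services.iap.v1.model.TunnelDestGroup created =
             *       destGroups.create("projects/123/iap_tunnel/locations/us-central1", group)
             *           .setTunnelDestGroupId("my-dest-group")
             *           .execute();
             */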
public class Create extends CloudIAPRequest<com.google.api.services.iap.v1.model.TunnelDestGroup> {
private static final String REST_PATH = "v1/{+parent}/destGroups";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/iap_tunnel/locations/[^/]+$");
/**
* Creates a new TunnelDestGroup.
*
* Create a request for the method "destGroups.create".
*
             * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation. <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. Google Cloud Project ID and location. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}`.
* @param content the {@link com.google.api.services.iap.v1.model.TunnelDestGroup}
* @since 1.13
*/
protected Create(java.lang.String parent, com.google.api.services.iap.v1.model.TunnelDestGroup content) {
super(CloudIAP.this, "POST", REST_PATH, content, com.google.api.services.iap.v1.model.TunnelDestGroup.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Google Cloud Project ID and location. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}`.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. Google Cloud Project ID and location. In the following format:
`projects/{project_number/id}/iap_tunnel/locations/{location}`.
*/
public java.lang.String getParent() {
return parent;
}
/**
* Required. Google Cloud Project ID and location. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}`.
*/
public Create setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* Required. The ID to use for the TunnelDestGroup, which becomes the final component of
* the resource name. This value must be 4-63 characters, and valid characters are
* `a-z-`.
*/
@com.google.api.client.util.Key
private java.lang.String tunnelDestGroupId;
/** Required. The ID to use for the TunnelDestGroup, which becomes the final component of the resource
name. This value must be 4-63 characters, and valid characters are `a-z-`.
*/
public java.lang.String getTunnelDestGroupId() {
return tunnelDestGroupId;
}
/**
* Required. The ID to use for the TunnelDestGroup, which becomes the final component of
* the resource name. This value must be 4-63 characters, and valid characters are
* `a-z-`.
*/
public Create setTunnelDestGroupId(java.lang.String tunnelDestGroupId) {
this.tunnelDestGroupId = tunnelDestGroupId;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Deletes a TunnelDestGroup.
*
* Create a request for the method "destGroups.delete".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param name Required. Name of the TunnelDestGroup to delete. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
public class Delete extends CloudIAPRequest<com.google.api.services.iap.v1.model.Empty> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
/**
* Deletes a TunnelDestGroup.
*
* Create a request for the method "destGroups.delete".
*
             * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation. <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. Name of the TunnelDestGroup to delete. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(CloudIAP.this, "DELETE", REST_PATH, null, com.google.api.services.iap.v1.model.Empty.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/**
             * Required. Name of the TunnelDestGroup to delete. In the following format:
             * `projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Name of the TunnelDestGroup to delete. In the following format:
`projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
*/
public java.lang.String getName() {
return name;
}
/**
             * Required. Name of the TunnelDestGroup to delete. In the following format:
             * `projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
*/
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Retrieves an existing TunnelDestGroup.
*
* Create a request for the method "destGroups.get".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. Name of the TunnelDestGroup to be fetched. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends CloudIAPRequest<com.google.api.services.iap.v1.model.TunnelDestGroup> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
/**
* Retrieves an existing TunnelDestGroup.
*
* Create a request for the method "destGroups.get".
*
             * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. Name of the TunnelDestGroup to be fetched. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(CloudIAP.this, "GET", REST_PATH, null, com.google.api.services.iap.v1.model.TunnelDestGroup.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
             * Required. Name of the TunnelDestGroup to be fetched. In the following format:
             * `projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Name of the TunnelDestGroup to be fetched. In the following format:
`projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
*/
public java.lang.String getName() {
return name;
}
/**
             * Required. Name of the TunnelDestGroup to be fetched. In the following format:
             * `projects/{project_number/id}/iap_tunnel/locations/{location}/destGroups/{dest_group}`.
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists the existing TunnelDestGroups. To group across all locations, use a `-` as the location ID.
* For example: `/v1/projects/123/iap_tunnel/locations/-/destGroups`
*
* Create a request for the method "destGroups.list".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. Google Cloud Project ID and location. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}`. A `-` can be used for the
* location to group across all locations.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
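            /*
             * Illustrative usage sketch (`destGroups` as above, hypothetical project number): lists
             * groups across every location by using "-" as the location ID, as described above.
             *
             *   com.google.api.services.iap.v1.model.ListTunnelDestGroupsResponse response =
             *       destGroups.list("projects/123/iap_tunnel/locations/-")
             *           .setPageSize(100)
             *           .execute();
             *   // response.getTunnelDestGroups() holds the current page; use getNextPageToken()
             *   // with setPageToken(...) to fetch subsequent pages.
             */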
public class List extends CloudIAPRequest<com.google.api.services.iap.v1.model.ListTunnelDestGroupsResponse> {
private static final String REST_PATH = "v1/{+parent}/destGroups";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/iap_tunnel/locations/[^/]+$");
/**
* Lists the existing TunnelDestGroups. To group across all locations, use a `-` as the location
* ID. For example: `/v1/projects/123/iap_tunnel/locations/-/destGroups`
*
* Create a request for the method "destGroups.list".
*
             * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. Google Cloud Project ID and location. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}`. A `-` can be used for the
* location to group across all locations.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(CloudIAP.this, "GET", REST_PATH, null, com.google.api.services.iap.v1.model.ListTunnelDestGroupsResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Google Cloud Project ID and location. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}`. A `-` can be used for
* the location to group across all locations.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. Google Cloud Project ID and location. In the following format:
`projects/{project_number/id}/iap_tunnel/locations/{location}`. A `-` can be used for the location
to group across all locations.
*/
public java.lang.String getParent() {
return parent;
}
/**
* Required. Google Cloud Project ID and location. In the following format:
* `projects/{project_number/id}/iap_tunnel/locations/{location}`. A `-` can be used for
* the location to group across all locations.
*/
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* The maximum number of groups to return. The service might return fewer than this
* value. If unspecified, at most 100 groups are returned. The maximum value is 1000;
* values above 1000 are coerced to 1000.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of groups to return. The service might return fewer than this value. If
unspecified, at most 100 groups are returned. The maximum value is 1000; values above 1000 are
coerced to 1000.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* The maximum number of groups to return. The service might return fewer than this
* value. If unspecified, at most 100 groups are returned. The maximum value is 1000;
* values above 1000 are coerced to 1000.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A page token, received from a previous `ListTunnelDestGroups` call. Provide this to
* retrieve the subsequent page. When paginating, all other parameters provided to
* `ListTunnelDestGroups` must match the call that provided the page token.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A page token, received from a previous `ListTunnelDestGroups` call. Provide this to retrieve the
subsequent page. When paginating, all other parameters provided to `ListTunnelDestGroups` must
match the call that provided the page token.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A page token, received from a previous `ListTunnelDestGroups` call. Provide this to
* retrieve the subsequent page. When paginating, all other parameters provided to
* `ListTunnelDestGroups` must match the call that provided the page token.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Updates a TunnelDestGroup.
*
* Create a request for the method "destGroups.patch".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation.
*
* @param name Required. Immutable. Identifier for the TunnelDestGroup. Must be unique within the project.
* @param content the {@link com.google.api.services.iap.v1.model.TunnelDestGroup}
* @return the request
*/
public Patch patch(java.lang.String name, com.google.api.services.iap.v1.model.TunnelDestGroup content) throws java.io.IOException {
Patch result = new Patch(name, content);
initialize(result);
return result;
}
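            /*
             * Illustrative usage sketch (`destGroups` as above, hypothetical group name): only the
             * fields named in the update mask are modified; here just the CIDR list is replaced.
             *
             *   com.google.api.services.iap.v1.model.TunnelDestGroup update =
             *       new com.google.api.services.iap.v1.model.TunnelDestGroup()
             *           .setCidrs(java.util.Arrays.asList("10.2.0.0/16"));
             *   destGroups.patch(
             *       "projects/123/iap_tunnel/locations/us-central1/destGroups/my-dest-group", update)
             *       .setUpdateMask("cidrs")
             *       .execute();
             */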
public class Patch extends CloudIAPRequest<com.google.api.services.iap.v1.model.TunnelDestGroup> {
private static final String REST_PATH = "v1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
/**
* Updates a TunnelDestGroup.
*
* Create a request for the method "destGroups.patch".
*
             * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link Patch#execute()} method to invoke the remote operation. <p> {@link
* Patch#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. Immutable. Identifier for the TunnelDestGroup. Must be unique within the project.
* @param content the {@link com.google.api.services.iap.v1.model.TunnelDestGroup}
* @since 1.13
*/
protected Patch(java.lang.String name, com.google.api.services.iap.v1.model.TunnelDestGroup content) {
super(CloudIAP.this, "PATCH", REST_PATH, content, com.google.api.services.iap.v1.model.TunnelDestGroup.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
}
}
@Override
public Patch set$Xgafv(java.lang.String $Xgafv) {
return (Patch) super.set$Xgafv($Xgafv);
}
@Override
public Patch setAccessToken(java.lang.String accessToken) {
return (Patch) super.setAccessToken(accessToken);
}
@Override
public Patch setAlt(java.lang.String alt) {
return (Patch) super.setAlt(alt);
}
@Override
public Patch setCallback(java.lang.String callback) {
return (Patch) super.setCallback(callback);
}
@Override
public Patch setFields(java.lang.String fields) {
return (Patch) super.setFields(fields);
}
@Override
public Patch setKey(java.lang.String key) {
return (Patch) super.setKey(key);
}
@Override
public Patch setOauthToken(java.lang.String oauthToken) {
return (Patch) super.setOauthToken(oauthToken);
}
@Override
public Patch setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Patch) super.setPrettyPrint(prettyPrint);
}
@Override
public Patch setQuotaUser(java.lang.String quotaUser) {
return (Patch) super.setQuotaUser(quotaUser);
}
@Override
public Patch setUploadType(java.lang.String uploadType) {
return (Patch) super.setUploadType(uploadType);
}
@Override
public Patch setUploadProtocol(java.lang.String uploadProtocol) {
return (Patch) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. Immutable. Identifier for the TunnelDestGroup. Must be unique within the
* project.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. Immutable. Identifier for the TunnelDestGroup. Must be unique within the project.
*/
public java.lang.String getName() {
return name;
}
/**
* Required. Immutable. Identifier for the TunnelDestGroup. Must be unique within the
* project.
*/
public Patch setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/iap_tunnel/locations/[^/]+/destGroups/[^/]+$");
}
this.name = name;
return this;
}
/**
             * A field mask that specifies which IAP settings to update. If omitted, then all of the
             * settings are updated. See
             * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
*/
@com.google.api.client.util.Key
private String updateMask;
            /** A field mask that specifies which IAP settings to update. If omitted, then all of the settings are
           updated. See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
*/
public String getUpdateMask() {
return updateMask;
}
/**
             * A field mask that specifies which IAP settings to update. If omitted, then all of the
             * settings are updated. See
             * https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
*/
public Patch setUpdateMask(String updateMask) {
this.updateMask = updateMask;
return this;
}
@Override
public Patch set(String parameterName, Object value) {
return (Patch) super.set(parameterName, value);
}
}
}
}
}
}
/**
* An accessor for creating requests from the V1 collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code CloudIAP iap = new CloudIAP(...);}
* {@code CloudIAP.V1.List request = iap.v1().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public V1 v1() {
return new V1();
}
/**
* The "v1" collection of methods.
*/
public class V1 {
/**
   * Gets the access control policy for an Identity-Aware Proxy protected resource. More information
   * about managing access via IAP can be found at:
   * https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
*
* Create a request for the method "v1.getIamPolicy".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being requested. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
* @param content the {@link com.google.api.services.iap.v1.model.GetIamPolicyRequest}
* @return the request
*/
public GetIamPolicy getIamPolicy(java.lang.String resource, com.google.api.services.iap.v1.model.GetIamPolicyRequest content) throws java.io.IOException {
GetIamPolicy result = new GetIamPolicy(resource, content);
initialize(result);
return result;
}
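    /*
     * Illustrative usage sketch (hypothetical IAP resource path): it assumes `v1` is the V1
     * collection from an initialized CloudIAP service, and that the generated GetPolicyOptions
     * model is available for requesting version 3 policies (needed to see conditional bindings).
     *
     *   com.google.api.services.iap.v1.model.GetIamPolicyRequest request =
     *       new com.google.api.services.iap.v1.model.GetIamPolicyRequest()
     *           .setOptions(new com.google.api.services.iap.v1.model.GetPolicyOptions()
     *               .setRequestedPolicyVersion(3));
     *   com.google.api.services.iap.v1.model.Policy policy =
     *       v1.getIamPolicy("projects/123/iap_web/appengine-my-app", request).execute();
     */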
public class GetIamPolicy extends CloudIAPRequest<com.google.api.services.iap.v1.model.Policy> {
private static final String REST_PATH = "v1/{+resource}:getIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^.*$");
/**
     * Gets the access control policy for an Identity-Aware Proxy protected resource. More information
     * about managing access via IAP can be found at:
     * https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
*
* Create a request for the method "v1.getIamPolicy".
*
     * This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link GetIamPolicy#execute()} method to invoke the remote operation. <p>
* {@link
* GetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being requested. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
* @param content the {@link com.google.api.services.iap.v1.model.GetIamPolicyRequest}
* @since 1.13
*/
protected GetIamPolicy(java.lang.String resource, com.google.api.services.iap.v1.model.GetIamPolicyRequest content) {
super(CloudIAP.this, "POST", REST_PATH, content, com.google.api.services.iap.v1.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^.*$");
}
}
@Override
public GetIamPolicy set$Xgafv(java.lang.String $Xgafv) {
return (GetIamPolicy) super.set$Xgafv($Xgafv);
}
@Override
public GetIamPolicy setAccessToken(java.lang.String accessToken) {
return (GetIamPolicy) super.setAccessToken(accessToken);
}
@Override
public GetIamPolicy setAlt(java.lang.String alt) {
return (GetIamPolicy) super.setAlt(alt);
}
@Override
public GetIamPolicy setCallback(java.lang.String callback) {
return (GetIamPolicy) super.setCallback(callback);
}
@Override
public GetIamPolicy setFields(java.lang.String fields) {
return (GetIamPolicy) super.setFields(fields);
}
@Override
public GetIamPolicy setKey(java.lang.String key) {
return (GetIamPolicy) super.setKey(key);
}
@Override
public GetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (GetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public GetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public GetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (GetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public GetIamPolicy setUploadType(java.lang.String uploadType) {
return (GetIamPolicy) super.setUploadType(uploadType);
}
@Override
public GetIamPolicy setUploadProtocol(java.lang.String uploadProtocol) {
return (GetIamPolicy) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy is being requested. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being requested. See [Resource
names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this
field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being requested. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
*/
public GetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^.*$");
}
this.resource = resource;
return this;
}
@Override
public GetIamPolicy set(String parameterName, Object value) {
return (GetIamPolicy) super.set(parameterName, value);
}
}
/**
* Gets the IAP settings on a particular IAP protected resource.
*
* Create a request for the method "v1.getIapSettings".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link GetIapSettings#execute()} method to invoke the remote operation.
*
* @param name Required. The resource name for which to retrieve the settings. Authorization: Requires the
* `getSettings` permission for the associated resource.
* @return the request
*/
public GetIapSettings getIapSettings(java.lang.String name) throws java.io.IOException {
GetIapSettings result = new GetIapSettings(name);
initialize(result);
return result;
}
public class GetIapSettings extends CloudIAPRequest<com.google.api.services.iap.v1.model.IapSettings> {
private static final String REST_PATH = "v1/{+name}:iapSettings";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^.*$");
/**
* Gets the IAP settings on a particular IAP protected resource.
*
* Create a request for the method "v1.getIapSettings".
*
     * This request holds the parameters needed by the iap server.  After setting any optional
* parameters, call the {@link GetIapSettings#execute()} method to invoke the remote operation.
* <p> {@link GetIapSettings#initialize(com.google.api.client.googleapis.services.AbstractGoogleCl
* ientRequest)} must be called to initialize this instance immediately after invoking the
* constructor. </p>
*
* @param name Required. The resource name for which to retrieve the settings. Authorization: Requires the
* `getSettings` permission for the associated resource.
* @since 1.13
*/
protected GetIapSettings(java.lang.String name) {
super(CloudIAP.this, "GET", REST_PATH, null, com.google.api.services.iap.v1.model.IapSettings.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^.*$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public GetIapSettings set$Xgafv(java.lang.String $Xgafv) {
return (GetIapSettings) super.set$Xgafv($Xgafv);
}
@Override
public GetIapSettings setAccessToken(java.lang.String accessToken) {
return (GetIapSettings) super.setAccessToken(accessToken);
}
@Override
public GetIapSettings setAlt(java.lang.String alt) {
return (GetIapSettings) super.setAlt(alt);
}
@Override
public GetIapSettings setCallback(java.lang.String callback) {
return (GetIapSettings) super.setCallback(callback);
}
@Override
public GetIapSettings setFields(java.lang.String fields) {
return (GetIapSettings) super.setFields(fields);
}
@Override
public GetIapSettings setKey(java.lang.String key) {
return (GetIapSettings) super.setKey(key);
}
@Override
public GetIapSettings setOauthToken(java.lang.String oauthToken) {
return (GetIapSettings) super.setOauthToken(oauthToken);
}
@Override
public GetIapSettings setPrettyPrint(java.lang.Boolean prettyPrint) {
return (GetIapSettings) super.setPrettyPrint(prettyPrint);
}
@Override
public GetIapSettings setQuotaUser(java.lang.String quotaUser) {
return (GetIapSettings) super.setQuotaUser(quotaUser);
}
@Override
public GetIapSettings setUploadType(java.lang.String uploadType) {
return (GetIapSettings) super.setUploadType(uploadType);
}
@Override
public GetIapSettings setUploadProtocol(java.lang.String uploadProtocol) {
return (GetIapSettings) super.setUploadProtocol(uploadProtocol);
}
/**
* Required. The resource name for which to retrieve the settings. Authorization: Requires the
* `getSettings` permission for the associated resource.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The resource name for which to retrieve the settings. Authorization: Requires the
`getSettings` permission for the associated resource.
*/
public java.lang.String getName() {
return name;
}
/**
* Required. The resource name for which to retrieve the settings. Authorization: Requires the
* `getSettings` permission for the associated resource.
*/
public GetIapSettings setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^.*$");
}
this.name = name;
return this;
}
@Override
public GetIapSettings set(String parameterName, Object value) {
return (GetIapSettings) super.set(parameterName, value);
}
}
/**
* Sets the access control policy for an Identity-Aware Proxy protected resource. Replaces any
* existing policy. More information about managing access via IAP can be found at:
* https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
*
* Create a request for the method "v1.setIamPolicy".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy is being specified. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
* @param content the {@link com.google.api.services.iap.v1.model.SetIamPolicyRequest}
* @return the request
*/
public SetIamPolicy setIamPolicy(java.lang.String resource, com.google.api.services.iap.v1.model.SetIamPolicyRequest content) throws java.io.IOException {
SetIamPolicy result = new SetIamPolicy(resource, content);
initialize(result);
return result;
}
public class SetIamPolicy extends CloudIAPRequest<com.google.api.services.iap.v1.model.Policy> {
private static final String REST_PATH = "v1/{+resource}:setIamPolicy";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^.*$");
/**
* Sets the access control policy for an Identity-Aware Proxy protected resource. Replaces any
* existing policy. More information about managing access via IAP can be found at:
* https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
*
* Create a request for the method "v1.setIamPolicy".
*
     * This request holds the parameters needed by the iap server.  After setting any optional
* parameters, call the {@link SetIamPolicy#execute()} method to invoke the remote operation. <p>
* {@link
* SetIamPolicy#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy is being specified. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
* @param content the {@link com.google.api.services.iap.v1.model.SetIamPolicyRequest}
* @since 1.13
*/
protected SetIamPolicy(java.lang.String resource, com.google.api.services.iap.v1.model.SetIamPolicyRequest content) {
super(CloudIAP.this, "POST", REST_PATH, content, com.google.api.services.iap.v1.model.Policy.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^.*$");
}
}
@Override
public SetIamPolicy set$Xgafv(java.lang.String $Xgafv) {
return (SetIamPolicy) super.set$Xgafv($Xgafv);
}
@Override
public SetIamPolicy setAccessToken(java.lang.String accessToken) {
return (SetIamPolicy) super.setAccessToken(accessToken);
}
@Override
public SetIamPolicy setAlt(java.lang.String alt) {
return (SetIamPolicy) super.setAlt(alt);
}
@Override
public SetIamPolicy setCallback(java.lang.String callback) {
return (SetIamPolicy) super.setCallback(callback);
}
@Override
public SetIamPolicy setFields(java.lang.String fields) {
return (SetIamPolicy) super.setFields(fields);
}
@Override
public SetIamPolicy setKey(java.lang.String key) {
return (SetIamPolicy) super.setKey(key);
}
@Override
public SetIamPolicy setOauthToken(java.lang.String oauthToken) {
return (SetIamPolicy) super.setOauthToken(oauthToken);
}
@Override
public SetIamPolicy setPrettyPrint(java.lang.Boolean prettyPrint) {
return (SetIamPolicy) super.setPrettyPrint(prettyPrint);
}
@Override
public SetIamPolicy setQuotaUser(java.lang.String quotaUser) {
return (SetIamPolicy) super.setQuotaUser(quotaUser);
}
@Override
public SetIamPolicy setUploadType(java.lang.String uploadType) {
return (SetIamPolicy) super.setUploadType(uploadType);
}
@Override
public SetIamPolicy setUploadProtocol(java.lang.String uploadProtocol) {
return (SetIamPolicy) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy is being specified. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy is being specified. See [Resource
names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this
field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy is being specified. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
*/
public SetIamPolicy setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^.*$");
}
this.resource = resource;
return this;
}
@Override
public SetIamPolicy set(String parameterName, Object value) {
return (SetIamPolicy) super.set(parameterName, value);
}
}
/**
* Returns permissions that a caller has on the Identity-Aware Proxy protected resource. More
* information about managing access via IAP can be found at: https://cloud.google.com/iap/docs
* /managing-access#managing_access_via_the_api
*
* Create a request for the method "v1.testIamPermissions".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote operation.
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
* @param content the {@link com.google.api.services.iap.v1.model.TestIamPermissionsRequest}
* @return the request
*/
public TestIamPermissions testIamPermissions(java.lang.String resource, com.google.api.services.iap.v1.model.TestIamPermissionsRequest content) throws java.io.IOException {
TestIamPermissions result = new TestIamPermissions(resource, content);
initialize(result);
return result;
}
public class TestIamPermissions extends CloudIAPRequest<com.google.api.services.iap.v1.model.TestIamPermissionsResponse> {
private static final String REST_PATH = "v1/{+resource}:testIamPermissions";
private final java.util.regex.Pattern RESOURCE_PATTERN =
java.util.regex.Pattern.compile("^.*$");
/**
* Returns permissions that a caller has on the Identity-Aware Proxy protected resource. More
* information about managing access via IAP can be found at: https://cloud.google.com/iap/docs
* /managing-access#managing_access_via_the_api
*
* Create a request for the method "v1.testIamPermissions".
*
     * This request holds the parameters needed by the iap server.  After setting any optional
* parameters, call the {@link TestIamPermissions#execute()} method to invoke the remote
* operation. <p> {@link TestIamPermissions#initialize(com.google.api.client.googleapis.services.A
* bstractGoogleClientRequest)} must be called to initialize this instance immediately after
* invoking the constructor. </p>
*
* @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
* @param content the {@link com.google.api.services.iap.v1.model.TestIamPermissionsRequest}
* @since 1.13
*/
protected TestIamPermissions(java.lang.String resource, com.google.api.services.iap.v1.model.TestIamPermissionsRequest content) {
super(CloudIAP.this, "POST", REST_PATH, content, com.google.api.services.iap.v1.model.TestIamPermissionsResponse.class);
this.resource = com.google.api.client.util.Preconditions.checkNotNull(resource, "Required parameter resource must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^.*$");
}
}
@Override
public TestIamPermissions set$Xgafv(java.lang.String $Xgafv) {
return (TestIamPermissions) super.set$Xgafv($Xgafv);
}
@Override
public TestIamPermissions setAccessToken(java.lang.String accessToken) {
return (TestIamPermissions) super.setAccessToken(accessToken);
}
@Override
public TestIamPermissions setAlt(java.lang.String alt) {
return (TestIamPermissions) super.setAlt(alt);
}
@Override
public TestIamPermissions setCallback(java.lang.String callback) {
return (TestIamPermissions) super.setCallback(callback);
}
@Override
public TestIamPermissions setFields(java.lang.String fields) {
return (TestIamPermissions) super.setFields(fields);
}
@Override
public TestIamPermissions setKey(java.lang.String key) {
return (TestIamPermissions) super.setKey(key);
}
@Override
public TestIamPermissions setOauthToken(java.lang.String oauthToken) {
return (TestIamPermissions) super.setOauthToken(oauthToken);
}
@Override
public TestIamPermissions setPrettyPrint(java.lang.Boolean prettyPrint) {
return (TestIamPermissions) super.setPrettyPrint(prettyPrint);
}
@Override
public TestIamPermissions setQuotaUser(java.lang.String quotaUser) {
return (TestIamPermissions) super.setQuotaUser(quotaUser);
}
@Override
public TestIamPermissions setUploadType(java.lang.String uploadType) {
return (TestIamPermissions) super.setUploadType(uploadType);
}
@Override
public TestIamPermissions setUploadProtocol(java.lang.String uploadProtocol) {
return (TestIamPermissions) super.setUploadProtocol(uploadProtocol);
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
*/
@com.google.api.client.util.Key
private java.lang.String resource;
/** REQUIRED: The resource for which the policy detail is being requested. See [Resource
names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this
field.
*/
public java.lang.String getResource() {
return resource;
}
/**
* REQUIRED: The resource for which the policy detail is being requested. See [Resource
* names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for
* this field.
*/
public TestIamPermissions setResource(java.lang.String resource) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(RESOURCE_PATTERN.matcher(resource).matches(),
"Parameter resource must conform to the pattern " +
"^.*$");
}
this.resource = resource;
return this;
}
@Override
public TestIamPermissions set(String parameterName, Object value) {
return (TestIamPermissions) super.set(parameterName, value);
}
}
/**
* Updates the IAP settings on a particular IAP protected resource. It replaces all fields unless
* the `update_mask` is set.
*
* Create a request for the method "v1.updateIapSettings".
*
* This request holds the parameters needed by the iap server. After setting any optional
* parameters, call the {@link UpdateIapSettings#execute()} method to invoke the remote operation.
*
* @param name Required. The resource name of the IAP protected resource.
* @param content the {@link com.google.api.services.iap.v1.model.IapSettings}
* @return the request
*/
public UpdateIapSettings updateIapSettings(java.lang.String name, com.google.api.services.iap.v1.model.IapSettings content) throws java.io.IOException {
UpdateIapSettings result = new UpdateIapSettings(name, content);
initialize(result);
return result;
}
public class UpdateIapSettings extends CloudIAPRequest<com.google.api.services.iap.v1.model.IapSettings> {
private static final String REST_PATH = "v1/{+name}:iapSettings";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^.*$");
/**
* Updates the IAP settings on a particular IAP protected resource. It replaces all fields unless
* the `update_mask` is set.
*
* Create a request for the method "v1.updateIapSettings".
*
     * This request holds the parameters needed by the iap server.  After setting any optional
* parameters, call the {@link UpdateIapSettings#execute()} method to invoke the remote operation.
* <p> {@link UpdateIapSettings#initialize(com.google.api.client.googleapis.services.AbstractGoogl
* eClientRequest)} must be called to initialize this instance immediately after invoking the
* constructor. </p>
*
* @param name Required. The resource name of the IAP protected resource.
* @param content the {@link com.google.api.services.iap.v1.model.IapSettings}
* @since 1.13
*/
protected UpdateIapSettings(java.lang.String name, com.google.api.services.iap.v1.model.IapSettings content) {
super(CloudIAP.this, "PATCH", REST_PATH, content, com.google.api.services.iap.v1.model.IapSettings.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^.*$");
}
}
@Override
public UpdateIapSettings set$Xgafv(java.lang.String $Xgafv) {
return (UpdateIapSettings) super.set$Xgafv($Xgafv);
}
@Override
public UpdateIapSettings setAccessToken(java.lang.String accessToken) {
return (UpdateIapSettings) super.setAccessToken(accessToken);
}
@Override
public UpdateIapSettings setAlt(java.lang.String alt) {
return (UpdateIapSettings) super.setAlt(alt);
}
@Override
public UpdateIapSettings setCallback(java.lang.String callback) {
return (UpdateIapSettings) super.setCallback(callback);
}
@Override
public UpdateIapSettings setFields(java.lang.String fields) {
return (UpdateIapSettings) super.setFields(fields);
}
@Override
public UpdateIapSettings setKey(java.lang.String key) {
return (UpdateIapSettings) super.setKey(key);
}
@Override
public UpdateIapSettings setOauthToken(java.lang.String oauthToken) {
return (UpdateIapSettings) super.setOauthToken(oauthToken);
}
@Override
public UpdateIapSettings setPrettyPrint(java.lang.Boolean prettyPrint) {
return (UpdateIapSettings) super.setPrettyPrint(prettyPrint);
}
@Override
public UpdateIapSettings setQuotaUser(java.lang.String quotaUser) {
return (UpdateIapSettings) super.setQuotaUser(quotaUser);
}
@Override
public UpdateIapSettings setUploadType(java.lang.String uploadType) {
return (UpdateIapSettings) super.setUploadType(uploadType);
}
@Override
public UpdateIapSettings setUploadProtocol(java.lang.String uploadProtocol) {
return (UpdateIapSettings) super.setUploadProtocol(uploadProtocol);
}
/** Required. The resource name of the IAP protected resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The resource name of the IAP protected resource.
*/
public java.lang.String getName() {
return name;
}
/** Required. The resource name of the IAP protected resource. */
public UpdateIapSettings setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^.*$");
}
this.name = name;
return this;
}
/**
       * The field mask specifying which IAP settings should be updated. If omitted, then all of the
* settings are updated. See https://developers.google.com/protocol-
* buffers/docs/reference/google.protobuf#fieldmask
*/
@com.google.api.client.util.Key
private String updateMask;
      /** The field mask specifying which IAP settings should be updated. If omitted, then all of the settings
are updated. See https://developers.google.com/protocol-
buffers/docs/reference/google.protobuf#fieldmask
*/
public String getUpdateMask() {
return updateMask;
}
/**
       * The field mask specifying which IAP settings should be updated. If omitted, then all of the
* settings are updated. See https://developers.google.com/protocol-
* buffers/docs/reference/google.protobuf#fieldmask
*/
public UpdateIapSettings setUpdateMask(String updateMask) {
this.updateMask = updateMask;
return this;
}
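      // Illustrative sketch only (the resource name, the `settings` object and the
      // field-mask value are assumptions, not taken from this file): restricting the
      // update to a single settings sub-message might look like:
      //
      //   com.google.api.services.iap.v1.model.IapSettings updated = iap.v1()
      //       .updateIapSettings("projects/my-project/iap_web", settings)
      //       .setUpdateMask("accessSettings")
      //       .execute();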
@Override
public UpdateIapSettings set(String parameterName, Object value) {
return (UpdateIapSettings) super.set(parameterName, value);
}
}
}
/**
* Builder for {@link CloudIAP}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
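    // Illustrative sketch only (transport, jsonFactory and requestInitializer are
    // placeholders): with GOOGLE_API_USE_MTLS_ENDPOINT unset or set to "auto" on a
    // non-mTLS transport the client uses DEFAULT_ROOT_URL, while "always" forces
    // DEFAULT_MTLS_ROOT_URL. A typical build then looks like:
    //
    //   CloudIAP iap = new CloudIAP.Builder(transport, jsonFactory, requestInitializer)
    //       .setApplicationName("my-app")
    //       .build();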
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link CloudIAP}. */
@Override
public CloudIAP build() {
return new CloudIAP(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link CloudIAPRequestInitializer}.
*
* @since 1.12
*/
public Builder setCloudIAPRequestInitializer(
CloudIAPRequestInitializer cloudiapRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(cloudiapRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
|
[
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT"]
|
java
| 1 | 0 | |
tornado_demo/web2py/scripts/sync_languages.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# TODO: Comment this code
import sys
import shutil
import os
from gluon.languages import findT
sys.path.insert(0, '.')
def sync_language(d, data):
    ''' this function makes sure a translated string will be preferred over an untranslated
string when syncing languages between apps. when both are translated, it prefers the
latter app, as did the original script
'''
for key in data:
# if this string is not in the already translated data, add it
if key not in d:
d[key] = data[key]
# see if there is a translated string in the original list, but not in the new list
elif (
((d[key] != '') and (d[key] != key)) and
((data[key] == '') or (data[key] == key))
):
d[key] = d[key]  # keep the already translated string from the original list
# any other case (whether there is or there isn't a translated string)
else:
d[key] = data[key]
return d
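# A minimal sketch of the merge rule implemented by sync_language above
# (hypothetical data, not read from any app on disk): a string that is already
# translated in d is kept, otherwise the value from the later app wins.
#
#   d = {'Hello': 'Bonjour', 'Bye': ''}
#   data = {'Hello': '', 'Bye': 'Au revoir'}
#   sync_language(d, data)  # -> {'Hello': 'Bonjour', 'Bye': 'Au revoir'}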
def sync_main(file, apps):
d = {}
for app in apps:
path = 'applications/%s/' % app
findT(path, file)
langfile = open(os.path.join(path, 'languages', '%s.py' % file))
try:
data = eval(langfile.read())
finally:
langfile.close()
d = sync_language(d, data)
path = 'applications/%s/' % apps[-1]
file1 = os.path.join(path, 'languages', '%s.py' % file)
f = open(file1, 'w')
try:
f.write('# coding: utf8\n')
f.write('{\n')
keys = sorted(d.keys())
for key in keys:
f.write("'''%s''':'''%s''',\n" % (key.replace("'", "\\'"), str(d[key].replace("'", "\\'"))))
f.write('}\n')
finally:
f.close()
oapps = reversed(apps[:-1])
for app in oapps:
path2 = 'applications/%s/' % app
file2 = os.path.join(path2, 'languages', '%s.py' % file)
if file1 != file2:
shutil.copyfile(file1, file2)
if __name__ == "__main__":
file = sys.argv[1]
apps = sys.argv[2:]
sync_main(file, apps)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
pkg/cmd/templates.go
|
package cmd
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"golang.org/x/term"
"github.com/stripe/stripe-cli/pkg/ansi"
"github.com/stripe/stripe-cli/pkg/config"
)
//
// Public functions
//
// WrappedInheritedFlagUsages returns a string containing the usage information
// for all flags which were inherited from parent commands, wrapped to the
// terminal's width.
func WrappedInheritedFlagUsages(cmd *cobra.Command) string {
return cmd.InheritedFlags().FlagUsagesWrapped(getTerminalWidth())
}
// WrappedLocalFlagUsages returns a string containing the usage information
// for all flags specifically set in the current command, wrapped to the
// terminal's width.
func WrappedLocalFlagUsages(cmd *cobra.Command) string {
return cmd.LocalFlags().FlagUsagesWrapped(getTerminalWidth())
}
// WrappedRequestParamsFlagUsages returns a string containing the usage
// information for all request parameters flags, i.e. flags used in operation
// commands to set values for request parameters. The string is wrapped to the
// terminal's width.
func WrappedRequestParamsFlagUsages(cmd *cobra.Command) string {
var sb strings.Builder
// We're cheating a little bit in this method: we're not actually wrapping
// anything, just printing out the flag names and assuming that no name
// will be long enough to go over the terminal's width.
// We do this instead of using pflag's `FlagUsagesWrapped` function because
// we don't want to print the types (all request parameters flags are
// defined as strings in the CLI, but it would be confusing to print that
// out as a lot of them are not strings in the API).
// If/when we do add help strings for request parameters flags, we'll have
// to do actual wrapping.
cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {
if _, ok := flag.Annotations["request"]; ok {
sb.WriteString(fmt.Sprintf(" --%s\n", flag.Name))
}
})
return sb.String()
}
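// For example (hypothetical flags, assuming they carry the "request" annotation),
// the string returned by WrappedRequestParamsFlagUsages above is simply an
// indented list of flag names, one per line:
//
//   --amount
//   --currency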
// WrappedNonRequestParamsFlagUsages returns a string containing the usage
// information for all non-request parameters flags. The string is wrapped to
// the terminal's width.
func WrappedNonRequestParamsFlagUsages(cmd *cobra.Command) string {
nonRequestParamsFlags := pflag.NewFlagSet("request", pflag.ExitOnError)
cmd.LocalFlags().VisitAll(func(flag *pflag.Flag) {
if _, ok := flag.Annotations["request"]; !ok {
nonRequestParamsFlags.AddFlag(flag)
}
})
return nonRequestParamsFlags.FlagUsagesWrapped(getTerminalWidth())
}
//
// Private functions
//
func getLogin(fs *afero.Fs, cfg *config.Config) string {
// We're checking against the path because we don't initialize the config
// at this point of execution.
path := cfg.GetConfigFolder(os.Getenv("XDG_CONFIG_HOME"))
file := filepath.Join(path, "config.toml")
exists, _ := afero.Exists(*fs, file)
if !exists {
return `
Before using the CLI, you'll need to login:
$ stripe login
If you're working on multiple projects, you can run the login command with the
--project-name flag:
$ stripe login --project-name rocket-rides`
}
return ""
}
func getUsageTemplate() string {
return fmt.Sprintf(`%s{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
%s
{{.NameAndAliases}}{{end}}{{if .HasExample}}
%s
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}{{if .Annotations}}
%s{{range $index, $cmd := .Commands}}{{if (eq (index $.Annotations $cmd.Name) "webhooks")}}
{{rpad $cmd.Name $cmd.NamePadding}} {{$cmd.Short}}{{end}}{{end}}
%s{{range $index, $cmd := .Commands}}{{if (eq (index $.Annotations $cmd.Name) "stripe")}}
{{rpad $cmd.Name $cmd.NamePadding}} {{$cmd.Short}}{{end}}{{end}}
%s
{{rpad "get" 29}} Quickly retrieve resources from Stripe
{{rpad "charges" 29}} Make requests (capture, create, list, etc) on charges
{{rpad "customers" 29}} Make requests (create, delete, list, etc) on customers
{{rpad "payment_intents" 29}} Make requests (cancel, capture, confirm, etc) on payment intents
{{rpad "..." 29}} %s
%s{{range $index, $cmd := .Commands}}{{if (not (or (index $.Annotations $cmd.Name) $cmd.Hidden))}}
{{rpad $cmd.Name $cmd.NamePadding}} {{$cmd.Short}}{{end}}{{end}}{{else}}
%s{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding}} {{.Short}}{{end}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
%s
{{WrappedLocalFlagUsages . | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
%s
{{WrappedInheritedFlagUsages . | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`,
ansi.Bold("Usage:"),
ansi.Bold("Aliases:"),
ansi.Bold("Examples:"),
ansi.Bold("Webhook commands:"),
ansi.Bold("Stripe commands:"),
ansi.Bold("Resource commands:"),
ansi.Italic("To see more resource commands, run `stripe resources help`"),
ansi.Bold("Other commands:"),
ansi.Bold("Available commands:"),
ansi.Bold("Flags:"),
ansi.Bold("Global flags:"),
)
}
func getTerminalWidth() int {
var width int
width, _, err := term.GetSize(0)
if err != nil {
width = 80
}
return width
}
func init() {
cobra.AddTemplateFunc("WrappedInheritedFlagUsages", WrappedInheritedFlagUsages)
cobra.AddTemplateFunc("WrappedLocalFlagUsages", WrappedLocalFlagUsages)
cobra.AddTemplateFunc("WrappedRequestParamsFlagUsages", WrappedRequestParamsFlagUsages)
cobra.AddTemplateFunc("WrappedNonRequestParamsFlagUsages", WrappedNonRequestParamsFlagUsages)
}
|
[
"\"XDG_CONFIG_HOME\""
] |
[] |
[
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_CONFIG_HOME"]
|
go
| 1 | 0 | |
integration/cluster.go
|
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration
import (
"context"
"crypto/tls"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/httptest"
"os"
"reflect"
"sort"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/coreos/etcd/client"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/embed"
"github.com/coreos/etcd/etcdserver"
"github.com/coreos/etcd/etcdserver/api/etcdhttp"
"github.com/coreos/etcd/etcdserver/api/v2http"
"github.com/coreos/etcd/etcdserver/api/v3client"
"github.com/coreos/etcd/etcdserver/api/v3election"
epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
"github.com/coreos/etcd/etcdserver/api/v3lock"
lockpb "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
"github.com/coreos/etcd/etcdserver/api/v3rpc"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/pkg/testutil"
"github.com/coreos/etcd/pkg/tlsutil"
"github.com/coreos/etcd/pkg/transport"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/pkg/capnslog"
"github.com/soheilhy/cmux"
"google.golang.org/grpc"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/keepalive"
)
const (
// RequestWaitTimeout is the time duration to wait for a request to go through or detect leader loss.
RequestWaitTimeout = 3 * time.Second
tickDuration = 10 * time.Millisecond
requestTimeout = 20 * time.Second
clusterName = "etcd"
basePort = 21000
UrlScheme = "unix"
UrlSchemeTLS = "unixs"
)
var (
electionTicks = 10
// integration test uses unique ports, counting up, to listen for each
// member, ensuring restarted members can listen on the same port again.
localListenCount int64 = 0
testTLSInfo = transport.TLSInfo{
KeyFile: "./fixtures/server.key.insecure",
CertFile: "./fixtures/server.crt",
TrustedCAFile: "./fixtures/ca.crt",
ClientCertAuth: true,
}
testTLSInfoIP = transport.TLSInfo{
KeyFile: "./fixtures/server-ip.key.insecure",
CertFile: "./fixtures/server-ip.crt",
TrustedCAFile: "./fixtures/ca.crt",
ClientCertAuth: true,
}
testTLSInfoExpired = transport.TLSInfo{
KeyFile: "./fixtures-expired/server.key.insecure",
CertFile: "./fixtures-expired/server.crt",
TrustedCAFile: "./fixtures-expired/ca.crt",
ClientCertAuth: true,
}
testTLSInfoExpiredIP = transport.TLSInfo{
KeyFile: "./fixtures-expired/server-ip.key.insecure",
CertFile: "./fixtures-expired/server-ip.crt",
TrustedCAFile: "./fixtures-expired/ca.crt",
ClientCertAuth: true,
}
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "integration")
)
type ClusterConfig struct {
Size int
PeerTLS *transport.TLSInfo
ClientTLS *transport.TLSInfo
DiscoveryURL string
UseGRPC bool
QuotaBackendBytes int64
MaxTxnOps uint
MaxRequestBytes uint
GRPCKeepAliveMinTime time.Duration
GRPCKeepAliveInterval time.Duration
GRPCKeepAliveTimeout time.Duration
// SkipCreatingClient to skip creating clients for each member.
SkipCreatingClient bool
ClientMaxCallSendMsgSize int
ClientMaxCallRecvMsgSize int
// UseIP is true to use only IP for gRPC requests.
UseIP bool
}
type cluster struct {
cfg *ClusterConfig
Members []*member
}
func schemeFromTLSInfo(tls *transport.TLSInfo) string {
if tls == nil {
return UrlScheme
}
return UrlSchemeTLS
}
func (c *cluster) fillClusterForMembers() error {
if c.cfg.DiscoveryURL != "" {
// cluster will be discovered
return nil
}
addrs := make([]string, 0)
for _, m := range c.Members {
scheme := schemeFromTLSInfo(m.PeerTLSInfo)
for _, l := range m.PeerListeners {
addrs = append(addrs, fmt.Sprintf("%s=%s://%s", m.Name, scheme, l.Addr().String()))
}
}
clusterStr := strings.Join(addrs, ",")
var err error
for _, m := range c.Members {
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
return err
}
}
return nil
}
func newCluster(t *testing.T, cfg *ClusterConfig) *cluster {
c := &cluster{cfg: cfg}
ms := make([]*member, cfg.Size)
for i := 0; i < cfg.Size; i++ {
ms[i] = c.mustNewMember(t)
}
c.Members = ms
if err := c.fillClusterForMembers(); err != nil {
t.Fatal(err)
}
return c
}
// NewCluster returns an unlaunched cluster of the given size which has been
// set to use static bootstrap.
func NewCluster(t *testing.T, size int) *cluster {
return newCluster(t, &ClusterConfig{Size: size})
}
// NewClusterByConfig returns an unlaunched cluster defined by a cluster configuration
func NewClusterByConfig(t *testing.T, cfg *ClusterConfig) *cluster {
return newCluster(t, cfg)
}
func (c *cluster) Launch(t *testing.T) {
errc := make(chan error)
for _, m := range c.Members {
// Members are launched in separate goroutines because if they boot
// using discovery url, they have to wait for others to register to continue.
go func(m *member) {
errc <- m.Launch()
}(m)
}
for range c.Members {
if err := <-errc; err != nil {
t.Fatalf("error setting up member: %v", err)
}
}
// wait for the cluster to be stable so it can receive future client requests
c.waitMembersMatch(t, c.HTTPMembers())
c.waitVersion()
}
func (c *cluster) URL(i int) string {
return c.Members[i].ClientURLs[0].String()
}
// URLs returns a list of all active client URLs in the cluster
func (c *cluster) URLs() []string {
return getMembersURLs(c.Members)
}
func getMembersURLs(members []*member) []string {
urls := make([]string, 0)
for _, m := range members {
select {
case <-m.s.StopNotify():
continue
default:
}
for _, u := range m.ClientURLs {
urls = append(urls, u.String())
}
}
return urls
}
// HTTPMembers returns a list of all active members as client.Members
func (c *cluster) HTTPMembers() []client.Member {
ms := []client.Member{}
for _, m := range c.Members {
pScheme := schemeFromTLSInfo(m.PeerTLSInfo)
cScheme := schemeFromTLSInfo(m.ClientTLSInfo)
cm := client.Member{Name: m.Name}
for _, ln := range m.PeerListeners {
cm.PeerURLs = append(cm.PeerURLs, pScheme+"://"+ln.Addr().String())
}
for _, ln := range m.ClientListeners {
cm.ClientURLs = append(cm.ClientURLs, cScheme+"://"+ln.Addr().String())
}
ms = append(ms, cm)
}
return ms
}
func (c *cluster) mustNewMember(t *testing.T) *member {
m := mustNewMember(t,
memberConfig{
name: c.name(rand.Int()),
peerTLS: c.cfg.PeerTLS,
clientTLS: c.cfg.ClientTLS,
quotaBackendBytes: c.cfg.QuotaBackendBytes,
maxTxnOps: c.cfg.MaxTxnOps,
maxRequestBytes: c.cfg.MaxRequestBytes,
grpcKeepAliveMinTime: c.cfg.GRPCKeepAliveMinTime,
grpcKeepAliveInterval: c.cfg.GRPCKeepAliveInterval,
grpcKeepAliveTimeout: c.cfg.GRPCKeepAliveTimeout,
clientMaxCallSendMsgSize: c.cfg.ClientMaxCallSendMsgSize,
clientMaxCallRecvMsgSize: c.cfg.ClientMaxCallRecvMsgSize,
useIP: c.cfg.UseIP,
})
m.DiscoveryURL = c.cfg.DiscoveryURL
if c.cfg.UseGRPC {
if err := m.listenGRPC(); err != nil {
t.Fatal(err)
}
}
return m
}
func (c *cluster) addMember(t *testing.T) {
m := c.mustNewMember(t)
scheme := schemeFromTLSInfo(c.cfg.PeerTLS)
// send add request to the cluster
var err error
for i := 0; i < len(c.Members); i++ {
clientURL := c.URL(i)
peerURL := scheme + "://" + m.PeerListeners[0].Addr().String()
if err = c.addMemberByURL(t, clientURL, peerURL); err == nil {
break
}
}
if err != nil {
t.Fatalf("add member failed on all members error: %v", err)
}
m.InitialPeerURLsMap = types.URLsMap{}
for _, mm := range c.Members {
m.InitialPeerURLsMap[mm.Name] = mm.PeerURLs
}
m.InitialPeerURLsMap[m.Name] = m.PeerURLs
m.NewCluster = false
if err := m.Launch(); err != nil {
t.Fatal(err)
}
c.Members = append(c.Members, m)
// wait for the cluster to be stable so it can receive future client requests
c.waitMembersMatch(t, c.HTTPMembers())
}
func (c *cluster) addMemberByURL(t *testing.T, clientURL, peerURL string) error {
cc := MustNewHTTPClient(t, []string{clientURL}, c.cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
_, err := ma.Add(ctx, peerURL)
cancel()
if err != nil {
return err
}
// wait for the add node entry applied in the cluster
members := append(c.HTTPMembers(), client.Member{PeerURLs: []string{peerURL}, ClientURLs: []string{}})
c.waitMembersMatch(t, members)
return nil
}
func (c *cluster) AddMember(t *testing.T) {
c.addMember(t)
}
func (c *cluster) RemoveMember(t *testing.T, id uint64) {
if err := c.removeMember(t, id); err != nil {
t.Fatal(err)
}
}
func (c *cluster) removeMember(t *testing.T, id uint64) error {
// send remove request to the cluster
cc := MustNewHTTPClient(t, c.URLs(), c.cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
err := ma.Remove(ctx, types.ID(id).String())
cancel()
if err != nil {
return err
}
newMembers := make([]*member, 0)
for _, m := range c.Members {
if uint64(m.s.ID()) != id {
newMembers = append(newMembers, m)
} else {
select {
case <-m.s.StopNotify():
m.Terminate(t)
// 1s stop delay + election timeout + 1s disk and network delay + connection write timeout
// TODO: remove connection write timeout by selecting on http response closeNotifier
// blocking on https://github.com/golang/go/issues/9524
case <-time.After(time.Second + time.Duration(electionTicks)*tickDuration + time.Second + rafthttp.ConnWriteTimeout):
t.Fatalf("failed to remove member %s in time", m.s.ID())
}
}
}
c.Members = newMembers
c.waitMembersMatch(t, c.HTTPMembers())
return nil
}
func (c *cluster) Terminate(t *testing.T) {
var wg sync.WaitGroup
wg.Add(len(c.Members))
for _, m := range c.Members {
go func(mm *member) {
defer wg.Done()
mm.Terminate(t)
}(m)
}
wg.Wait()
}
func (c *cluster) waitMembersMatch(t *testing.T, membs []client.Member) {
for _, u := range c.URLs() {
cc := MustNewHTTPClient(t, []string{u}, c.cfg.ClientTLS)
ma := client.NewMembersAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
ms, err := ma.List(ctx)
cancel()
if err == nil && isMembersEqual(ms, membs) {
break
}
time.Sleep(tickDuration)
}
}
}
func (c *cluster) WaitLeader(t *testing.T) int { return c.waitLeader(t, c.Members) }
// waitLeader waits until given members agree on the same leader.
func (c *cluster) waitLeader(t *testing.T, membs []*member) int {
possibleLead := make(map[uint64]bool)
var lead uint64
for _, m := range membs {
possibleLead[uint64(m.s.ID())] = true
}
cc := MustNewHTTPClient(t, getMembersURLs(membs), nil)
kapi := client.NewKeysAPI(cc)
// ensure leader is up via linearizable get
for {
ctx, cancel := context.WithTimeout(context.Background(), 10*tickDuration+time.Second)
_, err := kapi.Get(ctx, "0", &client.GetOptions{Quorum: true})
cancel()
if err == nil || strings.Contains(err.Error(), "Key not found") {
break
}
}
for lead == 0 || !possibleLead[lead] {
lead = 0
for _, m := range membs {
select {
case <-m.s.StopNotify():
continue
default:
}
if lead != 0 && lead != m.s.Lead() {
lead = 0
time.Sleep(10 * tickDuration)
break
}
lead = m.s.Lead()
}
}
for i, m := range membs {
if uint64(m.s.ID()) == lead {
return i
}
}
return -1
}
func (c *cluster) WaitNoLeader(t *testing.T) { c.waitNoLeader(t, c.Members) }
// waitNoLeader waits until given members lose leader.
func (c *cluster) waitNoLeader(t *testing.T, membs []*member) {
noLeader := false
for !noLeader {
noLeader = true
for _, m := range membs {
select {
case <-m.s.StopNotify():
continue
default:
}
if m.s.Lead() != 0 {
noLeader = false
time.Sleep(10 * tickDuration)
break
}
}
}
}
func (c *cluster) waitVersion() {
for _, m := range c.Members {
for {
if m.s.ClusterVersion() != nil {
break
}
time.Sleep(tickDuration)
}
}
}
func (c *cluster) name(i int) string {
return fmt.Sprint(i)
}
// isMembersEqual checks whether two member slices are equal, ignoring the ID field.
// The given wmembs should always have the ID field set to the empty string.
func isMembersEqual(membs []client.Member, wmembs []client.Member) bool {
sort.Sort(SortableMemberSliceByPeerURLs(membs))
sort.Sort(SortableMemberSliceByPeerURLs(wmembs))
for i := range membs {
membs[i].ID = ""
}
return reflect.DeepEqual(membs, wmembs)
}
func newLocalListener(t *testing.T) net.Listener {
c := atomic.AddInt64(&localListenCount, 1)
// Go 1.8+ allows only numbers in port
addr := fmt.Sprintf("127.0.0.1:%05d%05d", c+basePort, os.Getpid())
return NewListenerWithAddr(t, addr)
}
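// As an illustration of newLocalListener above (assumed values, not produced by any
// test in this file): the 3rd listener created by a process with PID 1234 yields the
// address "127.0.0.1:2100301234", which is used as a unix-socket name so a restarted
// member can listen on the same address again.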
func NewListenerWithAddr(t *testing.T, addr string) net.Listener {
l, err := transport.NewUnixListener(addr)
if err != nil {
t.Fatal(err)
}
return l
}
type member struct {
etcdserver.ServerConfig
PeerListeners, ClientListeners []net.Listener
grpcListener net.Listener
// PeerTLSInfo enables peer TLS when set
PeerTLSInfo *transport.TLSInfo
// ClientTLSInfo enables client TLS when set
ClientTLSInfo *transport.TLSInfo
raftHandler *testutil.PauseableHandler
s *etcdserver.EtcdServer
serverClosers []func()
grpcServerOpts []grpc.ServerOption
grpcServer *grpc.Server
grpcServerPeer *grpc.Server
grpcAddr string
grpcBridge *bridge
// serverClient is a clientv3 that directly calls the etcdserver.
serverClient *clientv3.Client
keepDataDirTerminate bool
clientMaxCallSendMsgSize int
clientMaxCallRecvMsgSize int
useIP bool
}
func (m *member) GRPCAddr() string { return m.grpcAddr }
type memberConfig struct {
name string
peerTLS *transport.TLSInfo
clientTLS *transport.TLSInfo
quotaBackendBytes int64
maxTxnOps uint
maxRequestBytes uint
grpcKeepAliveMinTime time.Duration
grpcKeepAliveInterval time.Duration
grpcKeepAliveTimeout time.Duration
clientMaxCallSendMsgSize int
clientMaxCallRecvMsgSize int
useIP bool
}
// mustNewMember returns an initialized member with the given name. If peerTLS is
// set, it will use the https scheme to communicate between peers.
func mustNewMember(t *testing.T, mcfg memberConfig) *member {
var err error
m := &member{}
peerScheme := schemeFromTLSInfo(mcfg.peerTLS)
clientScheme := schemeFromTLSInfo(mcfg.clientTLS)
pln := newLocalListener(t)
m.PeerListeners = []net.Listener{pln}
m.PeerURLs, err = types.NewURLs([]string{peerScheme + "://" + pln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.PeerTLSInfo = mcfg.peerTLS
cln := newLocalListener(t)
m.ClientListeners = []net.Listener{cln}
m.ClientURLs, err = types.NewURLs([]string{clientScheme + "://" + cln.Addr().String()})
if err != nil {
t.Fatal(err)
}
m.ClientTLSInfo = mcfg.clientTLS
m.Name = mcfg.name
m.DataDir, err = ioutil.TempDir(os.TempDir(), "etcd")
if err != nil {
t.Fatal(err)
}
clusterStr := fmt.Sprintf("%s=%s://%s", mcfg.name, peerScheme, pln.Addr().String())
m.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
t.Fatal(err)
}
m.InitialClusterToken = clusterName
m.NewCluster = true
m.BootstrapTimeout = 10 * time.Millisecond
if m.PeerTLSInfo != nil {
m.ServerConfig.PeerTLSInfo = *m.PeerTLSInfo
}
m.ElectionTicks = electionTicks
m.InitialElectionTickAdvance = true
m.TickMs = uint(tickDuration / time.Millisecond)
m.QuotaBackendBytes = mcfg.quotaBackendBytes
m.MaxTxnOps = mcfg.maxTxnOps
if m.MaxTxnOps == 0 {
m.MaxTxnOps = embed.DefaultMaxTxnOps
}
m.MaxRequestBytes = mcfg.maxRequestBytes
if m.MaxRequestBytes == 0 {
m.MaxRequestBytes = embed.DefaultMaxRequestBytes
}
m.AuthToken = "simple" // for the purpose of integration testing, simple token is enough
m.grpcServerOpts = []grpc.ServerOption{}
if mcfg.grpcKeepAliveMinTime > time.Duration(0) {
m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
MinTime: mcfg.grpcKeepAliveMinTime,
PermitWithoutStream: false,
}))
}
if mcfg.grpcKeepAliveInterval > time.Duration(0) &&
mcfg.grpcKeepAliveTimeout > time.Duration(0) {
m.grpcServerOpts = append(m.grpcServerOpts, grpc.KeepaliveParams(keepalive.ServerParameters{
Time: mcfg.grpcKeepAliveInterval,
Timeout: mcfg.grpcKeepAliveTimeout,
}))
}
m.clientMaxCallSendMsgSize = mcfg.clientMaxCallSendMsgSize
m.clientMaxCallRecvMsgSize = mcfg.clientMaxCallRecvMsgSize
m.useIP = mcfg.useIP
m.InitialCorruptCheck = true
return m
}
// listenGRPC starts a grpc server over a unix domain socket on the member
func (m *member) listenGRPC() error {
// prefix with localhost so cert has right domain
m.grpcAddr = "localhost:" + m.Name
	if m.useIP { // for IP-only TLS certs
m.grpcAddr = "127.0.0.1:" + m.Name
}
l, err := transport.NewUnixListener(m.grpcAddr)
if err != nil {
return fmt.Errorf("listen failed on grpc socket %s (%v)", m.grpcAddr, err)
}
m.grpcBridge, err = newBridge(m.grpcAddr)
if err != nil {
l.Close()
return err
}
m.grpcAddr = schemeFromTLSInfo(m.ClientTLSInfo) + "://" + m.grpcBridge.inaddr
m.grpcListener = l
return nil
}
func (m *member) ElectionTimeout() time.Duration {
return time.Duration(m.s.Cfg.ElectionTicks*int(m.s.Cfg.TickMs)) * time.Millisecond
}
func (m *member) ID() types.ID { return m.s.ID() }
func (m *member) DropConnections() { m.grpcBridge.Reset() }
func (m *member) PauseConnections() { m.grpcBridge.Pause() }
func (m *member) UnpauseConnections() { m.grpcBridge.Unpause() }
func (m *member) Blackhole() { m.grpcBridge.Blackhole() }
func (m *member) Unblackhole() { m.grpcBridge.Unblackhole() }
// NewClientV3 creates a new grpc client connection to the member
func NewClientV3(m *member) (*clientv3.Client, error) {
if m.grpcAddr == "" {
return nil, fmt.Errorf("member not configured for grpc")
}
cfg := clientv3.Config{
Endpoints: []string{m.grpcAddr},
DialTimeout: 5 * time.Second,
MaxCallSendMsgSize: m.clientMaxCallSendMsgSize,
MaxCallRecvMsgSize: m.clientMaxCallRecvMsgSize,
}
if m.ClientTLSInfo != nil {
tls, err := m.ClientTLSInfo.ClientConfig()
if err != nil {
return nil, err
}
cfg.TLS = tls
}
return newClientV3(cfg)
}
// Clone returns a member with the same server configuration. The returned
// member does not set PeerListeners or ClientListeners.
func (m *member) Clone(t *testing.T) *member {
mm := &member{}
mm.ServerConfig = m.ServerConfig
var err error
clientURLStrs := m.ClientURLs.StringSlice()
mm.ClientURLs, err = types.NewURLs(clientURLStrs)
if err != nil {
// this should never fail
panic(err)
}
peerURLStrs := m.PeerURLs.StringSlice()
mm.PeerURLs, err = types.NewURLs(peerURLStrs)
if err != nil {
// this should never fail
panic(err)
}
clusterStr := m.InitialPeerURLsMap.String()
mm.InitialPeerURLsMap, err = types.NewURLsMap(clusterStr)
if err != nil {
// this should never fail
panic(err)
}
mm.InitialClusterToken = m.InitialClusterToken
mm.ElectionTicks = m.ElectionTicks
mm.PeerTLSInfo = m.PeerTLSInfo
mm.ClientTLSInfo = m.ClientTLSInfo
return mm
}
// Launch starts a member based on ServerConfig, PeerListeners
// and ClientListeners.
func (m *member) Launch() error {
plog.Printf("launching %s (%s)", m.Name, m.grpcAddr)
var err error
if m.s, err = etcdserver.NewServer(m.ServerConfig); err != nil {
return fmt.Errorf("failed to initialize the etcd server: %v", err)
}
m.s.SyncTicker = time.NewTicker(500 * time.Millisecond)
m.s.Start()
var peerTLScfg *tls.Config
if m.PeerTLSInfo != nil && !m.PeerTLSInfo.Empty() {
if peerTLScfg, err = m.PeerTLSInfo.ServerConfig(); err != nil {
return err
}
}
if m.grpcListener != nil {
var (
tlscfg *tls.Config
)
if m.ClientTLSInfo != nil && !m.ClientTLSInfo.Empty() {
tlscfg, err = m.ClientTLSInfo.ServerConfig()
if err != nil {
return err
}
}
m.grpcServer = v3rpc.Server(m.s, tlscfg, m.grpcServerOpts...)
m.grpcServerPeer = v3rpc.Server(m.s, peerTLScfg)
m.serverClient = v3client.New(m.s)
lockpb.RegisterLockServer(m.grpcServer, v3lock.NewLockServer(m.serverClient))
epb.RegisterElectionServer(m.grpcServer, v3election.NewElectionServer(m.serverClient))
go m.grpcServer.Serve(m.grpcListener)
}
m.raftHandler = &testutil.PauseableHandler{Next: etcdhttp.NewPeerHandler(m.s)}
h := (http.Handler)(m.raftHandler)
if m.grpcListener != nil {
h = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
m.grpcServerPeer.ServeHTTP(w, r)
} else {
m.raftHandler.ServeHTTP(w, r)
}
})
}
for _, ln := range m.PeerListeners {
cm := cmux.New(ln)
// don't hang on matcher after closing listener
cm.SetReadTimeout(time.Second)
if m.grpcServer != nil {
grpcl := cm.Match(cmux.HTTP2())
go m.grpcServerPeer.Serve(grpcl)
}
// serve http1/http2 rafthttp/grpc
ll := cm.Match(cmux.Any())
if peerTLScfg != nil {
if ll, err = transport.NewTLSListener(ll, m.PeerTLSInfo); err != nil {
return err
}
}
hs := &httptest.Server{
Listener: ll,
Config: &http.Server{Handler: h, TLSConfig: peerTLScfg},
TLS: peerTLScfg,
}
hs.Start()
donec := make(chan struct{})
go func() {
defer close(donec)
cm.Serve()
}()
closer := func() {
ll.Close()
hs.CloseClientConnections()
hs.Close()
<-donec
}
m.serverClosers = append(m.serverClosers, closer)
}
for _, ln := range m.ClientListeners {
hs := &httptest.Server{
Listener: ln,
Config: &http.Server{Handler: v2http.NewClientHandler(m.s, m.ServerConfig.ReqTimeout())},
}
if m.ClientTLSInfo == nil {
hs.Start()
} else {
info := m.ClientTLSInfo
hs.TLS, err = info.ServerConfig()
if err != nil {
return err
}
// baseConfig is called on initial TLS handshake start.
//
// Previously,
// 1. Server has non-empty (*tls.Config).Certificates on client hello
// 2. Server calls (*tls.Config).GetCertificate iff:
// - Server's (*tls.Config).Certificates is not empty, or
// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
//
// When (*tls.Config).Certificates is always populated on the initial handshake,
// the client is expected to provide a valid matching SNI to pass TLS
// verification, thus triggering the server's (*tls.Config).GetCertificate to
// reload TLS assets. However, a cert whose SAN field contains only IP
// addresses and no domain names leaves (*tls.ClientHelloInfo).ServerName
// empty, so it could never trigger a TLS reload on the initial handshake; the
// first certificate object was used and never updated.
//
// Now, (*tls.Config).Certificates is created empty on initial TLS client
// handshake, in order to trigger (*tls.Config).GetCertificate and populate
// rest of the certificates on every new TLS connection, even when client
// SNI is empty (e.g. cert only includes IPs).
//
// This introduces another problem with "httptest.Server":
// when server initial certificates are empty, certificates
// are overwritten by Go's internal test certs, which have
// different SAN fields (e.g. example.com). To work around,
// re-overwrite (*tls.Config).Certificates before starting
// test server.
tlsCert, err := tlsutil.NewCert(info.CertFile, info.KeyFile, nil)
if err != nil {
return err
}
hs.TLS.Certificates = []tls.Certificate{*tlsCert}
hs.StartTLS()
}
closer := func() {
ln.Close()
hs.CloseClientConnections()
hs.Close()
}
m.serverClosers = append(m.serverClosers, closer)
}
plog.Printf("launched %s (%s)", m.Name, m.grpcAddr)
return nil
}
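// The comment in Launch's client-listener branch explains why the TLS config
// leaves (*tls.Config).Certificates empty and relies on GetCertificate. The
// function below is a minimal, hypothetical sketch of that pattern; it is not
// used by this harness, and the cert/key paths are caller-supplied placeholders.
func reloadableServerTLSConfig(certFile, keyFile string) *tls.Config {
	return &tls.Config{
		// Certificates is intentionally left empty: with no static certs,
		// crypto/tls falls back to GetCertificate on every handshake, even
		// when the client sends no SNI (e.g. a cert whose SAN holds only IPs).
		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			// Reload the certificate from disk so rotated assets are picked up.
			cert, err := tls.LoadX509KeyPair(certFile, keyFile)
			if err != nil {
				return nil, err
			}
			return &cert, nil
		},
	}
}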
func (m *member) WaitOK(t *testing.T) {
cc := MustNewHTTPClient(t, []string{m.URL()}, m.ClientTLSInfo)
kapi := client.NewKeysAPI(cc)
for {
ctx, cancel := context.WithTimeout(context.Background(), requestTimeout)
_, err := kapi.Get(ctx, "/", nil)
if err != nil {
time.Sleep(tickDuration)
continue
}
cancel()
break
}
for m.s.Leader() == 0 {
time.Sleep(tickDuration)
}
}
func (m *member) URL() string { return m.ClientURLs[0].String() }
func (m *member) Pause() {
m.raftHandler.Pause()
m.s.PauseSending()
}
func (m *member) Resume() {
m.raftHandler.Resume()
m.s.ResumeSending()
}
// Close stops the member's etcdserver and closes its connections
func (m *member) Close() {
if m.grpcBridge != nil {
m.grpcBridge.Close()
m.grpcBridge = nil
}
if m.serverClient != nil {
m.serverClient.Close()
m.serverClient = nil
}
if m.grpcServer != nil {
m.grpcServer.Stop()
m.grpcServer.GracefulStop()
m.grpcServer = nil
m.grpcServerPeer.Stop()
m.grpcServerPeer.GracefulStop()
m.grpcServerPeer = nil
}
m.s.HardStop()
for _, f := range m.serverClosers {
f()
}
}
// Stop stops the member, but the data dir of the member is preserved.
func (m *member) Stop(t *testing.T) {
plog.Printf("stopping %s (%s)", m.Name, m.grpcAddr)
m.Close()
m.serverClosers = nil
plog.Printf("stopped %s (%s)", m.Name, m.grpcAddr)
}
// checkLeaderTransition waits for leader transition, returning the new leader ID.
func checkLeaderTransition(t *testing.T, m *member, oldLead uint64) uint64 {
interval := time.Duration(m.s.Cfg.TickMs) * time.Millisecond
for m.s.Lead() == 0 || (m.s.Lead() == oldLead) {
time.Sleep(interval)
}
return m.s.Lead()
}
// StopNotify unblocks when a member stop completes
func (m *member) StopNotify() <-chan struct{} {
return m.s.StopNotify()
}
// Restart starts the member using the preserved data dir.
func (m *member) Restart(t *testing.T) error {
plog.Printf("restarting %s (%s)", m.Name, m.grpcAddr)
newPeerListeners := make([]net.Listener, 0)
for _, ln := range m.PeerListeners {
newPeerListeners = append(newPeerListeners, NewListenerWithAddr(t, ln.Addr().String()))
}
m.PeerListeners = newPeerListeners
newClientListeners := make([]net.Listener, 0)
for _, ln := range m.ClientListeners {
newClientListeners = append(newClientListeners, NewListenerWithAddr(t, ln.Addr().String()))
}
m.ClientListeners = newClientListeners
if m.grpcListener != nil {
if err := m.listenGRPC(); err != nil {
t.Fatal(err)
}
}
err := m.Launch()
plog.Printf("restarted %s (%s)", m.Name, m.grpcAddr)
return err
}
// Terminate stops the member and removes the data dir.
func (m *member) Terminate(t *testing.T) {
plog.Printf("terminating %s (%s)", m.Name, m.grpcAddr)
m.Close()
if !m.keepDataDirTerminate {
if err := os.RemoveAll(m.ServerConfig.DataDir); err != nil {
t.Fatal(err)
}
}
plog.Printf("terminated %s (%s)", m.Name, m.grpcAddr)
}
// Metric gets the metric value for a member
func (m *member) Metric(metricName string) (string, error) {
cfgtls := transport.TLSInfo{}
tr, err := transport.NewTimeoutTransport(cfgtls, time.Second, time.Second, time.Second)
if err != nil {
return "", err
}
cli := &http.Client{Transport: tr}
resp, err := cli.Get(m.ClientURLs[0].String() + "/metrics")
if err != nil {
return "", err
}
defer resp.Body.Close()
b, rerr := ioutil.ReadAll(resp.Body)
if rerr != nil {
return "", rerr
}
lines := strings.Split(string(b), "\n")
for _, l := range lines {
if strings.HasPrefix(l, metricName) {
return strings.Split(l, " ")[1], nil
}
}
return "", nil
}
// InjectPartition drops connections from m to others, and vice versa.
func (m *member) InjectPartition(t *testing.T, others ...*member) {
for _, other := range others {
m.s.CutPeer(other.s.ID())
other.s.CutPeer(m.s.ID())
}
}
// RecoverPartition recovers connections from m to others, and vice versa.
func (m *member) RecoverPartition(t *testing.T, others ...*member) {
for _, other := range others {
m.s.MendPeer(other.s.ID())
other.s.MendPeer(m.s.ID())
}
}
func MustNewHTTPClient(t *testing.T, eps []string, tls *transport.TLSInfo) client.Client {
cfgtls := transport.TLSInfo{}
if tls != nil {
cfgtls = *tls
}
cfg := client.Config{Transport: mustNewTransport(t, cfgtls), Endpoints: eps}
c, err := client.New(cfg)
if err != nil {
t.Fatal(err)
}
return c
}
func mustNewTransport(t *testing.T, tlsInfo transport.TLSInfo) *http.Transport {
// The tick interval in integration tests is short, so a 1s dial timeout works well.
tr, err := transport.NewTimeoutTransport(tlsInfo, time.Second, rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout)
if err != nil {
t.Fatal(err)
}
return tr
}
type SortableMemberSliceByPeerURLs []client.Member
func (p SortableMemberSliceByPeerURLs) Len() int { return len(p) }
func (p SortableMemberSliceByPeerURLs) Less(i, j int) bool {
return p[i].PeerURLs[0] < p[j].PeerURLs[0]
}
func (p SortableMemberSliceByPeerURLs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
type ClusterV3 struct {
*cluster
mu sync.Mutex
clients []*clientv3.Client
}
// NewClusterV3 returns a launched cluster with a grpc client connection
// for each cluster member.
func NewClusterV3(t *testing.T, cfg *ClusterConfig) *ClusterV3 {
cfg.UseGRPC = true
if os.Getenv("CLIENT_DEBUG") != "" {
clientv3.SetLogger(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4))
}
clus := &ClusterV3{
cluster: NewClusterByConfig(t, cfg),
}
clus.Launch(t)
if !cfg.SkipCreatingClient {
for _, m := range clus.Members {
client, err := NewClientV3(m)
if err != nil {
t.Fatalf("cannot create client: %v", err)
}
clus.clients = append(clus.clients, client)
}
}
return clus
}
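// Hedged sketch of typical NewClusterV3 usage in a test. The ClusterConfig
// field and the client call shown here are assumptions for illustration, not
// taken from the code above.
func exampleNewClusterV3Usage(t *testing.T) {
	clus := NewClusterV3(t, &ClusterConfig{Size: 3})
	defer clus.Terminate(t)
	if _, err := clus.RandClient().Put(context.TODO(), "foo", "bar"); err != nil {
		t.Fatal(err)
	}
}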
func (c *ClusterV3) TakeClient(idx int) {
c.mu.Lock()
c.clients[idx] = nil
c.mu.Unlock()
}
func (c *ClusterV3) Terminate(t *testing.T) {
c.mu.Lock()
for _, client := range c.clients {
if client == nil {
continue
}
if err := client.Close(); err != nil {
t.Error(err)
}
}
c.mu.Unlock()
c.cluster.Terminate(t)
}
func (c *ClusterV3) RandClient() *clientv3.Client {
return c.clients[rand.Intn(len(c.clients))]
}
func (c *ClusterV3) Client(i int) *clientv3.Client {
return c.clients[i]
}
type grpcAPI struct {
// Cluster is the cluster API for the client's connection.
Cluster pb.ClusterClient
// KV is the keyvalue API for the client's connection.
KV pb.KVClient
// Lease is the lease API for the client's connection.
Lease pb.LeaseClient
// Watch is the watch API for the client's connection.
Watch pb.WatchClient
// Maintenance is the maintenance API for the client's connection.
Maintenance pb.MaintenanceClient
// Auth is the authentication API for the client's connection.
Auth pb.AuthClient
// Lock is the lock API for the client's connection.
Lock lockpb.LockClient
// Election is the election API for the client's connection.
Election epb.ElectionClient
}
|
[
"\"CLIENT_DEBUG\""
] |
[] |
[
"CLIENT_DEBUG"
] |
[]
|
["CLIENT_DEBUG"]
|
go
| 1 | 0 | |
haiconf/osutils/systemcommand_test.go
|
// Copyright 2013 Jérôme Renard. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package osutils
import (
. "launchpad.net/gocheck"
"os"
)
type SystemCommandTestSuite struct{}
var _ = Suite(&SystemCommandTestSuite{})
func (s *SystemCommandTestSuite) TestBuildEnvVars(c *C) {
sc := &SystemCommand{
EnvVars: map[string]string{
"FOO": "bar",
"BAR": "baz",
},
}
obtained := sc.buildEnvVars()
expected := []string{
"FOO=bar",
"BAR=baz",
"PATH=" + os.Getenv("PATH"),
}
c.Assert(obtained, DeepEquals, expected)
}
func (s *SystemCommandTestSuite) TestBuildCmd_ShellExpansionDisabled(c *C) {
path := "/foo/bar"
args := []string{"a", "b"}
sc := &SystemCommand{
Path: path,
Args: args,
EnableShellExpansion: false,
}
cmd := sc.buildCmd()
c.Assert(cmd.Path, Equals, path)
c.Assert(cmd.Args, DeepEquals, args)
}
func (s *SystemCommandTestSuite) TestBuildCmd_ShellExpansionEnabled(c *C) {
path := "/foo/bar"
args := []string{"a", "b"}
sc := &SystemCommand{
Path: path,
Args: args,
EnableShellExpansion: true,
}
cmd := sc.buildCmd()
c.Assert(cmd.Path, Equals, "/bin/sh")
c.Assert(cmd.Args, DeepEquals, []string{"sh", "-c", "/foo/bar a b"})
}
func (s *SystemCommandTestSuite) TestRun_CommandFailedWrongPath(c *C) {
sc := &SystemCommand{
Path: "/path/to/inexistant/command",
Args: []string{"a", "b"},
EnableShellExpansion: false,
}
err := sc.Run()
c.Assert(err, NotNil)
expected := `Error with command "/path/to/inexistant/command a b". Error message was "fork/exec /path/to/inexistant/command: no such file or directory".`
c.Assert(err.Error(), Equals, expected)
}
func (s *SystemCommandTestSuite) TestRun_CommandFailed(c *C) {
sc := &SystemCommand{
Path: "/usr/bin/tr",
Args: []string{"--xxx"},
EnableShellExpansion: false,
}
output := sc.Run()
s.assertOutputNotNil(output, c)
}
func (s *SystemCommandTestSuite) TestRun_CommandSuccess(c *C) {
sc := &SystemCommand{
Path: "/bin/hostname",
EnableShellExpansion: false,
}
output := sc.Run()
s.assertOutputIsNil(output, c)
}
func (s *SystemCommandTestSuite) assertOutputNotNil(o SystemCommandOutput, c *C) {
c.Assert(o.HasError(), Equals, true)
c.Assert(len(o.ExitMessage), Not(Equals), 0)
c.Assert(len(o.Stdout), Equals, 0)
c.Assert(len(o.Stderr), Not(Equals), 0)
}
func (s *SystemCommandTestSuite) assertOutputIsNil(o SystemCommandOutput, c *C) {
c.Assert(o.HasError(), Equals, false)
c.Assert(len(o.ExitMessage), Equals, 0)
c.Assert(len(o.Stdout), Not(Equals), 0)
c.Assert(len(o.Stderr), Equals, 0)
}
|
[
"\"PATH\""
] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
go
| 1 | 0 | |
utils/utils.go
|
package utils
import (
"bytes"
"crypto/rand"
"crypto/sha1"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/docker/docker/dockerversion"
)
type KeyValuePair struct {
Key string
Value string
}
// A common interface to access the Fatal method of
// both testing.B and testing.T.
type Fataler interface {
Fatal(args ...interface{})
}
// Go is a basic promise implementation: it wraps a function call in a goroutine
// and returns a channel which will later receive the function's return value.
func Go(f func() error) chan error {
ch := make(chan error, 1)
go func() {
ch <- f()
}()
return ch
}
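// Hedged usage sketch for Go: run work in the background and collect its
// error from the returned channel. The sleep stands in for real work.
func exampleGoPromise() error {
	errCh := Go(func() error {
		time.Sleep(10 * time.Millisecond) // placeholder for real work
		return nil
	})
	// other work could happen here while the goroutine runs
	return <-errCh
}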
// Download requests the given URL and returns the HTTP response, failing on status codes >= 400.
func Download(url string) (resp *http.Response, err error) {
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
}
return resp, nil
}
func logf(level string, format string, a ...interface{}) {
// Retrieve the stack infos
_, file, line, ok := runtime.Caller(2)
if !ok {
file = "<unknown>"
line = -1
} else {
file = file[strings.LastIndex(file, "/")+1:]
}
fmt.Fprintf(os.Stderr, fmt.Sprintf("[%s] %s:%d %s\n", level, file, line, format), a...)
}
// Debugf displays the message if the debug flag is set; it does nothing otherwise.
// If Docker is in daemon mode, the debug info is also sent on the socket.
func Debugf(format string, a ...interface{}) {
if os.Getenv("DEBUG") != "" {
logf("debug", format, a...)
}
}
func Errorf(format string, a ...interface{}) {
logf("error", format, a...)
}
func Trunc(s string, maxlen int) string {
if len(s) <= maxlen {
return s
}
return s[:maxlen]
}
// Figure out the absolute path of our own binary (if it's still around).
func SelfPath() string {
path, err := exec.LookPath(os.Args[0])
if err != nil {
if os.IsNotExist(err) {
return ""
}
if execErr, ok := err.(*exec.Error); ok && os.IsNotExist(execErr.Err) {
return ""
}
panic(err)
}
path, err = filepath.Abs(path)
if err != nil {
if os.IsNotExist(err) {
return ""
}
panic(err)
}
return path
}
func dockerInitSha1(target string) string {
f, err := os.Open(target)
if err != nil {
return ""
}
defer f.Close()
h := sha1.New()
_, err = io.Copy(h, f)
if err != nil {
return ""
}
return hex.EncodeToString(h.Sum(nil))
}
func isValidDockerInitPath(target string, selfPath string) bool { // target and selfPath should be absolute (InitPath and SelfPath already do this)
if target == "" {
return false
}
if dockerversion.IAMSTATIC {
if selfPath == "" {
return false
}
if target == selfPath {
return true
}
targetFileInfo, err := os.Lstat(target)
if err != nil {
return false
}
selfPathFileInfo, err := os.Lstat(selfPath)
if err != nil {
return false
}
return os.SameFile(targetFileInfo, selfPathFileInfo)
}
return dockerversion.INITSHA1 != "" && dockerInitSha1(target) == dockerversion.INITSHA1
}
// Figure out the path of our dockerinit (which may be SelfPath())
func DockerInitPath(localCopy string) string {
selfPath := SelfPath()
if isValidDockerInitPath(selfPath, selfPath) {
// if we're valid, don't bother checking anything else
return selfPath
}
var possibleInits = []string{
localCopy,
dockerversion.INITPATH,
filepath.Join(filepath.Dir(selfPath), "dockerinit"),
// FHS 3.0 Draft: "/usr/libexec includes internal binaries that are not intended to be executed directly by users or shell scripts. Applications may use a single subdirectory under /usr/libexec."
// http://www.linuxbase.org/betaspecs/fhs/fhs.html#usrlibexec
"/usr/libexec/docker/dockerinit",
"/usr/local/libexec/docker/dockerinit",
// FHS 2.3: "/usr/lib includes object files, libraries, and internal binaries that are not intended to be executed directly by users or shell scripts."
// http://refspecs.linuxfoundation.org/FHS_2.3/fhs-2.3.html#USRLIBLIBRARIESFORPROGRAMMINGANDPA
"/usr/lib/docker/dockerinit",
"/usr/local/lib/docker/dockerinit",
}
for _, dockerInit := range possibleInits {
if dockerInit == "" {
continue
}
path, err := exec.LookPath(dockerInit)
if err == nil {
path, err = filepath.Abs(path)
if err != nil {
// LookPath already validated that this file exists and is executable (following symlinks), so how could Abs fail?
panic(err)
}
if isValidDockerInitPath(path, selfPath) {
return path
}
}
}
return ""
}
type NopWriter struct{}
func (*NopWriter) Write(buf []byte) (int, error) {
return len(buf), nil
}
type nopWriteCloser struct {
io.Writer
}
func (w *nopWriteCloser) Close() error { return nil }
func NopWriteCloser(w io.Writer) io.WriteCloser {
return &nopWriteCloser{w}
}
type bufReader struct {
sync.Mutex
buf *bytes.Buffer
reader io.Reader
err error
wait sync.Cond
}
func NewBufReader(r io.Reader) *bufReader {
reader := &bufReader{
buf: &bytes.Buffer{},
reader: r,
}
reader.wait.L = &reader.Mutex
go reader.drain()
return reader
}
func (r *bufReader) drain() {
buf := make([]byte, 1024)
for {
n, err := r.reader.Read(buf)
r.Lock()
if err != nil {
r.err = err
} else {
r.buf.Write(buf[0:n])
}
r.wait.Signal()
r.Unlock()
if err != nil {
break
}
}
}
func (r *bufReader) Read(p []byte) (n int, err error) {
r.Lock()
defer r.Unlock()
for {
n, err = r.buf.Read(p)
if n > 0 {
return n, err
}
if r.err != nil {
return 0, r.err
}
r.wait.Wait()
}
}
func (r *bufReader) Close() error {
closer, ok := r.reader.(io.ReadCloser)
if !ok {
return nil
}
return closer.Close()
}
type JSONLog struct {
Log string `json:"log,omitempty"`
Stream string `json:"stream,omitempty"`
Created time.Time `json:"time"`
}
func (jl *JSONLog) Format(format string) (string, error) {
if format == "" {
return jl.Log, nil
}
if format == "json" {
m, err := json.Marshal(jl)
return string(m), err
}
return fmt.Sprintf("[%s] %s", jl.Created.Format(format), jl.Log), nil
}
func WriteLog(src io.Reader, dst io.WriteCloser, format string) error {
dec := json.NewDecoder(src)
for {
l := &JSONLog{}
if err := dec.Decode(l); err == io.EOF {
return nil
} else if err != nil {
Errorf("Error streaming logs: %s", err)
return err
}
line, err := l.Format(format)
if err != nil {
return err
}
fmt.Fprintf(dst, "%s", line)
}
}
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
shortLen := 12
if len(id) < shortLen {
shortLen = len(id)
}
return id[:shortLen]
}
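// Hedged sketch: TruncateID keeps the first 12 characters of a full-length ID.
// The ID below is an illustrative value, not real data.
func exampleTruncateID() {
	full := "4e38e38c8ce0b8d6041a6b7b6a2e9c4d4e38e38c8ce0b8d6041a6b7b6a2e9c4d"
	fmt.Println(TruncateID(full)) // prints "4e38e38c8ce0"
}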
// GenerateRandomID returns a unique id
func GenerateRandomID() string {
for {
id := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, id); err != nil {
panic(err) // This shouldn't happen
}
value := hex.EncodeToString(id)
// if we can parse the truncated form as an int without error, then the
// value is all numeric and causes issues when used as a hostname. ref #3869
if _, err := strconv.ParseInt(TruncateID(value), 10, 64); err == nil {
continue
}
return value
}
}
func ValidateID(id string) error {
if id == "" {
return fmt.Errorf("Id can't be empty")
}
if strings.Contains(id, ":") {
return fmt.Errorf("Invalid character in id: ':'")
}
return nil
}
// Code copied from io.Copy() and modified to handle escape sequences
func CopyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
buf := make([]byte, 32*1024)
for {
nr, er := src.Read(buf)
if nr > 0 {
// ---- Docker addition
// char 16 is C-p
if nr == 1 && buf[0] == 16 {
nr, er = src.Read(buf)
// char 17 is C-q
if nr == 1 && buf[0] == 17 {
if err := src.Close(); err != nil {
return 0, err
}
return 0, nil
}
}
// ---- End of docker
nw, ew := dst.Write(buf[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er == io.EOF {
break
}
if er != nil {
err = er
break
}
}
return written, err
}
func HashData(src io.Reader) (string, error) {
h := sha256.New()
if _, err := io.Copy(h, src); err != nil {
return "", err
}
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
// FIXME: this is deprecated by CopyWithTar in archive.go
func CopyDirectory(source, dest string) error {
if output, err := exec.Command("cp", "-ra", source, dest).CombinedOutput(); err != nil {
return fmt.Errorf("Error copy: %s (%s)", err, output)
}
return nil
}
type NopFlusher struct{}
func (f *NopFlusher) Flush() {}
type WriteFlusher struct {
sync.Mutex
w io.Writer
flusher http.Flusher
}
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
wf.Lock()
defer wf.Unlock()
n, err = wf.w.Write(b)
wf.flusher.Flush()
return n, err
}
// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
wf.Lock()
defer wf.Unlock()
wf.flusher.Flush()
}
func NewWriteFlusher(w io.Writer) *WriteFlusher {
var flusher http.Flusher
if f, ok := w.(http.Flusher); ok {
flusher = f
} else {
flusher = &NopFlusher{}
}
return &WriteFlusher{w: w, flusher: flusher}
}
func NewHTTPRequestError(msg string, res *http.Response) error {
return &JSONError{
Message: msg,
Code: res.StatusCode,
}
}
func IsURL(str string) bool {
return strings.HasPrefix(str, "http://") || strings.HasPrefix(str, "https://")
}
func IsGIT(str string) bool {
return strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "github.com/") || strings.HasPrefix(str, "[email protected]:") || (strings.HasSuffix(str, ".git") && IsURL(str))
}
// CheckLocalDns looks into the given resolv.conf content; it returns true if the
// first nameserver found is local, or if there is no nameserver at all.
func CheckLocalDns(resolvConf []byte) bool {
for _, line := range GetLines(resolvConf, []byte("#")) {
if !bytes.Contains(line, []byte("nameserver")) {
continue
}
for _, ip := range [][]byte{
[]byte("127.0.0.1"),
[]byte("127.0.1.1"),
} {
if bytes.Contains(line, ip) {
return true
}
}
return false
}
return true
}
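// Hedged sketch of CheckLocalDns on sample resolv.conf content; the byte
// literals are illustrative only.
func exampleCheckLocalDns() {
	local := []byte("# generated by NetworkManager\nnameserver 127.0.0.1\n")
	remote := []byte("nameserver 8.8.8.8\n")
	fmt.Println(CheckLocalDns(local))  // true: the nameserver is local
	fmt.Println(CheckLocalDns(remote)) // false: the nameserver is remote
}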
// GetLines parses input into lines and strips away comments.
func GetLines(input []byte, commentMarker []byte) [][]byte {
lines := bytes.Split(input, []byte("\n"))
var output [][]byte
for _, currentLine := range lines {
var commentIndex = bytes.Index(currentLine, commentMarker)
if commentIndex == -1 {
output = append(output, currentLine)
} else {
output = append(output, currentLine[:commentIndex])
}
}
return output
}
// A StatusError reports an unsuccessful exit by a command.
type StatusError struct {
Status string
StatusCode int
}
func (e *StatusError) Error() string {
return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
}
func quote(word string, buf *bytes.Buffer) {
// Bail out early for "simple" strings
if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! \t\n") {
buf.WriteString(word)
return
}
buf.WriteString("'")
for i := 0; i < len(word); i++ {
b := word[i]
if b == '\'' {
// Replace a literal ' with a close ', a \', and an open '
buf.WriteString("'\\''")
} else {
buf.WriteByte(b)
}
}
buf.WriteString("'")
}
// Take a list of strings and escape them so they will be handled correctly
// when passed as arguments to a program via a shell
func ShellQuoteArguments(args []string) string {
var buf bytes.Buffer
for i, arg := range args {
if i != 0 {
buf.WriteByte(' ')
}
quote(arg, &buf)
}
return buf.String()
}
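// Hedged sketch: arguments with spaces or single quotes survive a round trip
// through a shell once quoted by ShellQuoteArguments.
func exampleShellQuoteArguments() {
	args := []string{"echo", "hello world", "it's"}
	fmt.Println(ShellQuoteArguments(args)) // echo 'hello world' 'it'\''s'
}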
var globalTestID string
// TestDirectory creates a new temporary directory and returns its path.
// The contents of the directory at path `templateDir` are copied into the
// new directory.
func TestDirectory(templateDir string) (dir string, err error) {
if globalTestID == "" {
globalTestID = RandomString()[:4]
}
prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2))
if prefix == "" {
prefix = "docker-test-"
}
dir, err = ioutil.TempDir("", prefix)
if err = os.Remove(dir); err != nil {
return
}
if templateDir != "" {
if err = CopyDirectory(templateDir, dir); err != nil {
return
}
}
return
}
// GetCallerName introspects the call stack and returns the name of the
// function `depth` levels down in the stack.
func GetCallerName(depth int) string {
// Use the caller function name as a prefix.
// This helps trace temp directories back to their test.
pc, _, _, _ := runtime.Caller(depth + 1)
callerLongName := runtime.FuncForPC(pc).Name()
parts := strings.Split(callerLongName, ".")
callerShortName := parts[len(parts)-1]
return callerShortName
}
func CopyFile(src, dst string) (int64, error) {
if src == dst {
return 0, nil
}
sf, err := os.Open(src)
if err != nil {
return 0, err
}
defer sf.Close()
if err := os.Remove(dst); err != nil && !os.IsNotExist(err) {
return 0, err
}
df, err := os.Create(dst)
if err != nil {
return 0, err
}
defer df.Close()
return io.Copy(df, sf)
}
type readCloserWrapper struct {
io.Reader
closer func() error
}
func (r *readCloserWrapper) Close() error {
return r.closer()
}
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
return &readCloserWrapper{
Reader: r,
closer: closer,
}
}
// ReplaceOrAppendEnvValues returns the defaults with each override either
// replacing the entry with the same env key or appended to the list
func ReplaceOrAppendEnvValues(defaults, overrides []string) []string {
cache := make(map[string]int, len(defaults))
for i, e := range defaults {
parts := strings.SplitN(e, "=", 2)
cache[parts[0]] = i
}
for _, value := range overrides {
parts := strings.SplitN(value, "=", 2)
if i, exists := cache[parts[0]]; exists {
defaults[i] = value
} else {
defaults = append(defaults, value)
}
}
return defaults
}
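// Hedged sketch: ReplaceOrAppendEnvValues replaces HOME and appends TERM.
func exampleReplaceOrAppendEnvValues() {
	defaults := []string{"PATH=/usr/bin", "HOME=/root"}
	overrides := []string{"HOME=/home/user", "TERM=xterm"}
	// result: [PATH=/usr/bin HOME=/home/user TERM=xterm]
	fmt.Println(ReplaceOrAppendEnvValues(defaults, overrides))
}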
// ReadSymlinkedDirectory returns the target directory of a symlink.
// An error is returned if the target of the symbolic link is not a directory.
func ReadSymlinkedDirectory(path string) (string, error) {
var realPath string
var err error
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
}
realPathInfo, err := os.Stat(realPath)
if err != nil {
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
}
if !realPathInfo.Mode().IsDir() {
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
}
return realPath, nil
}
// TreeSize walks a directory tree and returns its total size in bytes.
func TreeSize(dir string) (size int64, err error) {
data := make(map[uint64]struct{})
err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, e error) error {
// Ignore directory sizes
if fileInfo == nil {
return nil
}
s := fileInfo.Size()
if fileInfo.IsDir() || s == 0 {
return nil
}
// Check inode to handle hard links correctly
inode := fileInfo.Sys().(*syscall.Stat_t).Ino
// inode is not a uint64 on all platforms. Cast it to avoid issues.
if _, exists := data[uint64(inode)]; exists {
return nil
}
// inode is not a uint64 on all platforms. Cast it to avoid issues.
data[uint64(inode)] = struct{}{}
size += s
return nil
})
return
}
// ValidateContextDirectory checks that all the contents of the directory
// can be read, returning an error if some files can't be read.
// Symlinks which point to non-existing files don't trigger an error.
func ValidateContextDirectory(srcPath string, excludes []string) error {
var finalError error
filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error {
// skip this directory/file if it's not in the path, it won't get added to the context
relFilePath, err := filepath.Rel(srcPath, filePath)
if err != nil && os.IsPermission(err) {
return nil
}
skip, err := Matches(relFilePath, excludes)
if err != nil {
finalError = err
}
if skip {
if f.IsDir() {
return filepath.SkipDir
}
return nil
}
if _, err := os.Stat(filePath); err != nil && os.IsPermission(err) {
finalError = fmt.Errorf("can't stat '%s'", filePath)
return err
}
// skip checking if symlinks point to non-existing files, such symlinks can be useful
lstat, _ := os.Lstat(filePath)
if lstat.Mode()&os.ModeSymlink == os.ModeSymlink {
return err
}
if !f.IsDir() {
currentFile, err := os.Open(filePath)
if err != nil && os.IsPermission(err) {
finalError = fmt.Errorf("no permission to read from '%s'", filePath)
return err
} else {
currentFile.Close()
}
}
return nil
})
return finalError
}
func StringsContainsNoCase(slice []string, s string) bool {
for _, ss := range slice {
if strings.ToLower(s) == strings.ToLower(ss) {
return true
}
}
return false
}
// Matches returns true if relFilePath matches any of the patterns
func Matches(relFilePath string, patterns []string) (bool, error) {
for _, exclude := range patterns {
matched, err := filepath.Match(exclude, relFilePath)
if err != nil {
Errorf("Error matching: %s (pattern: %s)", relFilePath, exclude)
return false, err
}
if matched {
if filepath.Clean(relFilePath) == "." {
Errorf("Can't exclude whole path, excluding pattern: %s", exclude)
continue
}
Debugf("Skipping excluded path: %s", relFilePath)
return true, nil
}
}
return false, nil
}
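// Hedged sketch: Matches applies filepath.Match semantics per pattern, so a
// "*.tmp" exclude pattern skips "cache.tmp" in the build context.
func exampleMatches() {
	skip, err := Matches("cache.tmp", []string{"*.tmp", "build"})
	if err == nil {
		fmt.Println(skip) // true
	}
}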
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
parsers/OntologicalHierarchy/src/loadOH.py
|
import os
import argparse
import logging
import datetime
import time
from rdflib import Graph
from Common.utils import LoggingUtil, GetData
from Common.loader_interface import SourceDataLoader
from Common.kgxmodel import kgxnode, kgxedge
from Common.prefixes import HGNC
##############
# Class: Ontological-Hierarchy loader
#
# By: Phil Owen
# Date: 4/5/2021
# Desc: Class that loads/parses the Ontological-Hierarchy data.
##############
class OHLoader(SourceDataLoader):
def __init__(self, test_mode: bool = False):
"""
constructor
:param test_mode - sets the run into test mode
"""
# call the super
super(SourceDataLoader, self).__init__()
# set global variables
self.data_path: str = os.environ['DATA_SERVICES_STORAGE']
self.data_file: str = 'properties-redundant.zip'
self.test_mode: bool = test_mode
self.source_id: str = 'OntologicalHierarchy'
self.source_db: str = 'properties-redundant.ttl'
self.provenance_id: str = 'infores:ontological-hierarchy'
self.subclass_predicate = 'biolink:subclass_of'
# the final output lists of nodes and edges
self.final_node_list: list = []
self.final_edge_list: list = []
self.file_size = 500000
# create a logger
self.logger = LoggingUtil.init_logging("Data_services.Ontological-Hierarchy.OHLoader", level=logging.INFO, line_format='medium', log_file_path=os.environ['DATA_SERVICES_LOGS'])
def get_latest_source_version(self) -> str:
"""
gets the version of the data
:return:
"""
return datetime.datetime.now().strftime("%m/%d/%Y")
def get_data(self) -> bool:
"""
Gets the ontological-hierarchy data.
"""
# get a reference to the data gathering class
gd: GetData = GetData(self.logger.level)
byte_count: int = gd.pull_via_http(f'https://stars.renci.org/var/data_services/{self.data_file}',
self.data_path, False)
return byte_count > 0
def load(self, nodes_output_file_path: str, edges_output_file_path: str):
"""
Loads/parses the UberGraph data file to produce node/edge KGX files for importation into a graph database.
:param: nodes_output_file_path - path to node file
:param: edges_output_file_path - path to edge file
:return: None
"""
self.logger.info(f'OHLoader - Start of UberGraph Ontological hierarchy data processing.')
self.get_data()
# split the input file names
file_name = self.data_file
self.logger.info(f'Parsing OntologicalHierarchy data file: {file_name}. {self.file_size} records per file + remainder')
# parse the data
split_files, final_record_count, final_skipped_count, final_skipped_non_subclass = \
self.parse_data_file(file_name)
# remove all the intermediate files
for file in split_files:
os.remove(file)
# remove the data file
os.remove(os.path.join(self.data_path, file_name ))
self.write_to_file(nodes_output_file_path, edges_output_file_path)
self.logger.info(f'OntologicalHierarchy loader - Processing complete.')
# load up the metadata
load_metadata: dict = {
'num_source_lines': final_record_count,
'unusable_source_lines': final_skipped_count,
'non_subclass_source_lines': final_skipped_non_subclass
}
# return the metadata to the caller
return load_metadata
def parse_data_file(self, data_file_name: str) -> (list, int, int, int):
"""
Parses the data file for graph nodes/edges and writes them out to the KGX TSV files.
:param data_file_name: the name of the UberGraph file
:return: split_files: the temporary files created from the input file, plus the parsed record counts
"""
# init the record counters
record_counter: int = 0
skipped_record_counter: int = 0
skipped_non_subclass_record_counter: int = 0
# get a reference to the data handler object
gd: GetData = GetData(self.logger.level)
# init a list for the output data
triple: list = []
# split the file into pieces
split_files: list = gd.split_file(os.path.join(self.data_path, f'{data_file_name}'), self.data_path,
data_file_name.replace('.zip', '.ttl'), self.file_size)
# parse each file
# test mode
if self.test_mode:
# use the first few files
files_to_parse = split_files[0:2]
else:
files_to_parse = split_files
for file in files_to_parse:
self.logger.info(f'Working file: {file}')
# get a time stamp
tm_start = time.time()
# get the biolink json-ld data
g: Graph = gd.get_biolink_graph(file)
# get the triples
g_t = g.triples((None, None, None))
# for every triple in the input data
for t in g_t:
# increment the record counter
record_counter += 1
# clear before use
triple.clear()
# get the curie for each element in the triple
for n in t:
# init the value storage
val = None
try:
# get the value
qname = g.compute_qname(n)
# HGNC must be handled differently than the others
if qname[1].find('hgnc') > 0:
val = f"{HGNC}:" + qname[2]
# if string is all lower it is not a curie
elif not qname[2].islower():
# replace the underscores to create a curie
val = qname[2].replace('_', ':')
except Exception as e:
self.logger.warning(f'Exception parsing RDF {t}. {e}')
# did we get a valid value
if val is not None:
# add it to the group
triple.append(val)
# make sure we have all 3 entries
if len(triple) == 3:
# Filter only subclass edges
if triple[1] == 'subClassOf':
# create the nodes and edges
self.final_node_list.append(kgxnode(triple[0], name=triple[0]))
self.final_node_list.append(kgxnode(triple[2], name=triple[2]))
self.final_edge_list.append(kgxedge(subject_id=triple[0],
object_id=triple[2],
relation=self.subclass_predicate,
predicate=self.subclass_predicate))
else:
skipped_non_subclass_record_counter += 1
else:
skipped_record_counter += 1
self.logger.debug(
f'Loading complete for file {file.split(".")[2]} of {len(split_files)} in {round(time.time() - tm_start, 0)} seconds.')
# return the split file names so they can be removed if desired
return split_files, record_counter, skipped_record_counter, skipped_non_subclass_record_counter
if __name__ == '__main__':
# create a command line parser
ap = argparse.ArgumentParser(description='Load Ontological-Hierarchy data files and create KGX import files.')
ap.add_argument('-r', '--data_dir', required=True, help='The location of the Ontological-Hierarchy data file')
# parse the arguments
args = vars(ap.parse_args())
# this is the base directory for data files and the resultant KGX files.
data_dir: str = args['data_dir']
# get a reference to the processor
ldr = OHLoader()
# load the data files and create KGX output
ldr.load(data_dir + '/nodes.jsonl', data_dir + '/edges.jsonl')
|
[] |
[] |
[
"DATA_SERVICES_STORAGE",
"DATA_SERVICES_LOGS"
] |
[]
|
["DATA_SERVICES_STORAGE", "DATA_SERVICES_LOGS"]
|
python
| 2 | 0 | |
studio/local_worker.py
|
import os
import sys
import subprocess
import argparse
import json
import psutil
import time
import six
import signal
import pdb
from apscheduler.schedulers.background import BackgroundScheduler
from . import fs_tracker, model, logs
from .local_queue import LocalQueue
from .gpu_util import get_available_gpus, get_gpu_mapping, get_gpus_summary
from .artifact import Artifact
from .experiment import Experiment
from .util import sixdecode, str2duration, retry, LogReprinter, parse_verbosity
logs.getLogger('apscheduler.scheduler').setLevel(logs.ERROR)
class LocalExecutor(object):
"""Runs job while capturing environment and logs results.
"""
def __init__(self, queue, args):
self.config = args.config
if args.guest:
self.config['database']['guest'] = True
self.task_queue = queue
self.logger = logs.getLogger('LocalExecutor')
self.logger.setLevel(model.parse_verbosity(self.config.get('verbose')))
self.logger.debug("Config: ")
self.logger.debug(self.config)
def run(self, experiment):
if isinstance(experiment, six.string_types):
experiment = self.db.get_experiment(experiment)
elif not isinstance(experiment, Experiment):
raise ValueError("Unknown type of experiment: " +
str(type(experiment)))
self.logger.info("Experiment key: " + experiment.key)
with model.get_db_provider(self.config) as db:
db.start_experiment(experiment)
""" Override env variables with those inside the queued message
"""
env = dict(os.environ)
if 'env' in self.config.keys():
for k, v in six.iteritems(self.config['env']):
if v is not None:
env[str(k)] = str(v)
env['PYTHONUNBUFFERED'] = 'TRUE'
fs_tracker.setup_experiment(env, experiment, clean=False)
log_path = fs_tracker.get_artifact_cache('output', experiment.key)
# log_path = os.path.join(model_dir, self.config['log']['name'])
self.logger.debug('Child process environment:')
self.logger.debug(str(env))
sched = BackgroundScheduler()
sched.start()
with open(log_path, 'w') as output_file:
python = 'python'
if experiment.pythonver[0] == '3':
python = 'python3'
python = which(python)
cmd = [python, experiment.filename] + experiment.args
cwd = experiment.artifacts['workspace'].local_path
container_artifact = experiment.artifacts.get('_singularity')
if container_artifact:
container = container_artifact.get('local')
if not container:
container = container_artifact.get('qualified')
cwd = fs_tracker.get_artifact_cache(
'workspace', experiment.key)
for tag, art in six.iteritems(experiment.artifacts):
local_path = art.get('local')
if not art['mutable'] and os.path.exists(local_path):
os.symlink(
art['local'],
os.path.join(os.path.dirname(cwd), tag)
)
if experiment.filename is not None:
cmd = [
'singularity',
'exec',
container,
] + cmd
else:
cmd = ['singularity', 'run', container]
self.logger.info('Running cmd: {0} in {1}'.format(cmd, cwd))
p = subprocess.Popen(
cmd,
stdout=output_file,
stderr=subprocess.STDOUT,
env=env,
cwd=cwd
)
run_log_reprinter = True
log_reprinter = LogReprinter(log_path)
if run_log_reprinter:
log_reprinter.run()
def kill_subprocess():
log_reprinter.stop()
p.kill()
minutes = 0
if self.config.get('saveWorkspaceFrequency'):
minutes = int(
str2duration(
self.config['saveWorkspaceFrequency'])
.total_seconds() / 60)
def checkpoint():
try:
db.checkpoint_experiment(experiment)
except BaseException as e:
self.logger.info(e)
sched.add_job(
checkpoint,
'interval',
minutes=minutes)
metrics_path = fs_tracker.get_artifact_cache(
'_metrics', experiment.key)
minutes = 0
if self.config.get('saveMetricsFrequency'):
minutes = int(
str2duration(
self.config['saveMetricsFrequency'])
.total_seconds() / 60)
sched.add_job(
lambda: save_metrics(metrics_path),
'interval',
minutes=minutes)
def kill_if_stopped():
try:
db_expr = db.get_experiment(
experiment.key,
getinfo=False)
except:
db_expr = None
# Transient issues with getting experiment data might
# result in a None value being returned; in that case, leave
# the experiment running, because we won't be able to do
# anything else even if this experiment is stopped.
# In any event, if the experiment runs too long it will
# exceed its allocated time and stop.
if db_expr is not None:
if db_expr.status == 'stopped':
kill_subprocess()
return
if experiment.max_duration is not None and \
time.time() > experiment.time_started + \
int(str2duration(experiment.max_duration)
.total_seconds()):
kill_subprocess()
return
# If our task queue is signalled inactive
# during work process execution, we need to drop
# the current execution and exit
if not self.task_queue.is_active():
kill_subprocess()
sched.add_job(kill_if_stopped, 'interval', seconds=10)
try:
p.wait()
finally:
log_reprinter.stop()
save_metrics(metrics_path)
sched.shutdown()
db.checkpoint_experiment(experiment)
db.finish_experiment(experiment)
return p.returncode
def allocate_resources(experiment, config=None, verbose=10):
logger = logs.getLogger('allocate_resources')
logger.setLevel(verbose)
logger.info('Allocating resources {} for experiment {}'
.format(experiment.resources_needed, experiment.key))
ret_val = True
gpus_needed = int(experiment.resources_needed.get('gpus')) \
if experiment.resources_needed else 0
if gpus_needed > 0:
ret_val = ret_val and allocate_gpus(gpus_needed,
experiment.resources_needed,
config)
else:
allocate_gpus(0)
return ret_val
def allocate_gpus(gpus_needed, resources_needed={}, config=None):
# Only disable gpus if gpus_needed < 0
if gpus_needed < 0:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
return True
elif gpus_needed == 0:
return True
gpu_mem_needed = resources_needed.get('gpuMem', None)
strict = resources_needed.get('gpuMemStrict', False)
available_gpus = get_available_gpus(gpu_mem_needed, strict)
gpu_mapping = get_gpu_mapping()
mapped_gpus = [str(gpu_mapping[g]) for g in available_gpus]
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
if len(mapped_gpus) >= gpus_needed:
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
mapped_gpus[:gpus_needed])
return True
else:
return False
def main(args=sys.argv):
parser = argparse.ArgumentParser(
description='Studio worker. \
Usage: studio-local-worker \
')
parser.add_argument('--config', help='configuration file', default=None)
parser.add_argument(
'--guest',
help='Guest mode (does not require db credentials)',
action='store_true')
parser.add_argument(
'--timeout',
default=0, type=int)
parser.add_argument(
'--verbose',
default='error')
# Register signal handler for signal.SIGUSR1
# which will invoke built-in Python debugger:
signal.signal(signal.SIGUSR1, lambda sig, stack: pdb.set_trace())
parsed_args, script_args = parser.parse_known_args(args)
verbose = parse_verbosity(parsed_args.verbose)
queue = LocalQueue(verbose=verbose)
# queue = glob.glob(fs_tracker.get_queue_directory() + "/*")
# wait_for_messages(queue, parsed_args.timeout)
returncode = worker_loop(queue, parsed_args, timeout=parsed_args.timeout)
sys.exit(returncode)
def which(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def worker_loop(queue, parsed_args,
single_experiment=False,
timeout=0,
verbose=None):
fetch_artifacts = True
logger = logs.getLogger('worker_loop')
hold_period = 4
retval = 0
while True:
msg = queue.dequeue(acknowledge=False, timeout=timeout)
if not msg:
break
first_exp, ack_key = msg
data_dict = json.loads(sixdecode(first_exp))
experiment_key = data_dict['experiment']['key']
config = data_dict['config']
parsed_args.config = config
if verbose:
config['verbose'] = verbose
else:
verbose = model.parse_verbosity(config.get('verbose'))
logger.setLevel(verbose)
logger.debug('Received message: \n{}'.format(data_dict))
executor = LocalExecutor(queue, parsed_args)
with model.get_db_provider(config) as db:
# experiment = experiment_from_dict(data_dict['experiment'])
def try_get_experiment():
experiment = db.get_experiment(experiment_key)
if experiment is None:
raise ValueError(
'experiment is not found - indicates storage failure')
return experiment
experiment = retry(
try_get_experiment,
sleep_time=10,
logger=logger)
if config.get('experimentLifetime') and \
int(str2duration(config['experimentLifetime'])
.total_seconds()) + experiment.time_added < time.time():
logger.info(
'Experiment expired (max lifetime of {0} was exceeded)'
.format(config.get('experimentLifetime'))
)
queue.acknowledge(ack_key)
continue
if allocate_resources(experiment, config, verbose=verbose):
def hold_job():
queue.hold(ack_key, hold_period)
hold_job()
sched = BackgroundScheduler()
sched.add_job(hold_job, 'interval', minutes=hold_period / 2)
sched.start()
try:
python = 'python'
if experiment.pythonver[0] == '3':
python = 'python3'
if '_singularity' not in experiment.artifacts.keys():
pip_diff = pip_needed_packages(
experiment.pythonenv, python)
if any(pip_diff):
logger.info(
'Setting up python packages for experiment')
if pip_install_packages(
pip_diff,
python,
logger
) != 0:
logger.info(
"Installation of all packages together " +
" failed, "
"trying one package at a time")
for pkg in pip_diff:
pip_install_packages([pkg], python, logger)
for tag, item in experiment.artifacts.items():
art: Artifact = item
if fetch_artifacts or art.local_path is None:
get_only_newer: bool = True
if tag == 'workspace':
get_only_newer = False
if not art.is_mutable:
logger.info('Fetching artifact ' + tag)
art.local_path = retry(
lambda: db.get_artifact(art, only_newer=get_only_newer),
sleep_time=10,
logger=logger
)
else:
logger.info('Skipping mutable artifact ' + tag)
returncode = executor.run(experiment)
if returncode != 0:
retval = returncode
finally:
sched.shutdown()
queue.acknowledge(ack_key)
if single_experiment:
logger.info('single_experiment is True, quitting')
return retval
else:
logger.info('Cannot run experiment ' + experiment.key +
' due to lack of resources. Will retry')
# Debounce failed requests we cannot service yet
time.sleep(config.get('sleep_time', 5))
logger.info("Queue in {0} is empty, quitting"
.format(fs_tracker.get_queue_directory()))
return retval
def pip_install_packages(packages, python='python', logger=None):
pipp = subprocess.Popen(
[python, '-m', 'pip', 'install'] + [p for p in packages],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pipout, _ = pipp.communicate()
pipout = pipout.decode('utf-8')
# return pip.main(['install'] + list(packages))
if logger:
logger.info("pip output: \n" + pipout)
return pipp.returncode
def wait_for_messages(queue, timeout, logger=None):
wait_time = 0
wait_step = 5
timeout = int(timeout)
if timeout == 0:
return
while not queue.has_next():
if logger:
logger.info(
'No messages found, sleeping for {} s (total wait time {} s)'
.format(wait_step, wait_time))
time.sleep(wait_step)
wait_time += wait_step
if timeout > 0 and timeout < wait_time:
if logger:
logger.info('No jobs found in the queue during {} s'.
format(timeout))
return
def save_metrics(path):
cpu_load = psutil.cpu_percent()
cpu_mem = psutil.virtual_memory().used
timestamp = time.time()
with open(path, 'a') as f:
entry = 'time: {} CPU: {} mem: {} {} \n' \
.format(
timestamp,
cpu_load,
cpu_mem,
get_gpus_summary())
f.write(entry)
def pip_needed_packages(packages, python='python'):
pipp = subprocess.Popen(
[python, '-m', 'pip', 'freeze'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
pipout, _ = pipp.communicate()
pipout = pipout.decode('utf-8')
current_packages = {l.strip() for l in pipout.strip().split('\n')}
# current_packages = {p._key + '==' + p._version for p in
# pip.pip.get_installed_distributions(local_only=True)}
return {p for p in packages} - current_packages
if __name__ == "__main__":
main()
|
[] |
[] |
[
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES",
"PATH"
] |
[]
|
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES", "PATH"]
|
python
| 3 | 0 | |
tests/test_requirements.py
|
"""Test requirements module."""
import os
from unittest.mock import call, patch
import pytest
from homeassistant import loader, setup
from homeassistant.requirements import (
CONSTRAINT_FILE,
RequirementsNotFound,
async_get_integration_with_requirements,
async_process_requirements,
)
from tests.common import MockModule, mock_integration
def env_without_wheel_links():
"""Return env without wheel links."""
env = dict(os.environ)
env.pop("WHEEL_LINKS", None)
return env
async def test_requirement_installed_in_venv(hass):
"""Test requirement installed in virtual environment."""
with patch("os.path.dirname", return_value="ha_package_path"), patch(
"homeassistant.util.package.is_virtual_env", return_value=True
), patch("homeassistant.util.package.is_docker_env", return_value=False), patch(
"homeassistant.util.package.install_package", return_value=True
) as mock_install, patch.dict(
os.environ, env_without_wheel_links(), clear=True
):
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["package==0.0.1"]))
assert await setup.async_setup_component(hass, "comp", {})
assert "comp" in hass.config.components
assert mock_install.call_args == call(
"package==0.0.1",
constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
no_cache_dir=False,
)
async def test_requirement_installed_in_deps(hass):
"""Test requirement installed in deps directory."""
with patch("os.path.dirname", return_value="ha_package_path"), patch(
"homeassistant.util.package.is_virtual_env", return_value=False
), patch("homeassistant.util.package.is_docker_env", return_value=False), patch(
"homeassistant.util.package.install_package", return_value=True
) as mock_install, patch.dict(
os.environ, env_without_wheel_links(), clear=True
):
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["package==0.0.1"]))
assert await setup.async_setup_component(hass, "comp", {})
assert "comp" in hass.config.components
assert mock_install.call_args == call(
"package==0.0.1",
target=hass.config.path("deps"),
constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
no_cache_dir=False,
)
async def test_install_existing_package(hass):
"""Test an install attempt on an existing package."""
with patch(
"homeassistant.util.package.install_package", return_value=True
) as mock_inst:
await async_process_requirements(hass, "test_component", ["hello==1.0.0"])
assert len(mock_inst.mock_calls) == 1
with patch("homeassistant.util.package.is_installed", return_value=True), patch(
"homeassistant.util.package.install_package"
) as mock_inst:
await async_process_requirements(hass, "test_component", ["hello==1.0.0"])
assert len(mock_inst.mock_calls) == 0
async def test_install_missing_package(hass):
"""Test an install attempt on an existing package."""
with patch(
"homeassistant.util.package.install_package", return_value=False
) as mock_inst, pytest.raises(RequirementsNotFound):
await async_process_requirements(hass, "test_component", ["hello==1.0.0"])
assert len(mock_inst.mock_calls) == 1
async def test_get_integration_with_requirements(hass):
"""Check getting an integration with loaded requirements."""
hass.config.skip_pip = False
mock_integration(
hass, MockModule("test_component_dep", requirements=["test-comp-dep==1.0.0"])
)
mock_integration(
hass,
MockModule(
"test_component_after_dep", requirements=["test-comp-after-dep==1.0.0"]
),
)
mock_integration(
hass,
MockModule(
"test_component",
requirements=["test-comp==1.0.0"],
dependencies=["test_component_dep"],
partial_manifest={"after_dependencies": ["test_component_after_dep"]},
),
)
with patch(
"homeassistant.util.package.is_installed", return_value=False
) as mock_is_installed, patch(
"homeassistant.util.package.install_package", return_value=True
) as mock_inst:
integration = await async_get_integration_with_requirements(
hass, "test_component"
)
assert integration
assert integration.domain == "test_component"
assert len(mock_is_installed.mock_calls) == 3
assert sorted(mock_call[1][0] for mock_call in mock_is_installed.mock_calls) == [
"test-comp-after-dep==1.0.0",
"test-comp-dep==1.0.0",
"test-comp==1.0.0",
]
assert len(mock_inst.mock_calls) == 3
assert sorted(mock_call[1][0] for mock_call in mock_inst.mock_calls) == [
"test-comp-after-dep==1.0.0",
"test-comp-dep==1.0.0",
"test-comp==1.0.0",
]
async def test_get_integration_with_requirements_pip_install_fails_two_passes(hass):
"""Check getting an integration with loaded requirements and the pip install fails two passes."""
hass.config.skip_pip = False
mock_integration(
hass, MockModule("test_component_dep", requirements=["test-comp-dep==1.0.0"])
)
mock_integration(
hass,
MockModule(
"test_component_after_dep", requirements=["test-comp-after-dep==1.0.0"]
),
)
mock_integration(
hass,
MockModule(
"test_component",
requirements=["test-comp==1.0.0"],
dependencies=["test_component_dep"],
partial_manifest={"after_dependencies": ["test_component_after_dep"]},
),
)
def _mock_install_package(package, **kwargs):
if package == "test-comp==1.0.0":
return True
return False
# 1st pass
with pytest.raises(RequirementsNotFound), patch(
"homeassistant.util.package.is_installed", return_value=False
) as mock_is_installed, patch(
"homeassistant.util.package.install_package", side_effect=_mock_install_package
) as mock_inst:
integration = await async_get_integration_with_requirements(
hass, "test_component"
)
assert integration
assert integration.domain == "test_component"
assert len(mock_is_installed.mock_calls) == 3
assert sorted(mock_call[1][0] for mock_call in mock_is_installed.mock_calls) == [
"test-comp-after-dep==1.0.0",
"test-comp-dep==1.0.0",
"test-comp==1.0.0",
]
assert len(mock_inst.mock_calls) == 3
assert sorted(mock_call[1][0] for mock_call in mock_inst.mock_calls) == [
"test-comp-after-dep==1.0.0",
"test-comp-dep==1.0.0",
"test-comp==1.0.0",
]
# 2nd pass
with pytest.raises(RequirementsNotFound), patch(
"homeassistant.util.package.is_installed", return_value=False
) as mock_is_installed, patch(
"homeassistant.util.package.install_package", side_effect=_mock_install_package
) as mock_inst:
integration = await async_get_integration_with_requirements(
hass, "test_component"
)
assert integration
assert integration.domain == "test_component"
assert len(mock_is_installed.mock_calls) == 3
assert sorted(mock_call[1][0] for mock_call in mock_is_installed.mock_calls) == [
"test-comp-after-dep==1.0.0",
"test-comp-dep==1.0.0",
"test-comp==1.0.0",
]
assert len(mock_inst.mock_calls) == 3
assert sorted(mock_call[1][0] for mock_call in mock_inst.mock_calls) == [
"test-comp-after-dep==1.0.0",
"test-comp-dep==1.0.0",
"test-comp==1.0.0",
]
async def test_get_integration_with_missing_dependencies(hass):
"""Check getting an integration with missing dependencies."""
hass.config.skip_pip = False
mock_integration(
hass,
MockModule("test_component_after_dep"),
)
mock_integration(
hass,
MockModule(
"test_component",
dependencies=["test_component_dep"],
partial_manifest={"after_dependencies": ["test_component_after_dep"]},
),
)
mock_integration(
hass,
MockModule(
"test_custom_component",
dependencies=["test_component_dep"],
partial_manifest={"after_dependencies": ["test_component_after_dep"]},
),
built_in=False,
)
with pytest.raises(loader.IntegrationNotFound):
await async_get_integration_with_requirements(hass, "test_component")
with pytest.raises(loader.IntegrationNotFound):
await async_get_integration_with_requirements(hass, "test_custom_component")
async def test_get_built_in_integration_with_missing_after_dependencies(hass):
"""Check getting a built_in integration with missing after_dependencies results in exception."""
hass.config.skip_pip = False
mock_integration(
hass,
MockModule(
"test_component",
partial_manifest={"after_dependencies": ["test_component_after_dep"]},
),
built_in=True,
)
with pytest.raises(loader.IntegrationNotFound):
await async_get_integration_with_requirements(hass, "test_component")
async def test_get_custom_integration_with_missing_after_dependencies(hass):
"""Check getting a custom integration with missing after_dependencies."""
hass.config.skip_pip = False
mock_integration(
hass,
MockModule(
"test_custom_component",
partial_manifest={"after_dependencies": ["test_component_after_dep"]},
),
built_in=False,
)
integration = await async_get_integration_with_requirements(
hass, "test_custom_component"
)
assert integration
assert integration.domain == "test_custom_component"
async def test_install_with_wheels_index(hass):
"""Test an install attempt with wheels index URL."""
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["hello==1.0.0"]))
with patch("homeassistant.util.package.is_installed", return_value=False), patch(
"homeassistant.util.package.is_docker_env", return_value=True
), patch("homeassistant.util.package.install_package") as mock_inst, patch.dict(
os.environ, {"WHEELS_LINKS": "https://wheels.hass.io/test"}
), patch(
"os.path.dirname"
) as mock_dir:
mock_dir.return_value = "ha_package_path"
assert await setup.async_setup_component(hass, "comp", {})
assert "comp" in hass.config.components
assert mock_inst.call_args == call(
"hello==1.0.0",
find_links="https://wheels.hass.io/test",
constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
no_cache_dir=True,
)
async def test_install_on_docker(hass):
"""Test an install attempt on an docker system env."""
hass.config.skip_pip = False
mock_integration(hass, MockModule("comp", requirements=["hello==1.0.0"]))
with patch("homeassistant.util.package.is_installed", return_value=False), patch(
"homeassistant.util.package.is_docker_env", return_value=True
), patch("homeassistant.util.package.install_package") as mock_inst, patch(
"os.path.dirname"
) as mock_dir, patch.dict(
os.environ, env_without_wheel_links(), clear=True
):
mock_dir.return_value = "ha_package_path"
assert await setup.async_setup_component(hass, "comp", {})
assert "comp" in hass.config.components
assert mock_inst.call_args == call(
"hello==1.0.0",
constraints=os.path.join("ha_package_path", CONSTRAINT_FILE),
no_cache_dir=True,
)
async def test_discovery_requirements_mqtt(hass):
"""Test that we load discovery requirements."""
hass.config.skip_pip = False
mqtt = await loader.async_get_integration(hass, "mqtt")
mock_integration(
hass, MockModule("mqtt_comp", partial_manifest={"mqtt": ["foo/discovery"]})
)
with patch(
"homeassistant.requirements.async_process_requirements",
) as mock_process:
await async_get_integration_with_requirements(hass, "mqtt_comp")
assert len(mock_process.mock_calls) == 2 # mqtt also depends on http
assert mock_process.mock_calls[0][1][2] == mqtt.requirements
async def test_discovery_requirements_ssdp(hass):
"""Test that we load discovery requirements."""
hass.config.skip_pip = False
ssdp = await loader.async_get_integration(hass, "ssdp")
mock_integration(
hass, MockModule("ssdp_comp", partial_manifest={"ssdp": [{"st": "roku:ecp"}]})
)
with patch(
"homeassistant.requirements.async_process_requirements",
) as mock_process:
await async_get_integration_with_requirements(hass, "ssdp_comp")
assert len(mock_process.mock_calls) == 3
assert mock_process.mock_calls[0][1][2] == ssdp.requirements
# Ensure zeroconf is a dep for ssdp
assert mock_process.mock_calls[1][1][1] == "zeroconf"
@pytest.mark.parametrize(
"partial_manifest",
[{"zeroconf": ["_googlecast._tcp.local."]}, {"homekit": {"models": ["LIFX"]}}],
)
async def test_discovery_requirements_zeroconf(hass, partial_manifest):
"""Test that we load discovery requirements."""
hass.config.skip_pip = False
zeroconf = await loader.async_get_integration(hass, "zeroconf")
mock_integration(
hass,
MockModule("comp", partial_manifest=partial_manifest),
)
with patch(
"homeassistant.requirements.async_process_requirements",
) as mock_process:
await async_get_integration_with_requirements(hass, "comp")
assert len(mock_process.mock_calls) == 2 # zeroconf also depends on http
assert mock_process.mock_calls[0][1][2] == zeroconf.requirements
async def test_discovery_requirements_dhcp(hass):
"""Test that we load dhcp discovery requirements."""
hass.config.skip_pip = False
dhcp = await loader.async_get_integration(hass, "dhcp")
mock_integration(
hass,
MockModule(
"comp",
partial_manifest={
"dhcp": [{"hostname": "somfy_*", "macaddress": "B8B7F1*"}]
},
),
)
with patch(
"homeassistant.requirements.async_process_requirements",
) as mock_process:
await async_get_integration_with_requirements(hass, "comp")
assert len(mock_process.mock_calls) == 1 # dhcp does not depend on http
assert mock_process.mock_calls[0][1][2] == dhcp.requirements
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
lib_test.go
|
package main
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
"os"
"strconv"
"testing"
)
var db *sql.DB
var err error
const DefaultMySQLHost = "localhost"
const DefaultMySQLPort = "3306"
const DefaultRootPassword = ""
var TestDatabases = []string{
"GOODTEK",
"REIMEI",
"BetterGraphicAnimation",
}
var globalDBHost string
var globalDBPort int
func TestMain(m *testing.M) {
dbHost := os.Getenv("Z_TEST_MYSQL_HOST")
dbPortString := os.Getenv("Z_TEST_MYSQL_PORT")
rootPassword := os.Getenv("Z_TEST_ROOT_PASSWORD")
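// Fall back to the defaults below when the Z_TEST_* variables are unset.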
if dbHost == "" {
dbHost = DefaultMySQLHost
}
if dbPortString == "" {
dbPortString = DefaultMySQLPort
}
if rootPassword == "" {
rootPassword = DefaultRootPassword
}
var dbPort int
dbPort, err = strconv.Atoi(dbPortString)
if err != nil {
handleError(InvalidPortError{portString: dbPortString})
}
globalDBHost = dbHost
globalDBPort = dbPort
db, err = sql.Open("mysql", fmt.Sprintf("root:%s@tcp(%s:%d)/", rootPassword, dbHost, dbPort))
if err != nil {
handleError(ConnectMySQLServerError{
err: err,
dbHost: dbHost,
dbPort: dbPort,
})
}
m.Run()
err = db.Close()
if err != nil {
handleError(CloseMySQLConnecttionError{
err: err,
dbHost: dbHost,
dbPort: dbPort,
})
}
}
func TestCreateDatabases(t *testing.T) {
var result map[string]string
result, err = CreateDatabasesWithDB(db, TestDatabases)
if err != nil {
t.Fatal(err)
}
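// CreateDatabasesWithDB is expected to return a map of database name to the
// password generated for the MySQL user of the same name; connect with each
// pair to verify the created credentials actually work.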
for database, password := range result {
var anotherDB *sql.DB
anotherDB, err = sql.Open("mysql",
fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", database, password, globalDBHost, globalDBPort, database))
if err != nil {
t.Fatal(ConnectMySQLServerError{err: err, dbHost: globalDBHost, dbPort: globalDBPort})
}
err = anotherDB.Ping()
if err != nil {
t.Fatal(ConnectMySQLServerError{err: err, dbHost: globalDBHost, dbPort: globalDBPort})
}
// Use Exec for DDL so no *sql.Rows is left open.
_, err = anotherDB.Exec(fmt.Sprintf("CREATE TABLE %s (id INT PRIMARY KEY, name VARCHAR(255))", database))
if err != nil {
t.Fatal(CreateTableFailedError{
err: err,
table: database,
database: database,
})
}
}
}
func handleError(err error) {
fmt.Println(err.Error())
os.Exit(-1)
}
|
[
"\"Z_TEST_MYSQL_HOST\"",
"\"Z_TEST_MYSQL_PORT\"",
"\"Z_TEST_ROOT_PASSWORD\""
] |
[] |
[
"Z_TEST_MYSQL_HOST",
"Z_TEST_MYSQL_PORT",
"Z_TEST_ROOT_PASSWORD"
] |
[]
|
["Z_TEST_MYSQL_HOST", "Z_TEST_MYSQL_PORT", "Z_TEST_ROOT_PASSWORD"]
|
go
| 3 | 0 | |
tests/repository_test.go
|
package tests
import (
"os"
"testing"
_ "github.com/k0kubun/pp"
"github.com/ktrysmt/go-bitbucket"
)
func TestGetRepositoryRepositories(t *testing.T) {
user := os.Getenv("BITBUCKET_TEST_USERNAME")
pass := os.Getenv("BITBUCKET_TEST_PASSWORD")
owner := os.Getenv("BITBUCKET_TEST_OWNER")
repo := os.Getenv("BITBUCKET_TEST_REPOSLUG")
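// The Bitbucket credentials and target repository are supplied through the
// BITBUCKET_TEST_* environment variables; the checks below flag any that are missing.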
if user == "" {
t.Error("BITBUCKET_TEST_USERNAME is empty.")
}
if pass == "" {
t.Error("BITBUCKET_TEST_PASSWORD is empty.")
}
if owner == "" {
t.Error("BITBUCKET_TEST_OWNER is empty.")
}
if repo == "" {
t.Error("BITBUCKET_TEST_REPOSLUG is empty.")
}
c := bitbucket.NewBasicAuth(user, pass)
opt := &bitbucket.RepositoryOptions{
Owner: owner,
RepoSlug: repo,
}
res, err := c.Repositories.Repository.Get(opt)
if err != nil {
t.Error("The repository is not found.")
}
if res.Full_name != owner+"/"+repo {
t.Error("Cannot catch repos full name.")
}
}
|
[
"\"BITBUCKET_TEST_USERNAME\"",
"\"BITBUCKET_TEST_PASSWORD\"",
"\"BITBUCKET_TEST_OWNER\"",
"\"BITBUCKET_TEST_REPOSLUG\""
] |
[] |
[
"BITBUCKET_TEST_PASSWORD",
"BITBUCKET_TEST_OWNER",
"BITBUCKET_TEST_USERNAME",
"BITBUCKET_TEST_REPOSLUG"
] |
[]
|
["BITBUCKET_TEST_PASSWORD", "BITBUCKET_TEST_OWNER", "BITBUCKET_TEST_USERNAME", "BITBUCKET_TEST_REPOSLUG"]
|
go
| 4 | 0 | |
cmd/tusd/cli/composer.go
|
package cli
import (
"os"
"github.com/brianshepanek/tusd"
"github.com/tus/tusd/filestore"
"github.com/tus/tusd/limitedstore"
"github.com/tus/tusd/memorylocker"
"github.com/tus/tusd/s3store"
"github.com/tus/tusd/gcsstore"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
var Composer *tusd.StoreComposer
func CreateComposer() {
// Attempt to use S3 as a backend if the -s3-bucket option has been supplied.
// If not, we default to storing uploads locally on disk.
Composer = tusd.NewStoreComposer()
if Flags.S3Bucket != "" {
s3Config := aws.NewConfig()
if Flags.S3Endpoint == "" {
stdout.Printf("Using 's3://%s' as S3 bucket for storage.\n", Flags.S3Bucket)
} else {
stdout.Printf("Using '%s/%s' as S3 endpoint and bucket for storage.\n", Flags.S3Endpoint, Flags.S3Bucket)
s3Config = s3Config.WithEndpoint(Flags.S3Endpoint).WithS3ForcePathStyle(true)
}
// Derive credentials from default credential chain (env, shared, ec2 instance role)
// as per https://github.com/aws/aws-sdk-go#configuring-credentials
store := s3store.New(Flags.S3Bucket, s3.New(session.Must(session.NewSession()), s3Config))
store.UseIn(Composer)
locker := memorylocker.New()
locker.UseIn(Composer)
} else if Flags.GCSBucket != "" {
// Derive credentials from the service account file path passed in the
// GCS_SERVICE_ACCOUNT_FILE environment variable.
gcsSAF := os.Getenv("GCS_SERVICE_ACCOUNT_FILE")
if gcsSAF == "" {
stderr.Fatalf("No service account file provided for Google Cloud Storage using the GCS_SERVICE_ACCOUNT_FILE environment variable.\n")
}
service, err := gcsstore.NewGCSService(gcsSAF)
if err != nil {
stderr.Fatalf("Unable to create Google Cloud Storage service: %s\n", err)
}
stdout.Printf("Using 'gcs://%s' as GCS bucket for storage.\n", Flags.GCSBucket)
store := gcsstore.New(Flags.GCSBucket, service)
store.UseIn(Composer)
locker := memorylocker.New()
locker.UseIn(Composer)
} else {
dir := Flags.UploadDir
stdout.Printf("Using '%s' as directory storage.\n", dir)
if err := os.MkdirAll(dir, os.FileMode(0774)); err != nil {
stderr.Fatalf("Unable to ensure directory exists: %s", err)
}
store := filestore.New(dir)
store.UseIn(Composer)
}
storeSize := Flags.StoreSize
maxSize := Flags.MaxSize
if storeSize > 0 {
limitedstore.New(storeSize, Composer.Core, Composer.Terminater).UseIn(Composer)
stdout.Printf("Using %.2fMB as storage size.\n", float64(storeSize)/1024/1024)
// We need to ensure that a single upload can fit into the storage size
if maxSize > storeSize || maxSize == 0 {
Flags.MaxSize = storeSize
}
}
stdout.Printf("Using %.2fMB as maximum size.\n", float64(Flags.MaxSize)/1024/1024)
}
|
[
"\"GCS_SERVICE_ACCOUNT_FILE\""
] |
[] |
[
"GCS_SERVICE_ACCOUNT_FILE"
] |
[]
|
["GCS_SERVICE_ACCOUNT_FILE"]
|
go
| 1 | 0 | |
examples/naturallanguageclassifierv1/natural_language_classifier_v1.go
|
package main
import (
"fmt"
"os"
"github.com/edwindvinas/go-sdk-core/core"
nlc "github.com/edwindvinas/go-sdk/naturallanguageclassifierv1"
)
func main() {
// Instantiate the Watson Natural Language Classifier service
authenticator := &core.IamAuthenticator{
ApiKey: os.Getenv("YOUR API KEY"),
}
service, serviceErr := nlc.NewNaturalLanguageClassifierV1(&nlc.NaturalLanguageClassifierV1Options{
URL: "YOUR SERVICE URL",
Authenticator: authenticator,
})
// Check successful instantiation
if serviceErr != nil {
panic(serviceErr)
}
/* CREATE CLASSIFIER */
pwd, _ := os.Getwd()
metadata, metadataErr := os.Open(pwd + "/../../resources/weather_training_metadata.json")
if metadataErr != nil {
fmt.Println(metadataErr)
}
data, dataErr := os.Open(pwd + "/../../resources/weather_training_data.csv")
if dataErr != nil {
fmt.Println(dataErr)
}
createClassifierOptions := service.NewCreateClassifierOptions(metadata, data)
// Call the natural language classifier CreateClassifier method
createResult, _, responseErr := service.CreateClassifier(createClassifierOptions)
// Check successful call
if responseErr != nil {
panic(responseErr)
}
// Check successful casting
if createResult != nil {
core.PrettyPrint(createResult, "Create Classifier")
}
/* CLASSIFY */
if *createResult.Status == "Available" {
classifyOptions := service.NewClassifyOptions(*createResult.ClassifierID, "How hot will it be tomorrow?")
// Call the natural language classifier Classify method
classifyResult, _, responseErr := service.Classify(classifyOptions)
// Check successful call
if responseErr != nil {
panic(responseErr)
}
// Check successful casting
if classifyResult != nil {
core.PrettyPrint(classifyResult, "Classify")
}
}
}
|
[
"\"YOUR API KEY\""
] |
[] |
[
"YOUR API KEY"
] |
[]
|
["YOUR API KEY"]
|
go
| 1 | 0 | |
cmd/commandfuncs.go
|
// Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"bytes"
"context"
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"os/exec"
"runtime"
"runtime/debug"
"sort"
"strings"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"go.uber.org/zap"
)
func cmdStart(fl Flags) (int, error) {
startCmdConfigFlag := fl.String("config")
startCmdConfigAdapterFlag := fl.String("adapter")
startCmdPidfileFlag := fl.String("pidfile")
startCmdWatchFlag := fl.Bool("watch")
startCmdEnvfileFlag := fl.String("envfile")
// open a listener to which the child process will connect when
// it is ready to confirm that it has successfully started
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("opening listener for success confirmation: %v", err)
}
defer ln.Close()
// craft the command with a pingback address and with a
// pipe for its stdin, so we can tell it our confirmation
// code that we expect so that some random port scan at
// the most unfortunate time won't fool us into thinking
// the child succeeded (i.e. the alternative is to just
// wait for any connection on our listener, but better to
// ensure it's the process we're expecting - we can be
// sure by giving it some random bytes and having it echo
// them back to us)
cmd := exec.Command(os.Args[0], "run", "--pingback", ln.Addr().String())
if startCmdConfigFlag != "" {
cmd.Args = append(cmd.Args, "--config", startCmdConfigFlag)
}
if startCmdEnvfileFlag != "" {
cmd.Args = append(cmd.Args, "--envfile", startCmdEnvfileFlag)
}
if startCmdConfigAdapterFlag != "" {
cmd.Args = append(cmd.Args, "--adapter", startCmdConfigAdapterFlag)
}
if startCmdWatchFlag {
cmd.Args = append(cmd.Args, "--watch")
}
if startCmdPidfileFlag != "" {
cmd.Args = append(cmd.Args, "--pidfile", startCmdPidfileFlag)
}
stdinpipe, err := cmd.StdinPipe()
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("creating stdin pipe: %v", err)
}
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
// generate the random bytes we'll send to the child process
expect := make([]byte, 32)
_, err = rand.Read(expect)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("generating random confirmation bytes: %v", err)
}
// begin writing the confirmation bytes to the child's
// stdin; use a goroutine since the child hasn't been
// started yet, and writing synchronously would result
// in a deadlock
go func() {
_, _ = stdinpipe.Write(expect)
stdinpipe.Close()
}()
// start the process
err = cmd.Start()
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("starting caddy process: %v", err)
}
// there are two ways we know we're done: either
// the process will connect to our listener, or
// it will exit with an error
success, exit := make(chan struct{}), make(chan error)
// in one goroutine, we await the success of the child process
go func() {
for {
conn, err := ln.Accept()
if err != nil {
if !errors.Is(err, net.ErrClosed) {
log.Println(err)
}
break
}
err = handlePingbackConn(conn, expect)
if err == nil {
close(success)
break
}
log.Println(err)
}
}()
// in another goroutine, we await the failure of the child process
go func() {
err := cmd.Wait() // don't send on this line! Wait blocks, but send starts before it unblocks
exit <- err // sending on separate line ensures select won't trigger until after Wait unblocks
}()
// when one of the goroutines unblocks, we're done and can exit
select {
case <-success:
fmt.Printf("Successfully started Caddy (pid=%d) - Caddy is running in the background\n", cmd.Process.Pid)
case err := <-exit:
return caddy.ExitCodeFailedStartup,
fmt.Errorf("caddy process exited with error: %v", err)
}
return caddy.ExitCodeSuccess, nil
}
func cmdRun(fl Flags) (int, error) {
caddy.TrapSignals()
runCmdConfigFlag := fl.String("config")
runCmdConfigAdapterFlag := fl.String("adapter")
runCmdResumeFlag := fl.Bool("resume")
runCmdLoadEnvfileFlag := fl.String("envfile")
runCmdPrintEnvFlag := fl.Bool("environ")
runCmdWatchFlag := fl.Bool("watch")
runCmdPidfileFlag := fl.String("pidfile")
runCmdPingbackFlag := fl.String("pingback")
// load all additional envs as soon as possible
if runCmdLoadEnvfileFlag != "" {
if err := loadEnvFromFile(runCmdLoadEnvfileFlag); err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("loading additional environment variables: %v", err)
}
}
// if we are supposed to print the environment, do that first
if runCmdPrintEnvFlag {
printEnvironment()
}
// load the config, depending on flags
var config []byte
var err error
if runCmdResumeFlag {
config, err = os.ReadFile(caddy.ConfigAutosavePath)
if os.IsNotExist(err) {
// not a bad error; just can't resume if autosave file doesn't exist
caddy.Log().Info("no autosave file exists", zap.String("autosave_file", caddy.ConfigAutosavePath))
runCmdResumeFlag = false
} else if err != nil {
return caddy.ExitCodeFailedStartup, err
} else {
if runCmdConfigFlag == "" {
caddy.Log().Info("resuming from last configuration",
zap.String("autosave_file", caddy.ConfigAutosavePath))
} else {
// if they also specified a config file, user should be aware that we're not
// using it (doing so could lead to data/config loss by overwriting!)
caddy.Log().Warn("--config and --resume flags were used together; ignoring --config and resuming from last configuration",
zap.String("autosave_file", caddy.ConfigAutosavePath))
}
}
}
// we don't use 'else' here since this value might have been changed in 'if' block; i.e. not mutually exclusive
var configFile string
if !runCmdResumeFlag {
config, configFile, err = LoadConfig(runCmdConfigFlag, runCmdConfigAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
}
// run the initial config
err = caddy.Load(config, true)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("loading initial config: %v", err)
}
caddy.Log().Info("serving initial configuration")
// if we are to report to another process the successful start
// of the server, do so now by echoing back contents of stdin
if runCmdPingbackFlag != "" {
confirmationBytes, err := io.ReadAll(os.Stdin)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading confirmation bytes from stdin: %v", err)
}
conn, err := net.Dial("tcp", runCmdPingbackFlag)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("dialing confirmation address: %v", err)
}
defer conn.Close()
_, err = conn.Write(confirmationBytes)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("writing confirmation bytes to %s: %v", runCmdPingbackFlag, err)
}
}
// if enabled, reload config file automatically on changes
// (this better only be used in dev!)
if runCmdWatchFlag {
go watchConfigFile(configFile, runCmdConfigAdapterFlag)
}
// create pidfile
if runCmdPidfileFlag != "" {
err := caddy.PIDFile(runCmdPidfileFlag)
if err != nil {
caddy.Log().Error("unable to write PID file",
zap.String("pidfile", runCmdPidfileFlag),
zap.Error(err))
}
}
// warn if the environment does not provide enough information about the disk
hasXDG := os.Getenv("XDG_DATA_HOME") != "" &&
os.Getenv("XDG_CONFIG_HOME") != "" &&
os.Getenv("XDG_CACHE_HOME") != ""
switch runtime.GOOS {
case "windows":
if os.Getenv("HOME") == "" && os.Getenv("USERPROFILE") == "" && !hasXDG {
caddy.Log().Warn("neither HOME nor USERPROFILE environment variables are set - please fix; some assets might be stored in ./caddy")
}
case "plan9":
if os.Getenv("home") == "" && !hasXDG {
caddy.Log().Warn("$home environment variable is empty - please fix; some assets might be stored in ./caddy")
}
default:
if os.Getenv("HOME") == "" && !hasXDG {
caddy.Log().Warn("$HOME environment variable is empty - please fix; some assets might be stored in ./caddy")
}
}
select {}
}
func cmdStop(fl Flags) (int, error) {
addrFlag := fl.String("address")
configFlag := fl.String("config")
configAdapterFlag := fl.String("adapter")
adminAddr, err := DetermineAdminAPIAddress(addrFlag, configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
}
resp, err := AdminAPIRequest(adminAddr, http.MethodPost, "/stop", nil, nil)
if err != nil {
caddy.Log().Warn("failed using API to stop instance", zap.Error(err))
return caddy.ExitCodeFailedStartup, err
}
defer resp.Body.Close()
return caddy.ExitCodeSuccess, nil
}
func cmdReload(fl Flags) (int, error) {
configFlag := fl.String("config")
configAdapterFlag := fl.String("adapter")
addrFlag := fl.String("address")
forceFlag := fl.Bool("force")
// get the config in caddy's native format
config, configFile, err := LoadConfig(configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
if configFile == "" {
return caddy.ExitCodeFailedStartup, fmt.Errorf("no config file to load")
}
adminAddr, err := DetermineAdminAPIAddress(addrFlag, configFlag, configAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("couldn't determine admin API address: %v", err)
}
// optionally force a config reload
headers := make(http.Header)
if forceFlag {
headers.Set("Cache-Control", "must-revalidate")
}
resp, err := AdminAPIRequest(adminAddr, http.MethodPost, "/load", headers, bytes.NewReader(config))
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("sending configuration to instance: %v", err)
}
defer resp.Body.Close()
return caddy.ExitCodeSuccess, nil
}
func cmdVersion(_ Flags) (int, error) {
fmt.Println(CaddyVersion())
return caddy.ExitCodeSuccess, nil
}
func cmdBuildInfo(fl Flags) (int, error) {
bi, ok := debug.ReadBuildInfo()
if !ok {
return caddy.ExitCodeFailedStartup, fmt.Errorf("no build information")
}
fmt.Printf("go_version: %s\n", runtime.Version())
fmt.Printf("go_os: %s\n", runtime.GOOS)
fmt.Printf("go_arch: %s\n", runtime.GOARCH)
fmt.Printf("path: %s\n", bi.Path)
fmt.Printf("main: %s %s %s\n", bi.Main.Path, bi.Main.Version, bi.Main.Sum)
fmt.Println("dependencies:")
for _, goMod := range bi.Deps {
fmt.Printf("%s %s %s", goMod.Path, goMod.Version, goMod.Sum)
if goMod.Replace != nil {
fmt.Printf(" => %s %s %s", goMod.Replace.Path, goMod.Replace.Version, goMod.Replace.Sum)
}
fmt.Println()
}
return caddy.ExitCodeSuccess, nil
}
func cmdListModules(fl Flags) (int, error) {
packages := fl.Bool("packages")
versions := fl.Bool("versions")
skipStandard := fl.Bool("skip-standard")
printModuleInfo := func(mi moduleInfo) {
fmt.Print(mi.caddyModuleID)
if versions && mi.goModule != nil {
fmt.Print(" " + mi.goModule.Version)
}
if packages && mi.goModule != nil {
fmt.Print(" " + mi.goModule.Path)
if mi.goModule.Replace != nil {
fmt.Print(" => " + mi.goModule.Replace.Path)
}
}
if mi.err != nil {
fmt.Printf(" [%v]", mi.err)
}
fmt.Println()
}
// organize modules by whether they come with the standard distribution
standard, nonstandard, unknown, err := getModules()
if err != nil {
// oh well, just print the module IDs and exit
for _, m := range caddy.Modules() {
fmt.Println(m)
}
return caddy.ExitCodeSuccess, nil
}
// Standard modules (always shipped with Caddy)
if !skipStandard {
if len(standard) > 0 {
for _, mod := range standard {
printModuleInfo(mod)
}
}
fmt.Printf("\n Standard modules: %d\n", len(standard))
}
// Non-standard modules (third party plugins)
if len(nonstandard) > 0 {
if len(standard) > 0 && !skipStandard {
fmt.Println()
}
for _, mod := range nonstandard {
printModuleInfo(mod)
}
}
fmt.Printf("\n Non-standard modules: %d\n", len(nonstandard))
// Unknown modules (couldn't get Caddy module info)
if len(unknown) > 0 {
if (len(standard) > 0 && !skipStandard) || len(nonstandard) > 0 {
fmt.Println()
}
for _, mod := range unknown {
printModuleInfo(mod)
}
}
fmt.Printf("\n Unknown modules: %d\n", len(unknown))
return caddy.ExitCodeSuccess, nil
}
func cmdEnviron(_ Flags) (int, error) {
printEnvironment()
return caddy.ExitCodeSuccess, nil
}
func cmdAdaptConfig(fl Flags) (int, error) {
adaptCmdInputFlag := fl.String("config")
adaptCmdAdapterFlag := fl.String("adapter")
adaptCmdPrettyFlag := fl.Bool("pretty")
adaptCmdValidateFlag := fl.Bool("validate")
// if no input file was specified, try a default
// Caddyfile if the Caddyfile adapter is plugged in
if adaptCmdInputFlag == "" && caddyconfig.GetAdapter("caddyfile") != nil {
_, err := os.Stat("Caddyfile")
if err == nil {
// default Caddyfile exists
adaptCmdInputFlag = "Caddyfile"
caddy.Log().Info("using adjacent Caddyfile")
} else if !os.IsNotExist(err) {
// default Caddyfile exists, but error accessing it
return caddy.ExitCodeFailedStartup, fmt.Errorf("accessing default Caddyfile: %v", err)
}
}
if adaptCmdInputFlag == "" {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("input file required when there is no Caddyfile in current directory (use --config flag)")
}
if adaptCmdAdapterFlag == "" {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("adapter name is required (use --adapt flag or leave unspecified for default)")
}
cfgAdapter := caddyconfig.GetAdapter(adaptCmdAdapterFlag)
if cfgAdapter == nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("unrecognized config adapter: %s", adaptCmdAdapterFlag)
}
input, err := os.ReadFile(adaptCmdInputFlag)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading input file: %v", err)
}
opts := map[string]interface{}{"filename": adaptCmdInputFlag}
adaptedConfig, warnings, err := cfgAdapter.Adapt(input, opts)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
if adaptCmdPrettyFlag {
var prettyBuf bytes.Buffer
err = json.Indent(&prettyBuf, adaptedConfig, "", "\t")
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
adaptedConfig = prettyBuf.Bytes()
}
// print result to stdout
fmt.Println(string(adaptedConfig))
// print warnings to stderr
for _, warn := range warnings {
msg := warn.Message
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
fmt.Fprintf(os.Stderr, "[WARNING][%s] %s:%d: %s\n", adaptCmdAdapterFlag, warn.File, warn.Line, msg)
}
// validate output if requested
if adaptCmdValidateFlag {
var cfg *caddy.Config
err = json.Unmarshal(adaptedConfig, &cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
}
err = caddy.Validate(cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("validation: %v", err)
}
}
return caddy.ExitCodeSuccess, nil
}
func cmdValidateConfig(fl Flags) (int, error) {
validateCmdConfigFlag := fl.String("config")
validateCmdAdapterFlag := fl.String("adapter")
input, _, err := LoadConfig(validateCmdConfigFlag, validateCmdAdapterFlag)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
input = caddy.RemoveMetaFields(input)
var cfg *caddy.Config
err = json.Unmarshal(input, &cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("decoding config: %v", err)
}
err = caddy.Validate(cfg)
if err != nil {
return caddy.ExitCodeFailedStartup, err
}
fmt.Println("Valid configuration")
return caddy.ExitCodeSuccess, nil
}
func cmdFmt(fl Flags) (int, error) {
formatCmdConfigFile := fl.Arg(0)
if formatCmdConfigFile == "" {
formatCmdConfigFile = "Caddyfile"
}
// as a special case, read from stdin if the file name is "-"
if formatCmdConfigFile == "-" {
input, err := io.ReadAll(os.Stdin)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading stdin: %v", err)
}
fmt.Print(string(caddyfile.Format(input)))
return caddy.ExitCodeSuccess, nil
}
input, err := os.ReadFile(formatCmdConfigFile)
if err != nil {
return caddy.ExitCodeFailedStartup,
fmt.Errorf("reading input file: %v", err)
}
output := caddyfile.Format(input)
if fl.Bool("overwrite") {
if err := os.WriteFile(formatCmdConfigFile, output, 0600); err != nil {
return caddy.ExitCodeFailedStartup, fmt.Errorf("overwriting formatted file: %v", err)
}
} else {
fmt.Print(string(output))
}
return caddy.ExitCodeSuccess, nil
}
func cmdHelp(fl Flags) (int, error) {
const fullDocs = `Full documentation is available at:
https://caddyserver.com/docs/command-line`
args := fl.Args()
if len(args) == 0 {
s := `Caddy is an extensible server platform.
usage:
caddy <command> [<args...>]
commands:
`
keys := make([]string, 0, len(commands))
for k := range commands {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
cmd := commands[k]
short := strings.TrimSuffix(cmd.Short, ".")
s += fmt.Sprintf(" %-15s %s\n", cmd.Name, short)
}
s += "\nUse 'caddy help <command>' for more information about a command.\n"
s += "\n" + fullDocs + "\n"
fmt.Print(s)
return caddy.ExitCodeSuccess, nil
} else if len(args) > 1 {
return caddy.ExitCodeFailedStartup, fmt.Errorf("can only give help with one command")
}
subcommand, ok := commands[args[0]]
if !ok {
return caddy.ExitCodeFailedStartup, fmt.Errorf("unknown command: %s", args[0])
}
helpText := strings.TrimSpace(subcommand.Long)
if helpText == "" {
helpText = subcommand.Short
if !strings.HasSuffix(helpText, ".") {
helpText += "."
}
}
result := fmt.Sprintf("%s\n\nusage:\n caddy %s %s\n",
helpText,
subcommand.Name,
strings.TrimSpace(subcommand.Usage),
)
if help := flagHelp(subcommand.Flags); help != "" {
result += fmt.Sprintf("\nflags:\n%s", help)
}
result += "\n" + fullDocs + "\n"
fmt.Print(result)
return caddy.ExitCodeSuccess, nil
}
// AdminAPIRequest makes an API request according to the CLI flags given,
// with the given HTTP method and request URI. If body is non-nil, it will
// be assumed to be Content-Type application/json. The caller should close
// the response body. Should only be used by Caddy CLI commands which
// need to interact with a running instance of Caddy via the admin API.
func AdminAPIRequest(adminAddr, method, uri string, headers http.Header, body io.Reader) (*http.Response, error) {
parsedAddr, err := caddy.ParseNetworkAddress(adminAddr)
if err != nil || parsedAddr.PortRangeSize() > 1 {
return nil, fmt.Errorf("invalid admin address %s: %v", adminAddr, err)
}
origin := "http://" + parsedAddr.JoinHostPort(0)
if parsedAddr.IsUnixNetwork() {
origin = "unixsocket" // hack so that http.NewRequest() is happy
}
// form the request
req, err := http.NewRequest(method, origin+uri, body)
if err != nil {
return nil, fmt.Errorf("making request: %v", err)
}
if parsedAddr.IsUnixNetwork() {
// When listening on a unix socket, the admin endpoint doesn't
// accept any Host header because there is no host:port for
// a unix socket's address. The server's host check is fairly
// strict for security reasons, so we don't allow just any
// Host header. For unix sockets, the Host header must be
// empty. Unfortunately, Go makes it impossible to make HTTP
// requests with an empty Host header... except with this one
// weird trick. (Hopefully they don't fix it. It's already
// hard enough to use HTTP over unix sockets.)
//
// An equivalent curl command would be something like:
// $ curl --unix-socket caddy.sock http:/:$REQUEST_URI
req.URL.Host = " "
req.Host = ""
} else {
req.Header.Set("Origin", origin)
}
if body != nil {
req.Header.Set("Content-Type", "application/json")
}
for k, v := range headers {
req.Header[k] = v
}
// make an HTTP client that dials our network type, since admin
// endpoints aren't always TCP, which is what the default transport
// expects; reuse is not of particular concern here
client := http.Client{
Transport: &http.Transport{
DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
return net.Dial(parsedAddr.Network, parsedAddr.JoinHostPort(0))
},
},
}
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("performing request: %v", err)
}
// if it didn't work, let the user know
if resp.StatusCode >= 400 {
respBody, err := io.ReadAll(io.LimitReader(resp.Body, 1024*10))
if err != nil {
return nil, fmt.Errorf("HTTP %d: reading error message: %v", resp.StatusCode, err)
}
return nil, fmt.Errorf("caddy responded with error: HTTP %d: %s", resp.StatusCode, respBody)
}
return resp, nil
}
// DetermineAdminAPIAddress determines which admin API endpoint address should
// be used based on the inputs. By priority: if `address` is specified, then
// it is returned; if `configFile` (and `configAdapter`) are specified, then that
// config will be loaded to find the admin address; otherwise, the default
// admin listen address will be returned.
func DetermineAdminAPIAddress(address, configFile, configAdapter string) (string, error) {
// Prefer the address if specified and non-empty
if address != "" {
return address, nil
}
// Try to load the config from file if specified, with the given adapter name
if configFile != "" {
// get the config in caddy's native format
config, loadedConfigFile, err := LoadConfig(configFile, configAdapter)
if err != nil {
return "", err
}
if loadedConfigFile == "" {
return "", fmt.Errorf("no config file to load")
}
// get the address of the admin listener
if len(config) > 0 {
var tmpStruct struct {
Admin caddy.AdminConfig `json:"admin"`
}
err = json.Unmarshal(config, &tmpStruct)
if err != nil {
return "", fmt.Errorf("unmarshaling admin listener address from config: %v", err)
}
return tmpStruct.Admin.Listen, nil
}
}
// Fallback to the default listen address otherwise
return caddy.DefaultAdminListen, nil
}
type moduleInfo struct {
caddyModuleID string
goModule *debug.Module
err error
}
|
[
"\"XDG_DATA_HOME\"",
"\"XDG_CONFIG_HOME\"",
"\"XDG_CACHE_HOME\"",
"\"HOME\"",
"\"USERPROFILE\"",
"\"home\"",
"\"HOME\""
] |
[] |
[
"XDG_DATA_HOME",
"home",
"XDG_CACHE_HOME",
"USERPROFILE",
"HOME",
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_DATA_HOME", "home", "XDG_CACHE_HOME", "USERPROFILE", "HOME", "XDG_CONFIG_HOME"]
|
go
| 6 | 0 | |
src/crypto/x509/verify.go
|
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package x509
import (
"bytes"
"errors"
"fmt"
"net"
"net/url"
"os"
"reflect"
"runtime"
"strings"
"time"
"unicode/utf8"
)
// ignoreCN disables interpreting Common Name as a hostname. See issue 24151.
var ignoreCN = strings.Contains(os.Getenv("GODEBUG"), "x509ignoreCN=1")
type InvalidReason int
const (
// NotAuthorizedToSign results when a certificate is signed by another
// which isn't marked as a CA certificate.
NotAuthorizedToSign InvalidReason = iota
// Expired results when a certificate has expired, based on the time
// given in the VerifyOptions.
Expired
// CANotAuthorizedForThisName results when an intermediate or root
// certificate has a name constraint which doesn't permit a DNS or
// other name (including IP address) in the leaf certificate.
CANotAuthorizedForThisName
// TooManyIntermediates results when a path length constraint is
// violated.
TooManyIntermediates
// IncompatibleUsage results when the certificate's key usage indicates
// that it may only be used for a different purpose.
IncompatibleUsage
// NameMismatch results when the subject name of a parent certificate
// does not match the issuer name in the child.
NameMismatch
// NameConstraintsWithoutSANs results when a leaf certificate doesn't
// contain a Subject Alternative Name extension, but a CA certificate
// contains name constraints, and the Common Name can be interpreted as
// a hostname.
//
// You can avoid this error by setting the experimental GODEBUG environment
// variable to "x509ignoreCN=1", disabling Common Name matching entirely.
// This behavior might become the default in the future.
NameConstraintsWithoutSANs
// UnconstrainedName results when a CA certificate contains permitted
// name constraints, but leaf certificate contains a name of an
// unsupported or unconstrained type.
UnconstrainedName
// TooManyConstraints results when the number of comparison operations
// needed to check a certificate exceeds the limit set by
// VerifyOptions.MaxConstraintComparisions. This limit exists to
// prevent pathological certificates from consuming excessive amounts of
// CPU time to verify.
TooManyConstraints
// CANotAuthorizedForExtKeyUsage results when an intermediate or root
// certificate does not permit a requested extended key usage.
CANotAuthorizedForExtKeyUsage
)
// CertificateInvalidError results when an odd error occurs. Users of this
// library probably want to handle all these errors uniformly.
type CertificateInvalidError struct {
Cert *Certificate
Reason InvalidReason
Detail string
}
func (e CertificateInvalidError) Error() string {
switch e.Reason {
case NotAuthorizedToSign:
return "x509: certificate is not authorized to sign other certificates"
case Expired:
return "x509: certificate has expired or is not yet valid"
case CANotAuthorizedForThisName:
return "x509: a root or intermediate certificate is not authorized to sign for this name: " + e.Detail
case CANotAuthorizedForExtKeyUsage:
return "x509: a root or intermediate certificate is not authorized for an extended key usage: " + e.Detail
case TooManyIntermediates:
return "x509: too many intermediates for path length constraint"
case IncompatibleUsage:
return "x509: certificate specifies an incompatible key usage"
case NameMismatch:
return "x509: issuer name does not match subject from issuing certificate"
case NameConstraintsWithoutSANs:
return "x509: issuer has name constraints but leaf doesn't have a SAN extension"
case UnconstrainedName:
return "x509: issuer has name constraints but leaf contains unknown or unconstrained name: " + e.Detail
}
return "x509: unknown error"
}
// HostnameError results when the set of authorized names doesn't match the
// requested name.
type HostnameError struct {
Certificate *Certificate
Host string
}
func (h HostnameError) Error() string {
c := h.Certificate
if !c.hasSANExtension() && !validHostname(c.Subject.CommonName) &&
matchHostnames(toLowerCaseASCII(c.Subject.CommonName), toLowerCaseASCII(h.Host)) {
// This would have validated, if it weren't for the validHostname check on Common Name.
return "x509: Common Name is not a valid hostname: " + c.Subject.CommonName
}
var valid string
if ip := net.ParseIP(h.Host); ip != nil {
// Trying to validate an IP
if len(c.IPAddresses) == 0 {
return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs"
}
for _, san := range c.IPAddresses {
if len(valid) > 0 {
valid += ", "
}
valid += san.String()
}
} else {
if c.commonNameAsHostname() {
valid = c.Subject.CommonName
} else {
valid = strings.Join(c.DNSNames, ", ")
}
}
if len(valid) == 0 {
return "x509: certificate is not valid for any names, but wanted to match " + h.Host
}
return "x509: certificate is valid for " + valid + ", not " + h.Host
}
// UnknownAuthorityError results when the certificate issuer is unknown
type UnknownAuthorityError struct {
Cert *Certificate
// hintErr contains an error that may be helpful in determining why an
// authority wasn't found.
hintErr error
// hintCert contains a possible authority certificate that was rejected
// because of the error in hintErr.
hintCert *Certificate
}
func (e UnknownAuthorityError) Error() string {
s := "x509: certificate signed by unknown authority"
if e.hintErr != nil {
certName := e.hintCert.Subject.CommonName
if len(certName) == 0 {
if len(e.hintCert.Subject.Organization) > 0 {
certName = e.hintCert.Subject.Organization[0]
} else {
certName = "serial:" + e.hintCert.SerialNumber.String()
}
}
s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName)
}
return s
}
// SystemRootsError results when we fail to load the system root certificates.
type SystemRootsError struct {
Err error
}
func (se SystemRootsError) Error() string {
msg := "x509: failed to load system roots and no roots provided"
if se.Err != nil {
return msg + "; " + se.Err.Error()
}
return msg
}
// errNotParsed is returned when a certificate without ASN.1 contents is
// verified. Platform-specific verification needs the ASN.1 contents.
var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate")
// VerifyOptions contains parameters for Certificate.Verify. It's a structure
// because other PKIX verification APIs have ended up needing many options.
type VerifyOptions struct {
DNSName string
Intermediates *CertPool
Roots *CertPool // if nil, the system roots are used
CurrentTime time.Time // if zero, the current time is used
// KeyUsage specifies which Extended Key Usage values are acceptable. A leaf
// certificate is accepted if it contains any of the listed values. An empty
// list means ExtKeyUsageServerAuth. To accept any key usage, include
// ExtKeyUsageAny.
//
// Certificate chains are required to nest these extended key usage values.
// (This matches the Windows CryptoAPI behavior, but not the spec.)
KeyUsages []ExtKeyUsage
// MaxConstraintComparisions is the maximum number of comparisons to
// perform when checking a given certificate's name constraints. If
// zero, a sensible default is used. This limit prevents pathological
// certificates from consuming excessive amounts of CPU time when
// validating.
MaxConstraintComparisions int
}
const (
leafCertificate = iota
intermediateCertificate
rootCertificate
)
// rfc2821Mailbox represents a “mailbox” (which is an email address to most
// people) by breaking it into the “local” (i.e. before the '@') and “domain”
// parts.
type rfc2821Mailbox struct {
local, domain string
}
// parseRFC2821Mailbox parses an email address into local and domain parts,
// based on the ABNF for a “Mailbox” from RFC 2821. According to
// https://tools.ietf.org/html/rfc5280#section-4.2.1.6 that's correct for an
// rfc822Name from a certificate: “The format of an rfc822Name is a "Mailbox"
// as defined in https://tools.ietf.org/html/rfc2821#section-4.1.2”.
func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) {
if len(in) == 0 {
return mailbox, false
}
localPartBytes := make([]byte, 0, len(in)/2)
if in[0] == '"' {
// Quoted-string = DQUOTE *qcontent DQUOTE
// non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127
// qcontent = qtext / quoted-pair
// qtext = non-whitespace-control /
// %d33 / %d35-91 / %d93-126
// quoted-pair = ("\" text) / obs-qp
// text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text
//
// (Names beginning with “obs-” are the obsolete syntax from
// https://tools.ietf.org/html/rfc2822#section-4. Since it has
// been 16 years, we no longer accept that.)
in = in[1:]
QuotedString:
for {
if len(in) == 0 {
return mailbox, false
}
c := in[0]
in = in[1:]
switch {
case c == '"':
break QuotedString
case c == '\\':
// quoted-pair
if len(in) == 0 {
return mailbox, false
}
if in[0] == 11 ||
in[0] == 12 ||
(1 <= in[0] && in[0] <= 9) ||
(14 <= in[0] && in[0] <= 127) {
localPartBytes = append(localPartBytes, in[0])
in = in[1:]
} else {
return mailbox, false
}
case c == 11 ||
c == 12 ||
// Space (char 32) is not allowed based on the
// BNF, but RFC 3696 gives an example that
// assumes that it is. Several “verified”
// errata continue to argue about this point.
// We choose to accept it.
c == 32 ||
c == 33 ||
c == 127 ||
(1 <= c && c <= 8) ||
(14 <= c && c <= 31) ||
(35 <= c && c <= 91) ||
(93 <= c && c <= 126):
// qtext
localPartBytes = append(localPartBytes, c)
default:
return mailbox, false
}
}
} else {
// Atom ("." Atom)*
NextChar:
for len(in) > 0 {
// atext from https://tools.ietf.org/html/rfc2822#section-3.2.4
c := in[0]
switch {
case c == '\\':
// Examples given in RFC 3696 suggest that
// escaped characters can appear outside of a
// quoted string. Several “verified” errata
// continue to argue the point. We choose to
// accept it.
in = in[1:]
if len(in) == 0 {
return mailbox, false
}
fallthrough
case ('0' <= c && c <= '9') ||
('a' <= c && c <= 'z') ||
('A' <= c && c <= 'Z') ||
c == '!' || c == '#' || c == '$' || c == '%' ||
c == '&' || c == '\'' || c == '*' || c == '+' ||
c == '-' || c == '/' || c == '=' || c == '?' ||
c == '^' || c == '_' || c == '`' || c == '{' ||
c == '|' || c == '}' || c == '~' || c == '.':
localPartBytes = append(localPartBytes, in[0])
in = in[1:]
default:
break NextChar
}
}
if len(localPartBytes) == 0 {
return mailbox, false
}
// https://tools.ietf.org/html/rfc3696#section-3
// “period (".") may also appear, but may not be used to start
// or end the local part, nor may two or more consecutive
// periods appear.”
twoDots := []byte{'.', '.'}
if localPartBytes[0] == '.' ||
localPartBytes[len(localPartBytes)-1] == '.' ||
bytes.Contains(localPartBytes, twoDots) {
return mailbox, false
}
}
if len(in) == 0 || in[0] != '@' {
return mailbox, false
}
in = in[1:]
// The RFC specifies a format for domains, but that's known to be
// violated in practice so we accept that anything after an '@' is the
// domain part.
if _, ok := domainToReverseLabels(in); !ok {
return mailbox, false
}
mailbox.local = string(localPartBytes)
mailbox.domain = in
return mailbox, true
}
// domainToReverseLabels converts a textual domain name like foo.example.com to
// the list of labels in reverse order, e.g. ["com", "example", "foo"].
func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
for len(domain) > 0 {
if i := strings.LastIndexByte(domain, '.'); i == -1 {
reverseLabels = append(reverseLabels, domain)
domain = ""
} else {
reverseLabels = append(reverseLabels, domain[i+1:])
domain = domain[:i]
}
}
if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 {
// An empty label at the end indicates an absolute (fully qualified) domain name.
return nil, false
}
for _, label := range reverseLabels {
if len(label) == 0 {
// Empty labels are otherwise invalid.
return nil, false
}
for _, c := range label {
if c < 33 || c > 126 {
// Invalid character.
return nil, false
}
}
}
return reverseLabels, true
}
func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) {
// If the constraint contains an @, then it specifies an exact mailbox
// name.
if strings.Contains(constraint, "@") {
constraintMailbox, ok := parseRFC2821Mailbox(constraint)
if !ok {
return false, fmt.Errorf("x509: internal error: cannot parse constraint %q", constraint)
}
return mailbox.local == constraintMailbox.local && strings.EqualFold(mailbox.domain, constraintMailbox.domain), nil
}
// Otherwise the constraint is like a DNS constraint of the domain part
// of the mailbox.
return matchDomainConstraint(mailbox.domain, constraint)
}
func matchURIConstraint(uri *url.URL, constraint string) (bool, error) {
// https://tools.ietf.org/html/rfc5280#section-4.2.1.10
// “a uniformResourceIdentifier that does not include an authority
// component with a host name specified as a fully qualified domain
// name (e.g., if the URI either does not include an authority
// component or includes an authority component in which the host name
// is specified as an IP address), then the application MUST reject the
// certificate.”
host := uri.Host
if len(host) == 0 {
return false, fmt.Errorf("URI with empty host (%q) cannot be matched against constraints", uri.String())
}
if strings.Contains(host, ":") && !strings.HasSuffix(host, "]") {
var err error
host, _, err = net.SplitHostPort(uri.Host)
if err != nil {
return false, err
}
}
if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") ||
net.ParseIP(host) != nil {
return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String())
}
return matchDomainConstraint(host, constraint)
}
func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) {
if len(ip) != len(constraint.IP) {
return false, nil
}
for i := range ip {
if mask := constraint.Mask[i]; ip[i]&mask != constraint.IP[i]&mask {
return false, nil
}
}
return true, nil
}
func matchDomainConstraint(domain, constraint string) (bool, error) {
// The meaning of zero length constraints is not specified, but this
// code follows NSS and accepts them as matching everything.
if len(constraint) == 0 {
return true, nil
}
domainLabels, ok := domainToReverseLabels(domain)
if !ok {
return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain)
}
// RFC 5280 says that a leading period in a domain name means that at
// least one label must be prepended, but only for URI and email
// constraints, not DNS constraints. The code also supports that
// behaviour for DNS constraints.
mustHaveSubdomains := false
if constraint[0] == '.' {
mustHaveSubdomains = true
constraint = constraint[1:]
}
constraintLabels, ok := domainToReverseLabels(constraint)
if !ok {
return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint)
}
if len(domainLabels) < len(constraintLabels) ||
(mustHaveSubdomains && len(domainLabels) == len(constraintLabels)) {
return false, nil
}
for i, constraintLabel := range constraintLabels {
if !strings.EqualFold(constraintLabel, domainLabels[i]) {
return false, nil
}
}
return true, nil
}
// checkNameConstraints checks that c permits a child certificate to claim the
// given name, of type nameType. The argument parsedName contains the parsed
// form of name, suitable for passing to the match function. The total number
// of comparisons is tracked in the given count and should not exceed the given
// limit.
func (c *Certificate) checkNameConstraints(count *int,
maxConstraintComparisons int,
nameType string,
name string,
parsedName interface{},
match func(parsedName, constraint interface{}) (match bool, err error),
permitted, excluded interface{}) error {
excludedValue := reflect.ValueOf(excluded)
*count += excludedValue.Len()
if *count > maxConstraintComparisons {
return CertificateInvalidError{c, TooManyConstraints, ""}
}
for i := 0; i < excludedValue.Len(); i++ {
constraint := excludedValue.Index(i).Interface()
match, err := match(parsedName, constraint)
if err != nil {
return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
}
if match {
return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is excluded by constraint %q", nameType, name, constraint)}
}
}
permittedValue := reflect.ValueOf(permitted)
*count += permittedValue.Len()
if *count > maxConstraintComparisons {
return CertificateInvalidError{c, TooManyConstraints, ""}
}
ok := true
for i := 0; i < permittedValue.Len(); i++ {
constraint := permittedValue.Index(i).Interface()
var err error
if ok, err = match(parsedName, constraint); err != nil {
return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()}
}
if ok {
break
}
}
if !ok {
return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is not permitted by any constraint", nameType, name)}
}
return nil
}
// isValid performs validity checks on c given that it is a candidate to append
// to the chain in currentChain.
func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error {
if len(c.UnhandledCriticalExtensions) > 0 {
return UnhandledCriticalExtension{}
}
if len(currentChain) > 0 {
child := currentChain[len(currentChain)-1]
if !bytes.Equal(child.RawIssuer, c.RawSubject) {
return CertificateInvalidError{c, NameMismatch, ""}
}
}
now := opts.CurrentTime
if now.IsZero() {
now = time.Now()
}
if now.Before(c.NotBefore) || now.After(c.NotAfter) {
return CertificateInvalidError{c, Expired, ""}
}
maxConstraintComparisons := opts.MaxConstraintComparisions
if maxConstraintComparisons == 0 {
maxConstraintComparisons = 250000
}
comparisonCount := 0
var leaf *Certificate
if certType == intermediateCertificate || certType == rootCertificate {
if len(currentChain) == 0 {
return errors.New("x509: internal error: empty chain when appending CA cert")
}
leaf = currentChain[0]
}
checkNameConstraints := (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints()
if checkNameConstraints && leaf.commonNameAsHostname() {
// This is the deprecated, legacy case of depending on the commonName as
// a hostname. We don't enforce name constraints against the CN, but
// VerifyHostname will look for hostnames in there if there are no SANs.
// In order to ensure VerifyHostname will not accept an unchecked name,
// return an error here.
return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""}
} else if checkNameConstraints && leaf.hasSANExtension() {
err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error {
switch tag {
case nameTypeEmail:
name := string(data)
mailbox, ok := parseRFC2821Mailbox(name)
if !ok {
return fmt.Errorf("x509: cannot parse rfc822Name %q", mailbox)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
func(parsedName, constraint interface{}) (bool, error) {
return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string))
}, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil {
return err
}
case nameTypeDNS:
name := string(data)
if _, ok := domainToReverseLabels(name); !ok {
return fmt.Errorf("x509: cannot parse dnsName %q", name)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
func(parsedName, constraint interface{}) (bool, error) {
return matchDomainConstraint(parsedName.(string), constraint.(string))
}, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil {
return err
}
case nameTypeURI:
name := string(data)
uri, err := url.Parse(name)
if err != nil {
return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri,
func(parsedName, constraint interface{}) (bool, error) {
return matchURIConstraint(parsedName.(*url.URL), constraint.(string))
}, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil {
return err
}
case nameTypeIP:
ip := net.IP(data)
if l := len(ip); l != net.IPv4len && l != net.IPv6len {
return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data)
}
if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip,
func(parsedName, constraint interface{}) (bool, error) {
return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet))
}, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil {
return err
}
default:
// Unknown SAN types are ignored.
}
return nil
})
if err != nil {
return err
}
}
// KeyUsage status flags are ignored. From Engineering Security, Peter
// Gutmann: A European government CA marked its signing certificates as
// being valid for encryption only, but no-one noticed. Another
// European CA marked its signature keys as not being valid for
// signatures. A different CA marked its own trusted root certificate
// as being invalid for certificate signing. Another national CA
// distributed a certificate to be used to encrypt data for the
// country’s tax authority that was marked as only being usable for
// digital signatures but not for encryption. Yet another CA reversed
// the order of the bit flags in the keyUsage due to confusion over
// encoding endianness, essentially setting a random keyUsage in
// certificates that it issued. Another CA created a self-invalidating
// certificate by adding a certificate policy statement stipulating
// that the certificate had to be used strictly as specified in the
// keyUsage, and a keyUsage containing a flag indicating that the RSA
// encryption key could only be used for Diffie-Hellman key agreement.
if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
return CertificateInvalidError{c, NotAuthorizedToSign, ""}
}
if c.BasicConstraintsValid && c.MaxPathLen >= 0 {
numIntermediates := len(currentChain) - 1
if numIntermediates > c.MaxPathLen {
return CertificateInvalidError{c, TooManyIntermediates, ""}
}
}
return nil
}
// Verify attempts to verify c by building one or more chains from c to a
// certificate in opts.Roots, using certificates in opts.Intermediates if
// needed. If successful, it returns one or more chains where the first
// element of the chain is c and the last element is from opts.Roots.
//
// If opts.Roots is nil and system roots are unavailable the returned error
// will be of type SystemRootsError.
//
// Name constraints in the intermediates will be applied to all names claimed
// in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim
// example.com if an intermediate doesn't permit it, even if example.com is not
// the name being validated. Note that DirectoryName constraints are not
// supported.
//
// Extended Key Usage values are enforced down a chain, so an intermediate or
// root that enumerates EKUs prevents a leaf from asserting an EKU not in that
// list.
//
// WARNING: this function doesn't do any revocation checking.
func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
// Platform-specific verification needs the ASN.1 contents so
// this makes the behavior consistent across platforms.
if len(c.Raw) == 0 {
return nil, errNotParsed
}
if opts.Intermediates != nil {
for _, intermediate := range opts.Intermediates.certs {
if len(intermediate.Raw) == 0 {
return nil, errNotParsed
}
}
}
// Use Windows's own verification and chain building.
if opts.Roots == nil && runtime.GOOS == "windows" {
return c.systemVerify(&opts)
}
if opts.Roots == nil {
opts.Roots = systemRootsPool()
if opts.Roots == nil {
return nil, SystemRootsError{systemRootsErr}
}
}
err = c.isValid(leafCertificate, nil, &opts)
if err != nil {
return
}
if len(opts.DNSName) > 0 {
err = c.VerifyHostname(opts.DNSName)
if err != nil {
return
}
}
var candidateChains [][]*Certificate
if opts.Roots.contains(c) {
candidateChains = append(candidateChains, []*Certificate{c})
} else {
if candidateChains, err = c.buildChains(make(map[int][][]*Certificate), []*Certificate{c}, &opts); err != nil {
return nil, err
}
}
keyUsages := opts.KeyUsages
if len(keyUsages) == 0 {
keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
}
// If any key usage is acceptable then we're done.
for _, usage := range keyUsages {
if usage == ExtKeyUsageAny {
return candidateChains, nil
}
}
for _, candidate := range candidateChains {
if checkChainForKeyUsage(candidate, keyUsages) {
chains = append(chains, candidate)
}
}
if len(chains) == 0 {
return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
}
return chains, nil
}
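// verifyLeafExample is an editor-added sketch, not part of the original file:
// it illustrates the typical shape of a Verify call described in the comment
// above. The leaf and roots arguments are assumed to have been obtained by the
// caller (for example via ParseCertificate and a populated CertPool).
func verifyLeafExample(leaf *Certificate, roots *CertPool) ([][]*Certificate, error) {
	opts := VerifyOptions{
		DNSName: "example.com", // name the caller wants the certificate to cover
		Roots:   roots,         // trust anchors; leaving this nil falls back to the system roots
	}
	// Each returned chain starts at leaf and ends at one of the roots.
	return leaf.Verify(opts)
}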
func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
n := make([]*Certificate, len(chain)+1)
copy(n, chain)
n[len(chain)] = cert
return n
}
func (c *Certificate) buildChains(cache map[int][][]*Certificate, currentChain []*Certificate, opts *VerifyOptions) (chains [][]*Certificate, err error) {
possibleRoots, failedRoot, rootErr := opts.Roots.findVerifiedParents(c)
nextRoot:
for _, rootNum := range possibleRoots {
root := opts.Roots.certs[rootNum]
for _, cert := range currentChain {
if cert.Equal(root) {
continue nextRoot
}
}
err = root.isValid(rootCertificate, currentChain, opts)
if err != nil {
continue
}
chains = append(chains, appendToFreshChain(currentChain, root))
}
possibleIntermediates, failedIntermediate, intermediateErr := opts.Intermediates.findVerifiedParents(c)
nextIntermediate:
for _, intermediateNum := range possibleIntermediates {
intermediate := opts.Intermediates.certs[intermediateNum]
for _, cert := range currentChain {
if cert.Equal(intermediate) {
continue nextIntermediate
}
}
err = intermediate.isValid(intermediateCertificate, currentChain, opts)
if err != nil {
continue
}
var childChains [][]*Certificate
childChains, ok := cache[intermediateNum]
if !ok {
childChains, err = intermediate.buildChains(cache, appendToFreshChain(currentChain, intermediate), opts)
cache[intermediateNum] = childChains
}
chains = append(chains, childChains...)
}
if len(chains) > 0 {
err = nil
}
if len(chains) == 0 && err == nil {
hintErr := rootErr
hintCert := failedRoot
if hintErr == nil {
hintErr = intermediateErr
hintCert = failedIntermediate
}
err = UnknownAuthorityError{c, hintErr, hintCert}
}
return
}
// validHostname returns whether host is a valid hostname that can be matched or
// matched against according to RFC 6125 2.2, with some leniency to accommodate
// legacy values.
func validHostname(host string) bool {
host = strings.TrimSuffix(host, ".")
if len(host) == 0 {
return false
}
for i, part := range strings.Split(host, ".") {
if part == "" {
// Empty label.
return false
}
if i == 0 && part == "*" {
// Only allow full left-most wildcards, as those are the only ones
// we match, and matching literal '*' characters is probably never
// the expected behavior.
continue
}
for j, c := range part {
if 'a' <= c && c <= 'z' {
continue
}
if '0' <= c && c <= '9' {
continue
}
if 'A' <= c && c <= 'Z' {
continue
}
if c == '-' && j != 0 {
continue
}
if c == '_' || c == ':' {
// Not valid characters in hostnames, but commonly
// found in deployments outside the WebPKI.
continue
}
return false
}
}
return true
}
// commonNameAsHostname reports whether the Common Name field should be
// considered the hostname that the certificate is valid for. This is a legacy
// behavior, disabled if the Subject Alt Name extension is present.
//
// It applies the strict validHostname check to the Common Name field, so that
// certificates without SANs can still be validated against CAs with name
// constraints if there is no risk the CN would be matched as a hostname.
// See NameConstraintsWithoutSANs and issue 24151.
func (c *Certificate) commonNameAsHostname() bool {
return !ignoreCN && !c.hasSANExtension() && validHostname(c.Subject.CommonName)
}
func matchHostnames(pattern, host string) bool {
host = strings.TrimSuffix(host, ".")
pattern = strings.TrimSuffix(pattern, ".")
if len(pattern) == 0 || len(host) == 0 {
return false
}
patternParts := strings.Split(pattern, ".")
hostParts := strings.Split(host, ".")
if len(patternParts) != len(hostParts) {
return false
}
for i, patternPart := range patternParts {
if i == 0 && patternPart == "*" {
continue
}
if patternPart != hostParts[i] {
return false
}
}
return true
}
// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use
// an explicitly ASCII function to avoid any sharp corners resulting from
// performing Unicode operations on DNS labels.
func toLowerCaseASCII(in string) string {
// If the string is already lower-case then there's nothing to do.
isAlreadyLowerCase := true
for _, c := range in {
if c == utf8.RuneError {
// If we get a UTF-8 error then there might be
// upper-case ASCII bytes in the invalid sequence.
isAlreadyLowerCase = false
break
}
if 'A' <= c && c <= 'Z' {
isAlreadyLowerCase = false
break
}
}
if isAlreadyLowerCase {
return in
}
out := []byte(in)
for i, c := range out {
if 'A' <= c && c <= 'Z' {
out[i] += 'a' - 'A'
}
}
return string(out)
}
// VerifyHostname returns nil if c is a valid certificate for the named host.
// Otherwise it returns an error describing the mismatch.
func (c *Certificate) VerifyHostname(h string) error {
// IP addresses may be written in [ ].
candidateIP := h
if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' {
candidateIP = h[1 : len(h)-1]
}
if ip := net.ParseIP(candidateIP); ip != nil {
// We only match IP addresses against IP SANs.
// https://tools.ietf.org/html/rfc6125#appendix-B.2
for _, candidate := range c.IPAddresses {
if ip.Equal(candidate) {
return nil
}
}
return HostnameError{c, candidateIP}
}
lowered := toLowerCaseASCII(h)
if c.commonNameAsHostname() {
if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) {
return nil
}
} else {
for _, match := range c.DNSNames {
if matchHostnames(toLowerCaseASCII(match), lowered) {
return nil
}
}
}
return HostnameError{c, h}
}
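// verifyHostnameExample is an editor-added sketch, not part of the original
// file: a nil error from VerifyHostname means the certificate covers the name,
// with IP literals (optionally in brackets) matched only against IP SANs.
func verifyHostnameExample(cert *Certificate) bool {
	return cert.VerifyHostname("example.com") == nil
}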
func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool {
usages := make([]ExtKeyUsage, len(keyUsages))
copy(usages, keyUsages)
if len(chain) == 0 {
return false
}
usagesRemaining := len(usages)
// We walk down the list and cross out any usages that aren't supported
// by each certificate. If we cross out all the usages, then the chain
// is unacceptable.
NextCert:
for i := len(chain) - 1; i >= 0; i-- {
cert := chain[i]
if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 {
// The certificate doesn't have any extended key usage specified.
continue
}
for _, usage := range cert.ExtKeyUsage {
if usage == ExtKeyUsageAny {
// The certificate is explicitly good for any usage.
continue NextCert
}
}
const invalidUsage ExtKeyUsage = -1
NextRequestedUsage:
for i, requestedUsage := range usages {
if requestedUsage == invalidUsage {
continue
}
for _, usage := range cert.ExtKeyUsage {
if requestedUsage == usage {
continue NextRequestedUsage
} else if requestedUsage == ExtKeyUsageServerAuth &&
(usage == ExtKeyUsageNetscapeServerGatedCrypto ||
usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
// In order to support COMODO
// certificate chains, we have to
// accept Netscape or Microsoft SGC
// usages as equal to ServerAuth.
continue NextRequestedUsage
}
}
usages[i] = invalidUsage
usagesRemaining--
if usagesRemaining == 0 {
return false
}
}
}
return true
}
|
[
"\"GODEBUG\""
] |
[] |
[
"GODEBUG"
] |
[]
|
["GODEBUG"]
|
go
| 1 | 0 | |
src/main/java/com/june/util/SystemInfo.java
|
package com.june.util;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.log4j.Logger;
import org.hyperic.sigar.CpuPerc;
import org.hyperic.sigar.FileSystem;
import org.hyperic.sigar.FileSystemUsage;
import org.hyperic.sigar.Mem;
import org.hyperic.sigar.NetInterfaceConfig;
import org.hyperic.sigar.NetInterfaceStat;
import org.hyperic.sigar.Sigar;
import org.hyperic.sigar.Swap;
import com.june.entity.ServerInfoFormMap;
/**
 *
 * System information utility class <br>
 * Uses the Sigar jar to obtain system information
 *
 * @author 王俊伟 [email protected]
 * @date 2016-09-19 17:54:16
 */
public class SystemInfo {
private static final Logger logger = Logger.getLogger(SystemInfo.class);
/**
 * Gets basic system information such as the IP address, OS name, architecture, version and temp path
 *
 * @return
 * @date 2016-09-19 17:55:15
 * @writer iscas
 */
public static ServerInfoFormMap SystemProperty() {
ServerInfoFormMap monitorMap = new ServerInfoFormMap();
Runtime r = Runtime.getRuntime();
Properties props = System.getProperties();
Map<String, String> map = System.getenv();//
InetAddress addr = null;
String ip = "";
String hostName = "";
try {
addr = InetAddress.getLocalHost();
} catch (UnknownHostException e) {
ip = MessageUtil.resource("info_ip_no_addr");
hostName = MessageUtil.resource("info_ip_no_name");
}
if (null != addr) {
try {
ip = addr.getHostAddress();
} catch (Exception e) {
ip = MessageUtil.resource("info_ip_no_addr");
}
try {
hostName = addr.getHostName();
} catch (Exception e) {
hostName = MessageUtil.resource("info_ip_no_name");
}
}
String userName = map.get("USERNAME");// user name from the environment
String computerName = map.get("COMPUTERNAME");// computer name
String userDomain = map.get("USERDOMAIN");// computer domain name
monitorMap.put("userName", userName);// user name
monitorMap.put("computerName", computerName);// computer name
monitorMap.put("userDomain", userDomain);// computer domain name
monitorMap.put("hostIp", ip);// local IP address
monitorMap.put("hostName", hostName);// local host name
monitorMap.put("osName", props.getProperty("os.name"));// operating system name
monitorMap.put("arch", props.getProperty("os.arch"));// operating system architecture
monitorMap.put("osVersion", props.getProperty("os.version"));// operating system version
monitorMap.put("processors", r.availableProcessors());// number of processors available to the JVM
monitorMap.put("javaVersion", props.getProperty("java.version"));// Java runtime version
monitorMap.put("vendor", props.getProperty("java.vendor"));// Java runtime vendor
monitorMap.put("javaUrl", props.getProperty("java.vendor.url"));// Java vendor URL
monitorMap.put("javaHome", props.getProperty("java.home"));// Java installation path
monitorMap.put("tmpdir", props.getProperty("java.io.tmpdir"));// default temporary file path
monitorMap.put("totalMemory", r.totalMemory());// total memory available to the JVM
monitorMap.put("freeMemory", r.freeMemory());// free memory available to the JVM
monitorMap.put("fileSep", props.getProperty("file.separator"));// file separator
monitorMap.put("pahtSep", props.getProperty("path.separator"));// path separator
monitorMap.put("lineSep", props.getProperty("line.separator"));// line separator
monitorMap.put("userName", props.getProperty("user.name"));// user account name (overwrites the USERNAME environment value above)
monitorMap.put("userHome", props.getProperty("user.home"));// user home directory
monitorMap.put("userDir", props.getProperty("user.dir"));// user's current working directory
return monitorMap;
}
/**
 * Gets system memory information (JVM, physical memory and swap)
 *
 * @param sigar
 * @return
 * @date 2016-09-19 17:56:42
 * @writer iscas
 */
public static ServerInfoFormMap memory(Sigar sigar) {
ServerInfoFormMap monitorMap = new ServerInfoFormMap();
try {
Runtime r = Runtime.getRuntime();
monitorMap.put("jvmTotal", Common.div(r.totalMemory(), (1024 * 1024), 2) + "M");// java总内存
monitorMap.put("jvmUse", Common.div(r.totalMemory() - r.freeMemory(), (1024 * 1024), 2) + "M");// JVM使用内存
monitorMap.put("jvmFree", Common.div(r.freeMemory(), (1024 * 1024), 2) + "M");// JVM剩余内存
monitorMap.put("jvmUsage", Common.div(r.totalMemory() - r.freeMemory(), r.totalMemory(), 2));// JVM使用率
Mem mem = sigar.getMem();
// 内存总量
monitorMap.put("ramTotal", Common.div(mem.getTotal(), (1024 * 1024 * 1024), 2) + "G");// 内存总量
monitorMap.put("ramUse", Common.div(mem.getUsed(), (1024 * 1024 * 1024), 2) + "G");// 当前内存使用量
monitorMap.put("ramFree", Common.div(mem.getFree(), (1024 * 1024 * 1024), 2) + "G");// 当前内存剩余量
monitorMap.put("ramUsage", Common.div(mem.getUsed(), mem.getTotal(), 2));// 内存使用率
Swap swap = sigar.getSwap();
// 交换区总量
monitorMap.put("swapTotal", Common.div(swap.getTotal(), (1024 * 1024 * 1024), 2) + "G");
// 当前交换区使用量
monitorMap.put("swapUse", Common.div(swap.getUsed(), (1024 * 1024 * 1024), 2) + "G");
// 当前交换区剩余量
monitorMap.put("swapFree", Common.div(swap.getFree(), (1024 * 1024 * 1024), 2) + "G");
monitorMap.put("swapUsage", Common.div(swap.getUsed(), swap.getTotal(), 2));//
} catch (Exception e) {
}
return monitorMap;
}
/**
 * Gets JVM, memory and CPU usage percentages
 *
 * @param sigar
 * @return
 * @date 2016-09-19 17:57:12
 * @writer iscas
 */
public static ServerInfoFormMap usage(Sigar sigar) {
ServerInfoFormMap monitorMap = new ServerInfoFormMap();
try {
Runtime r = Runtime.getRuntime();
monitorMap.put("jvmUsage",
Math.round(Common.div(r.totalMemory() - r.freeMemory(), r.totalMemory(), 2) * 100));// JVM使用率
Mem mem = sigar.getMem();
// 内存总量
monitorMap.put("ramUsage", Math.round(Common.div(mem.getUsed(), mem.getTotal(), 2) * 100));// 内存使用率
List<ServerInfoFormMap> cpu = cpuInfos(sigar);
double b = 0.0;
for (ServerInfoFormMap m : cpu) {
b += Double.valueOf(m.get("cpuTotal") + "");
}
monitorMap.put("cpuUsage", Math.round(b / cpu.size()));// cpu使用率
} catch (Exception e) {
}
return monitorMap;
}
/**
 * Gets per-core CPU usage information
 *
 * @param sigar
 * @return
 * @date 2016-09-19 17:57:43
 * @writer iscas
 */
public static List<ServerInfoFormMap> cpuInfos(Sigar sigar) {
List<ServerInfoFormMap> monitorMaps = new ArrayList<ServerInfoFormMap>();
try {
CpuPerc cpuList[] = sigar.getCpuPercList();
for (CpuPerc cpuPerc : cpuList) {
ServerInfoFormMap monitorMap = new ServerInfoFormMap();
monitorMap.put("cpuUserUse", Math.round(cpuPerc.getUser() * 100));// 用户使用率
monitorMap.put("cpuSysUse", Math.round(cpuPerc.getSys() * 100));// 系统使用率
monitorMap.put("cpuWait", Math.round(cpuPerc.getWait() * 100));// 当前等待率
monitorMap.put("cpuFree", Math.round(cpuPerc.getIdle() * 100));// 当前空闲率
monitorMap.put("cpuTotal", Math.round(cpuPerc.getCombined() * 100));// 总的使用率
monitorMaps.add(monitorMap);
}
} catch (Exception e) {
}
return monitorMaps;
}
public static List<ServerInfoFormMap> netInfos(Sigar sigar) {
List<ServerInfoFormMap> monitorMaps = new ArrayList<ServerInfoFormMap>();
try {
String[] ifNames = sigar.getNetInterfaceList();
for (String name : ifNames) {
ServerInfoFormMap monitorMap = new ServerInfoFormMap();
NetInterfaceConfig ifconfig = sigar.getNetInterfaceConfig(name);
monitorMap.put("name", name);// 网络设备名
monitorMap.put("address", ifconfig.getAddress());// IP地址
monitorMap.put("netmask", ifconfig.getNetmask());// 子网掩码
if ((ifconfig.getFlags() & 1L) <= 0L) {
// System.out.println("!IFF_UP...skippinggetNetInterfaceStat");
continue;
}
NetInterfaceStat ifstat = sigar.getNetInterfaceStat(name);
monitorMap.put("getRxPackets", ifstat.getRxPackets());// 接收的总包裹数
monitorMap.put("getTxPackets", ifstat.getTxPackets());// 发送的总包裹数
monitorMap.put("getRxBytes", ifstat.getRxBytes());// 接收到的总字节数
monitorMap.put("getTxBytes", ifstat.getTxBytes());// 发送的总字节数
monitorMap.put("getRxErrors", ifstat.getRxErrors());// 接收到的错误包数
monitorMap.put("getTxErrors", ifstat.getTxErrors());// 发送数据包时的错误数
monitorMap.put("getRxDropped", ifstat.getRxDropped());// 接收时丢弃的包数
monitorMap.put("getTxDropped", ifstat.getTxDropped());// 发送时丢弃的包数
monitorMaps.add(monitorMap);
}
} catch (Exception e) {
logger.debug("net info error");
}
return monitorMaps;
}
/**
 * Gets file system information by type, e.g. local disk, CD-ROM, network file system, etc.
 *
 * @param sigar
 * @return
 * @throws Exception
 * @date 2016-09-19 17:58:01
 * @writer iscas
 */
public List<ServerInfoFormMap> diskInfos(Sigar sigar) throws Exception {
List<ServerInfoFormMap> monitorMaps = new ArrayList<ServerInfoFormMap>();
FileSystem fslist[] = sigar.getFileSystemList();
for (int i = 0; i < fslist.length; i++) {
ServerInfoFormMap monitorMap = new ServerInfoFormMap();
FileSystem fs = fslist[i];
// file system type, e.g. local disk, CD-ROM, network file system
FileSystemUsage usage = null;
usage = sigar.getFileSystemUsage(fs.getDirName());
switch (fs.getType()) {
case 0: // TYPE_UNKNOWN: unknown
break;
case 1: // TYPE_NONE
break;
case 2: // TYPE_LOCAL_DISK: local disk
monitorMap.put("diskName", fs.getDevName());// device name of the disk
monitorMap.put("diskType", fs.getSysTypeName());// file system type
// total size of the file system
monitorMap.put("diskTotal", usage.getTotal());
// free space on the file system
monitorMap.put("diskFree", usage.getFree());
// space already used on the file system
monitorMap.put("diskUse", usage.getUsed());
double usePercent = usage.getUsePercent() * 100D;
// utilisation of the file system
monitorMap.put("diskUsage", usePercent);// disk usage percentage
monitorMaps.add(monitorMap);
break;
case 3:// TYPE_NETWORK: network file system
break;
case 4:// TYPE_RAM_DISK: RAM disk
break;
case 5:// TYPE_CDROM: CD-ROM
break;
case 6:// TYPE_SWAP: swap
monitorMap.put("swapRead", usage.getDiskReads());
monitorMap.put("swapWrite", usage.getDiskWrites());
monitorMaps.add(monitorMap);
break;
}
}
return monitorMaps;
}
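/**
 * Editor-added sketch, not part of the original class: a minimal, assumed
 * usage example. It presumes the Sigar native library is available on
 * java.library.path; when it is not, the Sigar-backed methods above log the
 * failure and return empty maps because of their catch blocks.
 */
public static void main(String[] args) {
	Sigar sigar = new Sigar();
	logger.info(SystemInfo.SystemProperty());// host and JVM properties, no native code needed
	logger.info(SystemInfo.memory(sigar));// JVM, RAM and swap figures
	logger.info(SystemInfo.usage(sigar));// JVM, RAM and CPU usage percentages
	logger.info(SystemInfo.cpuInfos(sigar));// per-core CPU breakdown
}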
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
autoshort.py
|
import inspect
import os
import pyperclip
import requests
import time
from urllib.parse import quote
# a list of the request error classes
request_errors = [obj for name, obj in inspect.getmembers(requests.exceptions)
if inspect.isclass(obj) and issubclass(obj, Exception)]
# main daemon loop
while True:
# get clipboard value
clipboard = pyperclip.paste()
try:
# percent encode the clipboard value
safe_cb = quote(clipboard, safe='')
# bitly API access token
token = os.environ.get('BITLY_TOKEN')
# URL that will make the API call
bitly_url = 'https://api-ssl.bitly.com/v3/shorten?' + \
'access_token=' + token + '&longUrl=' + safe_cb
# get the json return from the API call
short_url = requests.get(bitly_url).json()
# if everything went as planned
if(short_url['status_txt'] == 'OK'):
pyperclip.copy(short_url['data']['url'])
except Exception as e:
# if something went wrong with the request, i.e. not a link
if(any(issubclass(e.__class__, lv) for lv in request_errors)):
pass
else:
raise(e)
# wait until the clipboard changes
while(pyperclip.paste() == clipboard):
time.sleep(.1)
|
[] |
[] |
[
"BITLY_TOKEN"
] |
[]
|
["BITLY_TOKEN"]
|
python
| 1 | 0 | |
assignment_dashboard/app.py
|
import os
from flask import Flask
from werkzeug.contrib.cache import RedisCache, SimpleCache
from .config import BaseConfig
app = Flask(__name__)
app.config.from_object(BaseConfig)
if os.environ.get('FLASK_DEBUG'):
from flask_debugtoolbar import DebugToolbarExtension
toolbar = DebugToolbarExtension(app)
app.cache = RedisCache(host=app.config['REDIS_HOST']) if 'REDIS_HOST' in app.config else SimpleCache()
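def _cache_roundtrip_example():
    """Editor-added sketch, not part of the original module: RedisCache and
    SimpleCache share the same werkzeug cache interface, so callers can use
    app.cache the same way regardless of which backend was picked above."""
    app.cache.set('example-key', 'example-value', timeout=60)
    return app.cache.get('example-key')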
|
[] |
[] |
[
"FLASK_DEBUG"
] |
[]
|
["FLASK_DEBUG"]
|
python
| 1 | 0 | |
netbox/client/dcim/dcim_power_port_templates_update_responses.go
|
// Code generated by go-swagger; DO NOT EDIT.
// Copyright 2020 The go-netbox Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package dcim
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/netbox-community/go-netbox/netbox/models"
)
// DcimPowerPortTemplatesUpdateReader is a Reader for the DcimPowerPortTemplatesUpdate structure.
type DcimPowerPortTemplatesUpdateReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *DcimPowerPortTemplatesUpdateReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewDcimPowerPortTemplatesUpdateOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewDcimPowerPortTemplatesUpdateOK creates a DcimPowerPortTemplatesUpdateOK with default headers values
func NewDcimPowerPortTemplatesUpdateOK() *DcimPowerPortTemplatesUpdateOK {
return &DcimPowerPortTemplatesUpdateOK{}
}
/* DcimPowerPortTemplatesUpdateOK describes a response with status code 200, with default header values.
DcimPowerPortTemplatesUpdateOK dcim power port templates update o k
*/
type DcimPowerPortTemplatesUpdateOK struct {
Payload *models.PowerPortTemplate
}
func (o *DcimPowerPortTemplatesUpdateOK) Error() string {
return fmt.Sprintf("[PUT /dcim/power-port-templates/{id}/][%d] dcimPowerPortTemplatesUpdateOK %+v", 200, o.Payload)
}
func (o *DcimPowerPortTemplatesUpdateOK) GetPayload() *models.PowerPortTemplate {
return o.Payload
}
func (o *DcimPowerPortTemplatesUpdateOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.PowerPortTemplate)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
self_attention_cv/common.py
|
import os
import random
import numpy as np
import torch
from einops import repeat
def expand_to_batch(tensor, desired_size):
tile = desired_size // tensor.shape[0]
return repeat(tensor, 'b ... -> (b tile) ...', tile=tile)
def init_random_seed(seed, gpu=False):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if gpu:
torch.backends.cudnn.deterministic = True
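def _expand_to_batch_example():
    """Editor-added sketch, not part of the original module: expand_to_batch
    tiles a tensor along its first dimension, assuming desired_size is a
    multiple of tensor.shape[0] (here 1 -> 8)."""
    pos_emb = torch.randn(1, 196, 768)  # e.g. a positional embedding of shape [1, tokens, dim]
    return expand_to_batch(pos_emb, desired_size=8)  # shape [8, 196, 768]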
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
demos/secure_inference/insecure/run.py
|
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
import numpy as np
import cv2
import time
import torch
from model_sphereface import sphere20a
THRESHOLD = 0.295
def get_cosdist(f1: np.ndarray, f2: np.ndarray):
if isinstance(f1, list):
f1 = np.asarray(f1)
if isinstance(f2, list):
f2 = np.asarray(f2)
# print(f1.shape, f2.shape)
return f1.dot(f2) / ( np.linalg.norm(f1) * np.linalg.norm(f2) + 1e-5)
class Insecure_Client(object):
def __init__(self):
self.torch_model = sphere20a(feature=True).cpu()
pretrained_weights = torch.load('../../data/FaceRecognition/sphere20a_20171020.pth')
pretrained_weights_for_inference = {k:v for k, v in pretrained_weights.items() if 'fc6' not in k}
self.torch_model.load_state_dict(pretrained_weights_for_inference )
def inference(self, raw_img):
t0 = time.time()
x = torch.tensor(raw_img).cpu()
_prob = self.torch_model(x).detach().numpy()
cosdist = get_cosdist(_prob[0], _prob[1])
return {'feature': _prob, 'dist': cosdist, 'pred': int(cosdist > THRESHOLD), 'runtime': time.time()-t0}
def get_input(n=1000):
with open('../../data/FaceRecognition/LFW/pairs.txt') as f:
pairs_lines = f.readlines()[1:]
img_label = []
for i in range(n):
p = pairs_lines[i].replace('\n','').split('\t')
if 3==len(p):
sameflag = 1
name1 = p[0]+'/'+p[0]+'_'+'{:04}.jpg'.format(int(p[1]))
name2 = p[0]+'/'+p[0]+'_'+'{:04}.jpg'.format(int(p[2]))
if 4==len(p):
sameflag = 0
name1 = p[0]+'/'+p[0]+'_'+'{:04}.jpg'.format(int(p[1]))
name2 = p[2]+'/'+p[2]+'_'+'{:04}.jpg'.format(int(p[3]))
img1 = cv2.imread("../../data/FaceRecognition/LFW/lfw_processed/"+name1)
img2 = cv2.imread("../../data/FaceRecognition/LFW/lfw_processed/"+name2)
img1_normalized = (img1.transpose(2, 0, 1)-127.5)/128.0
img2_normalized = (img2.transpose(2, 0, 1)-127.5)/128.0
img_label.append( [np.stack([img1_normalized, img2_normalized], 0).astype('float32'), sameflag] )
return img_label
if __name__ == '__main__':
insecure_client = Insecure_Client()
raw_img_set = get_input(20) #
correct = 0
for i, (raw_img, sameflag) in enumerate(raw_img_set):
ref = insecure_client.inference(raw_img)
print("label: %r; Pred: %r; Time: %.2fs; Dist: %.12f" % ( sameflag, ref['pred'], ref['runtime'], ref['dist']) )
|
[] |
[] |
[
"KMP_DUPLICATE_LIB_OK"
] |
[]
|
["KMP_DUPLICATE_LIB_OK"]
|
python
| 1 | 0 | |
smpp_client/settings.py
|
"""
Django settings for smpp_client project.
Generated by 'django-admin startproject' using Django 3.2.11.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'api.apps.ApiConfig',
'rest_framework',
'frontend.apps.FrontendConfig',
'channels',
]
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'smpp_client.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'smpp_client.wsgi.application'
ASGI_APPLICATION = 'smpp_client.asgi.application'
CHANNEL_LAYERS = {
'default': {
'BACKEND': 'channels_redis.core.RedisChannelLayer',
'CONFIG': {
"hosts": [('127.0.0.1', 6379)],
'capacity': 1000,
},
},
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
# AUTH_PASSWORD_VALIDATORS = [
# {
# 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
# },
# {
# 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
# },
# ]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# REST_FRAMEWORK = {
# 'DEFAULT_AUTHENTICATION_CLASSES': [],
# 'DEFAULT_PERMISSION_CLASSES': [],
# 'UNAUTHENTICATED_USER': None,
# }
# Override production variables if DJANGO_DEVELOPMENT env variable is set
if os.environ.get('DJANGO_DEVELOPMENT'):
from .settings_development import *
else:
from .settings_production import *
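# Editor-added sketch (hypothetical, not the project's actual files): the two
# modules imported above would typically differ along these lines:
#
#     # settings_development.py
#     DEBUG = True
#     ALLOWED_HOSTS = ['*']
#
#     # settings_production.py
#     DEBUG = False
#     ALLOWED_HOSTS = ['example.com']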
|
[] |
[] |
[
"DJANGO_DEVELOPMENT"
] |
[]
|
["DJANGO_DEVELOPMENT"]
|
python
| 1 | 0 | |
app/interface/main/creative/dao/whitelist/dao_test.go
|
package whitelist
import (
"context"
"flag"
"fmt"
"go-common/app/interface/main/creative/conf"
"go-common/app/interface/main/creative/model/archive"
"go-common/library/database/sql"
"os"
"reflect"
"testing"
"github.com/bouk/monkey"
"github.com/smartystreets/goconvey/convey"
)
var (
d *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.archive.creative")
flag.Set("conf_token", "96b6a6c10bb311e894c14a552f48fef8")
flag.Set("tree_id", "2305")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../../cmd/creative.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
m.Run()
os.Exit(0)
}
func TestList(t *testing.T) {
var (
c = context.TODO()
res []*archive.WhiteList
err error
)
convey.Convey("Ping", t, func(ctx convey.C) {
guard := monkey.PatchInstanceMethod(reflect.TypeOf(d.db), "Ping", func(_ *sql.DB, _ context.Context) (err error) {
return nil
})
defer guard.Unpatch()
err = d.Ping(c)
ctx.Convey("Ping", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
convey.Convey("2", t, func(ctx convey.C) {
res, err = d.List(c)
ctx.Convey("2", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
ctx.So(len(res), convey.ShouldBeGreaterThanOrEqualTo, 0)
})
})
convey.Convey("1", t, func(ctx convey.C) {
guard := monkey.PatchInstanceMethod(reflect.TypeOf(d.getAllStmt), "Query", func(_ *sql.Stmt, _ context.Context, _ ...interface{}) (rows *sql.Rows, err error) {
return nil, fmt.Errorf("db.Query error")
})
defer guard.Unpatch()
res, err = d.List(c)
ctx.Convey("1", func(ctx convey.C) {
ctx.So(err, convey.ShouldNotBeNil)
ctx.So(len(res), convey.ShouldEqual, 0)
})
})
convey.Convey("Close", t, func(ctx convey.C) {
guard := monkey.PatchInstanceMethod(reflect.TypeOf(d.db), "Close", func(_ *sql.DB) (err error) {
return nil
})
defer guard.Unpatch()
err = d.Close()
ctx.Convey("Close", func(ctx convey.C) {
ctx.So(err, convey.ShouldBeNil)
})
})
}
|
[
"\"DEPLOY_ENV\""
] |
[] |
[
"DEPLOY_ENV"
] |
[]
|
["DEPLOY_ENV"]
|
go
| 1 | 0 | |
pyppeteer/__init__.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Meta data for pyppeteer."""
import logging
import os
try:
# noinspection PyCompatibility
from importlib.metadata import version
except ModuleNotFoundError:
# noinspection PyUnresolvedReferences
# <3.8 backport
from importlib_metadata import version
try:
__version__ = version(__name__)
except Exception:
pass
from appdirs import AppDirs
__chromium_revision__ = '588429'
__base_puppeteer_version__ = 'v1.6.0'
__pyppeteer_home__ = os.environ.get(
'PYPPETEER_HOME', AppDirs('pyppeteer').user_data_dir) # type: str
DEBUG = False
# Setup root logger
_fmt = '[{levelname[0]}:{name}] {msg}'
logging.basicConfig(level=logging.DEBUG, format=_fmt, style='{')
_logger = logging.getLogger(__name__)
_logger.propagate = False
from pyppeteer.launcher import connect, launch, executablePath # noqa: E402
from pyppeteer.launcher import defaultArgs # noqa: E402
version = __version__
version_info = tuple(int(i) for i in version.split('.'))
__all__ = [
'connect',
'launch',
'executablePath',
'defaultArgs',
'version',
'version_info',
]
|
[] |
[] |
[
"PYPPETEER_HOME"
] |
[]
|
["PYPPETEER_HOME"]
|
python
| 1 | 0 | |
qa/rpc-tests/test_framework/test_framework.py
|
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
# Add python-bitcoinrpc to module search path:
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
assert_equal,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
wait_bitcoinds,
enable_coverage,
check_json_precision,
initialize_chain_clean,
)
from .authproxy import AuthServiceProxy, JSONRPCException
class BitcoinTestFramework(object):
# These may be over-ridden by subclasses:
def run_test(self):
for node in self.nodes:
assert_equal(node.getblockcount(), 200)
assert_equal(node.getbalance(), 25*500)
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain(self.options.tmpdir)
def setup_nodes(self):
return start_nodes(4, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network(False)
def main(self):
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave wikids and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop wikids after the test execution")
parser.add_option("--srcdir", dest="srcdir", default="../../src",
help="Source directory containing wikid/wiki-cli (default: %default)")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
if self.options.trace_rpc:
import logging
logging.basicConfig(level=logging.DEBUG)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
if not os.path.isdir(self.options.tmpdir):
os.makedirs(self.options.tmpdir)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
wait_bitcoinds()
else:
print("Note: wikids were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
# Can override the num_nodes variable to indicate how many nodes to run.
def __init__(self):
self.num_nodes = 2
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("WIKID", "wikid"),
help="bitcoind binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("WIKID", "wikid"),
help="bitcoind binary to use for reference nodes (if any)")
def setup_chain(self):
print "Initializing test directory "+self.options.tmpdir
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
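# Editor-added sketch, not part of the original module: tests use this framework
# by subclassing it, overriding run_test (and optionally setup_chain or
# setup_network), then calling main() from the test script. The assertion below
# mirrors the default run_test above.
class ExampleTest(BitcoinTestFramework):
    def run_test(self):
        # setup_chain gives every node the same cached 200-block chain.
        for node in self.nodes:
            assert_equal(node.getblockcount(), 200)
# A test script would end with: ExampleTest().main()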
|
[] |
[] |
[
"PATH",
"WIKID"
] |
[]
|
["PATH", "WIKID"]
|
python
| 2 | 0 | |
hooks/charmhelpers/fetch/__init__.py
|
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import importlib
from tempfile import NamedTemporaryFile
import time
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
)
import subprocess
from charmhelpers.core.hookenv import (
config,
log,
)
import os
import six
if six.PY3:
from urllib.parse import urlparse, urlunparse
else:
from urlparse import urlparse, urlunparse
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
CLOUD_ARCHIVE_POCKETS = {
# Folsom
'folsom': 'precise-updates/folsom',
'precise-folsom': 'precise-updates/folsom',
'precise-folsom/updates': 'precise-updates/folsom',
'precise-updates/folsom': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'precise-folsom/proposed': 'precise-proposed/folsom',
'precise-proposed/folsom': 'precise-proposed/folsom',
# Grizzly
'grizzly': 'precise-updates/grizzly',
'precise-grizzly': 'precise-updates/grizzly',
'precise-grizzly/updates': 'precise-updates/grizzly',
'precise-updates/grizzly': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'precise-grizzly/proposed': 'precise-proposed/grizzly',
'precise-proposed/grizzly': 'precise-proposed/grizzly',
# Havana
'havana': 'precise-updates/havana',
'precise-havana': 'precise-updates/havana',
'precise-havana/updates': 'precise-updates/havana',
'precise-updates/havana': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'precise-havana/proposed': 'precise-proposed/havana',
'precise-proposed/havana': 'precise-proposed/havana',
# Icehouse
'icehouse': 'precise-updates/icehouse',
'precise-icehouse': 'precise-updates/icehouse',
'precise-icehouse/updates': 'precise-updates/icehouse',
'precise-updates/icehouse': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
# Juno
'juno': 'trusty-updates/juno',
'trusty-juno': 'trusty-updates/juno',
'trusty-juno/updates': 'trusty-updates/juno',
'trusty-updates/juno': 'trusty-updates/juno',
'juno/proposed': 'trusty-proposed/juno',
'trusty-juno/proposed': 'trusty-proposed/juno',
'trusty-proposed/juno': 'trusty-proposed/juno',
# Kilo
'kilo': 'trusty-updates/kilo',
'trusty-kilo': 'trusty-updates/kilo',
'trusty-kilo/updates': 'trusty-updates/kilo',
'trusty-updates/kilo': 'trusty-updates/kilo',
'kilo/proposed': 'trusty-proposed/kilo',
'trusty-kilo/proposed': 'trusty-proposed/kilo',
'trusty-proposed/kilo': 'trusty-proposed/kilo',
}
# The order of this list is very important. Handlers should be listed in from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
'charmhelpers.fetch.giturl.GitUrlFetchHandler',
)
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
class SourceConfigError(Exception):
pass
class UnhandledSource(Exception):
pass
class AptLockError(Exception):
pass
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
return "Wrong source type"
def install(self, source):
"""Try to download and unpack the source. Return the path to the
unpacked files or raise UnhandledSource."""
raise UnhandledSource("Wrong source type {}".format(source))
def parse_url(self, url):
return urlparse(url)
def base_url(self, url):
"""Return url without querystring or fragment"""
parts = list(self.parse_url(url))
parts[4:] = ['' for i in parts[4:]]
return urlunparse(parts)
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
cache = apt_cache()
_pkgs = []
for package in packages:
try:
p = cache[package]
p.current_ver or _pkgs.append(package)
except KeyError:
log('Package {} has no installation candidate.'.format(package),
level='WARNING')
_pkgs.append(package)
return _pkgs
def apt_cache(in_memory=True):
"""Build and return an apt cache"""
import apt_pkg
apt_pkg.init()
if in_memory:
apt_pkg.config.set("Dir::Cache::pkgcache", "")
apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
return apt_pkg.Cache()
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
_run_apt_command(cmd, fatal)
def apt_upgrade(options=None, fatal=False, dist=False):
"""Upgrade all packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
if dist:
cmd.append('dist-upgrade')
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
_run_apt_command(cmd, fatal)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
_run_apt_command(cmd, fatal)
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Purging {}".format(packages))
_run_apt_command(cmd, fatal)
def apt_hold(packages, fatal=False):
"""Hold one or more packages"""
cmd = ['apt-mark', 'hold']
if isinstance(packages, six.string_types):
cmd.append(packages)
else:
cmd.extend(packages)
log("Holding {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def add_source(source, key=None):
"""Add a package source to this system.
@param source: a URL or sources.list entry, as supported by
add-apt-repository(1). Examples::
ppa:charmers/example
deb https://stub:[email protected]/ubuntu trusty main
In addition:
'proposed:' may be used to enable the standard 'proposed'
pocket for the release.
'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse'
'distro' may be used as a noop
@param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver
placing your Juju environment at risk. ppa and cloud archive keys
are securely added automatically, so should not be provided.
"""
if source is None:
log('Source is not present. Skipping')
return
if (source.startswith('ppa:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
elif source == 'distro':
pass
else:
log("Unknown source: {!r}".format(source))
if key:
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile('w+') as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
else:
# Note that hkp: is in no way a secure protocol. Using a
# GPG key id is pointless from a security POV unless you
# absolutely trust your network and DNS.
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv',
key])
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
"""
Configure multiple sources from charm configuration.
The lists are encoded as yaml fragments in the configuration.
The fragment needs to be included as a string. Sources and their
corresponding keys are of the types supported by add_source().
Example config:
install_sources: |
- "ppa:foo"
- "http://example.com/repo precise main"
install_keys: |
- null
- "a1b2c3d4"
Note that 'null' (a.k.a. None) should not be quoted.
"""
sources = safe_load((config(sources_var) or '').strip()) or []
keys = safe_load((config(keys_var) or '').strip()) or None
if isinstance(sources, six.string_types):
sources = [sources]
if keys is None:
for source in sources:
add_source(source, None)
else:
if isinstance(keys, six.string_types):
keys = [keys]
if len(sources) != len(keys):
raise SourceConfigError(
'Install sources and keys lists are different lengths')
for source, key in zip(sources, keys):
add_source(source, key)
if update:
apt_update(fatal=True)
def install_remote(source, *args, **kwargs):
"""
Install a file tree from a remote source
The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]]
Schemes supported are based on this modules submodules.
Options supported are submodule-specific.
Additional arguments are passed through to the submodule.
For example::
dest = install_remote('http://example.com/archive.tgz',
checksum='deadbeef',
hash_type='sha1')
This will download `archive.tgz`, validate it using SHA1 and, if
the file is ok, extract it and return the directory in which it
was extracted. If the checksum fails, it will raise
:class:`charmhelpers.core.host.ChecksumError`.
"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source, *args, **kwargs)
except UnhandledSource:
pass
if not installed_to:
raise UnhandledSource("No handler found for source {}".format(source))
return installed_to
def install_from_config(config_var_name):
charm_config = config()
source = charm_config[config_var_name]
return install_remote(source)
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
plugin_list = []
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
# Skip missing plugins so that they can be omitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list
def _run_apt_command(cmd, fatal=False):
"""
Run an APT command, checking output and retrying if the fatal flag is set
to True.
:param: cmd: str: The apt command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried.
"""
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
retry_count = 0
result = None
# If the command is considered "fatal", we need to retry if the apt
# lock was not acquired.
while result is None or result == APT_NO_LOCK:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > APT_NO_LOCK_RETRY_COUNT:
raise
result = e.returncode
log("Couldn't acquire DPKG lock. Will retry in {} seconds."
"".format(APT_NO_LOCK_RETRY_DELAY))
time.sleep(APT_NO_LOCK_RETRY_DELAY)
else:
subprocess.call(cmd, env=env)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
testhelpers/util.go
|
package testhelpers
import (
"fmt"
"net"
"os"
"path/filepath"
"sync"
"time"
"github.com/mitchellh/go-homedir"
)
// GetFreePort gets a free port from the kernel
// Credit: https://github.com/phayes/freeport
func GetFreePort() (int, error) {
addr, err := net.ResolveTCPAddr("tcp", "0.0.0.0:0")
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer l.Close() // nolint: errcheck
return l.Addr().(*net.TCPAddr).Port, nil
}
// GetGoPath returns the current go path for the user.
func GetGoPath() (string, error) {
gp := os.Getenv("GOPATH")
if gp != "" {
return gp, nil
}
home, err := homedir.Dir()
if err != nil {
return "", err
}
return filepath.Join(home, "go"), nil
}
// MustGetFilecoinBinary returns the path where the filecoin binary will be if it has been built and panics otherwise.
func MustGetFilecoinBinary() string {
path, err := GetFilecoinBinary()
if err != nil {
panic(err)
}
return path
}
// GetFilecoinBinary returns the path where the filecoin binary will be if it has been built
func GetFilecoinBinary() (string, error) {
gopath, err := GetGoPath()
if err != nil {
return "", err
}
bin := filepath.Join(gopath, "/src/github.com/filecoin-project/go-filecoin/go-filecoin")
// os.Stat reports a missing binary through the returned error, so a single
// check covers both the missing and the unreadable case.
if _, err := os.Stat(bin); err != nil {
return "", err
}
return bin, nil
}
// WaitForIt waits until the given callback returns true.
func WaitForIt(count int, delay time.Duration, cb func() (bool, error)) error {
var done bool
var err error
for i := 0; i < count; i++ {
done, err = cb()
if err != nil {
return err
}
if done {
break
}
time.Sleep(delay)
}
if !done {
return fmt.Errorf("timeout waiting for it")
}
return nil
}
// WaitTimeout waits for the waitgroup for the specified max timeout.
// Returns true if waiting timed out.
func WaitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
c := make(chan struct{})
go func() {
defer close(c)
wg.Wait()
}()
select {
case <-c:
return false // completed normally
case <-time.After(timeout):
return true // timed out
}
}
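// waitForPortExample is an editor-added sketch, not part of the original
// helpers: it shows GetFreePort and WaitForIt used together, polling up to ten
// times at 100ms intervals until something is listening on the chosen port.
func waitForPortExample() error {
	port, err := GetFreePort()
	if err != nil {
		return err
	}
	return WaitForIt(10, 100*time.Millisecond, func() (bool, error) {
		conn, err := net.DialTimeout("tcp", fmt.Sprintf("127.0.0.1:%d", port), time.Second)
		if err != nil {
			return false, nil // nothing listening yet; keep polling
		}
		return true, conn.Close()
	})
}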
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
context/home.go
|
package context
import (
"os"
"path/filepath"
"runtime"
)
// HomeDir returns the home directory for the current user.
// On Windows:
// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
func HomeDir() string {
if runtime.GOOS == "windows" {
home := os.Getenv("HOME")
homeDriveHomePath := ""
if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
homeDriveHomePath = homeDrive + homePath
}
userProfile := os.Getenv("USERPROFILE")
// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
// %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
for _, p := range []string{home, homeDriveHomePath, userProfile} {
if len(p) == 0 {
continue
}
if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
continue
}
return p
}
firstSetPath := ""
firstExistingPath := ""
// Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
for _, p := range []string{home, userProfile, homeDriveHomePath} {
if len(p) == 0 {
continue
}
if len(firstSetPath) == 0 {
// remember the first path that is set
firstSetPath = p
}
info, err := os.Stat(p)
if err != nil {
continue
}
if len(firstExistingPath) == 0 {
// remember the first path that exists
firstExistingPath = p
}
if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
// return first path that is writeable
return p
}
}
// If none are writeable, return first location that exists
if len(firstExistingPath) > 0 {
return firstExistingPath
}
// If none exist, return first location that is set
if len(firstSetPath) > 0 {
return firstSetPath
}
// We've got nothing
return ""
}
return os.Getenv("HOME")
}
|
[
"\"HOME\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
] |
[] |
[
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 4 | 0 | |
config/ini_test.go
|
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
)
func TestIni(t *testing.T) {
var (
inicontext = `
;comment one
#comment two
appname = beeapi
httpport = 8080
mysqlport = 3600
PI = 3.1415976
runmode = "dev"
autorender = false
copyrequestbody = true
session= on
cookieon= off
newreg = OFF
needlogin = ON
enableSession = Y
enableCookie = N
flag = 1
path1 = ${GOPATH}
path2 = ${GOPATH||/home/go}
[demo]
key1="asta"
key2 = "xie"
CaseInsensitive = true
peers = one;two;three
password = ${GOPATH}
`
keyValue = map[string]interface{}{
"appname": "beeapi",
"httpport": 8080,
"mysqlport": int64(3600),
"pi": 3.1415976,
"runmode": "dev",
"autorender": false,
"copyrequestbody": true,
"session": true,
"cookieon": false,
"newreg": false,
"needlogin": true,
"enableSession": true,
"enableCookie": false,
"flag": true,
"path1": os.Getenv("GOPATH"),
"path2": os.Getenv("GOPATH"),
"demo::key1": "asta",
"demo::key2": "xie",
"demo::CaseInsensitive": true,
"demo::peers": []string{"one", "two", "three"},
"demo::password": os.Getenv("GOPATH"),
"null": "",
"demo2::key1": "",
"error": "",
"emptystrings": []string{},
}
)
f, err := os.Create("testini.conf")
if err != nil {
t.Fatal(err)
}
_, err = f.WriteString(inicontext)
if err != nil {
f.Close()
t.Fatal(err)
}
f.Close()
defer os.Remove("testini.conf")
iniconf, err := NewConfig("ini", "testini.conf")
if err != nil {
t.Fatal(err)
}
for k, v := range keyValue {
var err error
var value interface{}
switch v.(type) {
case int:
value, err = iniconf.Int(k)
case int64:
value, err = iniconf.Int64(k)
case float64:
value, err = iniconf.Float(k)
case bool:
value, err = iniconf.Bool(k)
case []string:
value = iniconf.Strings(k)
case string:
value = iniconf.String(k)
default:
value, err = iniconf.DIY(k)
}
if err != nil {
t.Fatalf("get key %q value fail,err %s", k, err)
} else if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", value) {
t.Fatalf("get key %q value, want %v got %v .", k, v, value)
}
}
if err = iniconf.Set("name", "liuyuanting1209"); err != nil {
t.Fatal(err)
}
if iniconf.String("name") != "liuyuanting1209" {
t.Fatal("get name error")
}
}
func TestIniSave(t *testing.T) {
const (
inicontext = `
app = app
;comment one
#comment two
# comment three
appname = beeapi
httpport = 8080
# DB Info
# enable db
[dbinfo]
# db type name
# suport mysql,sqlserver
name = mysql
`
saveResult = `
app=app
#comment one
#comment two
# comment three
appname=beeapi
httpport=8080
# DB Info
# enable db
[dbinfo]
# db type name
# suport mysql,sqlserver
name=mysql
`
)
cfg, err := NewConfigData("ini", []byte(inicontext))
if err != nil {
t.Fatal(err)
}
name := "newIniConfig.ini"
if err := cfg.SaveConfigFile(name); err != nil {
t.Fatal(err)
}
defer os.Remove(name)
if data, err := ioutil.ReadFile(name); err != nil {
t.Fatal(err)
} else {
cfgData := string(data)
datas := strings.Split(saveResult, "\n")
for _, line := range datas {
if !strings.Contains(cfgData, line+"\n") {
t.Fatalf("different after save ini config file. need contains %q", line)
}
}
}
}
|
[
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
registry/mdns/mdns_test.go
|
package mdns
import (
"os"
"testing"
"time"
"github.com/asim/nitro/v3/registry"
)
func TestMDNS(t *testing.T) {
// skip test in CI (e.g. Travis) because of a "sendto: operation not permitted" error
if travis := os.Getenv("CI"); travis == "true" {
t.Skip()
}
testData := []*registry.Service{
{
Name: "test1",
Version: "1.0.1",
Nodes: []*registry.Node{
{
Id: "test1-1",
Address: "10.0.0.1:10001",
Metadata: map[string]string{
"foo": "bar",
},
},
},
},
{
Name: "test2",
Version: "1.0.2",
Nodes: []*registry.Node{
{
Id: "test2-1",
Address: "10.0.0.2:10002",
Metadata: map[string]string{
"foo2": "bar2",
},
},
},
},
{
Name: "test3",
Version: "1.0.3",
Nodes: []*registry.Node{
{
Id: "test3-1",
Address: "10.0.0.3:10003",
Metadata: map[string]string{
"foo3": "bar3",
},
},
},
},
}
travis := os.Getenv("CI")
var opts []registry.Option
if travis == "true" {
opts = append(opts, registry.Timeout(time.Millisecond*100))
}
// new registry
r := NewRegistry(opts...)
for _, service := range testData {
// register service
if err := r.Register(service); err != nil {
t.Fatal(err)
}
// get registered service
s, err := r.GetService(service.Name)
if err != nil {
t.Fatal(err)
}
if len(s) != 1 {
t.Fatalf("Expected one result for %s got %d", service.Name, len(s))
}
if s[0].Name != service.Name {
t.Fatalf("Expected name %s got %s", service.Name, s[0].Name)
}
if s[0].Version != service.Version {
t.Fatalf("Expected version %s got %s", service.Version, s[0].Version)
}
if len(s[0].Nodes) != 1 {
t.Fatalf("Expected 1 node, got %d", len(s[0].Nodes))
}
node := s[0].Nodes[0]
if node.Id != service.Nodes[0].Id {
t.Fatalf("Expected node id %s got %s", service.Nodes[0].Id, node.Id)
}
if node.Address != service.Nodes[0].Address {
t.Fatalf("Expected node address %s got %s", service.Nodes[0].Address, node.Address)
}
}
services, err := r.ListServices()
if err != nil {
t.Fatal(err)
}
for _, service := range testData {
var seen bool
for _, s := range services {
if s.Name == service.Name {
seen = true
break
}
}
if !seen {
t.Fatalf("Expected service %s got nothing", service.Name)
}
// deregister
if err := r.Deregister(service); err != nil {
t.Fatal(err)
}
time.Sleep(time.Millisecond * 5)
// check its gone
s, _ := r.GetService(service.Name)
if len(s) > 0 {
t.Fatalf("Expected nothing got %+v", s[0])
}
}
}
func TestEncoding(t *testing.T) {
testData := []*mdnsTxt{
{
Version: "1.0.0",
Metadata: map[string]string{
"foo": "bar",
},
Endpoints: []*registry.Endpoint{
{
Name: "endpoint1",
Request: ®istry.Value{
Name: "request",
Type: "request",
},
Response: ®istry.Value{
Name: "response",
Type: "response",
},
Metadata: map[string]string{
"foo1": "bar1",
},
},
},
},
}
for _, d := range testData {
encoded, err := encode(d)
if err != nil {
t.Fatal(err)
}
for _, txt := range encoded {
if len(txt) > 255 {
t.Fatalf("One of parts for txt is %d characters", len(txt))
}
}
decoded, err := decode(encoded)
if err != nil {
t.Fatal(err)
}
if decoded.Version != d.Version {
t.Fatalf("Expected version %s got %s", d.Version, decoded.Version)
}
if len(decoded.Endpoints) != len(d.Endpoints) {
t.Fatalf("Expected %d endpoints, got %d", len(d.Endpoints), len(decoded.Endpoints))
}
for k, v := range d.Metadata {
if val := decoded.Metadata[k]; val != v {
t.Fatalf("Expected %s=%s got %s=%s", k, v, k, val)
}
}
}
}
func TestWatcher(t *testing.T) {
if travis := os.Getenv("CI"); travis == "true" {
t.Skip()
}
testData := []*registry.Service{
{
Name: "test1",
Version: "1.0.1",
Nodes: []*registry.Node{
{
Id: "test1-1",
Address: "10.0.0.1:10001",
Metadata: map[string]string{
"foo": "bar",
},
},
},
},
{
Name: "test2",
Version: "1.0.2",
Nodes: []*registry.Node{
{
Id: "test2-1",
Address: "10.0.0.2:10002",
Metadata: map[string]string{
"foo2": "bar2",
},
},
},
},
{
Name: "test3",
Version: "1.0.3",
Nodes: []*registry.Node{
{
Id: "test3-1",
Address: "10.0.0.3:10003",
Metadata: map[string]string{
"foo3": "bar3",
},
},
},
},
}
testFn := func(service, s *registry.Service) {
if s == nil {
t.Fatalf("Expected one result for %s got nil", service.Name)
}
if s.Name != service.Name {
t.Fatalf("Expected name %s got %s", service.Name, s.Name)
}
if s.Version != service.Version {
t.Fatalf("Expected version %s got %s", service.Version, s.Version)
}
if len(s.Nodes) != 1 {
t.Fatalf("Expected 1 node, got %d", len(s.Nodes))
}
node := s.Nodes[0]
if node.Id != service.Nodes[0].Id {
t.Fatalf("Expected node id %s got %s", service.Nodes[0].Id, node.Id)
}
if node.Address != service.Nodes[0].Address {
t.Fatalf("Expected node address %s got %s", service.Nodes[0].Address, node.Address)
}
}
travis := os.Getenv("CI")
var opts []registry.Option
if travis == "true" {
opts = append(opts, registry.Timeout(time.Millisecond*100))
}
// new registry
r := NewRegistry(opts...)
w, err := r.Watch()
if err != nil {
t.Fatal(err)
}
defer w.Stop()
for _, service := range testData {
// register service
if err := r.Register(service); err != nil {
t.Fatal(err)
}
for {
res, err := w.Next()
if err != nil {
t.Fatal(err)
}
if res.Service.Name != service.Name {
continue
}
if res.Action != "create" {
t.Fatalf("Expected create event got %s for %s", res.Action, res.Service.Name)
}
testFn(service, res.Service)
break
}
// deregister
if err := r.Deregister(service); err != nil {
t.Fatal(err)
}
for {
res, err := w.Next()
if err != nil {
t.Fatal(err)
}
if res.Service.Name != service.Name {
continue
}
if res.Action != "delete" {
continue
}
testFn(service, res.Service)
break
}
}
}
|
[
"\"CI\"",
"\"CI\"",
"\"CI\"",
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
config/profile.go
|
// Copyright (c) 2009-present, Alibaba Cloud All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"encoding/json"
"errors"
"fmt"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/auth/credentials"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
"github.com/aliyun/alibaba-cloud-sdk-go/sdk/responses"
"github.com/aliyun/aliyun-cli/cli"
jmespath "github.com/jmespath/go-jmespath"
)
type AuthenticateMode string
const (
AK = AuthenticateMode("AK")
StsToken = AuthenticateMode("StsToken")
RamRoleArn = AuthenticateMode("RamRoleArn")
EcsRamRole = AuthenticateMode("EcsRamRole")
RsaKeyPair = AuthenticateMode("RsaKeyPair")
RamRoleArnWithEcs = AuthenticateMode("RamRoleArnWithRoleName")
ChainableRamRoleArn = AuthenticateMode("ChainableRamRoleArn")
External = AuthenticateMode("External")
)
type Profile struct {
Name string `json:"name"`
Mode AuthenticateMode `json:"mode"`
AccessKeyId string `json:"access_key_id"`
AccessKeySecret string `json:"access_key_secret"`
StsToken string `json:"sts_token"`
StsRegion string `json:"sts_region"`
RamRoleName string `json:"ram_role_name"`
RamRoleArn string `json:"ram_role_arn"`
RoleSessionName string `json:"ram_session_name"`
SourceProfile string `json:"source_profile"`
PrivateKey string `json:"private_key"`
KeyPairName string `json:"key_pair_name"`
ExpiredSeconds int `json:"expired_seconds"`
Verified string `json:"verified"`
RegionId string `json:"region_id"`
OutputFormat string `json:"output_format"`
Language string `json:"language"`
Site string `json:"site"`
ReadTimeout int `json:"retry_timeout"`
ConnectTimeout int `json:"connect_timeout"`
RetryCount int `json:"retry_count"`
ProcessCommand string `json:"process_command"`
parent *Configuration //`json:"-"`
}
func NewProfile(name string) Profile {
return Profile{
Name: name,
Mode: AK,
OutputFormat: "json",
Language: "en",
}
}
func (cp *Profile) Validate() error {
if cp.RegionId == "" {
return fmt.Errorf("region can't be empty")
}
if !IsRegion(cp.RegionId) {
return fmt.Errorf("invalid region %s", cp.RegionId)
}
if cp.Mode == "" {
return fmt.Errorf("profile %s is not configure yet, run `aliyun configure --profile %s` first", cp.Name, cp.Name)
}
switch cp.Mode {
case AK:
return cp.ValidateAK()
case StsToken:
err := cp.ValidateAK()
if err != nil {
return err
}
if cp.StsToken == "" {
return fmt.Errorf("invalid sts_token")
}
case RamRoleArn:
err := cp.ValidateAK()
if err != nil {
return err
}
if cp.RamRoleArn == "" {
return fmt.Errorf("invalid ram_role_arn")
}
if cp.RoleSessionName == "" {
return fmt.Errorf("invalid role_session_name")
}
case EcsRamRole, RamRoleArnWithEcs:
case RsaKeyPair:
if cp.PrivateKey == "" {
return fmt.Errorf("invalid private_key")
}
if cp.KeyPairName == "" {
return fmt.Errorf("invalid key_pair_name")
}
case External:
if cp.ProcessCommand == "" {
return fmt.Errorf("invalid process_command")
}
case ChainableRamRoleArn:
if cp.SourceProfile == "" {
return fmt.Errorf("invalid source_profile")
}
if cp.RamRoleArn == "" {
return fmt.Errorf("invalid ram_role_arn")
}
if cp.RoleSessionName == "" {
return fmt.Errorf("invalid role_session_name")
}
default:
return fmt.Errorf("invalid mode: %s", cp.Mode)
}
return nil
}
func (cp *Profile) GetParent() *Configuration {
return cp.parent
}
func (cp *Profile) OverwriteWithFlags(ctx *cli.Context) {
cp.Mode = AuthenticateMode(ModeFlag(ctx.Flags()).GetStringOrDefault(string(cp.Mode)))
cp.AccessKeyId = AccessKeyIdFlag(ctx.Flags()).GetStringOrDefault(cp.AccessKeyId)
cp.AccessKeySecret = AccessKeySecretFlag(ctx.Flags()).GetStringOrDefault(cp.AccessKeySecret)
cp.StsToken = StsTokenFlag(ctx.Flags()).GetStringOrDefault(cp.StsToken)
cp.StsRegion = StsRegionFlag(ctx.Flags()).GetStringOrDefault(cp.StsRegion)
cp.RamRoleName = RamRoleNameFlag(ctx.Flags()).GetStringOrDefault(cp.RamRoleName)
cp.RamRoleArn = RamRoleArnFlag(ctx.Flags()).GetStringOrDefault(cp.RamRoleArn)
cp.RoleSessionName = RoleSessionNameFlag(ctx.Flags()).GetStringOrDefault(cp.RoleSessionName)
cp.KeyPairName = KeyPairNameFlag(ctx.Flags()).GetStringOrDefault(cp.KeyPairName)
cp.PrivateKey = PrivateKeyFlag(ctx.Flags()).GetStringOrDefault(cp.PrivateKey)
cp.RegionId = RegionFlag(ctx.Flags()).GetStringOrDefault(cp.RegionId)
cp.Language = LanguageFlag(ctx.Flags()).GetStringOrDefault(cp.Language)
cp.ReadTimeout = ReadTimeoutFlag(ctx.Flags()).GetIntegerOrDefault(cp.ReadTimeout)
cp.ConnectTimeout = ConnectTimeoutFlag(ctx.Flags()).GetIntegerOrDefault(cp.ConnectTimeout)
cp.RetryCount = RetryCountFlag(ctx.Flags()).GetIntegerOrDefault(cp.RetryCount)
cp.ExpiredSeconds = ExpiredSecondsFlag(ctx.Flags()).GetIntegerOrDefault(cp.ExpiredSeconds)
cp.ProcessCommand = ProcessCommandFlag(ctx.Flags()).GetStringOrDefault(cp.ProcessCommand)
if cp.AccessKeyId == "" {
switch {
case os.Getenv("ALIBABACLOUD_ACCESS_KEY_ID") != "":
cp.AccessKeyId = os.Getenv("ALIBABACLOUD_ACCESS_KEY_ID")
case os.Getenv("ALICLOUD_ACCESS_KEY_ID") != "":
cp.AccessKeyId = os.Getenv("ALICLOUD_ACCESS_KEY_ID")
case os.Getenv("ACCESS_KEY_ID") != "":
cp.AccessKeyId = os.Getenv("ACCESS_KEY_ID")
}
}
if cp.AccessKeySecret == "" {
switch {
case os.Getenv("ALIBABACLOUD_ACCESS_KEY_SECRET") != "":
cp.AccessKeySecret = os.Getenv("ALIBABACLOUD_ACCESS_KEY_SECRET")
case os.Getenv("ALICLOUD_ACCESS_KEY_SECRET") != "":
cp.AccessKeySecret = os.Getenv("ALICLOUD_ACCESS_KEY_SECRET")
case os.Getenv("ACCESS_KEY_SECRET") != "":
cp.AccessKeySecret = os.Getenv("ACCESS_KEY_SECRET")
}
}
if cp.StsToken == "" {
cp.StsToken = os.Getenv("SECURITY_TOKEN")
}
if cp.RegionId == "" {
switch {
case os.Getenv("ALIBABACLOUD_REGION_ID") != "":
cp.RegionId = os.Getenv("ALIBABACLOUD_REGION_ID")
case os.Getenv("ALICLOUD_REGION_ID") != "":
cp.RegionId = os.Getenv("ALICLOUD_REGION_ID")
case os.Getenv("REGION") != "":
cp.RegionId = os.Getenv("REGION")
}
}
AutoModeRecognition(cp)
}
func AutoModeRecognition(cp *Profile) {
if cp.Mode != AuthenticateMode("") {
return
}
if cp.AccessKeyId != "" && cp.AccessKeySecret != "" {
cp.Mode = AK
if cp.StsToken != "" {
cp.Mode = StsToken
} else if cp.RamRoleArn != "" {
cp.Mode = RamRoleArn
}
} else if cp.PrivateKey != "" && cp.KeyPairName != "" {
cp.Mode = RsaKeyPair
} else if cp.RamRoleName != "" {
cp.Mode = EcsRamRole
} else if cp.ProcessCommand != "" {
cp.Mode = External
}
}
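// A brief illustration of the precedence above (hypothetical profile values,
// not part of the original source): a profile carrying only AccessKeyId and
// AccessKeySecret resolves to AK; adding StsToken promotes it to StsToken,
// while a RamRoleArn instead promotes it to RamRoleArn. PrivateKey plus
// KeyPairName alone resolve to RsaKeyPair, a lone RamRoleName to EcsRamRole,
// and a ProcessCommand to External.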
func (cp *Profile) ValidateAK() error {
if len(cp.AccessKeyId) == 0 {
return fmt.Errorf("invalid access_key_id: %s", cp.AccessKeyId)
}
if len(cp.AccessKeySecret) == 0 {
return fmt.Errorf("invaild access_key_secret: %s", cp.AccessKeySecret)
}
return nil
}
func (cp *Profile) GetClient(ctx *cli.Context) (*sdk.Client, error) {
config := sdk.NewConfig()
// get UserAgent from env
config.UserAgent = os.Getenv("ALIYUN_USER_AGENT")
if cp.RetryCount > 0 {
// when use --retry-count, enable auto retry
config.WithAutoRetry(true)
config.WithMaxRetryTime(cp.RetryCount)
}
var client *sdk.Client
var err error
switch cp.Mode {
case AK:
client, err = cp.GetClientByAK(config)
case StsToken:
client, err = cp.GetClientBySts(config)
case RamRoleArn:
client, err = cp.GetClientByRoleArn(config)
case EcsRamRole:
client, err = cp.GetClientByEcsRamRole(config)
case RsaKeyPair:
client, err = cp.GetClientByPrivateKey(config)
case RamRoleArnWithEcs:
client, err = cp.GetClientByRamRoleArnWithEcs(config)
case ChainableRamRoleArn:
return cp.GetClientByChainableRamRoleArn(config, ctx)
case External:
return cp.GetClientByExternal(config, ctx)
default:
client, err = nil, fmt.Errorf("unexcepted certificate mode: %s", cp.Mode)
}
if client != nil {
if cp.ReadTimeout > 0 {
client.SetReadTimeout(time.Duration(cp.ReadTimeout) * time.Second)
}
if cp.ConnectTimeout > 0 {
client.SetConnectTimeout(time.Duration(cp.ConnectTimeout) * time.Second)
}
if SkipSecureVerify(ctx.Flags()).IsAssigned() {
client.SetHTTPSInsecure(true)
}
}
return client, err
}
func (cp *Profile) GetClientByAK(config *sdk.Config) (*sdk.Client, error) {
if cp.AccessKeyId == "" || cp.AccessKeySecret == "" {
return nil, fmt.Errorf("AccessKeyId/AccessKeySecret is empty! run `aliyun configure` first")
}
if cp.RegionId == "" {
return nil, fmt.Errorf("default RegionId is empty! run `aliyun configure` first")
}
cred := credentials.NewAccessKeyCredential(cp.AccessKeyId, cp.AccessKeySecret)
client, err := sdk.NewClientWithOptions(cp.RegionId, config, cred)
return client, err
}
func (cp *Profile) GetClientBySts(config *sdk.Config) (*sdk.Client, error) {
cred := credentials.NewStsTokenCredential(cp.AccessKeyId, cp.AccessKeySecret, cp.StsToken)
client, err := sdk.NewClientWithOptions(cp.RegionId, config, cred)
return client, err
}
func (cp *Profile) GetClientByRoleArn(config *sdk.Config) (*sdk.Client, error) {
cred := credentials.NewRamRoleArnCredential(cp.AccessKeyId, cp.AccessKeySecret, cp.RamRoleArn, cp.RoleSessionName, cp.ExpiredSeconds)
cred.StsRegion = cp.StsRegion
client, err := sdk.NewClientWithOptions(cp.RegionId, config, cred)
return client, err
}
func (cp *Profile) GetClientByRamRoleArnWithEcs(config *sdk.Config) (*sdk.Client, error) {
client, err := cp.GetClientByEcsRamRole(config)
if err != nil {
return nil, err
}
accessKeyID, accessKeySecret, StsToken, err := cp.GetSessionCredential(client)
if err != nil {
return nil, err
}
cred := credentials.NewStsTokenCredential(accessKeyID, accessKeySecret, StsToken)
return sdk.NewClientWithOptions(cp.RegionId, config, cred)
}
func (cp *Profile) GetSessionCredential(client *sdk.Client) (string, string, string, error) {
req := requests.NewCommonRequest()
rep := responses.NewCommonResponse()
req.Scheme = "HTTPS"
req.Product = "Sts"
req.RegionId = cp.RegionId
req.Version = "2015-04-01"
if cp.StsRegion != "" {
req.Domain = fmt.Sprintf("sts.%s.aliyuncs.com", cp.StsRegion)
} else {
req.Domain = "sts.aliyuncs.com"
}
req.ApiName = "AssumeRole"
req.QueryParams["RoleArn"] = cp.RamRoleArn
req.QueryParams["RoleSessionName"] = cp.RoleSessionName
req.QueryParams["DurationSeconds"] = strconv.Itoa(cp.ExpiredSeconds)
req.TransToAcsRequest()
err := client.DoAction(req, rep)
if err != nil {
return "", "", "", err
}
var v interface{}
err = json.Unmarshal(rep.GetHttpContentBytes(), &v)
if err != nil {
return "", "", "", err
}
accessKeyID, _ := jmespath.Search("Credentials.AccessKeyId", v)
accessKeySecret, _ := jmespath.Search("Credentials.AccessKeySecret", v)
StsToken, _ := jmespath.Search("Credentials.SecurityToken", v)
if accessKeyID == nil || accessKeySecret == nil || StsToken == nil {
return "", "", "", errors.New("get session credential failed")
}
return accessKeyID.(string), accessKeySecret.(string), StsToken.(string), nil
}
func (cp *Profile) GetClientByEcsRamRole(config *sdk.Config) (*sdk.Client, error) {
cred := credentials.NewEcsRamRoleCredential(cp.RamRoleName)
client, err := sdk.NewClientWithOptions(cp.RegionId, config, cred)
return client, err
}
func (cp *Profile) GetClientByPrivateKey(config *sdk.Config) (*sdk.Client, error) {
cred := credentials.NewRsaKeyPairCredential(cp.PrivateKey, cp.KeyPairName, cp.ExpiredSeconds)
client, err := sdk.NewClientWithOptions(cp.RegionId, config, cred)
return client, err
}
func (cp *Profile) GetClientByExternal(config *sdk.Config, ctx *cli.Context) (*sdk.Client, error) {
args := strings.Fields(cp.ProcessCommand)
cmd := exec.Command(args[0], args[1:]...)
buf, err := cmd.CombinedOutput()
if err != nil {
return nil, err
}
err = json.Unmarshal(buf, cp)
if err != nil {
fmt.Println(cp.ProcessCommand)
fmt.Println(string(buf))
return nil, err
}
return cp.GetClient(ctx)
}
func (cp *Profile) GetClientByChainableRamRoleArn(config *sdk.Config, ctx *cli.Context) (*sdk.Client, error) {
profileName := cp.SourceProfile
// reload the source profile from the configuration
source, loaded := cp.parent.GetProfile(profileName)
if !loaded {
return nil, fmt.Errorf("can not load the source profile: " + profileName)
}
client, err := source.GetClient(ctx)
if err != nil {
return nil, err
}
accessKeyID, accessKeySecret, StsToken, err := cp.GetSessionCredential(client)
if err != nil {
return nil, err
}
cred := credentials.NewStsTokenCredential(accessKeyID, accessKeySecret, StsToken)
return sdk.NewClientWithOptions(cp.RegionId, config, cred)
}
func IsRegion(region string) bool {
if match, _ := regexp.MatchString("^[a-zA-Z0-9-]*$", region); !match {
return false
}
return true
}
|
[
"\"ALIBABACLOUD_ACCESS_KEY_ID\"",
"\"ALIBABACLOUD_ACCESS_KEY_ID\"",
"\"ALICLOUD_ACCESS_KEY_ID\"",
"\"ALICLOUD_ACCESS_KEY_ID\"",
"\"ACCESS_KEY_ID\"",
"\"ACCESS_KEY_ID\"",
"\"ALIBABACLOUD_ACCESS_KEY_SECRET\"",
"\"ALIBABACLOUD_ACCESS_KEY_SECRET\"",
"\"ALICLOUD_ACCESS_KEY_SECRET\"",
"\"ALICLOUD_ACCESS_KEY_SECRET\"",
"\"ACCESS_KEY_SECRET\"",
"\"ACCESS_KEY_SECRET\"",
"\"SECURITY_TOKEN\"",
"\"ALIBABACLOUD_REGION_ID\"",
"\"ALIBABACLOUD_REGION_ID\"",
"\"ALICLOUD_REGION_ID\"",
"\"ALICLOUD_REGION_ID\"",
"\"REGION\"",
"\"REGION\"",
"\"ALIYUN_USER_AGENT\""
] |
[] |
[
"ALIYUN_USER_AGENT",
"SECURITY_TOKEN",
"ALIBABACLOUD_REGION_ID",
"ALICLOUD_ACCESS_KEY_SECRET",
"ALIBABACLOUD_ACCESS_KEY_ID",
"ALIBABACLOUD_ACCESS_KEY_SECRET",
"REGION",
"ALICLOUD_REGION_ID",
"ALICLOUD_ACCESS_KEY_ID",
"ACCESS_KEY_SECRET",
"ACCESS_KEY_ID"
] |
[]
|
["ALIYUN_USER_AGENT", "SECURITY_TOKEN", "ALIBABACLOUD_REGION_ID", "ALICLOUD_ACCESS_KEY_SECRET", "ALIBABACLOUD_ACCESS_KEY_ID", "ALIBABACLOUD_ACCESS_KEY_SECRET", "REGION", "ALICLOUD_REGION_ID", "ALICLOUD_ACCESS_KEY_ID", "ACCESS_KEY_SECRET", "ACCESS_KEY_ID"]
|
go
| 11 | 0 | |
config.py
|
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env'))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(basedir, 'app.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25)
MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ADMINS = ['[email protected]']
LANGUAGES = ['en', 'es']
YD_TRANSLATOR_KEY = os.environ.get('YD_TRANSLATOR_KEY')
POSTS_PER_PAGE = 25
ELASTICSEARCH_URL = os.environ.get('ELASTICSEARCH_URL')
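# A minimal usage sketch (not part of the original module; assumes a Flask app
# object named `app`): the class above is typically loaded once at startup with
#     app.config.from_object(Config)
# after which values such as app.config['POSTS_PER_PAGE'] are available, with
# environment variables overriding the hard-coded fallbacks.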
|
[] |
[] |
[
"MAIL_SERVER",
"MAIL_PASSWORD",
"DATABASE_URL",
"MAIL_PORT",
"SECRET_KEY",
"YD_TRANSLATOR_KEY",
"MAIL_USERNAME",
"MAIL_USE_TLS",
"ELASTICSEARCH_URL"
] |
[]
|
["MAIL_SERVER", "MAIL_PASSWORD", "DATABASE_URL", "MAIL_PORT", "SECRET_KEY", "YD_TRANSLATOR_KEY", "MAIL_USERNAME", "MAIL_USE_TLS", "ELASTICSEARCH_URL"]
|
python
| 9 | 0 | |
demos/SAC/GraphDrive-Easy/W.py
|
# Read this guide for how to use this script: https://medium.com/distributed-computing-with-ray/intro-to-rllib-example-environments-3a113f532c70
import os
os.environ["TUNE_RESULT_DIR"] = 'tmp/ray_results'
import multiprocessing
import json
import shutil
import ray
import time
from xarl.utils.workflow import train
from xarl.agents.xasac import XASACTrainer, XASAC_DEFAULT_CONFIG
from environments import *
from ray.rllib.models import ModelCatalog
from xarl.models.sac import TFAdaptiveMultiHeadNet
# ModelCatalog.register_custom_model("adaptive_multihead_network", TFAdaptiveMultiHeadDDPG)
# SELECT_ENV = "CescoDrive-V1"
SELECT_ENV = "GraphDrive-Easy"
CONFIG = XASAC_DEFAULT_CONFIG.copy()
CONFIG.update({
# "model": { # this is for GraphDrive and GridDrive
# "custom_model": "adaptive_multihead_network",
# },
# "preprocessor_pref": "rllib", # this prevents reward clipping on Atari and other weird issues when running from checkpoints
# "framework": "torch",
"seed": 42, # This makes experiments reproducible.
###########################
"rollout_fragment_length": 2**6, # Divide episodes into fragments of this many steps each during rollouts. Default is 1.
"train_batch_size": 2**8, # Number of transitions per train-batch. Default is: 100 for TD3, 256 for SAC and DDPG, 32 for DQN, 500 for APPO.
# "batch_mode": "complete_episodes", # For some clustering schemes (e.g. extrinsic_reward, moving_best_extrinsic_reward, etc..) it has to be equal to 'complete_episodes', otherwise it can also be 'truncate_episodes'.
###########################
"prioritized_replay": True, # Whether to replay batches with the highest priority/importance/relevance for the agent.
'buffer_size': 2**14, # Size of the experience buffer. Default 50000
"prioritized_replay_alpha": 0.6,
"prioritized_replay_beta": 0.4, # The smaller this is, the stronger is over-sampling
"prioritized_replay_eps": 1e-6,
"learning_starts": 2**14, # How many steps of the model to sample before learning starts.
###########################
"gamma": 0.999, # We use an higher gamma to extend the MDP's horizon; optimal agency on GraphDrive requires a longer horizon.
"tau": 1e-4,
###########################
"buffer_options": {
'priority_id': 'td_errors', # Which batch column to use for prioritisation. Default is inherited by DQN and it is 'td_errors'. One of the following: rewards, prev_rewards, td_errors.
'priority_lower_limit': 0, # A value lower than the lowest possible priority. It depends on the priority_id. By default in DQN and DDPG the td_errors lower limit is 0, while in PPO the gain lower limit is None.
'priority_aggregation_fn': 'np.mean', # A reduction that takes as input a list of numbers and returns a number representing a batch priority.
'cluster_size': None, # Default None, implying being equal to global_size. Maximum number of batches stored in a cluster (which number depends on the clustering scheme) of the experience buffer. Every batch has size 'replay_sequence_length' (default is 1).
'global_size': 2**14, # Default 50000. Maximum number of batches stored in all clusters (which number depends on the clustering scheme) of the experience buffer. Every batch has size 'replay_sequence_length' (default is 1).
'prioritization_alpha': 0.6, # How much prioritization is used (0 - no prioritization, 1 - full prioritization).
'prioritization_importance_beta': 0.4, # To what degree to use importance weights (0 - no corrections, 1 - full correction).
'prioritization_importance_eta': 1e-2, # Used only if priority_lower_limit is None. A value > 0 that enables eta-weighting, thus allowing for importance weighting with priorities lower than 0 if beta is > 0. Eta is used to avoid importance weights equal to 0 when the sampled batch is the one with the highest priority. The closer eta is to 0, the closer to 0 would be the importance weight of the highest-priority batch.
'prioritization_epsilon': 1e-6, # prioritization_epsilon to add to a priority so that it is never equal to 0.
'prioritized_drop_probability': 0, # Probability of dropping the batch having the lowest priority in the buffer instead of the one having the lowest timestamp. In DQN default is 0.
'global_distribution_matching': False, # Whether to use a random number rather than the batch priority during prioritised dropping. If True then: At time t the probability of any experience being the max experience is 1/t regardless of when the sample was added, guaranteeing that (when prioritized_drop_probability==1) at any given time the sampled experiences will approximately match the distribution of all samples seen so far.
'cluster_prioritisation_strategy': 'sum', # Whether to select which cluster to replay in a prioritised fashion -- Options: None; 'sum', 'avg', 'weighted_avg'.
'cluster_prioritization_alpha': 1, # How much prioritization is used (0 - no prioritization, 1 - full prioritization).
'cluster_level_weighting': True, # Whether to use only cluster-level information to compute importance weights rather than the whole buffer.
'clustering_xi': 3, # Let X be the minimum cluster's size, and C be the number of clusters, and q be clustering_xi, then the cluster's size is guaranteed to be in [X, X+(q-1)CX], with q >= 1, when all clusters have reached the minimum capacity X. This shall help having a buffer reflecting the real distribution of tasks (where each task is associated to a cluster), thus avoiding over-estimation of task's priority.
'max_age_window': None, # Consider only batches with a relative age within this age window, the younger is a batch the higher will be its importance. Set to None for no age weighting. # Idea from: Fedus, William, et al. "Revisiting fundamentals of experience replay." International Conference on Machine Learning. PMLR, 2020.
},
"clustering_scheme": "W", # Which scheme to use for building clusters. One of the following: "none", "positive_H", "H", "HW", "long_HW", "W", "long_W".
"clustering_scheme_options": {
"episode_window_size": 2**6,
"batch_window_size": 2**8,
"n_clusters": None,
},
"cluster_selection_policy": "min", # Which policy to follow when clustering_scheme is not "none" and multiple explanatory labels are associated to a batch. One of the following: 'random_uniform_after_filling', 'random_uniform', 'random_max', 'max', 'min', 'none'
"cluster_with_episode_type": False, # Useful with sparse-reward environments. Whether to cluster experience using information at episode-level.
"cluster_overview_size": 1, # cluster_overview_size <= train_batch_size. If None, then cluster_overview_size is automatically set to train_batch_size. -- When building a single train batch, do not sample a new cluster before x batches are sampled from it. The closer cluster_overview_size is to train_batch_size, the faster is the batch sampling procedure.
"collect_cluster_metrics": False, # Whether to collect metrics about the experience clusters. It consumes more resources.
"ratio_of_samples_from_unclustered_buffer": 0, # 0 for no, 1 for full. Whether to sample in a randomised fashion from both a non-prioritised buffer of most recent elements and the XA prioritised buffer.
})
CONFIG["callbacks"] = CustomEnvironmentCallbacks
####################################################################################
####################################################################################
ray.shutdown()
ray.init(ignore_reinit_error=True)
train(XASACTrainer, CONFIG, SELECT_ENV, test_every_n_step=4e7, stop_training_after_n_step=4e7)
|
[] |
[] |
[
"TUNE_RESULT_DIR"
] |
[]
|
["TUNE_RESULT_DIR"]
|
python
| 1 | 0 | |
drought_metrics.py
|
"""drought-metrics.py
Driver that runs the drought metrics evaluation.
Use:
python drought_metrics.py settings_file.yaml
Parameters:
-----------
Parameters are stored in a yaml file provided as the single argument
to this script.
test_path : str
GCM file directory. Can contain multiple files.
obs_path : str
Path to observations file
wgt_path : str
Weightfile path
hu_name : str
Name of evaluation region
shp_path : str
Path to regions shapefile
out_path : str
Path to output directory (default '.')
interpolation : bool
True to perform interpolation (default False)
pfa : str
Path to principal metrics file
"""
import json
import os
import sys
from evaluation import evaluation
# Set defaults based on demo data
hu_name = "New England Region"
interpolation = True
run_pfa = False
shp_path = "./HU/WBDHU2.shp"
wgt_path = "./data/weightfile/interpolated_pr_Amon_E3SM-1-1_historical_r1i1p1f1_gr_187001-200912.nc"
pfa = "./output_principal_metrics_column_defined"
obs_file_name = "precip.V1.0.mon.mean.nc"
# Get CMEC environment variables
test_path = os.getenv("CMEC_MODEL_DATA")
obs_path = os.getenv("CMEC_OBS_DATA")
out_path = os.getenv("CMEC_WK_DIR")
# Get user settings from cmec interface
user_settings_json = os.path.expandvars('$CMEC_CONFIG_DIR/cmec.json')
try:
with open(user_settings_json) as config_file:
user_settings = json.load(config_file).get("Drought_Metrics")
# Get any environment variables and check settings type
for setting in user_settings:
if isinstance(user_settings[setting], str):
user_settings[setting] = os.path.expandvars(user_settings[setting])
# User settings to global variables
globals().update(user_settings)
obs_path = os.path.join(obs_path, obs_file_name)
except json.decoder.JSONDecodeError:
print("*** Could not load settings from " + str(user_settings_json) + ". File may not be valid JSON. Using defaults ***\n")
obs_path = os.path.join(obs_path, obs_file_name)
# Loop over all files under test_path and conduct the data analysis.
x = evaluation()
x.evaluate_multi_model(
test_path, obs_path, wgt_path, hu_name,
shp_path, out_path, interpolation=interpolation)
# Conduct the PFA to get Principal Metrics within the region defined.
# The column names of principal metrics are saved at 'output_principal_metrics_column_defined'.
if run_pfa:
print("Running Principal Features Analysis")
pfa_path = out_path + "/output_principal_metrics_column_defined"
x.PFA(out_path=out_path, column_name=pfa_path)
else:
print("Using principal features from " + str(pfa))
pfa_path = pfa
# Make sure to get the names of the principal metrics defined by the PFA first.
# (Here I provide a template named 'output_principal_metrics_column_defined').
# Select the principal metrics defined at 'output_principal_metrics_column_defined' and make plots
x.PM_selection(out_path=out_path, column_name=pfa_path)
x.result_analysis(out_path=out_path, column_name=pfa_path, upper_limit=2)
x.make_taylor_diagram(out_path)
|
[] |
[] |
[
"CMEC_WK_DIR",
"CMEC_MODEL_DATA",
"CMEC_OBS_DATA"
] |
[]
|
["CMEC_WK_DIR", "CMEC_MODEL_DATA", "CMEC_OBS_DATA"]
|
python
| 3 | 0 | |
king_phisher/client/gui_utilities.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/gui_utilities.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import calendar
import contextlib
import copy
import datetime
import functools
import logging
import os
import socket
import threading
import xml.sax.saxutils as saxutils
from king_phisher import find
from king_phisher import utilities
from gi.repository import Gdk
from gi.repository import Gio
from gi.repository import GLib
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import GtkSource
GObject.type_register(GtkSource.View)
GOBJECT_PROPERTY_MAP = {
'calendar': None, # delayed definition
'checkbutton': 'active',
'combobox': (
lambda c, v: c.set_active_iter(gtk_list_store_search(c.get_model(), v)),
lambda c: c.get_model().get_value(c.get_active_iter() or c.get_model().get_iter_first(), 0)
),
'entry': 'text',
'spinbutton': 'value',
'switch': 'active',
'textview': (
lambda t, v: t.get_buffer().set_text(v),
lambda t: t.get_buffer().get_text(t.get_buffer().get_start_iter(), t.get_buffer().get_end_iter(), False)
)
}
"""
The dictionary which maps GObjects to either the names of properties to
store text or a tuple which contains a set and get function. If a tuple
of two functions is specified the set function will be provided two
parameters, the object and the value and the get function will just be
provided the object.
"""
# modified from the official python3 work-around per
# https://docs.python.org/3.0/whatsnew/3.0.html#ordering-comparisons
def _cmp(item1, item2):
"""
Compare two arbitrary Python objects. The object types should either be the
same or one or both may be ``None``.
:rtype: int
:return: ``-1`` if *item1* is less than *item2*, ``0`` if they are equal or
``1`` if *item1* is greater than *item2*.
"""
if item1 is None:
return 0 if item2 is None else -1
if item2 is None:
return 1
return (item1 > item2) - (item1 < item2)
def which_glade():
"""
Locate the glade data file which stores the UI information in a Gtk Builder
format.
:return: The path to the glade data file.
:rtype: str
"""
return find.data_file(os.environ.get('KING_PHISHER_GLADE_FILE', 'king-phisher-client.ui'))
def _store_extend(store, things, clear=False):
if clear:
store.clear()
for thing in things:
store.append(thing)
def glib_idle_add_store_extend(store, things, clear=False, wait=False):
"""
Extend a GTK store object (either :py:class:`Gtk.ListStore` or
:py:class:`Gtk.TreeStore`) object using :py:func:`GLib.idle_add`. This
function is suitable for use in non-main GUI threads for synchronizing data.
:param store: The GTK storage object to add *things* to.
:type store: :py:class:`Gtk.ListStore`, :py:class:`Gtk.TreeStore`
:param tuple things: The array of things to add to *store*.
:param bool clear: Whether or not to clear the storage object before adding *things* to it.
:param bool wait: Whether or not to wait for the operation to complete before returning.
:return: Regardless of the *wait* parameter, ``None`` is returned.
:rtype: None
"""
if not isinstance(store, Gtk.ListStore):
raise TypeError('store must be a Gtk.ListStore instance')
idle_add = glib_idle_add_wait if wait else glib_idle_add_once
idle_add(_store_extend, store, things, clear)
def glib_idle_add_once(function, *args, **kwargs):
"""
Execute *function* in the main GTK loop using :py:func:`GLib.idle_add`
one time. This is useful for threads that need to update GUI data.
:param function function: The function to call.
:param args: The positional arguments to *function*.
:param kwargs: The keyword arguments to *function*.
:return: The result of the function call.
"""
@functools.wraps(function)
def wrapper():
function(*args, **kwargs)
return False
return GLib.idle_add(wrapper)
def glib_idle_add_wait(function, *args, **kwargs):
"""
Execute *function* in the main GTK loop using :py:func:`GLib.idle_add`
and block until it has completed. This is useful for threads that need
to update GUI data.
:param function function: The function to call.
:param args: The positional arguments to *function*.
:param kwargs: The keyword arguments to *function*.
:return: The result of the function call.
"""
gsource_completed = threading.Event()
results = []
@functools.wraps(function)
def wrapper():
results.append(function(*args, **kwargs))
gsource_completed.set()
return False
GLib.idle_add(wrapper)
gsource_completed.wait()
return results.pop()
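# A minimal usage sketch (hypothetical names, not part of the original module):
# a worker thread that needs a value computed on the GTK main loop can call
#     row_count = glib_idle_add_wait(lambda: len(my_list_store))
# which blocks the worker until the main loop has produced the result.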
def gobject_get_value(gobject, gtype=None):
"""
Retrieve the value of a GObject widget. Only objects with corresponding
entries present in the :py:data:`.GOBJECT_PROPERTY_MAP` can be processed by
this function.
:param gobject: The object to retrieve the value for.
:type gobject: :py:class:`GObject.Object`
:param str gtype: An explicit type to treat *gobject* as.
:return: The value of *gobject*.
:rtype: str
"""
gtype = (gtype or gobject.__class__.__name__)
gtype = gtype.lower()
if isinstance(GOBJECT_PROPERTY_MAP[gtype], (list, tuple)):
try:
value = GOBJECT_PROPERTY_MAP[gtype][1](gobject)
except AttributeError:
return None
else:
value = gobject.get_property(GOBJECT_PROPERTY_MAP[gtype])
return value
def gobject_set_value(gobject, value, gtype=None):
"""
Set the value of a GObject widget. Only objects with corresponding entries
present in the :py:data:`.GOBJECT_PROPERTY_MAP` can be processed by this
function.
:param gobject: The object to set the value for.
:type gobject: :py:class:`GObject.Object`
:param value: The value to set for the object.
:param str gtype: An explicit type to treat *gobject* as.
"""
gtype = (gtype or gobject.__class__.__name__)
gtype = gtype.lower()
if gtype not in GOBJECT_PROPERTY_MAP:
raise ValueError('unsupported gtype: ' + gtype)
if isinstance(GOBJECT_PROPERTY_MAP[gtype], (list, tuple)):
GOBJECT_PROPERTY_MAP[gtype][0](gobject, value)
else:
gobject.set_property(GOBJECT_PROPERTY_MAP[gtype], value)
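# A short illustration (hypothetical widgets, not part of the original module):
# for a Gtk.Entry the map stores the property name 'text', so
#     gobject_set_value(entry, 'hello')  # equivalent to entry.set_property('text', 'hello')
#     gobject_get_value(entry)           # returns 'hello'
# whereas for a Gtk.TextView the (setter, getter) tuple from the map is used.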
@contextlib.contextmanager
def gobject_signal_blocked(gobject, signal_name):
"""
This is a context manager that can be used with the 'with' statement
to execute a block of code while *signal_name* is blocked.
:param gobject: The object to block the signal on.
:type gobject: :py:class:`GObject.Object`
:param str signal_name: The name of the signal to block.
"""
signal_id = GObject.signal_lookup(signal_name, gobject.__class__)
handler_id = GObject.signal_handler_find(gobject, GObject.SignalMatchType.ID, signal_id, 0, None, 0, 0)
GObject.signal_handler_block(gobject, handler_id)
yield
GObject.signal_handler_unblock(gobject, handler_id)
def gobject_signal_accumulator(test=None):
"""
Create an accumulator function for use with GObject signals. All return
values will be collected and returned in a list. If provided, *test* is a
callback that will be called with two arguments, the return value from the
handler and the list of accumulated return values.
.. code-block:: python
stop = test(retval, accumulated)
:param test: A callback to test whether additional handler should be executed.
"""
if test is None:
test = lambda retval, accumulated: True
def _accumulator(_, accumulated, retval):
if accumulated is None:
accumulated = []
stop = test(retval, accumulated)
accumulated.append(retval)
return (stop, accumulated)
return _accumulator
def gtk_calendar_get_pydate(gtk_calendar):
"""
Get the Python date from a :py:class:`Gtk.Calendar` instance. If the day
in *gtk_calendar* is not within the valid range for the specified month, it
will be rounded to the closest value (i.e. 0 for unset will become 1 etc.).
:param gtk_calendar: The calendar to get the date from.
:type gtk_calendar: :py:class:`Gtk.Calendar`
:return: The date as returned by the calendar's :py:meth:`~Gtk.Calendar.get_date` method.
:rtype: :py:class:`datetime.date`
"""
if not isinstance(gtk_calendar, Gtk.Calendar):
raise ValueError('calendar must be a Gtk.Calendar instance')
year, month, day = gtk_calendar.get_date()
month += 1 # account for Gtk.Calendar starting at 0
_, last_day_of_month = calendar.monthrange(year, month)
day = max(1, min(day, last_day_of_month))
return datetime.date(year, month, day)
def gtk_calendar_set_pydate(gtk_calendar, pydate):
"""
Set the date on a :py:class:`Gtk.Calendar` instance from a Python
:py:class:`datetime.date` object.
:param gtk_calendar: The gtk_calendar to set the date for.
:type gtk_calendar: :py:class:`Gtk.Calendar`
:param pydate: The date to set on the gtk_calendar.
:type pydate: :py:class:`datetime.date`
"""
gtk_calendar.select_month(pydate.month - 1, pydate.year)
gtk_calendar.select_day(pydate.day)
GOBJECT_PROPERTY_MAP['calendar'] = (
gtk_calendar_set_pydate,
gtk_calendar_get_pydate
)
def gtk_list_store_search(list_store, value, column=0):
"""
Search a :py:class:`Gtk.ListStore` for a value and return a
:py:class:`Gtk.TreeIter` to the first match.
:param list_store: The list store to search.
:type list_store: :py:class:`Gtk.ListStore`
:param value: The value to search for.
:param int column: The column in the row to check.
:return: The row on which the value was found.
:rtype: :py:class:`Gtk.TreeIter`
"""
for row in list_store:
if row[column] == value:
return row.iter
return None
def gtk_listbox_populate_labels(listbox, label_strings):
"""
Formats and adds labels to a listbox. Each label is styled as a separate
entry.
.. versionadded:: 1.13.0
:param listbox: Gtk Listbox to put the labels in.
:type listbox: :py:class:`Gtk.listbox`
:param list label_strings: List of strings to add to the Gtk Listbox as labels.
"""
gtk_widget_destroy_children(listbox)
listbox.set_property('visible', True)
for label_text in label_strings:
label = Gtk.Label()
label.set_markup("<span font=\"smaller\"><tt>{0}</tt></span>".format(saxutils.escape(label_text)))
label.set_property('halign', Gtk.Align.START)
label.set_property('use-markup', True)
label.set_property('valign', Gtk.Align.START)
label.set_property('visible', True)
listbox.add(label)
def gtk_menu_get_item_by_label(menu, label):
"""
Retrieve a menu item from a menu by its label. If more than one item shares
the same label, only the first is returned.
:param menu: The menu to search for the item in.
:type menu: :py:class:`Gtk.Menu`
:param str label: The label to search for in *menu*.
:return: The identified menu item if it could be found, otherwise None is returned.
:rtype: :py:class:`Gtk.MenuItem`
"""
for item in menu:
if item.get_label() == label:
return item
def gtk_menu_insert_by_path(menu, menu_path, menu_item):
"""
Add a new menu item into the existing menu at the path specified in
*menu_path*.
:param menu: The existing menu to add the new item to.
:type menu: :py:class:`Gtk.Menu` :py:class:`Gtk.MenuBar`
:param list menu_path: The labels of submenus to traverse to insert the new item.
:param menu_item: The new menu item to insert.
:type menu_item: :py:class:`Gtk.MenuItem`
"""
utilities.assert_arg_type(menu, (Gtk.Menu, Gtk.MenuBar), 1)
utilities.assert_arg_type(menu_path, list, 2)
utilities.assert_arg_type(menu_item, Gtk.MenuItem, 3)
while len(menu_path):
label = menu_path.pop(0)
menu_cursor = gtk_menu_get_item_by_label(menu, label)
if menu_cursor is None:
raise ValueError('missing node labeled: ' + label)
menu = menu_cursor.get_submenu()
menu.append(menu_item)
def gtk_menu_position(event, *args):
"""
Create a menu at the given location for an event. This function is meant to
be used as the *func* parameter for the :py:meth:`Gtk.Menu.popup` method.
The *event* object must be passed in as the first parameter, which can be
accomplished using :py:func:`functools.partial`.
:param event: The event to retrieve the coordinates for.
"""
if not hasattr(event, 'get_root_coords'):
raise TypeError('event object has no get_root_coords method')
coords = event.get_root_coords()
return (coords[0], coords[1], True)
def gtk_style_context_get_color(sc, color_name, default=None):
"""
Look up a color by its name in the :py:class:`Gtk.StyleContext` specified
in *sc*, and return it as an :py:class:`Gdk.RGBA` instance if the color is
defined. If the color is not found, *default* will be returned.
:param sc: The style context to use.
:type sc: :py:class:`Gtk.StyleContext`
:param str color_name: The name of the color to lookup.
:param default: The default color to return if the specified color was not found.
:type default: str, :py:class:`Gdk.RGBA`
:return: The color as an RGBA instance.
:rtype: :py:class:`Gdk.RGBA`
"""
found, color_rgba = sc.lookup_color(color_name)
if found:
return color_rgba
if isinstance(default, str):
color_rgba = Gdk.RGBA()
color_rgba.parse(default)
return color_rgba
elif isinstance(default, Gdk.RGBA):
return default
return
def gtk_sync():
"""Wait while all pending GTK events are processed."""
while Gtk.events_pending():
Gtk.main_iteration()
def gtk_treesortable_sort_func(model, iter1, iter2, column_id):
column_id = column_id or 0
item1 = model.get_value(iter1, column_id)
item2 = model.get_value(iter2, column_id)
return _cmp(item1, item2)
def gtk_treesortable_sort_func_numeric(model, iter1, iter2, column_id):
"""
Sort the model by comparing text numeric values with place holders such as
1,337. This is meant to be set as a sorting function using
:py:meth:`Gtk.TreeSortable.set_sort_func`. The user_data parameter must be
the column id which contains the numeric values to be sorted.
:param model: The model that is being sorted.
:type model: :py:class:`Gtk.TreeSortable`
:param iter1: The iterator of the first item to compare.
:type iter1: :py:class:`Gtk.TreeIter`
:param iter2: The iterator of the second item to compare.
:type iter2: :py:class:`Gtk.TreeIter`
:param column_id: The ID of the column containing numeric values.
:return: An integer, -1 if item1 should come before item2, 0 if they are the same and 1 if item1 should come after item2.
:rtype: int
"""
column_id = column_id or 0
item1 = model.get_value(iter1, column_id).replace(',', '')
item2 = model.get_value(iter2, column_id).replace(',', '')
if item1.isdigit() and item2.isdigit():
return _cmp(int(item1), int(item2))
if item1.isdigit():
return -1
elif item2.isdigit():
return 1
item1 = model.get_value(iter1, column_id)
item2 = model.get_value(iter2, column_id)
return _cmp(item1, item2)
def gtk_treeview_selection_iterate(treeview):
"""
Iterate over a treeview's selected rows.
:param treeview: The treeview to iterate over.
:type treeview: :py:class:`Gtk.TreeView`
:return: The rows which are selected within the treeview.
:rtype: :py:class:`Gtk.TreeIter`
"""
selection = treeview.get_selection()
(model, tree_paths) = selection.get_selected_rows()
if not tree_paths:
return
for tree_path in tree_paths:
yield model.get_iter(tree_path)
def gtk_treeview_selection_to_clipboard(treeview, columns=0):
"""
Copy the currently selected values from the specified columns in the
treeview to the user's clipboard. If no value is selected in the treeview,
then the clipboard is left unmodified. If multiple values are selected, they
will all be placed in the clipboard on separate lines.
:param treeview: The treeview instance to get the selection from.
:type treeview: :py:class:`Gtk.TreeView`
:param columns: The column numbers to retrieve the values for.
:type columns: int, list, tuple
"""
treeview_selection = treeview.get_selection()
(model, tree_paths) = treeview_selection.get_selected_rows()
if not tree_paths:
return
if isinstance(columns, int):
columns = (columns,)
tree_iters = map(model.get_iter, tree_paths)
selection_lines = []
for ti in tree_iters:
values = (model.get_value(ti, column) for column in columns)
values = (('' if value is None else str(value)) for value in values)
selection_lines.append(' '.join(values).strip())
selection_lines = os.linesep.join(selection_lines)
clipboard = Gtk.Clipboard.get(Gdk.SELECTION_CLIPBOARD)
clipboard.set_text(selection_lines, -1)
def gtk_treeview_get_column_titles(treeview):
"""
Iterate over a GTK TreeView and return a tuple containing the id and title
of each of its columns.
:param treeview: The treeview instance to retrieve columns from.
:type treeview: :py:class:`Gtk.TreeView`
"""
for column_id, column in enumerate(treeview.get_columns()):
column_name = column.get_title()
yield (column_id, column_name)
def gtk_treeview_set_column_titles(treeview, column_titles, column_offset=0, renderers=None):
"""
Populate the column names of a GTK TreeView and set their sort IDs.
:param treeview: The treeview to set column names for.
:type treeview: :py:class:`Gtk.TreeView`
:param list column_titles: The names of the columns.
:param int column_offset: The offset to start setting column names at.
:param list renderers: A list containing custom renderers to use for each column.
:return: A dict of all the :py:class:`Gtk.TreeViewColumn` objects keyed by their column id.
:rtype: dict
"""
columns = {}
for column_id, column_title in enumerate(column_titles, column_offset):
renderer = renderers[column_id - column_offset] if renderers else Gtk.CellRendererText()
if isinstance(renderer, Gtk.CellRendererToggle):
column = Gtk.TreeViewColumn(column_title, renderer, active=column_id)
elif hasattr(renderer.props, 'python_value'):
column = Gtk.TreeViewColumn(column_title, renderer, python_value=column_id)
else:
column = Gtk.TreeViewColumn(column_title, renderer, text=column_id)
column.set_property('min-width', 25)
column.set_property('reorderable', True)
column.set_property('resizable', True)
column.set_sort_column_id(column_id)
treeview.append_column(column)
columns[column_id] = column
return columns
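# A minimal usage sketch (hypothetical treeview, not part of the original module):
#     columns = gtk_treeview_set_column_titles(treeview, ('Name', 'Created'))
# creates two text columns with sort ids 0 and 1 and returns them keyed by id,
# so individual columns can be tweaked afterwards, e.g.
#     columns[0].set_property('expand', True)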
def gtk_widget_destroy_children(widget):
"""
Destroy all GTK child objects of *widget*.
:param widget: The widget to destroy all the children of.
:type widget: :py:class:`Gtk.Widget`
"""
for child in widget.get_children():
child.destroy()
def show_dialog(message_type, message, parent, secondary_text=None, message_buttons=Gtk.ButtonsType.OK, use_markup=False, secondary_use_markup=False):
"""
Display a dialog and return the response. The response is dependent on
the value of *message_buttons*.
:param message_type: The GTK message type to display.
:type message_type: :py:class:`Gtk.MessageType`
:param str message: The text to display in the dialog.
:param parent: The parent window that the dialog should belong to.
:type parent: :py:class:`Gtk.Window`
:param str secondary_text: Optional subtext for the dialog.
:param message_buttons: The buttons to display in the dialog box.
:type message_buttons: :py:class:`Gtk.ButtonsType`
:param bool use_markup: Whether or not to treat the message text as markup.
:param bool secondary_use_markup: Whether or not to treat the secondary text as markup.
:return: The response of the dialog.
:rtype: int
"""
dialog = Gtk.MessageDialog(parent, Gtk.DialogFlags.DESTROY_WITH_PARENT, message_type, message_buttons)
dialog.set_property('text', message)
dialog.set_property('use-markup', use_markup)
dialog.set_property('secondary-text', secondary_text)
dialog.set_property('secondary-use-markup', secondary_use_markup)
if secondary_use_markup:
signal_label_activate_link = lambda _, uri: utilities.open_uri(uri)
for label in dialog.get_message_area().get_children():
if not isinstance(label, Gtk.Label):
continue
label.connect('activate-link', signal_label_activate_link)
dialog.show_all()
response = dialog.run()
dialog.destroy()
return response
def show_dialog_error(*args, **kwargs):
"""Display an error dialog with :py:func:`.show_dialog`."""
return show_dialog(Gtk.MessageType.ERROR, *args, **kwargs)
def show_dialog_exc_socket_error(error, parent, title=None):
"""
Display an error dialog with details regarding a :py:exc:`socket.error`
exception that has been raised.
:param error: The exception instance that has been raised.
:type error: :py:exc:`socket.error`
:param parent: The parent window that the dialog should belong to.
:type parent: :py:class:`Gtk.Window`
:param title: The title of the error dialog that is displayed.
"""
title = title or 'Connection Error'
if isinstance(error, socket.timeout):
description = 'The connection to the server timed out.'
elif len(error.args) > 1:
error_number, error_message = error.args[:2]
if error_number == 111:
description = 'The server refused the connection.'
else:
description = "Socket error #{0} ({1}).".format((error_number or 'N/A'), error_message)
else:
# fall back to the exception text so that *description* is always defined
description = str(error)
return show_dialog(Gtk.MessageType.ERROR, title, parent, secondary_text=description)
def show_dialog_info(*args, **kwargs):
"""Display an informational dialog with :py:func:`.show_dialog`."""
return show_dialog(Gtk.MessageType.INFO, *args, **kwargs)
def show_dialog_warning(*args, **kwargs):
"""Display an warning dialog with :py:func:`.show_dialog`."""
return show_dialog(Gtk.MessageType.WARNING, *args, **kwargs)
def show_dialog_yes_no(*args, **kwargs):
"""
Display a dialog which asks a yes or no question with
:py:func:`.show_dialog`.
:return: True if the response is Yes.
:rtype: bool
"""
kwargs['message_buttons'] = Gtk.ButtonsType.YES_NO
return show_dialog(Gtk.MessageType.QUESTION, *args, **kwargs) == Gtk.ResponseType.YES
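# A minimal usage sketch (hypothetical parent window, not part of the original
# module):
#     if show_dialog_yes_no('Delete this campaign?', parent_window):
#         ...  # the user clicked Yes
# show_dialog_error / show_dialog_info / show_dialog_warning follow the same
# calling convention, with OK as the only button by default.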
class GladeDependencies(object):
"""
A class for defining how objects should be loaded from a GTK Builder data
file for use with :py:class:`.GladeGObject`.
"""
__slots__ = ('children', 'top_level', 'name')
def __init__(self, children=None, top_level=None, name=None):
children = children or ()
utilities.assert_arg_type(children, tuple, 1)
self.children = children
"""A tuple of string names or :py:class:`.GladeProxy` instances listing the children widgets to load from the parent."""
self.top_level = top_level
"""A tuple of string names listing additional top level widgets to load such as images."""
self.name = name
"""The string of the name of the top level parent widget to load."""
def __repr__(self):
return "<{0} name='{1}' >".format(self.__class__.__name__, self.name)
class GladeProxyDestination(object):
"""
A class that is used to define how a :py:class:`.GladeProxy` object shall
be loaded into a parent :py:class:`.GladeGObject` instance. This includes
the information such as what container widget in the parent the proxied
widget should be added to and what method should be used. The proxied widget
will be added to the parent by calling
:py:attr:`~.GladeProxyDestination.method` with the proxied widget as the
first argument.
"""
__slots__ = ('widget', 'method', 'args', 'kwargs')
def __init__(self, method, widget=None, args=None, kwargs=None):
"""
:param str method: The method of the container *widget* to use to add
the proxied widget.
:param str widget: The widget name to add the proxied widget to. If this
value is ``None``, the proxied widget is added to the top level
widget.
:param tuple args: Positional arguments to provide when calling *method*.
:param dict kwargs: Keyword arguments to provide when calling *method*.
"""
utilities.assert_arg_type(method, str, 1)
utilities.assert_arg_type(widget, (type(None), str), 2)
self.widget = widget
"""The name of the parent widget for this proxied child."""
self.method = method
"""The method of the parent widget that should be called to add the proxied child."""
self.args = args or ()
"""Arguments to append after the proxied child instance when calling :py:attr:`~.GladeProxyDestination.method`."""
self.kwargs = kwargs or {}
"""Key word arguments to append after the proxied child instance when calling :py:attr:`~.GladeProxyDestination.method`."""
def __repr__(self):
return "<{0} widget='{1}' method='{2}' >".format(self.__class__.__name__, self.widget, self.method)
class GladeProxy(object):
"""
A class that can be used to load another top level widget from the GTK
builder data file in place of a child. This is useful for reusing small
widgets as children in larger ones.
"""
__slots__ = ('destination',)
name = None
"""The string of the name of the top level widget to load."""
children = ()
"""A tuple of string names or :py:class:`.GladeProxy` instances listing the children widgets to load from the top level."""
def __init__(self, destination):
utilities.assert_arg_type(destination, GladeProxyDestination, 1)
self.destination = destination
"""A :py:class:`.GladeProxyDestination` instance describing how this proxied widget should be added to the parent."""
def __repr__(self):
return "<{0} name='{1}' destination={2} >".format(self.__class__.__name__, self.name, repr(self.destination))
class GladeGObjectMeta(type):
"""
A meta class that will update the :py:attr:`.GladeDependencies.name` value
in the :py:attr:`.GladeGObject.dependencies` attribute of instances if no
value is defined.
"""
assigned_name = type('assigned_name', (str,), {})
"""A type subclassed from str that is used to define names which have been automatically assigned by this class."""
def __init__(cls, *args, **kwargs):
dependencies = getattr(cls, 'dependencies', None)
if dependencies is not None:
dependencies = copy.deepcopy(dependencies)
setattr(cls, 'dependencies', dependencies)
if isinstance(dependencies.name, (None.__class__, cls.assigned_name)):
dependencies.name = cls.assigned_name(cls.__name__)
super(GladeGObjectMeta, cls).__init__(*args, **kwargs)
# stylized metaclass definition to be Python 2.7 and 3.x compatible
class GladeGObject(GladeGObjectMeta('_GladeGObject', (object,), {})):
"""
A base object to wrap GTK widgets loaded from Glade data files. This
provides a number of convenience methods for managing the main widget and
child widgets. This class is meant to be subclassed by classes representing
objects from the Glade data file.
"""
dependencies = GladeDependencies()
"""A :py:class:`.GladeDependencies` instance which defines information for loading the widget from the GTK builder data."""
config_prefix = ''
"""A prefix to be used for keys when looking up value in the :py:attr:`~.GladeGObject.config`."""
top_gobject = 'gobject'
"""The name of the attribute to set a reference of the top level GObject to."""
objects_persist = True
"""Whether objects should be automatically loaded from and saved to the configuration."""
def __init__(self, application):
"""
:param application: The parent application for this object.
:type application: :py:class:`Gtk.Application`
"""
utilities.assert_arg_type(application, Gtk.Application, arg_pos=1)
self.config = application.config
"""A reference to the King Phisher client configuration."""
self.application = application
"""The parent :py:class:`Gtk.Application` instance."""
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
builder = Gtk.Builder()
self.gtk_builder = builder
"""A :py:class:`Gtk.Builder` instance used to load Glade data with."""
top_level_dependencies = [gobject.name for gobject in self.dependencies.children if isinstance(gobject, GladeProxy)]
top_level_dependencies.append(self.dependencies.name)
if self.dependencies.top_level is not None:
top_level_dependencies.extend(self.dependencies.top_level)
builder.add_objects_from_file(which_glade(), top_level_dependencies)
builder.connect_signals(self)
gobject = builder.get_object(self.dependencies.name)
setattr(self, self.top_gobject, gobject)
if isinstance(gobject, Gtk.Window):
gobject.set_transient_for(self.application.get_active_window())
self.application.add_reference(self)
if isinstance(gobject, Gtk.ApplicationWindow):
application.add_window(gobject)
if isinstance(gobject, Gtk.Dialog):
gobject.set_modal(True)
self.gobjects = utilities.FreezableDict()
"""A :py:class:`~king_phisher.utilities.FreezableDict` which maps gobjects to their unique GTK Builder id."""
self._load_child_dependencies(self.dependencies)
self.gobjects.freeze()
self._load_child_proxies()
if self.objects_persist:
self.objects_load_from_config()
def _load_child_dependencies(self, dependencies):
for child in dependencies.children:
if isinstance(child, GladeProxy):
self._load_child_dependencies(child)
child = child.destination.widget
if child is None:
continue
gobject = self.gtk_builder_get(child, parent_name=dependencies.name)
# the following checks ensure that the widget types match up; this is to enforce clean development
gtype = child.split('_', 1)[0]
if gobject is None:
raise TypeError("gobject {0} could not be found in the glade file".format(child))
elif gobject.__class__.__name__.lower() != gtype:
raise TypeError("gobject {0} is of type {1} expected {2}".format(child, gobject.__class__.__name__, gtype))
elif child in self.gobjects:
raise ValueError("key: {0!r} is already in self.gobjects".format(child))
self.gobjects[child] = gobject
def _load_child_proxies(self):
for child in self.dependencies.children or []:
if not isinstance(child, GladeProxy):
continue
dest = child.destination
widget = self.gtk_builder.get_object(self.dependencies.name) if dest.widget is None else self.gobjects[dest.widget]
method = getattr(widget, dest.method)
if method is None:
raise ValueError("gobject {0} does not have method {1}".format(dest.widget, dest.method))
src_widget = self.gtk_builder.get_object(child.name)
self.logger.debug("setting proxied widget {0} via {1}.{2}".format(child.name, dest.widget, dest.method))
method(src_widget, *dest.args, **dest.kwargs)
@property
def parent(self):
return self.application.get_active_window()
def get_entry_value(self, entry_name):
"""
Get the value of the specified entry, strip leading and trailing
whitespace, and return None if the resulting string is empty.
:param str entry_name: The name of the entry to retrieve text from.
:return: Either the non-empty string or None.
:rtype: None, str
"""
text = self.gobjects['entry_' + entry_name].get_text()
text = text.strip()
if not text:
return None
return text
def gtk_builder_get(self, gobject_id, parent_name=None):
"""
Find the child GObject with name *gobject_id* from the GTK builder.
:param str gobject_id: The object name to look for.
:param str parent_name: The name of the parent object in the builder data file.
:return: The GObject as found by the GTK builder.
:rtype: :py:class:`GObject.Object`
"""
parent_name = parent_name or self.dependencies.name
gtkbuilder_id = "{0}.{1}".format(parent_name, gobject_id)
self.logger.debug('loading GTK builder object with id: ' + gtkbuilder_id)
return self.gtk_builder.get_object(gtkbuilder_id)
def objects_load_from_config(self):
"""
Iterate through :py:attr:`.gobjects` and set the GObject's value
from the corresponding value in the :py:attr:`~.GladeGObject.config`.
"""
for gobject_id, gobject in self.gobjects.items():
if '_' not in gobject_id:
continue
gtype, config_name = gobject_id.split('_', 1)
config_name = self.config_prefix + config_name
if gtype not in GOBJECT_PROPERTY_MAP:
continue
value = self.config.get(config_name)
if value is None:
continue
if isinstance(GOBJECT_PROPERTY_MAP[gtype], (list, tuple)):
GOBJECT_PROPERTY_MAP[gtype][0](gobject, value)
else:
gobject.set_property(GOBJECT_PROPERTY_MAP[gtype], value)
def objects_save_to_config(self):
for gobject_id, gobject in self.gobjects.items():
if not '_' in gobject_id:
continue
gtype, config_name = gobject_id.split('_', 1)
config_name = self.config_prefix + config_name
if not gtype in GOBJECT_PROPERTY_MAP:
continue
self.config[config_name] = gobject_get_value(gobject, gtype)
# forwarded methods
def destroy(self):
"""Call :py:meth:`~Gtk.Widget.destroy` on the top-level GTK Widget."""
getattr(self, self.top_gobject).destroy()
def hide(self):
"""Call :py:meth:`~Gtk.Widget.hide` on the top-level GTK Widget."""
getattr(self, self.top_gobject).hide()
def show(self):
"""Call :py:meth:`~Gtk.Widget.show` on the top-level GTK Widget."""
getattr(self, self.top_gobject).show()
def show_all(self):
"""Call :py:meth:`~Gtk.Widget.show_all` on the top-level GTK Widget."""
getattr(self, self.top_gobject).show_all()
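# Illustrative sketch (hypothetical widget ids): a minimal GladeGObject subclass only
# needs to declare its dependencies; the metaclass fills in the top level name from the
# class name when one is not given explicitly.
#
#   class ConfigurationDialog(GladeGObject):
#       dependencies = GladeDependencies(
#           children=('entry_server',)
#       )
#       config_prefix = 'example.'
#
#   dialog = ConfigurationDialog(application)
#   dialog.show_all()
#   server = dialog.get_entry_value('server')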
class FileMonitor(object):
"""Monitor a file for changes."""
def __init__(self, path, on_changed):
"""
:param str path: The path to monitor for changes.
:param on_changed: The callback function to be called when changes are detected.
:type on_changed: function
"""
self.logger = logging.getLogger('KingPhisher.Utility.FileMonitor')
self.on_changed = on_changed
self.path = path
self._gfile = Gio.file_new_for_path(path)
self._gfile_monitor = self._gfile.monitor(Gio.FileMonitorFlags.NONE, None)
self._gfile_monitor.connect('changed', self.cb_changed)
self.logger.debug('starting file monitor for: ' + path)
def __del__(self):
self.stop()
def stop(self):
"""Stop monitoring the file."""
if self._gfile_monitor.is_cancelled():
return
self._gfile_monitor.cancel()
self.logger.debug('cancelled file monitor for: ' + self.path)
def cb_changed(self, gfile_monitor, gfile, gfile_other, gfile_monitor_event):
self.logger.debug("file monitor {0} received event: {1}".format(self.path, gfile_monitor_event.value_name))
self.on_changed(self.path, gfile_monitor_event)
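# Illustrative usage sketch (hypothetical path and callback): the callback receives the
# monitored path and the Gio file monitor event.
#
#   def on_settings_changed(path, monitor_event):
#       print(path, monitor_event)
#
#   monitor = FileMonitor('/tmp/settings.json', on_settings_changed)
#   # ... later, when the file no longer needs to be watched
#   monitor.stop()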
|
[] |
[] |
[
"KING_PHISHER_GLADE_FILE"
] |
[]
|
["KING_PHISHER_GLADE_FILE"]
|
python
| 1 | 0 | |
luigi/contrib/scalding.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import subprocess
import warnings
from luigi import six
import luigi.configuration
import luigi.contrib.hadoop
import luigi.contrib.hadoop_jar
import luigi.contrib.hdfs
from luigi import LocalTarget
from luigi.task import flatten
logger = logging.getLogger('luigi-interface')
"""
Scalding support for Luigi.
Example configuration section in luigi.cfg::
[scalding]
# scala home directory, which should include a lib subdir with scala jars.
scala-home: /usr/share/scala
# scalding home directory, which should include a lib subdir with
# scalding-*-assembly-* jars as built from the official Twitter build script.
scalding-home: /usr/share/scalding
# provided dependencies, e.g. jars required for compiling but not executing
# scalding jobs. Currently required jars:
# org.apache.hadoop/hadoop-core/0.20.2
# org.slf4j/slf4j-log4j12/1.6.6
# log4j/log4j/1.2.15
# commons-httpclient/commons-httpclient/3.1
# commons-cli/commons-cli/1.2
# org.apache.zookeeper/zookeeper/3.3.4
scalding-provided: /usr/share/scalding/provided
# additional jars required.
scalding-libjars: /usr/share/scalding/libjars
"""
class ScaldingJobRunner(luigi.contrib.hadoop.JobRunner):
"""
JobRunner for `pyscald` commands. Used to run a ScaldingJobTask.
"""
def __init__(self):
conf = luigi.configuration.get_config()
default = os.environ.get('SCALA_HOME', '/usr/share/scala')
self.scala_home = conf.get('scalding', 'scala-home', default)
default = os.environ.get('SCALDING_HOME', '/usr/share/scalding')
self.scalding_home = conf.get('scalding', 'scalding-home', default)
self.provided_dir = conf.get(
'scalding', 'scalding-provided', os.path.join(default, 'provided'))
self.libjars_dir = conf.get(
'scalding', 'scalding-libjars', os.path.join(default, 'libjars'))
self.tmp_dir = LocalTarget(is_tmp=True)
def _get_jars(self, path):
return [os.path.join(path, j) for j in os.listdir(path)
if j.endswith('.jar')]
def get_scala_jars(self, include_compiler=False):
lib_dir = os.path.join(self.scala_home, 'lib')
jars = [os.path.join(lib_dir, 'scala-library.jar')]
# additional jar for scala 2.10 only
reflect = os.path.join(lib_dir, 'scala-reflect.jar')
if os.path.exists(reflect):
jars.append(reflect)
if include_compiler:
jars.append(os.path.join(lib_dir, 'scala-compiler.jar'))
return jars
def get_scalding_jars(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
return self._get_jars(lib_dir)
def get_scalding_core(self):
lib_dir = os.path.join(self.scalding_home, 'lib')
for j in os.listdir(lib_dir):
if j.startswith('scalding-core-'):
p = os.path.join(lib_dir, j)
logger.debug('Found scalding-core: %s', p)
return p
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding-core.')
def get_provided_jars(self):
return self._get_jars(self.provided_dir)
def get_libjars(self):
return self._get_jars(self.libjars_dir)
def get_tmp_job_jar(self, source):
job_name = os.path.basename(os.path.splitext(source)[0])
return os.path.join(self.tmp_dir.path, job_name + '.jar')
def get_build_dir(self, source):
build_dir = os.path.join(self.tmp_dir.path, 'build')
return build_dir
def get_job_class(self, source):
# find name of the job class
# usually the one that matches file name or last class that extends Job
job_name = os.path.splitext(os.path.basename(source))[0]
package = None
job_class = None
for l in open(source).readlines():
p = re.search(r'package\s+([^\s\(]+)', l)
if p:
package = p.groups()[0]
p = re.search(r'class\s+([^\s\(]+).*extends\s+.*Job', l)
if p:
job_class = p.groups()[0]
if job_class == job_name:
break
if job_class:
if package:
job_class = package + '.' + job_class
logger.debug('Found scalding job class: %s', job_class)
return job_class
else:
raise luigi.contrib.hadoop.HadoopJobError('Could not find scalding job class.')
def build_job_jar(self, job):
job_jar = job.jar()
if job_jar:
if not os.path.exists(job_jar):
logger.error("Can't find jar: %s, full path %s", job_jar, os.path.abspath(job_jar))
raise Exception("job jar does not exist")
if not job.job_class():
logger.error("Undefined job_class()")
raise Exception("Undefined job_class()")
return job_jar
job_src = job.source()
if not job_src:
logger.error("Both source() and jar() undefined")
raise Exception("Both source() and jar() undefined")
if not os.path.exists(job_src):
logger.error("Can't find source: %s, full path %s", job_src, os.path.abspath(job_src))
raise Exception("job source does not exist")
job_src = job.source()
job_jar = self.get_tmp_job_jar(job_src)
build_dir = self.get_build_dir(job_src)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
classpath = ':'.join(filter(None,
self.get_scalding_jars() +
self.get_provided_jars() +
self.get_libjars() +
job.extra_jars()))
scala_cp = ':'.join(self.get_scala_jars(include_compiler=True))
# compile scala source
arglist = ['java', '-cp', scala_cp, 'scala.tools.nsc.Main',
'-classpath', classpath,
'-d', build_dir, job_src]
logger.info('Compiling scala source: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
# build job jar file
arglist = ['jar', 'cf', job_jar, '-C', build_dir, '.']
logger.info('Building job jar: %s', subprocess.list2cmdline(arglist))
subprocess.check_call(arglist)
return job_jar
def run_job(self, job, tracking_url_callback=None):
if tracking_url_callback is not None:
warnings.warn("tracking_url_callback argument is deprecated, task.set_tracking_url is "
"used instead.", DeprecationWarning)
job_jar = self.build_job_jar(job)
jars = [job_jar] + self.get_libjars() + job.extra_jars()
scalding_core = self.get_scalding_core()
libjars = ','.join(filter(None, jars))
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', scalding_core, '-libjars', libjars]
arglist += ['-D%s' % c for c in job.jobconfs()]
job_class = job.job_class() or self.get_job_class(job.source())
arglist += [job_class, '--hdfs']
# scalding does not parse argument with '=' properly
arglist += ['--name', job.task_id.replace('=', ':')]
(tmp_files, job_args) = luigi.contrib.hadoop_jar.fix_paths(job)
arglist += job_args
env = os.environ.copy()
jars.append(scalding_core)
hadoop_cp = ':'.join(filter(None, jars))
env['HADOOP_CLASSPATH'] = hadoop_cp
logger.info("Submitting Hadoop job: HADOOP_CLASSPATH=%s %s",
hadoop_cp, subprocess.list2cmdline(arglist))
luigi.contrib.hadoop.run_and_track_hadoop_job(arglist, job.set_tracking_url, env=env)
for a, b in tmp_files:
a.move(b)
class ScaldingJobTask(luigi.contrib.hadoop.BaseHadoopJobTask):
"""
A job task for Scalding that defines a scala source and an (optional) main job class.
requires() should return a dictionary where the keys are Scalding argument
names and values are sub tasks or lists of subtasks.
For example:
.. code-block:: python
{'input1': A, 'input2': C} => --input1 <Aoutput> --input2 <Coutput>
{'input1': [A, B], 'input2': [C]} => --input1 <Aoutput> <Boutput> --input2 <Coutput>
"""
def relpath(self, current_file, rel_path):
"""
Compute path given current file and relative path.
"""
script_dir = os.path.dirname(os.path.abspath(current_file))
rel_path = os.path.abspath(os.path.join(script_dir, rel_path))
return rel_path
def source(self):
"""
Path to the scala source for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def jar(self):
"""
Path to the jar file for this Scalding Job
Either one of source() or jar() must be specified.
"""
return None
def extra_jars(self):
"""
Extra jars for building and running this Scalding Job.
"""
return []
def job_class(self):
"""
optional main job class for this Scalding Job.
"""
return None
def job_runner(self):
return ScaldingJobRunner()
def atomic_output(self):
"""
If True, then rewrite output arguments to be temp locations and
atomically move them into place after the job finishes.
"""
return True
def requires(self):
return {}
def job_args(self):
"""
Extra arguments to pass to the Scalding job.
"""
return []
def args(self):
"""
Returns an array of args to pass to the job.
"""
arglist = []
for k, v in six.iteritems(self.requires_hadoop()):
arglist.append('--' + k)
arglist.extend([t.output().path for t in flatten(v)])
arglist.extend(['--output', self.output()])
arglist.extend(self.job_args())
return arglist
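# Illustrative sketch (hypothetical class, file and task names): a minimal ScaldingJobTask
# subclass only needs to point at its scala source and declare its inputs; the keys of
# requires() become Scalding argument names as described in the class docstring.
#
#   class WordCount(ScaldingJobTask):
#       def source(self):
#           return self.relpath(__file__, 'scalding/WordCount.scala')
#
#       def requires(self):
#           return {'input': InputTextTask()}  # InputTextTask is a hypothetical upstream task
#
#       def output(self):
#           return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount-output')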
|
[] |
[] |
[
"SCALA_HOME",
"SCALDING_HOME"
] |
[]
|
["SCALA_HOME", "SCALDING_HOME"]
|
python
| 2 | 0 | |
clef/collections.py
|
#!/usr/bin/env python
"""
Copyright 2018 ARC Centre of Excellence for Climate Systems Science
author: Paola Petrelli <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.dialects import sqlite
from .db_noesgf import Base, Dataset, Variable, QC
SQASession = sessionmaker()
class Session(object):
"""Holds a connection to the catalog
Create using :func:`clef.dataset.connect()`
"""
def query(self, *args, **kwargs):
"""Query the not-ESGF collections catalog
Allows you to filter the full list of datasets using `SQLAlchemy commands <http://docs.sqlalchemy.org/en/rel_1_0/orm/tutorial.html#querying>`_
:return: A SQLalchemy query object
"""
return self.session.query(*args, **kwargs)
def files(self, **kwargs):
""" Query the list of files
Returns a list of files that match the arguments
:argument **kwargs: Match any attribute in :class:`Model.Instance`, e.g. `model = 'ACCESS1-3'`
:return: An iterable returning :py:class:`Model.File`
matching the search query
"""
raise NotImplementedError
def dsets(self):
""" Get the list of all the datasets including version number and the file format
:return: A list of strings
"""
sets = self.query(Dataset).all()
return [x.name + " v"+ x.version + " (" + x.fileformat + ")" for x in sets]
def standard_names(self):
""" Get the list of all variables in datasets collection as standard_names
:return: A list of strings
"""
return [x[0] for x in self.query(Variable.standard_name).distinct().all()]
def vars_names(self):
""" Get the list of all variables in datasets collection as actual names
:return: A list of strings
"""
return [x[0] for x in self.query(Variable.varname).distinct().all()]
def cmor_names(self):
""" Get the list of all variables in datasets collection as cmor names
:return: A list of strings
"""
return [x[0] for x in self.query(Variable.cmor_name).distinct().all()]
def qc_list(self, dataset=None):
""" Get the list of all the qc tests in qc table, if dataset passed only the one applying to it
:input: dataset optional if passed return only tests for that dataset
:return: A list of strings
"""
if dataset:
# filter on the dataset (assumes the QC model exposes a `dataset` attribute, as in the original commented-out query)
return [x[0] for x in self.query(QC.qc_test).filter(QC.dataset == dataset).distinct().all()]
return [x[0] for x in self.query(QC.qc_test).distinct().all()]
def command_query(self,**kwargs):
""" Calling query after working out if output should be a dataset or variable list,
depending on constraints passed by the user.
:input:
:return:
"""
# empty dictionaries to separate constraints for dataset and variable tables
dsargs={}
vargs={}
vlargs={}
variables = []
if kwargs['dname']: dsargs['name'] = kwargs.pop('dname')
if kwargs['version']: dsargs['version'] = kwargs.pop('version')
if kwargs['fileformat']: dsargs['fileformat'] = kwargs.pop('fileformat')
ds_outs = self.query(Dataset).filter_by(**dsargs).all()
if ds_outs:
kwargs['dataset_id'] = [ds.id for ds in ds_outs]
datasets = [x for x in ds_outs]
for k,v in kwargs.items():
if v not in [None, ()]:
if len(v) > 1:
vlargs[k] = [x for x in v]
else:
vargs[k] = v[0]
# if dataset_id is the only key in vargs, return datasets only
if len(vargs.keys()) + len(vlargs.keys()) == 1:
return datasets, variables, False
# build query filtering all single value arguments: vargs
# filter query results using in_() for list of values arguments: vlargs
q1 = self.query(Variable).filter_by(**vargs)
for attr, value in vlargs.items():
q1 = q1.filter(getattr(Variable, attr).in_(value))
#print( str(q1.statement.compile(dialect=sqlite.dialect())))
var_outs = q1.all()
if var_outs:
variables= [x for x in var_outs]
return datasets, variables, True
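# Illustrative sketch (dataset and variable names are hypothetical): command_query expects
# the dname/version/fileformat keys to be present and treats multi-valued constraints as
# tuples, so a call might look like
#
#   clefdb = connect()
#   datasets, variables, has_variables = clefdb.command_query(
#       dname='ERAI', version='1.0', fileformat='netcdf',
#       varname=('tas', 'pr'))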
# Default collections database
default_db = 'sqlite:////g/data1/ua8/Download/clef.db'
def connect(path = None):
"""Connect to the not-ESGF datasets catalog
:return: A new :py:class:`Session`
Example::
>>> from clef import collections
>>> clefdb = collections.connect() # doctest: +SKIP
>>> outputs = clefdb.query() # doctest: +SKIP
"""
if path is None:
# Get the path from the environment
path = os.environ.get('CLEF_DB', default_db)
engine = create_engine(path)
Base.metadata.create_all(engine)
SQASession.configure(bind=engine, autoflush=False)
connection = Session()
connection.session = SQASession()
return connection
|
[] |
[] |
[
"CLEF_DB"
] |
[]
|
["CLEF_DB"]
|
python
| 1 | 0 | |
src/env/config.go
|
package env
import (
"encoding/json"
"errors"
"fmt"
"github.com/satori/go.uuid"
"gitlab.com/flaneurtv/samm/core"
"io/ioutil"
"os"
"strings"
)
const (
defaultNamespace = "default"
nullNamespace = "null"
defaultListenerCredentialsPath = "/run/secrets/mqtt_listener.json"
defaultPublisherCredentialsPath = "/run/secrets/mqtt_publisher.json"
defaultListenerURL = "tcp://mqtt:1883"
defaultPublisherURL = "tcp://mqtt:1883"
defaultServiceCmdLine = "/srv/processor"
defaultSubscriptionsFile = "/srv/subscriptions.txt"
)
type config struct {
serviceName string
serviceUUID string
serviceHost string
serviceCmdLine string
namespaceListener string
namespacePublisher string
listenerURL string
listenerCredentials core.Credentials
publisherURL string
publisherCredentials core.Credentials
subscriptions []string
logLevelConsole string
logLevelRemote string
}
func NewAdapterConfig(logger core.Logger) (core.Configuration, error) {
return newConfig(logger, true)
}
func NewBridgeConfig(logger core.Logger) (core.Configuration, error) {
return newConfig(logger, false)
}
func newConfig(logger core.Logger, withServiceProcessor bool) (core.Configuration, error) {
serviceName := os.Getenv("SERVICE_NAME")
serviceUUID := uuid.NewV4().String()
serviceHost, _ := os.Hostname()
var serviceCmdLine string
if withServiceProcessor {
var err error
serviceCmdLine, err = getServiceCmdLine(logger)
if err != nil {
return nil, err
}
}
namespace := os.Getenv("NAMESPACE")
if namespace == "" {
namespace = defaultNamespace
}
namespaceListener := os.Getenv("NAMESPACE_LISTENER")
if namespaceListener == "" {
namespaceListener = namespace
}
namespacePublisher := os.Getenv("NAMESPACE_PUBLISHER")
if namespacePublisher == "" {
namespacePublisher = namespace
}
listenerURL := os.Getenv("MQTT_LISTENER_URL")
if listenerURL == "" {
logger.Log(core.LogLevelInfo, fmt.Sprintf("MQTT_LISTENER_URL not set, trying default url '%s'", defaultListenerURL))
listenerURL = defaultListenerURL
}
publisherURL := os.Getenv("MQTT_PUBLISHER_URL")
if publisherURL == "" {
logger.Log(core.LogLevelInfo, fmt.Sprintf("MQTT_PUBLISHER_URL not set, trying default url '%s'", defaultPublisherURL))
publisherURL = defaultPublisherURL
}
listenerCredentials, err := readCredentials("Listener", "MQTT_LISTENER_CREDENTIALS", defaultListenerCredentialsPath, logger)
if err != nil {
return nil, err
}
publisherCredentials, err := readCredentials("Publisher", "MQTT_PUBLISHER_CREDENTIALS", defaultPublisherCredentialsPath, logger)
if err != nil {
return nil, err
}
subscriptions, err := readSubscriptions(namespaceListener, logger)
if err != nil {
return nil, err
}
logLevel := strings.ToLower(os.Getenv("LOG_LEVEL"))
if logLevel == "" {
logLevel = "error"
}
logLevelConsole := strings.ToLower(os.Getenv("LOG_LEVEL_CONSOLE"))
if logLevelConsole == "" {
logLevelConsole = logLevel
}
logLevelRemote := strings.ToLower(os.Getenv("LOG_LEVEL_MQTT"))
if logLevelRemote == "" {
logLevelRemote = logLevel
}
return &config{
serviceName: serviceName,
serviceUUID: serviceUUID,
serviceHost: serviceHost,
serviceCmdLine: serviceCmdLine,
namespaceListener: namespaceListener,
namespacePublisher: namespacePublisher,
listenerURL: listenerURL,
listenerCredentials: listenerCredentials,
publisherURL: publisherURL,
publisherCredentials: publisherCredentials,
subscriptions: subscriptions,
logLevelConsole: logLevelConsole,
logLevelRemote: logLevelRemote,
}, nil
}
func (cfg *config) ServiceName() string {
return cfg.serviceName
}
func (cfg *config) ServiceUUID() string {
return cfg.serviceUUID
}
func (cfg *config) ServiceHost() string {
return cfg.serviceHost
}
func (cfg *config) ServiceCmdLine() string {
return cfg.serviceCmdLine
}
func (cfg *config) NamespaceListener() string {
return cfg.namespaceListener
}
func (cfg *config) NamespacePublisher() string {
return cfg.namespacePublisher
}
func (cfg *config) ListenerURL() string {
return cfg.listenerURL
}
func (cfg *config) ListenerCredentials() core.Credentials {
return cfg.listenerCredentials
}
func (cfg *config) PublisherURL() string {
return cfg.publisherURL
}
func (cfg *config) PublisherCredentials() core.Credentials {
return cfg.publisherCredentials
}
func (cfg *config) Subscriptions() []string {
return cfg.subscriptions
}
func (cfg *config) LogLevelConsole() string {
return cfg.logLevelConsole
}
func (cfg *config) LogLevelRemote() string {
return cfg.logLevelRemote
}
func readSubscriptions(namespace string, logger core.Logger) ([]string, error) {
subscriptionsPath, ok := os.LookupEnv("SUBSCRIPTIONS")
if !ok {
logger.Log(core.LogLevelWarning, fmt.Sprintf("SUBSCRIPTIONS not set, trying default location '%s'", defaultSubscriptionsFile))
subscriptionsPath = defaultSubscriptionsFile
} else if strings.TrimSpace(subscriptionsPath) == "" {
logger.Log(core.LogLevelInfo, "SUBSCRIPTIONS set to nil, starting without subscriptions")
return nil, nil
}
content, err := ioutil.ReadFile(subscriptionsPath)
if err != nil {
if os.IsNotExist(err) {
logger.Log(core.LogLevelWarning, fmt.Sprintf("Subscriptions file not found at '%s', starting without subscriptions: %s", subscriptionsPath, err))
return nil, nil
}
return nil, fmt.Errorf("can't read subscriptions: %s", err)
}
logger.Log(core.LogLevelInfo, fmt.Sprintf("Subscriptions file found at '%s'", subscriptionsPath))
lines := strings.Split(string(content), "\n")
subscriptions := make([]string, 0, len(lines))
for _, line := range lines {
line = strings.TrimSpace(line)
if line != "" {
topic := line
if namespace != nullNamespace {
topic = fmt.Sprintf("%s/%s", namespace, topic)
}
subscriptions = append(subscriptions, topic)
}
}
if len(subscriptions) == 0 {
logger.Log(core.LogLevelWarning, "Subscriptions file empty, starting without subscriptions")
}
return subscriptions, nil
}
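// Illustrative example (hypothetical file contents): with NAMESPACE_LISTENER=acme and a
// subscriptions file containing the lines
//
//	sensors/+/temperature
//	events/#
//
// the resulting subscriptions are "acme/sensors/+/temperature" and "acme/events/#";
// with the namespace set to "null" the topics are used unprefixed.
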
func readCredentials(credentialsTitle, credentialsEnvVar, defaultCredentialsPath string, logger core.Logger) (core.Credentials, error) {
var credentials core.Credentials
credentialsPath, ok := os.LookupEnv(credentialsEnvVar)
if !ok {
logger.Log(core.LogLevelWarning, fmt.Sprintf("%s not set, trying default location %s", credentialsEnvVar, defaultCredentialsPath))
credentialsPath = defaultCredentialsPath
} else if strings.TrimSpace(credentialsPath) == "" {
return credentials, fmt.Errorf("%s can't be empty", credentialsEnvVar)
}
content, err := ioutil.ReadFile(credentialsPath)
if err != nil {
if os.IsNotExist(err) {
logger.Log(core.LogLevelWarning, fmt.Sprintf("%s credentials file '%s' doesn't exist - trying to connect with empty credentials", credentialsTitle, credentialsPath))
return credentials, nil
}
return credentials, fmt.Errorf("can't read credentials: %s", err)
}
logger.Log(core.LogLevelInfo, fmt.Sprintf("%s credentials found at '%s'", credentialsTitle, credentialsPath))
err = json.Unmarshal(content, &credentials)
if err != nil {
return credentials, fmt.Errorf("can't parse credentials: %s", err)
}
return credentials, nil
}
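// Illustrative sketch (the field names below are an assumption; the actual schema is
// defined by core.Credentials): a credentials file such as /run/secrets/mqtt_listener.json
// is plain JSON, for example
//
//	{"username": "listener", "password": "secret"}
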
func getServiceCmdLine(logger core.Logger) (string, error) {
serviceCmdLine, ok := os.LookupEnv("SERVICE_PROCESSOR")
if !ok {
logger.Log(core.LogLevelWarning, fmt.Sprintf("SERVICE_PROCESSOR not set, trying %s", defaultServiceCmdLine))
serviceCmdLine = defaultServiceCmdLine
} else if strings.TrimSpace(serviceCmdLine) == "" {
return "", errors.New("SERVICE_PROCESSOR can't be empty")
}
parts := strings.Fields(serviceCmdLine)
info, err := os.Stat(parts[0])
if err != nil {
return "", err
}
if info.IsDir() {
return "", errors.New("SERVICE_PROCESSOR should reference to a file")
}
return serviceCmdLine, nil
}
|
[
"\"SERVICE_NAME\"",
"\"NAMESPACE\"",
"\"NAMESPACE_LISTENER\"",
"\"NAMESPACE_PUBLISHER\"",
"\"MQTT_LISTENER_URL\"",
"\"MQTT_PUBLISHER_URL\"",
"\"LOG_LEVEL\"",
"\"LOG_LEVEL_CONSOLE\"",
"\"LOG_LEVEL_MQTT\""
] |
[] |
[
"SERVICE_NAME",
"MQTT_PUBLISHER_URL",
"NAMESPACE_PUBLISHER",
"LOG_LEVEL",
"MQTT_LISTENER_URL",
"LOG_LEVEL_CONSOLE",
"NAMESPACE",
"LOG_LEVEL_MQTT",
"NAMESPACE_LISTENER"
] |
[]
|
["SERVICE_NAME", "MQTT_PUBLISHER_URL", "NAMESPACE_PUBLISHER", "LOG_LEVEL", "MQTT_LISTENER_URL", "LOG_LEVEL_CONSOLE", "NAMESPACE", "LOG_LEVEL_MQTT", "NAMESPACE_LISTENER"]
|
go
| 9 | 0 | |
py_scripts/preprocessing/prep_shard.py
|
# <editor-fold desc="Basic Imports">
import os
import os.path as p
import requests
from time import time
from argparse import ArgumentParser
import sys
sys.path.append(p.join(p.dirname(__file__), '..'))
sys.path.append(p.join(p.dirname(__file__), '../..'))
# </editor-fold>
# <editor-fold desc="Parse Command Line Args">
prog_file_path = p.join(p.dirname(__file__), 'progress.txt')
relative_base_path = '../../base_indexes/USE_lite_base_IVF16K.index'
base_index_path = p.abspath(p.join(p.dirname(__file__), relative_base_path))
arp = ArgumentParser(description='Vectorize Sentences for Searchable Index.')
arp.add_argument('input_dir', help='Path to raw news dir.')
arp.add_argument('output_dir', help='Path to saved index dir.')
arp.add_argument('-p', '--progress_file', default=prog_file_path,
help='For keeping track of news that has been preprocessed. '
'Default: dig-text-similarity-search/progress.txt')
arp.add_argument('-b', '--base_index_path', default=base_index_path,
help='Path to pre-trained empty faiss index. '
'Default: dig-text-similarity-search/base_indexes/*.index')
arp.add_argument('-l', '--large', action='store_true',
help='Toggle large Universal Sentence Encoder (Transformer NN).')
arp.add_argument('-m', '--m_per_batch', type=int, default=512*128,
help='Sentences per batch.')
arp.add_argument('-n', '--n_per_minibatch', type=int, default=64,
help='Sentences per mini-batch.')
arp.add_argument('-v', '--verbose', action='store_true',
help='Shows progress of batch vectorization.')
arp.add_argument('-t', '--num_threads', default='2',
help='Set CPU thread budget for numpy.')
arp.add_argument('-d', '--no_delete', action='store_false', default=True,
help='Keeps faiss indexes for each batch after merging on-disk.')
arp.add_argument('-a', '--add_shard', action='store_true',
help='Adds shard to running similarity server.')
arp.add_argument('-u', '--url', default='http://localhost:5954/faiss',
help='Port handling similarity server.')
arp.add_argument('-T', '--TF_logging', action='store_false', default=True,
help='Increase verbosity of TensorFlow.')
opts = arp.parse_args()
# </editor-fold>
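# Example invocation (paths are hypothetical): vectorize one news .jl file from the
# input directory into an on-disk faiss shard in the output directory.
#
#   python prep_shard.py raw_news/ shard_indexes/ -v -t 4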
if opts.num_threads:
print(f'\nRestricting numpy to {opts.num_threads} thread(s)\n')
os.environ['OPENBLAS_NUM_THREADS'] = opts.num_threads
os.environ['NUMEXPR_NUM_THREADS'] = opts.num_threads
os.environ['MKL_NUM_THREADS'] = opts.num_threads
os.environ['OMP_NUM_THREADS'] = opts.num_threads
from dt_sim.data_reader.jl_io_funcs import check_all_docs, get_all_docs
from dt_sim.data_reader.misc_io_funcs import check_unique, clear_dir
from dt_sim.vectorizer.sentence_vectorizer import SentenceVectorizer
from dt_sim.indexer.index_builder import OnDiskIVFBuilder
from dt_sim.processor.corpus_processor import CorpusProcessor
# Suppress TF logging
if opts.TF_logging:
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Init
sv = SentenceVectorizer(large=opts.large)
idx_bdr = OnDiskIVFBuilder(path_to_base_index=opts.base_index_path)
cp = CorpusProcessor(vectorizer=sv, index_builder=idx_bdr,
progress_file=opts.progress_file)
# Track progress
prepped_news = cp.track_preprocessing(cp.progress_file, verbose=opts.verbose)
raw_news = cp.get_news_paths(opts.input_dir, verbose=opts.verbose)
candidates = cp.candidate_files(prepped_news, raw_news, verbose=opts.verbose)
file_to_process = candidates[:1] # Preprocesses one news.jl per call
def main(raw_jl, output_dir: str = opts.output_dir,
m_per_batch: int = opts.m_per_batch, n_per_minibatch: int = opts.n_per_minibatch,
no_delete: bool = opts.no_delete, verbose: bool = opts.verbose,
add_shard: bool = opts.add_shard, url: str = opts.url):
subidx_dir, shard_date = cp.init_paths(raw_jl)
if verbose:
print(f'Will process: {raw_jl}\n')
# Check File Content
if verbose:
print(f'\nReading file: {raw_jl}')
jl_stats = check_all_docs(raw_jl, batch_size=m_per_batch)
(doc_count, line_count, junk, n_batches) = jl_stats
if verbose:
print(f'* Found {doc_count} good documents with {line_count} total sentences\n'
f'* Will skip {junk} junk documents\n'
f'* Processing {n_batches} batches\n')
# Preprocess
t_start = time()
doc_batch_gen = get_all_docs(raw_jl, batch_size=m_per_batch)
for i, (batched_sents, batched_ids) in enumerate(doc_batch_gen):
t_0 = time()
if verbose:
print(f' Starting doc batch: {i+1:3d}')
subidx = str(raw_jl.split('/')[-1]).replace('.jl', f'_{i:03d}_sub.index')
subidx_path = p.join(subidx_dir, subidx)
if p.exists(subidx_path):
print(f' File exists: {subidx_path} \n Skipping... ')
cp.index_builder.include_subidx_path(subidx_path)
else:
# Vectorize
emb_batch, id_batch = cp.batch_vectorize(
text_batch=batched_sents, id_batch=batched_ids,
n_minibatch=n_per_minibatch, very_verbose=False
)
t_vect = time()
if verbose:
print(f' * Vectorized in {t_vect - t_0:6.2f}s')
# Make faiss subindex
subidx_path = check_unique(subidx_path)
cp.index_builder.generate_subindex(subidx_path, emb_batch, id_batch)
t_subidx = time()
if verbose:
print(f' * Subindexed in {t_subidx - t_vect:6.2f}s')
# Clear graph
del emb_batch, batched_sents, id_batch
cp.vectorizer.close_session()
t_reset = time()
if verbose:
print(f' * Cleared TF in {t_reset - t_subidx:6.2f}s')
# Restart TF session if necessary
if i < n_batches - 1:
cp.vectorizer.start_session()
if verbose:
print(f' * Started TF in {time() - t_reset:6.2f}s')
if verbose:
mp, sp = divmod(time() - t_start, 60)
print(f' Completed doc batch: {i+1:3d}/{n_batches} '
f' Total time passed: {int(mp):3d}m{sp:0.2f}s\n')
# Merge
# TODO: Title indexes
t_merge = time()
merged_index_path = shard_date + '_all.index'
merged_index_path = p.join(output_dir, merged_index_path)
merged_index_path = check_unique(merged_index_path)
merged_ivfdata_path = shard_date + '_all.ivfdata'
merged_ivfdata_path = p.join(output_dir, merged_ivfdata_path)
merged_ivfdata_path = check_unique(merged_ivfdata_path)
if verbose:
print(f'\n Merging {merged_index_path.split("/")[-1]} on-disk')
assert cp.index_builder.index_path_clear(merged_index_path)
assert cp.index_builder.index_path_clear(merged_ivfdata_path, '.ivfdata')
n_vect = cp.index_builder.merge_IVFs(index_path=merged_index_path,
ivfdata_path=merged_ivfdata_path)
if verbose:
mm, sm = divmod(time() - t_merge, 60)
print(f' Merged subindexes ({n_vect} vectors) in: {int(mm):3d}m{sm:0.2f}s')
# Record progress
cp.record_progress(raw_jl)
# Clear sub.index files after merge
if no_delete:
clear_dir(subidx_dir)
if verbose:
print('\n Cleared sub.index files')
if add_shard:
try:
url = url
payload = {'path': merged_index_path}
r = requests.put(url, params=payload)
print(r.text)
except Exception as e:
print(f'Shard was not added because an exception occurred: {e}')
if __name__ == '__main__':
if len(file_to_process):
jl = file_to_process[0]
main(raw_jl=jl)
else:
print('Nothing to process.')
|
[] |
[] |
[
"MKL_NUM_THREADS",
"TF_CPP_MIN_LOG_LEVEL",
"NUMEXPR_NUM_THREADS",
"OPENBLAS_NUM_THREADS",
"OMP_NUM_THREADS"
] |
[]
|
["MKL_NUM_THREADS", "TF_CPP_MIN_LOG_LEVEL", "NUMEXPR_NUM_THREADS", "OPENBLAS_NUM_THREADS", "OMP_NUM_THREADS"]
|
python
| 5 | 0 | |
aiven/resource_kafka_connector_test.go
|
// Copyright (c) 2017 jelmersnoeck
// Copyright (c) 2018-2021 Aiven, Helsinki, Finland. https://aiven.io/
package aiven
import (
"fmt"
"os"
"testing"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
func init() {
resource.AddTestSweepers("aiven_kafka_connector", &resource.Sweeper{
Name: "aiven_kafka_connector",
F: sweepKafkaConnectors,
})
}
func sweepKafkaConnectors(region string) error {
client, err := sharedClient(region)
if err != nil {
return fmt.Errorf("error getting client: %s", err)
}
conn := client.(*aiven.Client)
projects, err := conn.Projects.List()
if err != nil {
return fmt.Errorf("error retrieving a list of projects : %s", err)
}
for _, project := range projects {
if project.Name == os.Getenv("AIVEN_PROJECT_NAME") {
services, err := conn.Services.List(project.Name)
if err != nil {
return fmt.Errorf("error retrieving a list of services for a project `%s`: %s", project.Name, err)
}
for _, service := range services {
if service.Type != "kafka" {
continue
}
connectorsList, err := conn.KafkaConnectors.List(project.Name, service.Name)
if err != nil {
if err.(aiven.Error).Status == 403 || err.(aiven.Error).Status == 501 {
continue
}
return fmt.Errorf("error retrieving a list of kafka connectors for a service `%s`: %s", service.Name, err)
}
for _, c := range connectorsList.Connectors {
err = conn.KafkaConnectors.Delete(project.Name, service.Name, c.Name)
if err != nil {
return fmt.Errorf("error destroying kafka connector `%s` during sweep: %s", c.Name, err)
}
}
}
}
}
return nil
}
func TestAccAivenKafkaConnector_basic(t *testing.T) {
resourceName := "aiven_kafka_connector.foo"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenKafkaConnectorResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccKafkaConnectorResource(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAivenKafkaConnectorAttributes("data.aiven_kafka_connector.connector"),
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "connector_name", fmt.Sprintf("test-acc-con-%s", rName)),
),
},
},
})
}
func TestAccAivenKafkaConnector_mogosink(t *testing.T) {
if os.Getenv("MONGO_URI") == "" {
t.Skip("MONGO_URI environment variable is required to run this test")
}
resourceName := "aiven_kafka_connector.foo"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenKafkaConnectorResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccKafkaConnectorMonoSinkResource(rName),
Check: resource.ComposeTestCheckFunc(
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "connector_name", fmt.Sprintf("test-acc-con-mongo-sink-%s", rName)),
),
},
},
})
}
func testAccCheckAivenKafkaConnectorResourceDestroy(s *terraform.State) error {
c := testAccProvider.Meta().(*aiven.Client)
// loop through the resources in state, verifying each aiven_kafka_connector is destroyed
for _, rs := range s.RootModule().Resources {
if rs.Type != "aiven_kafka" {
continue
}
projectName, serviceName := splitResourceID2(rs.Primary.ID)
_, err := c.Services.Get(projectName, serviceName)
if err != nil {
if err.(aiven.Error).Status == 404 {
return nil
}
return err
}
list, err := c.KafkaConnectors.List(projectName, serviceName)
if err != nil {
if err.(aiven.Error).Status == 404 {
return nil
}
return err
}
for _, connector := range list.Connectors {
res, err := c.KafkaConnectors.GetByName(projectName, serviceName, connector.Name)
if err != nil {
if err.(aiven.Error).Status == 404 {
return nil
}
return err
}
if res != nil {
return fmt.Errorf("kafka connector (%s) still exists", connector.Name)
}
}
}
return nil
}
// nosemgrep: kafka connectors need kafka with business plans
func testAccKafkaConnectorResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_kafka" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "business-4"
service_name = "test-acc-sr-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
kafka_user_config {
kafka_connect = true
kafka {
group_max_session_timeout_ms = 70000
log_retention_bytes = 1000000000
}
}
}
resource "aiven_kafka_topic" "foo" {
project = data.aiven_project.foo.project
service_name = aiven_kafka.bar.service_name
topic_name = "test-acc-topic-%s"
partitions = 3
replication = 2
}
resource "aiven_elasticsearch" "dest" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-sr2-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
}
resource "aiven_kafka_connector" "foo" {
project = data.aiven_project.foo.project
service_name = aiven_kafka.bar.service_name
connector_name = "test-acc-con-%s"
config = {
"topics" = aiven_kafka_topic.foo.topic_name
"connector.class" : "io.aiven.connect.elasticsearch.ElasticsearchSinkConnector"
"type.name" = "es-connector"
"name" = "test-acc-con-%s"
"connection.url" = aiven_elasticsearch.dest.service_uri
}
}
data "aiven_kafka_connector" "connector" {
project = aiven_kafka_connector.foo.project
service_name = aiven_kafka_connector.foo.service_name
connector_name = aiven_kafka_connector.foo.connector_name
depends_on = [aiven_kafka_connector.foo]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name, name, name, name, name)
}
func testAccKafkaConnectorMonoSinkResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo" {
project = "%s"
}
resource "aiven_kafka" "bar" {
project = data.aiven_project.foo.project
cloud_name = "google-europe-west1"
plan = "business-4"
service_name = "test-acc-sr-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
kafka_user_config {
kafka_connect = true
schema_registry = true
kafka {
group_max_session_timeout_ms = 70000
log_retention_bytes = 1000000000
}
}
}
resource "aiven_kafka_topic" "foo" {
project = data.aiven_project.foo.project
service_name = aiven_kafka.bar.service_name
topic_name = "test-acc-topic-%s"
partitions = 3
replication = 2
}
resource "aiven_kafka_connector" "foo" {
project = data.aiven_project.foo.project
service_name = aiven_kafka.bar.service_name
connector_name = "test-acc-con-mongo-sink-%s"
config = {
"name" = "test-acc-con-mongo-sink-%s"
"connector.class" : "com.mongodb.kafka.connect.MongoSinkConnector"
"topics" = aiven_kafka_topic.foo.topic_name
"tasks.max" = 1
# mongo connect settings
"connection.uri" = "%s"
"database" = "acc-test-mongo"
"collection" = "mongo_collection_name"
"max.batch.size" = 1
}
}
data "aiven_kafka_connector" "connector" {
project = aiven_kafka_connector.foo.project
service_name = aiven_kafka_connector.foo.service_name
connector_name = aiven_kafka_connector.foo.connector_name
depends_on = [aiven_kafka_connector.foo]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name, name, name, name, os.Getenv("MONGO_URI"))
}
func testAccCheckAivenKafkaConnectorAttributes(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if a["project"] == "" {
return fmt.Errorf("expected to get a project from Aiven")
}
if a["service_name"] == "" {
return fmt.Errorf("expected to get a service_name from Aiven)")
}
if a["plugin_doc_url"] == "" {
return fmt.Errorf("expected to get a plugin_doc_url from Aiven)")
}
if a["plugin_title"] == "" {
return fmt.Errorf("expected to get a plugin_title from Aiven)")
}
if a["plugin_version"] == "" {
return fmt.Errorf("expected to get a plugin_version from Aiven)")
}
if a["config.connector.class"] != "io.aiven.connect.elasticsearch.ElasticsearchSinkConnector" {
return fmt.Errorf("expected to get a correct config.connector.class from Aiven)")
}
if a["config.connection.url"] == "" {
return fmt.Errorf("expected to get a config.connection.url from Aiven)")
}
if a["config.topics"] == "" {
return fmt.Errorf("expected to get a config.topics from Aiven)")
}
if a["config.type.name"] != "es-connector" {
return fmt.Errorf("expected to get a corect config.type.name from Aiven)")
}
if a["config.name"] == "" {
return fmt.Errorf("expected to get a config.name from Aiven)")
}
if a["plugin_type"] != "sink" {
return fmt.Errorf("expected to get a correct plugin_type from Aiven)")
}
return nil
}
}
|
[
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"MONGO_URI\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\"",
"\"MONGO_URI\""
] |
[] |
[
"MONGO_URI",
"AIVEN_PROJECT_NAME"
] |
[]
|
["MONGO_URI", "AIVEN_PROJECT_NAME"]
|
go
| 2 | 0 | |
1068/service/user/api/db/user.go
|
package db
import (
"gorm.io/driver/mysql"
"gorm.io/gorm"
"gorm.io/gorm/schema"
"time"
)
func Conn(dns string) *gorm.DB {
//dsn1 := "zhangzhuang:Zhang123456@tcp(rm-uf63x44czp1i998i41o.mysql.rds.aliyuncs.com:3306)/admin?charset=utf8mb4&parseTime=True&loc=Local"
db, _ := gorm.Open(mysql.New(mysql.Config{
DSN: dns,
DefaultStringSize: 256, // default length for string type fields
DisableDatetimePrecision: true, // disable datetime precision; not supported before MySQL 5.6
DontSupportRenameIndex: true, // drop and recreate indexes when renaming; renaming indexes is not supported before MySQL 5.7 or by MariaDB
DontSupportRenameColumn: true, // use `change` to rename columns; renaming columns is not supported before MySQL 8 or by MariaDB
SkipInitializeWithVersion: false, // auto-configure based on the current MySQL version
}), &gorm.Config{
SkipDefaultTransaction: false,
NamingStrategy: schema.NamingStrategy{
TablePrefix: "", // table name prefix; with a prefix of "t_" the `User` table would be `t_users`
SingularTable: true, // use singular table names; when enabled, the table for `User` is `user`
},
DisableAutomaticPing: true, // after initialization GORM automatically pings the database to check its availability; set to true to disable this
DisableForeignKeyConstraintWhenMigrating: true, // GORM automatically creates foreign key constraints during AutoMigrate or CreateTable; set to true to disable this
})
dbs, _ := db.DB()
dbs.SetMaxIdleConns(10) // maximum number of idle connections in the pool
dbs.SetMaxOpenConns(100) // maximum number of connections the pool may hold open
dbs.SetConnMaxLifetime(time.Hour) // maximum amount of time a connection may be reused
return db
}
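// Illustrative usage (hypothetical DSN): the caller supplies a standard MySQL DSN string.
//
//	db := Conn("user:password@tcp(127.0.0.1:3306)/mydb?charset=utf8mb4&parseTime=True&loc=Local")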
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
camel-core/src/test/java/org/apache/camel/component/properties/PropertiesComponentServicePortTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.component.properties;
import org.apache.camel.ContextTestSupport;
import org.apache.camel.builder.RouteBuilder;
import org.junit.Test;
public class PropertiesComponentServicePortTest extends ContextTestSupport {
@Override
public boolean isUseRouteBuilder() {
return false;
}
@Test
public void testFunction() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:start")
.to("mock:foo")
.transform().constant("someserver:{{service.port:FOO}}")
.to("mock:bar");
}
});
context.start();
String body = "someserver:" + System.getenv("FOO_SERVICE_PORT");
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:bar").expectedBodiesReceived(body);
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
@Test
public void testFunctionGetOrElse() throws Exception {
context.addRoutes(new RouteBuilder() {
@Override
public void configure() throws Exception {
from("direct:start")
.to("mock:foo")
.transform().constant("myotherserver:{{service.port:BAR:8888}}")
.to("mock:bar");
}
});
context.start();
getMockEndpoint("mock:foo").expectedBodiesReceived("Hello World");
getMockEndpoint("mock:bar").expectedBodiesReceived("myotherserver:8888");
template.sendBody("direct:start", "Hello World");
assertMockEndpointsSatisfied();
}
}
|
[
"\"FOO_SERVICE_PORT\""
] |
[] |
[
"FOO_SERVICE_PORT"
] |
[]
|
["FOO_SERVICE_PORT"]
|
java
| 1 | 0 | |
prod/jobs/refill_bao_stock_bars.py
|
# flake8: noqa
"""
Download Baostock 5-minute bars => vnpy project directory/bar_data/
"""
import os
import sys
import csv
import json
from collections import OrderedDict
import pandas as pd
from datetime import datetime, timedelta
vnpy_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
if vnpy_root not in sys.path:
sys.path.append(vnpy_root)
os.environ["VNPY_TESTING"] = "1"
import baostock as bs
from vnpy.trader.constant import Exchange
from vnpy.data.tdx.tdx_common import get_tdx_market_code
from vnpy.trader.utility import load_json, get_csv_last_dt, extract_vt_symbol
from vnpy.data.stock.stock_base import update_stock_base, get_stock_base
# Directory where the downloaded bar data is saved
bar_data_folder = os.path.abspath(os.path.join(vnpy_root, 'bar_data'))
# Start date (each year of data takes roughly a few minutes to download)
start_date = '20060101'
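# Example invocation (hypothetical working directory):
#   python prod/jobs/refill_bao_stock_bars.py        # refill the symbols listed in stock_list.json
#   python prod/jobs/refill_bao_stock_bars.py all    # refill the full stock universe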
if __name__ == "__main__":
# Log in to Baostock
login_msg = bs.login()
if login_msg.error_code != '0':
print(f'Baostock login error code: {login_msg.error_code}, error message: {login_msg.error_msg}')
print('Updating stock base information')
update_stock_base()
symbol_dict = get_stock_base()
if len(sys.argv) >= 2 and sys.argv[1].lower() == 'all':
stock_list = list(symbol_dict.keys())
print('Using the full stock list, {} symbols in total'.format(len(stock_list)))
else:
# Update the local contract cache
stock_list = load_json('stock_list.json')
print('Read local stock_list.json, {} symbols in total'.format(len(stock_list)))
day_fields = "date,code,open,high,low,close,preclose,volume,amount,adjustflag,turn,tradestatus,pctChg,isST"
min_fields = "date,time,code,open,high,low,close,volume,amount,adjustflag"
count = 0
# Download and update each stock one by one
for stock_code in stock_list:
count += 1
print('Download progress: {}%'.format(round(count * 100 / len(stock_list), 4)))
if '.' not in stock_code:
market_id = get_tdx_market_code(stock_code)
if market_id == 0:
exchange_name = '深交所'
exchange = Exchange.SZSE
exchange_code = 'sz'
else:
exchange_name = '上交所'
exchange = Exchange.SSE
exchange_code = 'sh'
symbol = stock_code
vt_symbol = f'{stock_code}.{exchange.value}'
else:
vt_symbol = stock_code
symbol, exchange = extract_vt_symbol(vt_symbol)
if exchange == Exchange.SSE:
exchange_name = '上交所'
exchange_code = 'sh'
else:
exchange_name = '深交所'
exchange_code = 'sz'
symbol_info = symbol_dict.get(vt_symbol,None)
if symbol_info is None:
print(f'No configuration information found for {vt_symbol}', file=sys.stderr)
continue
if symbol_info['类型'] == '指数':
continue
stock_name = symbol_info.get('name')
print(f'Starting update: {exchange_name}/{stock_name}, code: {symbol}')
bar_file_folder = os.path.abspath(os.path.join(bar_data_folder, f'{exchange.value}'))
if not os.path.exists(bar_file_folder):
os.makedirs(bar_file_folder)
# CSV data file name
bar_file_path = os.path.abspath(os.path.join(bar_file_folder, f'{symbol}_5m.csv'))
# If the file already exists,
if os.path.exists(bar_file_path):
# df_old = pd.read_csv(bar_file_path, index_col=0)
# df_old = df_old.rename(lambda x: pd.to_datetime(x, format="%Y-%m-%d %H:%M:%S"))
# get the time of the last record
# last_dt = df_old.index[-1]
last_dt = get_csv_last_dt(bar_file_path)
start_dt = last_dt - timedelta(days=1)
print(f'File {bar_file_path} exists, last time: {start_dt}')
else:
last_dt = None
start_dt = datetime.strptime(start_date, '%Y%m%d')
print(f'File {bar_file_path} does not exist, start time: {start_dt}')
rs = bs.query_history_k_data_plus(
code=f'{exchange_code}.{symbol}',
fields=min_fields,
start_date=start_dt.strftime('%Y-%m-%d'), end_date=datetime.now().strftime('%Y-%m-%d'),
frequency="5",
adjustflag="3"
)
if rs.error_code != '0':
print(f'Baostock A-share historical K-line query error code: {rs.error_code}, error message: {rs.error_msg}')
continue
# [dict] => dataframe
bars = []
while (rs.error_code == '0') and rs.next():
row = rs.get_row_data()
dt = datetime.strptime(row[1], '%Y%m%d%H%M%S%f')
if last_dt and last_dt > dt:
continue
bar = {
'datetime': dt,
'open': float(row[3]),
'close': float(row[6]),
'high': float(row[4]),
'low': float(row[5]),
'volume': float(row[7]),
'amount': float(row[8]),
'symbol': symbol,
'trading_date': row[0],
'date': row[0],
'time': dt.strftime('%H:%M:%S')
}
bars.append(bar)
# Get the CSV headers
if len(bars) == 0:
continue
headers = list(bars[0].keys())
if headers[0] != 'datetime':
headers.remove('datetime')
headers.insert(0, 'datetime')
bar_count = 0
# Write all records newer than the last bar time
with open(bar_file_path, 'a', encoding='utf8', newline='\n') as csvWriteFile:
writer = csv.DictWriter(f=csvWriteFile, fieldnames=headers, dialect='excel',
extrasaction='ignore')
if last_dt is None:
writer.writeheader()
for bar in bars:
bar_count += 1
writer.writerow(bar)
print(f'Updated {vt_symbol} data => file {bar_file_path}, last record: {bars[-1]}')
print('Update complete')
bs.logout()
os._exit(0)
|
[] |
[] |
[
"VNPY_TESTING"
] |
[]
|
["VNPY_TESTING"]
|
python
| 1 | 0 | |
Data/Juliet-Java/Juliet-Java-v103/000/132/824/CWE197_Numeric_Truncation_Error__int_Environment_to_short_68a.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE197_Numeric_Truncation_Error__int_Environment_to_short_68a.java
Label Definition File: CWE197_Numeric_Truncation_Error__int.label.xml
Template File: sources-sink-68a.tmpl.java
*/
/*
* @description
* CWE: 197 Numeric Truncation Error
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* BadSink: to_short Convert data to a short
* Flow Variant: 68 Data flow: data passed as a member variable in the "a" class, which is used by a method in another class in the same package
*
* */
import java.util.logging.Level;
public class CWE197_Numeric_Truncation_Error__int_Environment_to_short_68a extends AbstractTestCase
{
public static int data;
public void bad() throws Throwable
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
(new CWE197_Numeric_Truncation_Error__int_Environment_to_short_68b()).badSink();
}
public void good() throws Throwable
{
goodG2B();
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
(new CWE197_Numeric_Truncation_Error__int_Environment_to_short_68b()).goodG2BSink();
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
Chapter06/04_onion/main.go
|
package main
import (
. "utils"
. "interfaces"
"os"
"io/ioutil"
"infrastructure"
"github.com/pkg/errors"
"net/http"
)
const defaultFileName = "eventset1.jsonl"
var fileName string
var wsh WebserviceHandler
func init() {
GetOptions()
if Config.LogDebugInfo {
InitLog("trace-debug-log.txt", os.Stdout, os.Stdout, os.Stderr)
} else {
InitLog("trace-log.txt", ioutil.Discard, os.Stdout, os.Stderr)
}
// use the TEST_FILENAME env var if set; otherwise fall back to the default file name
fileName = os.Getenv("TEST_FILENAME")
if len(fileName) == 0 {
fileName = defaultFileName // CloudflareLogFilename(time.Now())
}
Debug.Printf("ProjectRoot: %s", PadRight(Config.ProjectRoot, " ", 20))
Debug.Printf("AppEnv: %s", PadRight(Config.AppEnv, " ", 20))
Debug.Printf("GcpSourceKeyFile: %s", PadRight(Config.GcpSourceKeyFile, " ", 20))
Debug.Printf("GcpSinkKeyFile: %s", PadRight(Config.GcpSinkKeyFile, " ", 20))
Debug.Printf("LogDebugInfo: %v", Config.LogDebugInfo)
HandlePanic(os.Chdir(Config.ProjectRoot))
}
type endpoint struct {
Api
uriExample string
}
func printApiExample(url, uriExample string) {
if len(uriExample) == 0 {
Info.Printf("http://localhost:%s%s", Config.ApiPort, url)
} else {
Info.Printf("http://localhost:%s%s?%s", Config.ApiPort, url, uriExample)
}
}
func main() {
gcpi, err := infrastructure.GetGcpInteractor()
HandlePanic(errors.Wrap(err, "unable to get gcp interactor"))
li, err := infrastructure.GetLocalInteractor()
HandlePanic(errors.Wrap(err, "unable to get local interactor"))
wsh = WebserviceHandler{}
wsh.GcpInteractor = gcpi
wsh.LocalInteractor = li
var endpoints = []endpoint{
{Api{wsh.Health, "/health"}, ""},
{Api{wsh.ListSourceBuckets, "/list-source-buckets"}, "projectId="+Config.GcpSourceProjectId},
{Api{wsh.ListSinkBuckets, "/list-sink-buckets"}, "projectId="+Config.GcpSinkProjectId},
{Api{wsh.SourceFileExists, "/source-file-exists"}, "fileName="+fileName},
{Api{wsh.DownloadFile, "/download-file"}, "fileName="+fileName},
{Api{wsh.UploadFile, "/upload-file"}, "fileName="+fileName},
{Api{wsh.LocalFileExists, "/local-file-exists"}, "fileName="+fileName},
}
Info.Println("Example API endpoints:")
{
for _, ep := range endpoints {
http.HandleFunc(ep.Api.Url, ep.Api.Handler)
printApiExample(ep.Api.Url, ep.uriExample)
}
}
http.ListenAndServe(":"+Config.ApiPort, nil)
}
|
[
"\"TEST_FILENAME\""
] |
[] |
[
"TEST_FILENAME"
] |
[]
|
["TEST_FILENAME"]
|
go
| 1 | 0 | |
tests/items/integration_test.py
|
import os
import pytest
import re
import tempfile
from json import JSONDecodeError
from hamcrest import assert_that
from mbtest.imposters import Imposter, Predicate, Response, Stub
from mbtest.matchers import had_request
from os import listdir
from os.path import isfile, join
from caia.commands.items import Command
def setup_environment(imposter, temp_storage_dir, temp_success_filename):
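"""Point the items command at the mock imposter and temporary paths via environment variables."""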
os.environ["ITEMS_SOURCE_URL"] = f"{imposter.url}/src"
os.environ["ITEMS_DEST_NEW_URL"] = f"{imposter.url}/dest/new"
os.environ["ITEMS_DEST_UPDATES_URL"] = f"{imposter.url}/dest/updated"
os.environ["ITEMS_STORAGE_DIR"] = temp_storage_dir
os.environ["ITEMS_LAST_SUCCESS_LOOKUP"] = temp_success_filename
os.environ["CAIASOFT_API_KEY"] = 'TEST_KEY'
def test_successful_job(mock_server):
with open("tests/resources/items/valid_src_response.json") as file:
valid_src_response = file.read()
with open("tests/resources/items/valid_dest_new_items_response.json") as file:
valid_dest_new_items_response = file.read()
with open("tests/resources/items/valid_dest_updated_items_response.json") as file:
valid_dest_updated_items_response = file.read()
# Set up mock server with required behavior
imposter = Imposter([
Stub(Predicate(path="/src"), Response(body=valid_src_response)),
Stub(Predicate(path="/dest/new", method="POST"), Response(body=valid_dest_new_items_response)),
Stub(Predicate(path="/dest/updated", method="POST"), Response(body=valid_dest_updated_items_response)),
])
# Create a temporary file to use as last success lookup
try:
[temp_file_handle, temp_success_filename] = tempfile.mkstemp()
with open(temp_success_filename, 'w') as f:
f.write('etc/items_FIRST.json')
with tempfile.TemporaryDirectory() as temp_storage_dir, mock_server(imposter) as server:
setup_environment(imposter, temp_storage_dir, temp_success_filename)
start_time = '20200521132905'
args = []
command = Command()
result = command(start_time, args)
assert result.was_successful() is True
assert_that(server, had_request().with_path("/src").and_method("GET"))
assert_that(server, had_request().with_path("/dest/new").and_method("POST"))
assert_that(server, had_request().with_path("/dest/updated").and_method("POST"))
finally:
# Clean up the temporary file
os.close(temp_file_handle)
os.remove(temp_success_filename)
def test_successful_job_no_new_items(mock_server):
with open("tests/resources/items/valid_src_no_new_items_response.json") as file:
valid_src_response = file.read()
with open("tests/resources/items/valid_dest_updated_items_response.json") as file:
valid_dest_updated_items_response = file.read()
# Set up mock server with required behavior
imposter = Imposter([
Stub(Predicate(path="/src"), Response(body=valid_src_response)),
Stub(Predicate(path="/dest/new", method="POST"), Response(body="{}")),
Stub(Predicate(path="/dest/updated", method="POST"), Response(body=valid_dest_updated_items_response)),
])
# Create a temporary file to use as last success lookup
try:
[temp_file_handle, temp_success_filename] = tempfile.mkstemp()
with open(temp_success_filename, 'w') as f:
f.write('etc/items_FIRST.json')
with tempfile.TemporaryDirectory() as temp_storage_dir, mock_server(imposter) as server:
setup_environment(imposter, temp_storage_dir, temp_success_filename)
start_time = '20200521132905'
args = []
command = Command()
result = command(start_time, args)
assert result.was_successful() is True
assert 2 == len(server.get_actual_requests()[imposter.port])
assert_that(server, had_request().with_path("/src").and_method("GET"))
assert_that(server, had_request().with_path("/dest/updated").and_method("POST"))
finally:
# Clean up the temporary file
os.close(temp_file_handle)
os.remove(temp_success_filename)
def test_successful_job_no_updated_items(mock_server):
with open("tests/resources/items/valid_src_no_updated_items_response.json") as file:
valid_src_response = file.read()
with open("tests/resources/items/valid_dest_new_items_response.json") as file:
valid_dest_new_items_response = file.read()
# Set up mock server with required behavior
imposter = Imposter([
Stub(Predicate(path="/src"), Response(body=valid_src_response)),
Stub(Predicate(path="/dest/new", method="POST"), Response(body=valid_dest_new_items_response)),
Stub(Predicate(path="/dest/updated", method="POST"), Response(body="{}")),
])
# Create a temporary file to use as last success lookup
try:
[temp_file_handle, temp_success_filename] = tempfile.mkstemp()
with open(temp_success_filename, 'w') as f:
f.write('etc/items_FIRST.json')
with tempfile.TemporaryDirectory() as temp_storage_dir, mock_server(imposter) as server:
setup_environment(imposter, temp_storage_dir, temp_success_filename)
start_time = '20200521132905'
args = []
command = Command()
result = command(start_time, args)
assert result.was_successful() is True
assert 2 == len(server.get_actual_requests()[imposter.port])
assert_that(server, had_request().with_path("/src").and_method("GET"))
assert_that(server, had_request().with_path("/dest/new").and_method("POST"))
finally:
# Clean up the temporary file
os.close(temp_file_handle)
os.remove(temp_success_filename)
def test_successful_job_no_new_or_updated_items(mock_server):
with open("tests/resources/items/valid_src_no_new_or_updated_items_response.json") as file:
valid_src_response = file.read()
# Set up mock server with required behavior
imposter = Imposter([
Stub(Predicate(path="/src"), Response(body=valid_src_response)),
Stub(Predicate(path="/dest/new", method="POST"), Response(body="{}")),
Stub(Predicate(path="/dest/updated", method="POST"), Response(body="{}")),
])
# Create a temporary file to use as last success lookup
try:
[temp_file_handle, temp_success_filename] = tempfile.mkstemp()
with open(temp_success_filename, 'w') as f:
f.write('etc/items_FIRST.json')
with tempfile.TemporaryDirectory() as temp_storage_dir, mock_server(imposter) as server:
setup_environment(imposter, temp_storage_dir, temp_success_filename)
start_time = '20200521132905'
args = []
command = Command()
result = command(start_time, args)
assert result.was_successful() is True
assert 1 == len(server.get_actual_requests()[imposter.port])
assert_that(server, had_request().with_path("/src").and_method("GET"))
finally:
# Clean up the temporary file
os.close(temp_file_handle)
os.remove(temp_success_filename)
def test_src_returns_404_error(mock_server):
with open("tests/resources/items/valid_dest_new_items_response.json") as file:
valid_dest_new_items_response = file.read()
with open("tests/resources/items/valid_dest_updated_items_response.json") as file:
valid_dest_updated_items_response = file.read()
# Set up mock server with required behavior
imposter = Imposter([
Stub(Predicate(path="/src"), Response(status_code=404)),
Stub(Predicate(path="/dest/new", method="POST"), Response(body=valid_dest_new_items_response)),
Stub(Predicate(path="/dest/updated", method="POST"), Response(body=valid_dest_updated_items_response)),
])
# Create a temporary file to use as last success lookup
try:
[temp_file_handle, temp_success_filename] = tempfile.mkstemp()
with open(temp_success_filename, 'w') as f:
f.write('etc/items_FIRST.json')
with tempfile.TemporaryDirectory() as temp_storage_dir, mock_server(imposter) as server:
setup_environment(imposter, temp_storage_dir, temp_success_filename)
start_time = '20200521132905'
args = []
command = Command()
result = command(start_time, args)
assert result.was_successful() is False
assert 1 == len(result.get_errors())
# There should be only one request to the server (the /src request)
assert 1 == len(server.get_actual_requests()[imposter.port])
assert_that(server, had_request().with_path("/src").and_method("GET"))
finally:
# Clean up the temporary file
os.close(temp_file_handle)
os.remove(temp_success_filename)
def test_dest_returns_404_error(mock_server):
with open("tests/resources/items/valid_src_response.json") as file:
valid_src_response = file.read()
# Set up mock server with required behavior
imposter = Imposter([
Stub(Predicate(path="/src"), Response(body=valid_src_response)),
Stub(Predicate(path="/dest/new", method="POST"), Response(status_code=404)),
])
# Create a temporary file to use as last success lookup
try:
[temp_file_handle, temp_success_filename] = tempfile.mkstemp()
with open(temp_success_filename, 'w') as f:
f.write('etc/items_FIRST.json')
with tempfile.TemporaryDirectory() as temp_storage_dir, mock_server(imposter) as server:
setup_environment(imposter, temp_storage_dir, temp_success_filename)
start_time = '20200521132905'
args = []
command = Command()
result = command(start_time, args)
assert result.was_successful() is False
assert 1 == len(result.get_errors())
# There should be only two requests to the server
assert 2 == len(server.get_actual_requests()[imposter.port])
assert_that(server, had_request().with_path("/src").and_method("GET"))
assert_that(server, had_request().with_path("/dest/new").and_method("POST"))
finally:
# Clean up the temporary file
os.close(temp_file_handle)
os.remove(temp_success_filename)
def test_dest_returns_unparseable_json_response_for_new_items(mock_server):
with open("tests/resources/items/valid_src_response.json") as file:
valid_src_response = file.read()
with open("tests/resources/items/unparseable_json_dest_response.json") as file:
unparseable_json_dest_response = file.read()
# Set up mock server with required behavior
imposter = Imposter([
Stub(Predicate(path="/src"), Response(body=valid_src_response)),
Stub(Predicate(path="/dest/new", method="POST"), Response(body=unparseable_json_dest_response)),
])
# Create a temporary file to use as last success lookup
try:
[temp_file_handle, temp_success_filename] = tempfile.mkstemp()
with open(temp_success_filename, 'w') as f:
f.write('etc/items_FIRST.json')
with tempfile.TemporaryDirectory() as temp_storage_dir, mock_server(imposter) as server:
setup_environment(imposter, temp_storage_dir, temp_success_filename)
start_time = '20200521132905'
args = []
with pytest.raises(JSONDecodeError):
command = Command()
result = command(start_time, args)
assert result.was_successful() is False
files_in_storage_dir = [f for f in listdir(temp_storage_dir) if isfile(join(temp_storage_dir, f))]
expected_file_regex = re.compile('.*\\.dest_new_items_response_body\\..*')
# There should be a "dest_updated_response_body" file, even
# though the JSON was unparseable
if not any(expected_file_regex.match(x) for x in files_in_storage_dir):
pytest.fail(f"Expected file matching '{expected_file_regex.pattern}' was not found.")
# There should have been two requests to the server, as the updated
# items call will not occur
assert 2 == len(server.get_actual_requests()[imposter.port])
assert_that(server, had_request().with_path("/src").and_method("GET"))
assert_that(server, had_request().with_path("/dest/new").and_method("POST"))
finally:
# Clean up the temporary file
os.close(temp_file_handle)
os.remove(temp_success_filename)
def test_dest_returns_unparseable_json_response_for_updated_items(mock_server):
with open("tests/resources/items/valid_src_response.json") as file:
valid_src_response = file.read()
with open("tests/resources/items/valid_dest_new_items_response.json") as file:
valid_dest_new_items_response = file.read()
with open("tests/resources/items/unparseable_json_dest_response.json") as file:
unparseable_json_dest_response = file.read()
# Set up mock server with required behavior
imposter = Imposter([
Stub(Predicate(path="/src"), Response(body=valid_src_response)),
Stub(Predicate(path="/dest/new", method="POST"), Response(body=valid_dest_new_items_response)),
Stub(Predicate(path="/dest/updated", method="POST"), Response(body=unparseable_json_dest_response)),
])
# Create a temporary file to use as last success lookup
try:
[temp_file_handle, temp_success_filename] = tempfile.mkstemp()
with open(temp_success_filename, 'w') as f:
f.write('etc/items_FIRST.json')
with tempfile.TemporaryDirectory() as temp_storage_dir, mock_server(imposter) as server:
setup_environment(imposter, temp_storage_dir, temp_success_filename)
start_time = '20200521132905'
args = []
with pytest.raises(JSONDecodeError):
command = Command()
result = command(start_time, args)
assert result.was_successful() is False
files_in_storage_dir = [f for f in listdir(temp_storage_dir) if isfile(join(temp_storage_dir, f))]
expected_file_regex = re.compile('.*\\.dest_updated_items_response_body\\..*')
# There should be a "dest_updated_response_body" file, even
# though the JSON was unparseable
if not any(expected_file_regex.match(x) for x in files_in_storage_dir):
pytest.fail(f"Expected file matching '{expected_file_regex.pattern}' was not found.")
# There should have been three requests to the server
assert 3 == len(server.get_actual_requests()[imposter.port])
assert_that(server, had_request().with_path("/src").and_method("GET"))
assert_that(server, had_request().with_path("/dest/new").and_method("POST"))
assert_that(server, had_request().with_path("/dest/updated").and_method("POST"))
finally:
# Clean up the temporary file
os.close(temp_file_handle)
os.remove(temp_success_filename)
|
[] |
[] |
[
"ITEMS_DEST_UPDATES_URL",
"ITEMS_DEST_NEW_URL",
"ITEMS_SOURCE_URL",
"ITEMS_LAST_SUCCESS_LOOKUP",
"CAIASOFT_API_KEY",
"ITEMS_STORAGE_DIR"
] |
[]
|
["ITEMS_DEST_UPDATES_URL", "ITEMS_DEST_NEW_URL", "ITEMS_SOURCE_URL", "ITEMS_LAST_SUCCESS_LOOKUP", "CAIASOFT_API_KEY", "ITEMS_STORAGE_DIR"]
|
python
| 6 | 0 | |
provider/resource.go
|
// Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package provider
import (
"fmt"
"os"
"strings"
"unicode"
"github.com/Azure/go-autorest/autorest/azure/cli"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/pulumi/pulumi-azure/provider/v3/pkg/version"
"github.com/pulumi/pulumi-terraform-bridge/v2/pkg/tfbridge"
"github.com/pulumi/pulumi/sdk/v2/go/common/resource"
"github.com/pulumi/pulumi/sdk/v2/go/common/tokens"
"github.com/pulumi/pulumi/sdk/v2/go/common/util/contract"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/helpers/azure"
)
const (
azureName = "name"
azureLocation = "location"
)
// all of the Azure token components used below.
const (
// packages:
azurePkg = "azure"
// modules; in general, we took naming inspiration from the Azure SDK for Go:
// https://godoc.org/github.com/Azure/azure-sdk-for-go
azureAnalysisServices = "AnalysisServices" // Analysis Services
azureAPIManagement = "ApiManagement" // API Management
azureAppConfiguration = "AppConfiguration" // App Configuration
azureAppInsights = "AppInsights" // AppInsights
azureAppPlatform = "AppPlatform" // AppPlatform
azureAppService = "AppService" // App Service
azureAutomation = "Automation" // Automation
azureAuthorization = "Authorization" // Authorization
azureBackup = "Backup" // Backup
azureBatch = "Batch" // Batch
azureBot = "Bot" // Bot
azureCDN = "Cdn" // CDN
azureCognitive = "Cognitive" // Cognitive
azureCompute = "Compute" // Virtual Machines
azureContainerService = "ContainerService" // Azure Container Service
azureCore = "Core" // Base Resources
azureCosmosDB = "CosmosDB" // Cosmos DB
azureCostManagement = "CostManagement" // CostManagement
azureDashboard = "Dashboard" // Dashboard
azureDatabaseMigration = "DatabaseMigration" // Database Migration
azureDataFactory = "DataFactory" // Data Factory
azureDatalake = "DataLake" // Data Lake
azureDataShare = "DataShare" // DataShare
azureDataBricks = "DataBricks" // DataBricks
azureDevSpace = "DevSpace" // DevSpace
azureDevTest = "DevTest" // Dev Test Labs
azureDNS = "Dns" // DNS
azureFrontdoor = "FrontDoor" // Frontdoor
azureHdInsight = "HDInsight" // nolint:misspell // HDInsight
azureHealthcare = "Healthcare" // HealthCare
azureHpc = "Hpc" // High-performance Compute
azureIot = "Iot" // IoT resource
azureIotCentral = "IotCentral" // IoT central
azureKeyVault = "KeyVault" // Key Vault
azureKusto = "Kusto" // Kusto
azureLogAnalytics = "LogAnalytics" // Log Analytics
azureLogicApps = "LogicApps" // Logic Apps
azureLB = "Lb" // Load Balancer
azureMariaDB = "MariaDB" // MariaDB
azureEventGrid = "EventGrid" // Event Grid
azureEventHub = "EventHub" // Event Hub
azureMachineLearning = "MachineLearning" // Machine Learning Resources
azureMaintenance = "Maintenance" // Maintenance Resources
azureManagedApplication = "ManagedApplication" // ManagedApplication
azureManagement = "Management" // Management Resources
azureMaps = "Maps" // Maps
azureMarketPlace = "Marketplace" // Marketplace
azureMediaServices = "MediaServices" // Media Services
azureMixedReality = "MixedReality" // Mixed Reality
azureMonitoring = "Monitoring" // Metrics/monitoring resources
azureMSSQL = "MSSql" // MS Sql
azureMySQL = "MySql" // MySql
azureNetapp = "NetApp" // NetApp
azureNetwork = "Network" // Networking
azureNotificationHub = "NotificationHub" // Notification Hub
azureOperationalInsights = "OperationalInsights" // Operational Insights
	azurePostgresql           = "PostgreSql"          // PostgreSQL
azurePolicy = "Policy" // Policy
azurePowerBi = "PowerBI" // PowerBI
azureProximity = "Proximity" // Proximity
azurePrivateDNS = "PrivateDns" // Private DNS
azurePrivateLink = "PrivateLink" // PrivateLink
azureRecoveryServices = "RecoveryServices" // Recovery Services
azureRedis = "Redis" // RedisCache
azureRelay = "Relay" // Relay
azureSecurityCenter = "SecurityCenter" // Security Center
azureSentinel = "Sentinel" // Sentinel
azureServiceBus = "ServiceBus" // ServiceBus
azureServiceFabric = "ServiceFabric" // Service Fabric
azureSearch = "Search" // Search
azureSignalr = "SignalR" // SignalR
azureSiteRecovery = "SiteRecovery" // SiteRecovery
azureSQL = "Sql" // SQL
azureStorage = "Storage" // Storage
azureStreamAnalytics = "StreamAnalytics" // StreamAnalytics
azureWaf = "Waf" // WAF
// Legacy Module Names
azureLegacyRole = "Role" // Azure Role (Legacy)
azureLegacyMSI = "Msi" // Managed Service Identity / MSI (Legacy)
azureLegacyManagementGroups = "ManagementGroups" // Management Groups (Legacy)
azureLegacyMgmtResource = "ManagementResource" // Management Resource (Legacy)
azureLegacyTrafficManager = "TrafficManager" // Traffic Manager (Legacy)
)
var namespaceMap = map[string]string{
"azure": "Azure",
}
// azureMember manufactures a member for the Azure package and the given module and type. It automatically
// names the file by simply lower casing the member's first character.
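// For example, azureMember("ApiManagement", "Service") produces the token
// "azure:apimanagement/service:Service".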
func azureMember(moduleTitle, mem string) tokens.ModuleMember {
moduleName := strings.ToLower(moduleTitle)
namespaceMap[moduleName] = moduleTitle
fn := string(unicode.ToLower(rune(mem[0]))) + mem[1:]
token := moduleName + "/" + fn
return tokens.ModuleMember(azurePkg + ":" + token + ":" + mem)
}
// azureType manufactures a type token for the Azure package and the given module and type.
func azureType(mod string, typ string) tokens.Type {
return tokens.Type(azureMember(mod, typ))
}
// azureDataSource manufactures a standard member given a module and data source name.
func azureDataSource(mod string, res string) tokens.ModuleMember {
return azureMember(mod, res)
}
// azureResource manufactures a standard resource token given a module and resource name.
func azureResource(mod string, res string) tokens.Type {
return azureType(mod, res)
}
// boolRef returns a reference to the bool argument.
func boolRef(b bool) *bool {
return &b
}
type cloudShellProfile struct {
useMSI bool
msiEndpoint string
subscriptionID string
tenantID string
}
// Azure Cloud Shell is a special case in terms of config: it provides authentication via
// an MSI endpoint, but it also has Azure CLI installed. Therefore, to make Pulumi CLI work
// out of the box with no actions required from a user, we combine the MSI endpoint authentication
// with retrieving the subscription information from the current profile. If both are found,
// we switch the provider to that endpoint/subscription by default.
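// If MSI_ENDPOINT is unset, the CLI profile cannot be loaded, or no default subscription is
// found, detectCloudShell returns a zero-value profile with useMSI set to false.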
func detectCloudShell() cloudShellProfile {
negative := cloudShellProfile{
useMSI: false,
}
msiEndpoint := os.Getenv("MSI_ENDPOINT")
if msiEndpoint == "" {
return negative
}
profilePath, err := cli.ProfilePath()
if err != nil {
return negative
}
profile, err := cli.LoadProfile(profilePath)
if err != nil {
return negative
}
for _, subscription := range profile.Subscriptions {
if subscription.IsDefault {
return cloudShellProfile{
useMSI: true,
msiEndpoint: msiEndpoint,
subscriptionID: subscription.ID,
tenantID: subscription.TenantID,
}
}
}
return negative
}
// Provider returns additional overlaid schema and metadata associated with the azure package.
//
// nolint: lll
func Provider() tfbridge.ProviderInfo {
p := azurerm.Provider().(*schema.Provider)
// Adjust the defaults if running in Azure Cloud Shell.
// Environment variables still take preference, e.g. USE_MSI=false disables the MSI endpoint.
cloudShell := detectCloudShell()
prov := tfbridge.ProviderInfo{
P: p,
Name: "azurerm",
Description: "A Pulumi package for creating and managing Microsoft Azure cloud resources.",
Keywords: []string{"pulumi", "azure"},
Homepage: "https://pulumi.io",
License: "Apache-2.0",
Repository: "https://github.com/pulumi/pulumi-azure",
Version: version.Version,
Config: map[string]*tfbridge.SchemaInfo{
"subscription_id": {
Default: &tfbridge.DefaultInfo{
Value: cloudShell.subscriptionID,
EnvVars: []string{"ARM_SUBSCRIPTION_ID"},
},
},
"client_id": {
Default: &tfbridge.DefaultInfo{
Value: "",
EnvVars: []string{"AZURE_CLIENT_ID", "ARM_CLIENT_ID"},
},
},
"tenant_id": {
Default: &tfbridge.DefaultInfo{
Value: cloudShell.tenantID,
EnvVars: []string{"AZURE_TENANT_ID", "ARM_TENANT_ID"},
},
},
"environment": {
Default: &tfbridge.DefaultInfo{
Value: "public",
EnvVars: []string{"AZURE_ENVIRONMENT", "ARM_ENVIRONMENT"},
},
},
"client_certificate_password": {
Default: &tfbridge.DefaultInfo{
Value: "",
EnvVars: []string{"AZURE_CLIENT_CERTIFICATE_PASSWORD", "ARM_CLIENT_CERTIFICATE_PASSWORD"},
},
},
"client_certificate_path": {
Default: &tfbridge.DefaultInfo{
Value: "",
EnvVars: []string{"AZURE_CLIENT_CERTIFICATE_PATH", "ARM_CLIENT_CERTIFICATE_PATH"},
},
},
"client_secret": {
Default: &tfbridge.DefaultInfo{
Value: "",
EnvVars: []string{"AZURE_CLIENT_SECRET", "ARM_CLIENT_SECRET"},
},
},
"partner_id": {
Default: &tfbridge.DefaultInfo{
Value: "",
EnvVars: []string{"ARM_PARTNER_ID"},
},
},
"skip_credentials_validation": {
Default: &tfbridge.DefaultInfo{
Value: false,
EnvVars: []string{"ARM_SKIP_CREDENTIALS_VALIDATION"},
},
},
"skip_provider_registration": {
Default: &tfbridge.DefaultInfo{
Value: false,
EnvVars: []string{"ARM_SKIP_PROVIDER_REGISTRATION"},
},
},
"use_msi": {
Default: &tfbridge.DefaultInfo{
Value: cloudShell.useMSI,
EnvVars: []string{"ARM_USE_MSI"},
},
},
"msi_endpoint": {
Default: &tfbridge.DefaultInfo{
Value: cloudShell.msiEndpoint,
EnvVars: []string{"ARM_MSI_ENDPOINT"},
},
},
"disable_terraform_partner_id": {
Default: &tfbridge.DefaultInfo{
Value: true,
EnvVars: []string{"ARM_DISABLE_TERRAFORM_PARTNER_ID"},
},
},
"storage_use_azuread": {
Default: &tfbridge.DefaultInfo{
Value: false,
EnvVars: []string{"ARM_STORAGE_USE_AZUREAD"},
},
},
},
ExtraConfig: map[string]*tfbridge.ConfigInfo{
azureLocation: {
Schema: &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
Info: &tfbridge.SchemaInfo{
Default: &tfbridge.DefaultInfo{
EnvVars: []string{"ARM_LOCATION"},
},
},
},
},
Resources: map[string]*tfbridge.ResourceInfo{
// API Management
"azurerm_api_management": {
Tok: azureResource(azureAPIManagement, "Service"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of an API Management name is 50.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#general
//azureName: (azureName, 50),
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 50,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_api_management_api": {Tok: azureResource(azureAPIManagement, "Api")},
"azurerm_api_management_api_operation": {Tok: azureResource(azureAPIManagement, "ApiOperation")},
"azurerm_api_management_api_operation_policy": {Tok: azureResource(azureAPIManagement, "ApiOperationPolicy")},
"azurerm_api_management_api_policy": {Tok: azureResource(azureAPIManagement, "ApiPolicy")},
"azurerm_api_management_api_schema": {Tok: azureResource(azureAPIManagement, "ApiSchema")},
"azurerm_api_management_api_version_set": {Tok: azureResource(azureAPIManagement, "ApiVersionSet")},
"azurerm_api_management_authorization_server": {Tok: azureResource(azureAPIManagement, "AuthorizationServer")},
"azurerm_api_management_backend": {Tok: azureResource(azureAPIManagement, "Backend")},
"azurerm_api_management_certificate": {Tok: azureResource(azureAPIManagement, "Certificate")},
"azurerm_api_management_group": {Tok: azureResource(azureAPIManagement, "Group")},
"azurerm_api_management_group_user": {Tok: azureResource(azureAPIManagement, "GroupUser")},
"azurerm_api_management_logger": {Tok: azureResource(azureAPIManagement, "Logger")},
"azurerm_api_management_openid_connect_provider": {Tok: azureResource(azureAPIManagement, "OpenIdConnectProvider")},
"azurerm_api_management_product": {Tok: azureResource(azureAPIManagement, "Product")},
"azurerm_api_management_product_api": {Tok: azureResource(azureAPIManagement, "ProductApi")},
"azurerm_api_management_product_group": {Tok: azureResource(azureAPIManagement, "ProductGroup")},
"azurerm_api_management_product_policy": {Tok: azureResource(azureAPIManagement, "ProductPolicy")},
"azurerm_api_management_property": {Tok: azureResource(azureAPIManagement, "Property")},
"azurerm_api_management_subscription": {Tok: azureResource(azureAPIManagement, "Subscription")},
"azurerm_api_management_user": {Tok: azureResource(azureAPIManagement, "User")},
"azurerm_api_management_diagnostic": {Tok: azureResource(azureAPIManagement, "Diagnostic")},
"azurerm_api_management_identity_provider_aad": {Tok: azureResource(azureAPIManagement, "IdentityProviderAad")},
"azurerm_api_management_identity_provider_google": {Tok: azureResource(azureAPIManagement, "IdentityProviderGoogle")},
"azurerm_api_management_identity_provider_facebook": {Tok: azureResource(azureAPIManagement, "IdentityProviderFacebook")},
"azurerm_api_management_identity_provider_twitter": {Tok: azureResource(azureAPIManagement, "IdentityProviderTwitter")},
"azurerm_api_management_identity_provider_microsoft": {Tok: azureResource(azureAPIManagement, "IdentityProviderMicrosoft")},
"azurerm_api_management_named_value": {Tok: azureResource(azureAPIManagement, "NamedValue")},
// Analysis Services
"azurerm_analysis_services_server": {Tok: azureResource(azureAnalysisServices, "Server")},
// AppInsights
"azurerm_application_insights": {Tok: azureResource(azureAppInsights, "Insights")},
"azurerm_application_insights_api_key": {
Tok: azureResource(azureAppInsights, "ApiKey"),
Fields: map[string]*tfbridge.SchemaInfo{
"api_key": {
CSharpName: "Key",
},
},
},
"azurerm_application_insights_web_test": {
Tok: azureResource(azureAppInsights, "WebTest"),
Docs: &tfbridge.DocInfo{
Source: "application_insights_webtests.html.markdown",
},
},
"azurerm_application_insights_analytics_item": {Tok: azureResource(azureAppInsights, "AnalyticsItem")},
// App Service
"azurerm_app_service": {
Tok: azureResource(azureAppService, "AppService"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of an app service name is 60.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#general
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 60,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
"site_config": {
Elem: &tfbridge.SchemaInfo{
Fields: map[string]*tfbridge.SchemaInfo{
"use_32_bit_worker_process": {
Name: "use32BitWorkerProcess",
},
},
},
},
},
},
"azurerm_app_service_custom_hostname_binding": {Tok: azureResource(azureAppService, "CustomHostnameBinding")},
"azurerm_app_service_plan": {
Tok: azureResource(azureAppService, "Plan"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of an app service plan name is 40.
// This was discovered directly through the portal.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 40,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
"kind": {
Type: "string",
AltTypes: []tokens.Type{azureType(azureAppService, "Kind")},
},
}},
"azurerm_app_service_slot": {Tok: azureResource(azureAppService, "Slot")},
"azurerm_app_service_active_slot": {Tok: azureResource(azureAppService, "ActiveSlot")},
"azurerm_function_app": {
Tok: azureResource(azureAppService, "FunctionApp"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a functionapp name is 60.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#compute
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 60,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
"site_config": {
Elem: &tfbridge.SchemaInfo{
Fields: map[string]*tfbridge.SchemaInfo{
"use_32_bit_worker_process": {
Name: "use32BitWorkerProcess",
},
},
},
},
}},
"azurerm_function_app_slot": {Tok: azureResource(azureAppService, "FunctionAppSlot")},
"azurerm_app_service_certificate": {Tok: azureResource(azureAppService, "Certificate")},
"azurerm_app_service_source_control_token": {Tok: azureResource(azureAppService, "SourceCodeToken")},
"azurerm_app_service_certificate_order": {Tok: azureResource(azureAppService, "CertificateOrder")},
"azurerm_app_service_virtual_network_swift_connection": {
Tok: azureResource(azureAppService, "VirtualNetworkSwiftConnection"),
},
"azurerm_app_service_environment": {Tok: azureResource(azureAppService, "Environment")},
// AppPlatform
"azurerm_spring_cloud_service": {Tok: azureResource(azureAppPlatform, "SpringCloudService")},
"azurerm_spring_cloud_app": {Tok: azureResource(azureAppPlatform, "SpringCloudApp")},
// Automation
"azurerm_automation_account": {Tok: azureResource(azureAutomation, "Account")},
"azurerm_automation_credential": {Tok: azureResource(azureAutomation, "Credential")},
"azurerm_automation_dsc_configuration": {Tok: azureResource(azureAutomation, "DscConfiguration")},
"azurerm_automation_dsc_nodeconfiguration": {Tok: azureResource(azureAutomation, "DscNodeConfiguration")},
"azurerm_automation_module": {Tok: azureResource(azureAutomation, "Module")},
"azurerm_automation_runbook": {Tok: azureResource(azureAutomation, "RunBook")},
"azurerm_automation_schedule": {Tok: azureResource(azureAutomation, "Schedule")},
"azurerm_automation_variable_bool": {Tok: azureResource(azureAutomation, "BoolVariable")},
"azurerm_automation_variable_datetime": {Tok: azureResource(azureAutomation, "DateTimeVariable")},
"azurerm_automation_variable_int": {Tok: azureResource(azureAutomation, "IntVariable")},
"azurerm_automation_variable_string": {Tok: azureResource(azureAutomation, "StringVariable")},
"azurerm_automation_job_schedule": {Tok: azureResource(azureAutomation, "JobSchedule")},
"azurerm_automation_certificate": {Tok: azureResource(azureAutomation, "Certificate")},
// Azure Container Service
"azurerm_container_registry": {
Tok: azureResource(azureContainerService, "Registry"),
Fields: map[string]*tfbridge.SchemaInfo{
// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#containers
// Max length of a container name is 50
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 50,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_container_group": {
Tok: azureResource(azureContainerService, "Group"),
Fields: map[string]*tfbridge.SchemaInfo{
// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#containers
// Max length of a container group/instance is 63
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 63,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
}},
"azurerm_kubernetes_cluster": {
Tok: azureResource(azureContainerService, "KubernetesCluster"),
Fields: map[string]*tfbridge.SchemaInfo{
"addon_profile": {
SuppressEmptyMapElements: boolRef(true),
},
},
},
"azurerm_kubernetes_cluster_node_pool": {
Tok: azureResource(azureContainerService, "KubernetesClusterNodePool"),
},
// Batch
"azurerm_batch_account": {Tok: azureResource(azureBatch, "Account")},
"azurerm_batch_application": {Tok: azureResource(azureBatch, "Application")},
"azurerm_batch_certificate": {
Tok: azureResource(azureBatch, "Certificate"),
Fields: map[string]*tfbridge.SchemaInfo{
"certificate": {
CSharpName: "BatchCertificate",
},
},
},
"azurerm_batch_pool": {Tok: azureResource(azureBatch, "Pool")},
// Core
"azurerm_resource_group": {
Tok: azureResource(azureCore, "ResourceGroup"),
Fields: map[string]*tfbridge.SchemaInfo{
// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#general
// Max length of a resource group name is 90
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 90,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
azureLocation: {
Default: &tfbridge.DefaultInfo{
// To make it easy to repurpose an entire stack for different regions, we will let the
// resource group's location be parameterizable using configuration. Note that all other
// resources, by default, will take the location from the resource group to which they belong.
Config: azureLocation,
},
},
},
},
"azurerm_template_deployment": {Tok: azureResource(azureCore, "TemplateDeployment")},
"azurerm_custom_provider": {Tok: azureResource(azureCore, "CustomProvider")},
// CDN
"azurerm_cdn_endpoint": {Tok: azureResource(azureCDN, "Endpoint")},
"azurerm_cdn_profile": {Tok: azureResource(azureCDN, "Profile")},
// Cognitive
"azurerm_cognitive_account": {Tok: azureResource(azureCognitive, "Account")},
// Compute
"azurerm_availability_set": {
Tok: azureResource(azureCompute, "AvailabilitySet"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of an availability set name is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#general
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_virtual_machine_extension": {Tok: azureResource(azureCompute, "Extension")},
"azurerm_virtual_machine": {
Tok: azureResource(azureCompute, "VirtualMachine"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a virtual machine name is 64. Note that the Windows host name is max 15 characters but it can be set separately.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#compute
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 64,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_virtual_machine_data_disk_attachment": {Tok: azureResource(azureCompute, "DataDiskAttachment")},
"azurerm_virtual_machine_scale_set": {Tok: azureResource(azureCompute, "ScaleSet")},
"azurerm_managed_disk": {Tok: azureResource(azureCompute, "ManagedDisk")},
"azurerm_snapshot": {Tok: azureResource(azureCompute, "Snapshot")},
"azurerm_image": {Tok: azureResource(azureCompute, "Image")},
"azurerm_shared_image": {Tok: azureResource(azureCompute, "SharedImage")},
"azurerm_shared_image_gallery": {Tok: azureResource(azureCompute, "SharedImageGallery")},
"azurerm_shared_image_version": {Tok: azureResource(azureCompute, "SharedImageVersion")},
"azurerm_bastion_host": {Tok: azureResource(azureCompute, "BastionHost")},
"azurerm_dedicated_host_group": {Tok: azureResource(azureCompute, "DedicatedHostGroup")},
"azurerm_disk_encryption_set": {Tok: azureResource(azureCompute, "DiskEncryptionSet")},
"azurerm_dedicated_host": {Tok: azureResource(azureCompute, "DedicatedHost")},
"azurerm_linux_virtual_machine": {Tok: azureResource(azureCompute, "LinuxVirtualMachine")},
"azurerm_linux_virtual_machine_scale_set": {Tok: azureResource(azureCompute, "LinuxVirtualMachineScaleSet")},
"azurerm_virtual_machine_scale_set_extension": {Tok: azureResource(azureCompute, "VirtualMachineScaleSetExtension")},
"azurerm_windows_virtual_machine": {Tok: azureResource(azureCompute, "WindowsVirtualMachine")},
"azurerm_windows_virtual_machine_scale_set": {Tok: azureResource(azureCompute, "WindowsVirtualMachineScaleSet")},
// DataBricks
"azurerm_databricks_workspace": {Tok: azureResource(azureDataBricks, "Workspace")},
// DataFactory
"azurerm_data_factory": {Tok: azureResource(azureDataFactory, "Factory")},
"azurerm_data_factory_dataset_mysql": {Tok: azureResource(azureDataFactory, "DatasetMysql")},
"azurerm_data_factory_dataset_postgresql": {Tok: azureResource(azureDataFactory, "DatasetPostgresql")},
"azurerm_data_factory_dataset_sql_server_table": {Tok: azureResource(azureDataFactory, "DatasetSqlServerTable")},
"azurerm_data_factory_linked_service_data_lake_storage_gen2": {
Tok: azureResource(azureDataFactory, "LinkedServiceDataLakeStorageGen2"),
},
"azurerm_data_factory_linked_service_mysql": {Tok: azureResource(azureDataFactory, "LinkedServiceMysql")},
"azurerm_data_factory_linked_service_postgresql": {Tok: azureResource(azureDataFactory, "LinkedServicePostgresql")},
"azurerm_data_factory_linked_service_sql_server": {Tok: azureResource(azureDataFactory, "LinkedServiceSqlServer")},
"azurerm_data_factory_pipeline": {Tok: azureResource(azureDataFactory, "Pipeline")},
"azurerm_data_factory_integration_runtime_managed": {Tok: azureResource(azureDataFactory, "IntegrationRuntimeManaged")},
"azurerm_data_factory_trigger_schedule": {Tok: azureResource(azureDataFactory, "TriggerSchedule")},
// Data Lake
"azurerm_data_lake_analytics_account": {Tok: azureResource(azureDatalake, "AnalyticsAccount")},
"azurerm_data_lake_analytics_firewall_rule": {Tok: azureResource(azureDatalake, "AnalyticsFirewallRule")},
"azurerm_data_lake_store": {Tok: azureResource(azureDatalake, "Store")},
"azurerm_data_lake_store_file": {Tok: azureResource(azureDatalake, "StoreFile")},
"azurerm_data_lake_store_firewall_rule": {Tok: azureResource(azureDatalake, "StoreFirewallRule")},
// DataShare
"azurerm_data_share_account": {Tok: azureResource(azureDataShare, "Account")},
// DevSpace
"azurerm_devspace_controller": {Tok: azureResource(azureDevSpace, "Controller")},
// Dev Test
"azurerm_dev_test_lab": {Tok: azureResource(azureDevTest, "Lab")},
"azurerm_dev_test_linux_virtual_machine": {Tok: azureResource(azureDevTest, "LinuxVirtualMachine")},
"azurerm_dev_test_policy": {Tok: azureResource(azureDevTest, "Policy")},
"azurerm_dev_test_schedule": {Tok: azureResource(azureDevTest, "Schedule")},
"azurerm_dev_test_virtual_network": {Tok: azureResource(azureDevTest, "VirtualNetwork")},
"azurerm_dev_test_windows_virtual_machine": {Tok: azureResource(azureDevTest, "WindowsVirtualMachine")},
// DNS
"azurerm_dns_a_record": {Tok: azureResource(azureDNS, "ARecord")},
"azurerm_dns_aaaa_record": {Tok: azureResource(azureDNS, "AaaaRecord")},
"azurerm_dns_caa_record": {Tok: azureResource(azureDNS, "CaaRecord")},
"azurerm_dns_cname_record": {Tok: azureResource(azureDNS, "CNameRecord")},
"azurerm_dns_mx_record": {Tok: azureResource(azureDNS, "MxRecord")},
"azurerm_dns_ns_record": {
Tok: azureResource(azureDNS, "NsRecord"),
Fields: map[string]*tfbridge.SchemaInfo{
// We need this explicit mapping to avoid automatic singularization when converting Pulumi
// to Terraform name, so that the deprecated singular property isn't picked up.
"records": {Name: "records"},
},
},
"azurerm_dns_ptr_record": {Tok: azureResource(azureDNS, "PtrRecord")},
"azurerm_dns_srv_record": {Tok: azureResource(azureDNS, "SrvRecord")},
"azurerm_dns_txt_record": {Tok: azureResource(azureDNS, "TxtRecord")},
"azurerm_dns_zone": {Tok: azureResource(azureDNS, "Zone")},
// HDInsights
"azurerm_hdinsight_hadoop_cluster": {Tok: azureResource(azureHdInsight, "HadoopCluster")},
"azurerm_hdinsight_hbase_cluster": {Tok: azureResource(azureHdInsight, "HBaseCluster")},
"azurerm_hdinsight_interactive_query_cluster": {Tok: azureResource(azureHdInsight, "InteractiveQueryCluster")},
"azurerm_hdinsight_kafka_cluster": {Tok: azureResource(azureHdInsight, "KafkaCluster")},
"azurerm_hdinsight_ml_services_cluster": {Tok: azureResource(azureHdInsight, "MLServicesCluster")},
"azurerm_hdinsight_rserver_cluster": {Tok: azureResource(azureHdInsight, "RServerCluster")},
"azurerm_hdinsight_spark_cluster": {Tok: azureResource(azureHdInsight, "SparkCluster")},
"azurerm_hdinsight_storm_cluster": {Tok: azureResource(azureHdInsight, "StormCluster")},
// EventHub
"azurerm_eventhub": {Tok: azureResource(azureEventHub, "EventHub")},
"azurerm_eventhub_namespace": {Tok: azureResource(azureEventHub, "EventHubNamespace")},
"azurerm_eventhub_namespace_authorization_rule": {Tok: azureResource(azureEventHub, "EventHubNamespaceAuthorizationRule")},
"azurerm_eventhub_namespace_disaster_recovery_config": {
Tok: azureResource(azureEventHub, "EventhubNamespaceDisasterRecoveryConfig"),
},
// IoT Resources
"azurerm_iothub": {Tok: azureResource(azureIot, "IoTHub"),
Docs: &tfbridge.DocInfo{
Source: "iothub.html.markdown",
},
},
"azurerm_iothub_consumer_group": {Tok: azureResource(azureIot, "ConsumerGroup")},
"azurerm_iothub_shared_access_policy": {Tok: azureResource(azureIot, "SharedAccessPolicy")},
"azurerm_iothub_endpoint_eventhub": {Tok: azureResource(azureIot, "EndpointEventhub")},
"azurerm_iothub_endpoint_servicebus_queue": {Tok: azureResource(azureIot, "EndpointServicebusQueue")},
"azurerm_iothub_endpoint_servicebus_topic": {Tok: azureResource(azureIot, "EndpointServicebusTopic")},
"azurerm_iothub_endpoint_storage_container": {Tok: azureResource(azureIot, "EndpointStorageContainer")},
"azurerm_iothub_route": {Tok: azureResource(azureIot, "Route")},
"azurerm_iothub_dps": {Tok: azureResource(azureIot, "IotHubDps")},
"azurerm_iothub_dps_certificate": {Tok: azureResource(azureIot, "IotHubCertificate")},
"azurerm_iothub_fallback_route": {Tok: azureResource(azureIot, "FallbackRoute")},
"azurerm_iothub_dps_shared_access_policy": {Tok: azureResource(azureIot, "DpsSharedAccessPolicy")},
// KeyVault
"azurerm_key_vault": {Tok: azureResource(azureKeyVault, "KeyVault")},
"azurerm_key_vault_access_policy": {Tok: azureResource(azureKeyVault, "AccessPolicy")},
"azurerm_key_vault_key": {Tok: azureResource(azureKeyVault, "Key")},
"azurerm_key_vault_secret": {Tok: azureResource(azureKeyVault, "Secret")},
// LoadBalancer
"azurerm_lb": {
Tok: azureResource(azureLB, "LoadBalancer"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer.html.markdown",
},
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a Load Balancer is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_lb_backend_address_pool": {Tok: azureResource(azureLB, "BackendAddressPool"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer_backend_address_pool.html.markdown",
},
},
"azurerm_lb_nat_rule": {Tok: azureResource(azureLB, "NatRule"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer_nat_rule.html.markdown",
},
},
"azurerm_lb_nat_pool": {Tok: azureResource(azureLB, "NatPool"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer_nat_pool.html.markdown",
}},
"azurerm_lb_outbound_rule": {Tok: azureResource(azureLB, "OutboundRule"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer_outbound_rule.html.markdown",
},
},
"azurerm_lb_probe": {Tok: azureResource(azureLB, "Probe"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer_probe.html.markdown",
},
},
"azurerm_lb_rule": {Tok: azureResource(azureLB, "Rule"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer_rule.html.markdown",
},
},
// Log Analytics
"azurerm_log_analytics_linked_service": {Tok: azureResource(azureLogAnalytics, "LinkedService")},
"azurerm_log_analytics_datasource_windows_event": {
Tok: azureResource(azureLogAnalytics, "DataSourceWindowsEvent"),
},
"azurerm_log_analytics_datasource_windows_performance_counter": {
Tok: azureResource(azureLogAnalytics, "DataSourceWindowsPerformanceCounter"),
},
// Logic Apps
"azurerm_logic_app_action_custom": {Tok: azureResource(azureLogicApps, "ActionCustom")},
"azurerm_logic_app_action_http": {Tok: azureResource(azureLogicApps, "ActionHttp")},
"azurerm_logic_app_trigger_custom": {Tok: azureResource(azureLogicApps, "TriggerCustom")},
"azurerm_logic_app_trigger_http_request": {Tok: azureResource(azureLogicApps, "TriggerHttpRequest")},
"azurerm_logic_app_trigger_recurrence": {Tok: azureResource(azureLogicApps, "TriggerRecurrence")},
"azurerm_logic_app_workflow": {Tok: azureResource(azureLogicApps, "Workflow")},
// MariaDB
"azurerm_mariadb_configuration": {Tok: azureResource(azureMariaDB, "Configuration")},
"azurerm_mariadb_database": {Tok: azureResource(azureMariaDB, "Database")},
"azurerm_mariadb_firewall_rule": {Tok: azureResource(azureMariaDB, "FirewallRule")},
"azurerm_mariadb_server": {Tok: azureResource(azureMariaDB, "Server")},
"azurerm_mariadb_virtual_network_rule": {Tok: azureResource(azureMariaDB, "VirtualNetworkRule")},
// Notification Hub
"azurerm_notification_hub": {Tok: azureResource(azureNotificationHub, "Hub")},
"azurerm_notification_hub_authorization_rule": {Tok: azureResource(azureNotificationHub, "AuthorizationRule")},
"azurerm_notification_hub_namespace": {Tok: azureResource(azureNotificationHub, "Namespace")},
// Operational Insights
"azurerm_log_analytics_workspace": {Tok: azureResource(azureOperationalInsights, "AnalyticsWorkspace")},
"azurerm_log_analytics_solution": {Tok: azureResource(azureOperationalInsights, "AnalyticsSolution"),
Docs: &tfbridge.DocInfo{
Source: "log_analytics_solution.html.markdown",
}},
// CosmosDB
"azurerm_cosmosdb_account": {Tok: azureResource(azureCosmosDB, "Account")},
"azurerm_cosmosdb_cassandra_keyspace": {Tok: azureResource(azureCosmosDB, "CassandraKeyspace")},
"azurerm_cosmosdb_mongo_collection": {Tok: azureResource(azureCosmosDB, "MongoCollection")},
"azurerm_cosmosdb_mongo_database": {Tok: azureResource(azureCosmosDB, "MongoDatabase")},
"azurerm_cosmosdb_sql_container": {Tok: azureResource(azureCosmosDB, "SqlContainer")},
"azurerm_cosmosdb_sql_database": {Tok: azureResource(azureCosmosDB, "SqlDatabase")},
"azurerm_cosmosdb_table": {Tok: azureResource(azureCosmosDB, "Table")},
"azurerm_cosmosdb_gremlin_database": {Tok: azureResource(azureCosmosDB, "GremlinDatabase")},
"azurerm_cosmosdb_gremlin_graph": {Tok: azureResource(azureCosmosDB, "GremlinGraph")},
// Cost Management
"azurerm_cost_management_export_resource_group": {
Tok: azureResource(azureCostManagement, "ResourceGroupExport"),
},
// Maps
"azurerm_maps_account": {Tok: azureResource(azureMaps, "Account")},
// Media Services
"azurerm_media_services_account": {Tok: azureResource(azureMediaServices, "Account")},
// Monitoring resources
"azurerm_monitor_action_group": {Tok: azureResource(azureMonitoring, "ActionGroup")},
"azurerm_monitor_activity_log_alert": {Tok: azureResource(azureMonitoring, "ActivityLogAlert")},
"azurerm_monitor_autoscale_setting": {Tok: azureResource(azureMonitoring, "AutoscaleSetting")},
"azurerm_monitor_diagnostic_setting": {Tok: azureResource(azureMonitoring, "DiagnosticSetting")},
"azurerm_monitor_log_profile": {Tok: azureResource(azureMonitoring, "LogProfile")},
"azurerm_monitor_metric_alert": {Tok: azureResource(azureMonitoring, "MetricAlert")},
"azurerm_monitor_scheduled_query_rules_alert": {Tok: azureResource(azureMonitoring, "ScheduledQueryRulesAlert")},
"azurerm_monitor_scheduled_query_rules_log": {Tok: azureResource(azureMonitoring, "ScheduledQueryRulesLog")},
// MS SQL
"azurerm_mssql_elasticpool": {Tok: azureResource(azureMSSQL, "ElasticPool")},
"azurerm_mssql_database_vulnerability_assessment_rule_baseline": {
Tok: azureResource(azureMSSQL, "DatabaseVulnerabilityAssessmentRuleBaseline"),
},
"azurerm_mssql_server_vulnerability_assessment": {
Tok: azureResource(azureMSSQL, "ServerVulnerabilityAssessment"),
},
"azurerm_mssql_server_security_alert_policy": {
Tok: azureResource(azureMSSQL, "ServerSecurityAlertPolicy"),
},
"azurerm_mssql_database": {Tok: azureResource(azureMSSQL, "Database")},
"azurerm_mssql_virtual_machine": {Tok: azureResource(azureMSSQL, "VirtualMachine")},
"azurerm_mssql_server": {Tok: azureResource(azureMSSQL, "Server")},
// MySQL
"azurerm_mysql_configuration": {Tok: azureResource(azureMySQL, "Configuration")},
"azurerm_mysql_database": {Tok: azureResource(azureMySQL, "Database")},
"azurerm_mysql_firewall_rule": {Tok: azureResource(azureMySQL, "FirewallRule")},
"azurerm_mysql_server": {Tok: azureResource(azureMySQL, "Server")},
"azurerm_mysql_virtual_network_rule": {Tok: azureResource(azureMySQL, "VirtualNetworkRule")},
// PostgreSQL
"azurerm_postgresql_configuration": {Tok: azureResource(azurePostgresql, "Configuration")},
"azurerm_postgresql_database": {Tok: azureResource(azurePostgresql, "Database")},
"azurerm_postgresql_firewall_rule": {Tok: azureResource(azurePostgresql, "FirewallRule")},
"azurerm_postgresql_server": {Tok: azureResource(azurePostgresql, "Server")},
"azurerm_postgresql_virtual_network_rule": {Tok: azureResource(azurePostgresql, "VirtualNetworkRule")},
// Policy
"azurerm_policy_assignment": {Tok: azureResource(azurePolicy, "Assignment")},
"azurerm_policy_definition": {Tok: azureResource(azurePolicy, "Definition")},
"azurerm_policy_set_definition": {Tok: azureResource(azurePolicy, "PolicySetDefinition")},
"azurerm_policy_remediation": {Tok: azureResource(azurePolicy, "Remediation")},
// Private Dns
"azurerm_private_dns_a_record": {Tok: azureResource(azurePrivateDNS, "ARecord")},
"azurerm_private_dns_cname_record": {Tok: azureResource(azurePrivateDNS, "CnameRecord")},
"azurerm_private_dns_zone": {Tok: azureResource(azurePrivateDNS, "Zone")},
"azurerm_private_dns_zone_virtual_network_link": {
Tok: azureResource(azurePrivateDNS, "ZoneVirtualNetworkLink"),
},
"azurerm_private_dns_aaaa_record": {
Tok: azureResource(azurePrivateDNS, "AAAARecord"),
},
"azurerm_private_dns_ptr_record": {
Tok: azureResource(azurePrivateDNS, "PTRRecord"),
},
"azurerm_private_dns_srv_record": {
Tok: azureResource(azurePrivateDNS, "SRVRecord"),
},
"azurerm_private_link_service": {
Tok: azureResource(azurePrivateDNS, "LinkService"),
},
"azurerm_private_dns_mx_record": {
Tok: azureResource(azurePrivateDNS, "MxRecord"),
},
"azurerm_private_dns_txt_record": {
Tok: azureResource(azurePrivateDNS, "TxtRecord"),
},
// SQL
"azurerm_sql_elasticpool": {Tok: azureResource(azureSQL, "ElasticPool")},
"azurerm_sql_database": {Tok: azureResource(azureSQL, "Database")},
"azurerm_sql_failover_group": {Tok: azureResource(azureSQL, "FailoverGroup")},
"azurerm_sql_firewall_rule": {Tok: azureResource(azureSQL, "FirewallRule")},
"azurerm_sql_server": {Tok: azureResource(azureSQL, "SqlServer")},
"azurerm_sql_virtual_network_rule": {Tok: azureResource(azureSQL, "VirtualNetworkRule"),
Docs: &tfbridge.DocInfo{
Source: "sql_virtual_network_rule.html.markdown",
},
},
"azurerm_sql_active_directory_administrator": {Tok: azureResource(azureSQL, "ActiveDirectoryAdministrator")},
// Network
"azurerm_virtual_network": {
Tok: azureResource(azureNetwork, "VirtualNetwork"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a Virtual Network is 64.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 64,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_virtual_wan": {Tok: azureResource(azureNetwork, "VirtualWan")},
"azurerm_virtual_network_peering": {Tok: azureResource(azureNetwork, "VirtualNetworkPeering")},
"azurerm_virtual_network_gateway": {Tok: azureResource(azureNetwork, "VirtualNetworkGateway")},
"azurerm_virtual_network_gateway_connection": {Tok: azureResource(azureNetwork, "VirtualNetworkGatewayConnection")},
"azurerm_local_network_gateway": {Tok: azureResource(azureNetwork, "LocalNetworkGateway")},
"azurerm_application_gateway": {
Tok: azureResource(azureNetwork, "ApplicationGateway"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of an Application Gateway is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_application_security_group": {Tok: azureResource(azureNetwork, "ApplicationSecurityGroup")},
"azurerm_firewall": {Tok: azureResource(azureNetwork, "Firewall")},
"azurerm_firewall_application_rule_collection": {Tok: azureResource(azureNetwork, "FirewallApplicationRuleCollection")},
"azurerm_firewall_nat_rule_collection": {Tok: azureResource(azureNetwork, "FirewallNatRuleCollection")},
"azurerm_firewall_network_rule_collection": {Tok: azureResource(azureNetwork, "FirewallNetworkRuleCollection")},
"azurerm_network_connection_monitor": {Tok: azureResource(azureNetwork, "NetworkConnectionMonitor")},
"azurerm_network_ddos_protection_plan": {Tok: azureResource(azureNetwork, "DdosProtectionPlan")},
"azurerm_network_interface": {
Tok: azureResource(azureNetwork, "NetworkInterface"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a Network Interface is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_network_interface_application_gateway_backend_address_pool_association": {Tok: azureResource(azureNetwork, "NetworkInterfaceApplicationGatewayBackendAddressPoolAssociation")},
"azurerm_network_interface_application_security_group_association": {Tok: azureResource(azureNetwork, "NetworkInterfaceApplicationSecurityGroupAssociation")},
"azurerm_network_interface_backend_address_pool_association": {Tok: azureResource(azureNetwork, "NetworkInterfaceBackendAddressPoolAssociation")},
"azurerm_network_interface_nat_rule_association": {Tok: azureResource(azureNetwork, "NetworkInterfaceNatRuleAssociation")},
"azurerm_network_interface_security_group_association": {Tok: azureResource(azureNetwork, "NetworkInterfaceSecurityGroupAssociation")},
"azurerm_network_packet_capture": {Tok: azureResource(azureNetwork, "NetworkPacketCapture")},
"azurerm_network_profile": {Tok: azureResource(azureNetwork, "Profile")},
"azurerm_network_security_group": {
Tok: azureResource(azureNetwork, "NetworkSecurityGroup"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a Network Security Group is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_network_security_rule": {
Tok: azureResource(azureNetwork, "NetworkSecurityRule"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a Network Security Rule is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_network_watcher": {Tok: azureResource(azureNetwork, "NetworkWatcher")},
"azurerm_network_watcher_flow_log": {Tok: azureResource(azureNetwork, "NetworkWatcherFlowLog")},
"azurerm_packet_capture": {Tok: azureResource(azureNetwork, "PacketCapture")},
"azurerm_public_ip": {
Tok: azureResource(azureNetwork, "PublicIp"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a Public IP Address is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_public_ip_prefix": {
Tok: azureResource(azureNetwork, "PublicIpPrefix"),
Fields: map[string]*tfbridge.SchemaInfo{
// Ensure "sku" is a singleton
"sku": {Name: "sku", MaxItemsOne: boolRef(true)},
},
},
"azurerm_route": {Tok: azureResource(azureNetwork, "Route")},
"azurerm_route_table": {Tok: azureResource(azureNetwork, "RouteTable")},
"azurerm_subnet": {
Tok: azureResource(azureNetwork, "Subnet"),
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a Subnet is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
},
"azurerm_subnet_network_security_group_association": {Tok: azureResource(azureNetwork, "SubnetNetworkSecurityGroupAssociation")},
"azurerm_subnet_route_table_association": {Tok: azureResource(azureNetwork, "SubnetRouteTableAssociation")},
"azurerm_express_route_circuit": {Tok: azureResource(azureNetwork, "ExpressRouteCircuit")},
"azurerm_express_route_circuit_authorization": {Tok: azureResource(azureNetwork, "ExpressRouteCircuitAuthorization"),
Docs: &tfbridge.DocInfo{
Source: "express_route_circuit_authorization.html.markdown",
},
},
"azurerm_express_route_circuit_peering": {Tok: azureResource(azureNetwork, "ExpressRouteCircuitPeering"),
Docs: &tfbridge.DocInfo{
Source: "express_route_circuit_authorization.html.markdown",
},
},
"azurerm_express_route_gateway": {Tok: azureResource(azureNetwork, "ExpressRouteGateway")},
"azurerm_nat_gateway": {Tok: azureResource(azureNetwork, "NatGateway")},
"azurerm_subnet_nat_gateway_association": {Tok: azureResource(azureNetwork, "SubnetNatGatewayAssociation")},
"azurerm_point_to_site_vpn_gateway": {Tok: azureResource(azureNetwork, "PointToPointVpnGateway")},
"azurerm_virtual_hub": {Tok: azureResource(azureNetwork, "VirtualHub")},
"azurerm_virtual_hub_connection": {Tok: azureResource(azureNetwork, "VirtualHubConnection")},
"azurerm_vpn_gateway": {Tok: azureResource(azureNetwork, "VpnGateway")},
"azurerm_vpn_server_configuration": {Tok: azureResource(azureNetwork, "VpnServerConfiguration")},
// Redis
"azurerm_redis_cache": {Tok: azureResource(azureRedis, "Cache")},
"azurerm_redis_firewall_rule": {Tok: azureResource(azureRedis, "FirewallRule")},
// Relay
"azurerm_relay_namespace": {Tok: azureResource(azureRelay, "Namespace")},
"azurerm_relay_hybrid_connection": {Tok: azureResource(azureRelay, "HybridConnection")},
// Security Center
"azurerm_security_center_contact": {Tok: azureResource(azureSecurityCenter, "Contact")},
"azurerm_security_center_subscription_pricing": {Tok: azureResource(azureSecurityCenter, "SubscriptionPricing")},
"azurerm_security_center_workspace": {Tok: azureResource(azureSecurityCenter, "Workspace")},
"azurerm_advanced_threat_protection": {Tok: azureResource(azureSecurityCenter, "AdvancedThreatProtection")},
// Service Fabric
"azurerm_service_fabric_cluster": {Tok: azureResource(azureServiceFabric, "Cluster")},
// Search
"azurerm_search_service": {Tok: azureResource(azureSearch, "Service")},
// SignalR
"azurerm_signalr_service": {Tok: azureResource(azureSignalr, "Service")},
// Storage
"azurerm_storage_account": {
Tok: azureResource(azureStorage, "Account"),
Fields: map[string]*tfbridge.SchemaInfo{
"static_website": {
Name: "staticWebsite",
Elem: &tfbridge.SchemaInfo{
Fields: map[string]*tfbridge.SchemaInfo{
// By default, this gets reverse-mapped to `error404_document` by the
// bridge's naming logic, so we force it to the correct mapping here.
"error_404_document": {
Name: "error404Document",
},
},
},
},
},
},
"azurerm_storage_blob": {
Tok: azureResource(azureStorage, "Blob"),
Fields: map[string]*tfbridge.SchemaInfo{
// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#storage
// Max length of a blob name is 1024.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 1024,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
"source": {
Asset: &tfbridge.AssetTranslation{
Kind: tfbridge.FileAsset,
},
},
}},
"azurerm_storage_container": {
Tok: azureResource(azureStorage, "Container"),
Fields: map[string]*tfbridge.SchemaInfo{
// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#storage
// Max length of a container name is 63.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 63,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
}},
"azurerm_storage_share": {Tok: azureResource(azureStorage, "Share")},
"azurerm_storage_share_directory": {Tok: azureResource(azureStorage, "ShareDirectory")},
"azurerm_storage_queue": {
Tok: azureResource(azureStorage, "Queue"),
Fields: map[string]*tfbridge.SchemaInfo{
// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#storage
// Max length of a queue name is 63.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 63,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
}},
"azurerm_storage_table": {
Tok: azureResource(azureStorage, "Table"),
Fields: map[string]*tfbridge.SchemaInfo{
// https://docs.microsoft.com/en-us/rest/api/storageservices/Understanding-the-Table-Service-Data-Model#table-names
// Max length of a table name is 63.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 63,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
}},
"azurerm_storage_table_entity": {
Tok: azureResource(azureStorage, "TableEntity"),
},
"azurerm_storage_data_lake_gen2_filesystem": {Tok: azureResource(azureStorage, "DataLakeGen2Filesystem")},
"azurerm_storage_management_policy": {Tok: azureResource(azureStorage, "ManagementPolicy")},
"azurerm_storage_account_network_rules": {Tok: azureResource(azureStorage, "AccountNetworkRules")},
"azurerm_storage_account_customer_managed_key": {Tok: azureResource(azureStorage, "CustomerManagedKey")},
// Stream Analytics
"azurerm_stream_analytics_function_javascript_udf": {Tok: azureResource(azureStreamAnalytics, "FunctionJavaScriptUDF")},
"azurerm_stream_analytics_job": {Tok: azureResource(azureStreamAnalytics, "Job")},
"azurerm_stream_analytics_output_blob": {Tok: azureResource(azureStreamAnalytics, "OutputBlob")},
"azurerm_stream_analytics_output_eventhub": {Tok: azureResource(azureStreamAnalytics, "OutputEventHub")},
"azurerm_stream_analytics_output_mssql": {Tok: azureResource(azureStreamAnalytics, "OutputMssql")},
"azurerm_stream_analytics_output_servicebus_queue": {Tok: azureResource(azureStreamAnalytics, "OutputServiceBusQueue")},
"azurerm_stream_analytics_stream_input_blob": {Tok: azureResource(azureStreamAnalytics, "StreamInputBlob")},
"azurerm_stream_analytics_stream_input_eventhub": {Tok: azureResource(azureStreamAnalytics, "StreamInputEventHub")},
"azurerm_stream_analytics_stream_input_iothub": {Tok: azureResource(azureStreamAnalytics, "StreamInputIotHub")},
"azurerm_stream_analytics_output_servicebus_topic": {Tok: azureResource(azureStreamAnalytics, "OutputServicebusTopic")},
"azurerm_stream_analytics_reference_input_blob": {Tok: azureResource(azureStreamAnalytics, "ReferenceInputBlob")},
// Marketplace
"azurerm_marketplace_agreement": {Tok: azureResource(azureMarketPlace, "Agreement")},
// Kusto
"azurerm_kusto_cluster": {Tok: azureResource(azureKusto, "Cluster")},
"azurerm_kusto_database": {Tok: azureResource(azureKusto, "Database")},
"azurerm_kusto_eventhub_data_connection": {Tok: azureResource(azureKusto, "EventhubDataConnection")},
"azurerm_kusto_database_principal": {Tok: azureResource(azureKusto, "DatabasePrincipal")},
// Frontdoor
"azurerm_frontdoor": {
Tok: azureResource(azureFrontdoor, "Frontdoor"),
Docs: &tfbridge.DocInfo{
Source: "front_door.html.markdown",
},
},
"azurerm_frontdoor_firewall_policy": {
Tok: azureResource(azureFrontdoor, "FirewallPolicy"),
Docs: &tfbridge.DocInfo{
Source: "front_door_firewall_policy.html.markdown",
},
},
// Bot
"azurerm_bot_channels_registration": {Tok: azureResource(azureBot, "ChannelsRegistration")},
"azurerm_bot_connection": {Tok: azureResource(azureBot, "Connection")},
"azurerm_bot_channel_email": {Tok: azureResource(azureBot, "ChannelEmail")},
"azurerm_bot_channel_slack": {Tok: azureResource(azureBot, "ChannelSlack")},
"azurerm_bot_web_app": {Tok: azureResource(azureBot, "WebApp")},
"azurerm_bot_channel_ms_teams": {Tok: azureResource(azureBot, "ChannelTeams")},
"azurerm_bot_channel_directline": {Tok: azureResource(azureBot, "ChannelDirectLine")},
// Proximity
"azurerm_proximity_placement_group": {Tok: azureResource(azureProximity, "PlacementGroup")},
// WAF
"azurerm_web_application_firewall_policy": {Tok: azureResource(azureWaf, "Policy")},
// Dashboard
"azurerm_dashboard": {Tok: azureResource(azureDashboard, "Dashboard")},
// Healthcare
"azurerm_healthcare_service": {Tok: azureResource(azureHealthcare, "Service")},
// NetApp
"azurerm_netapp_account": {Tok: azureResource(azureNetapp, "Account")},
"azurerm_netapp_pool": {Tok: azureResource(azureNetapp, "Pool")},
"azurerm_netapp_volume": {Tok: azureResource(azureNetapp, "Volume")},
"azurerm_netapp_snapshot": {Tok: azureResource(azureNetapp, "Snapshot")},
// App Configuration
"azurerm_app_configuration": {Tok: azureResource(azureAppConfiguration, "ConfigurationStore")},
// Backup
"azurerm_backup_container_storage_account": {Tok: azureResource(azureBackup, "ContainerStorageAccount")},
"azurerm_backup_policy_file_share": {Tok: azureResource(azureBackup, "PolicyFileShare")},
"azurerm_backup_protected_file_share": {Tok: azureResource(azureBackup, "ProtectedFileShare")},
"azurerm_backup_policy_vm": {Tok: azureResource(azureBackup, "PolicyVM")},
"azurerm_backup_protected_vm": {Tok: azureResource(azureBackup, "ProtectedVM")},
// Private Link
"azurerm_private_endpoint": {Tok: azureResource(azurePrivateLink, "Endpoint")},
// SiteRecovery
"azurerm_site_recovery_fabric": {Tok: azureResource(azureSiteRecovery, "Fabric")},
"azurerm_site_recovery_network_mapping": {Tok: azureResource(azureSiteRecovery, "NetworkMapping")},
"azurerm_site_recovery_protection_container": {Tok: azureResource(azureSiteRecovery, "ProtectionContainer")},
"azurerm_site_recovery_replicated_vm": {Tok: azureResource(azureSiteRecovery, "ReplicatedVM")},
"azurerm_site_recovery_replication_policy": {Tok: azureResource(azureSiteRecovery, "ReplicationPolicy")},
"azurerm_site_recovery_protection_container_mapping": {
Tok: azureResource(azureSiteRecovery, "ProtectionContainerMapping"),
},
"azurerm_recovery_services_vault": {Tok: azureResource(azureRecoveryServices, "Vault")},
// Database Migration
"azurerm_database_migration_project": {Tok: azureResource(azureDatabaseMigration, "Project")},
"azurerm_database_migration_service": {Tok: azureResource(azureDatabaseMigration, "Service")},
// IoT Central
"azurerm_iotcentral_application": {Tok: azureResource(azureIotCentral, "Application")},
// HPC
"azurerm_hpc_cache": {Tok: azureResource(azureHpc, "Cache")},
"azurerm_hpc_cache_blob_target": {Tok: azureResource(azureHpc, "CacheBlobTarget")},
"azurerm_hpc_cache_nfs_target": {Tok: azureResource(azureHpc, "CacheNfsTarget")},
// Mixed Reality
"azurerm_spatial_anchors_account": {Tok: azureResource(azureMixedReality, "SpatialAnchorsAccount")},
// PowerBI
"azurerm_powerbi_embedded": {Tok: azureResource(azurePowerBi, "Embedded")},
// Machine Learning
"azurerm_machine_learning_workspace": {Tok: azureResource(azureMachineLearning, "Workspace")},
// Managed Applications
"azurerm_managed_application": {Tok: azureResource(azureManagedApplication, "Application")},
"azurerm_managed_application_definition": {Tok: azureResource(azureManagedApplication, "Definition")},
// Maintenance
"azurerm_maintenance_configuration": {Tok: azureResource(azureMaintenance, "Configuration")},
// Servicebus
"azurerm_servicebus_namespace_network_rule_set": {
Tok: azureResource(azureServiceBus, "NamespaceNetworkRuleSet"),
},
// Sentinel
"azurerm_sentinel_alert_rule_ms_security_incident": {
Tok: azureResource(azureSentinel, "AlertRuleMsSecurityIncident"),
},
"azurerm_sentinel_alert_rule_scheduled": {
Tok: azureResource(azureSentinel, "AlertRuleScheduled"),
},
},
DataSources: map[string]*tfbridge.DataSourceInfo{
"azurerm_application_insights": {Tok: azureDataSource(azureAppInsights, "getInsights")},
"azurerm_api_management": {
Tok: azureDataSource(azureAPIManagement, "getService"),
Fields: map[string]*tfbridge.SchemaInfo{
// Ensure "sku" is a singleton
"sku": {Name: "sku", MaxItemsOne: boolRef(true)},
},
},
"azurerm_api_management_api": {Tok: azureDataSource(azureAPIManagement, "getApi")},
"azurerm_api_management_group": {Tok: azureDataSource(azureAPIManagement, "getGroup")},
"azurerm_api_management_product": {Tok: azureDataSource(azureAPIManagement, "getProduct")},
"azurerm_api_management_user": {Tok: azureDataSource(azureAPIManagement, "getUser")},
"azurerm_api_management_api_version_set": {Tok: azureDataSource(azureAPIManagement, "getApiVersionSet")},
"azurerm_app_service": {
Tok: azureDataSource(azureAppService, "getAppService"),
Fields: map[string]*tfbridge.SchemaInfo{
// Ensure "sku" is a singleton
"sku": {Name: "sku", MaxItemsOne: boolRef(true)},
},
},
"azurerm_app_service_plan": {
Tok: azureDataSource(azureAppService, "getAppServicePlan"),
Fields: map[string]*tfbridge.SchemaInfo{
// Ensure "sku" is a singleton
"sku": {Name: "sku", MaxItemsOne: boolRef(true)},
},
},
"azurerm_automation_variable_bool": {Tok: azureDataSource(azureAutomation, "getBoolVariable")},
"azurerm_automation_variable_datetime": {Tok: azureDataSource(azureAutomation, "getDateTimeVariable")},
"azurerm_automation_variable_int": {Tok: azureDataSource(azureAutomation, "getIntVariable")},
"azurerm_automation_variable_string": {Tok: azureDataSource(azureAutomation, "getStringVariable")},
"azurerm_automation_account": {Tok: azureDataSource(azureAutomation, "getAccount")},
"azurerm_availability_set": {Tok: azureDataSource(azureCompute, "getAvailabilitySet")},
"azurerm_batch_account": {Tok: azureDataSource(azureBatch, "getAccount")},
"azurerm_batch_certificate": {Tok: azureDataSource(azureBatch, "getCertificate")},
"azurerm_batch_pool": {Tok: azureDataSource(azureBatch, "getPool")},
"azurerm_subscriptions": {Tok: azureDataSource(azureCore, "getSubscriptions")},
"azurerm_cdn_profile": {Tok: azureDataSource(azureCDN, "getProfile")},
"azurerm_client_config": {Tok: azureDataSource(azureCore, "getClientConfig")},
"azurerm_container_registry": {Tok: azureDataSource(azureContainerService, "getRegistry")},
"azurerm_cosmosdb_account": {Tok: azureDataSource(azureCosmosDB, "getAccount")},
"azurerm_data_lake_store": {Tok: azureDataSource(azureDatalake, "getStore")},
"azurerm_data_share_account": {Tok: azureDataSource(azureDataShare, "getAccount")},
"azurerm_dev_test_lab": {Tok: azureDataSource(azureDevTest, "getLab")},
"azurerm_dev_test_virtual_network": {Tok: azureDataSource(azureDevTest, "getVirtualNetwork")},
"azurerm_image": {Tok: azureDataSource(azureCompute, "getImage")},
"azurerm_shared_image": {Tok: azureDataSource(azureCompute, "getSharedImage")},
"azurerm_shared_image_gallery": {Tok: azureDataSource(azureCompute, "getSharedImageGallery")},
"azurerm_shared_image_version": {Tok: azureDataSource(azureCompute, "getSharedImageVersion")},
"azurerm_lb": {
Tok: azureDataSource(azureLB, "getLB"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer.html.markdown",
},
},
"azurerm_lb_backend_address_pool": {
Tok: azureDataSource(azureLB, "getBackendAddressPool"),
Docs: &tfbridge.DocInfo{
Source: "loadbalancer_backend_address_pool.html.markdown",
},
},
"azurerm_log_analytics_workspace": {
Tok: azureDataSource(azureOperationalInsights, "getAnalyticsWorkspace"),
},
"azurerm_logic_app_workflow": {Tok: azureDataSource(azureLogicApps, "getWorkflow")},
"azurerm_maps_account": {Tok: azureDataSource(azureMaps, "getAccount")},
"azurerm_monitor_action_group": {Tok: azureDataSource(azureMonitoring, "getActionGroup")},
"azurerm_monitor_diagnostic_categories": {
Tok: azureDataSource(azureMonitoring, "getDiagnosticCategories"),
},
"azurerm_monitor_log_profile": {Tok: azureDataSource(azureMonitoring, "getLogProfile")},
"azurerm_monitor_scheduled_query_rules_alert": {
Tok: azureDataSource(azureMonitoring, "getScheduledQueryRulesAlert"),
},
"azurerm_monitor_scheduled_query_rules_log": {
Tok: azureDataSource(azureMonitoring, "getScheduledQueryRulesLog"),
},
"azurerm_mssql_elasticpool": {Tok: azureDataSource(azureMSSQL, "getElasticPool")},
"azurerm_mssql_database": {Tok: azureDataSource(azureMSSQL, "getDatabase")},
"azurerm_dns_zone": {Tok: azureDataSource(azureDNS, "getZone")},
"azurerm_key_vault": {
Tok: azureDataSource(azureKeyVault, "getKeyVault"),
Fields: map[string]*tfbridge.SchemaInfo{
// Ensure "sku" is a singleton
"sku": {Name: "sku", MaxItemsOne: boolRef(true)},
},
},
"azurerm_key_vault_access_policy": {Tok: azureDataSource(azureKeyVault, "getAccessPolicy")},
"azurerm_key_vault_key": {Tok: azureDataSource(azureKeyVault, "getKey")},
"azurerm_key_vault_secret": {Tok: azureDataSource(azureKeyVault, "getSecret")},
"azurerm_kubernetes_cluster": {Tok: azureDataSource(azureContainerService, "getKubernetesCluster")},
"azurerm_kubernetes_service_versions": {Tok: azureDataSource(azureContainerService, "getKubernetesServiceVersions")},
"azurerm_notification_hub": {Tok: azureDataSource(azureNotificationHub, "getHub")},
"azurerm_notification_hub_namespace": {
Tok: azureDataSource(azureNotificationHub, "getNamespace"),
Fields: map[string]*tfbridge.SchemaInfo{
// Ensure "sku" is a singleton
"sku": {Name: "sku", MaxItemsOne: boolRef(true)},
},
},
"azurerm_virtual_network": {
Tok: azureDataSource(azureNetwork, "getVirtualNetwork"),
Fields: map[string]*tfbridge.SchemaInfo{
// Explicitly map addressSpace => addressSpaces to avoid confusion
// with addressSpaces => addressSpacesCollection below.
"address_space": {Name: "addressSpaces"},
// Conflicts with the pluralized `addressSpaces` property above. Since address_spaces
// is deprecated upstream and address_space is pluralized here, consumers should not be
// broken, and this avoids duplicate field definitions.
"address_spaces": {Name: "addressSpacesCollection"},
},
},
"azurerm_virtual_network_gateway": {Tok: azureDataSource(azureNetwork, "getVirtualNetworkGateway")},
"azurerm_network_security_group": {Tok: azureDataSource(azureNetwork, "getNetworkSecurityGroup")},
"azurerm_network_interface": {Tok: azureDataSource(azureNetwork, "getNetworkInterface")},
"azurerm_network_watcher": {Tok: azureDataSource(azureNetwork, "getNetworkWatcher")},
"azurerm_public_ip": {Tok: azureDataSource(azureNetwork, "getPublicIP")},
"azurerm_public_ips": {Tok: azureDataSource(azureNetwork, "getPublicIPs")},
"azurerm_public_ip_prefix": {Tok: azureDataSource(azureNetwork, "getPublicIpPrefix")},
"azurerm_application_security_group": {Tok: azureDataSource(azureNetwork, "getApplicationSecurityGroup")},
"azurerm_redis_cache": {Tok: azureDataSource(azureRedis, "getCache")},
"azurerm_resource_group": {Tok: azureDataSource(azureCore, "getResourceGroup")},
"azurerm_snapshot": {Tok: azureDataSource(azureCompute, "getSnapshot")},
"azurerm_subnet": {Tok: azureDataSource(azureNetwork, "getSubnet")},
"azurerm_route_table": {Tok: azureDataSource(azureNetwork, "getRouteTable")},
"azurerm_network_ddos_protection_plan": {Tok: azureDataSource(azureNetwork, "getNetworkDdosProtectionPlan")},
"azurerm_network_service_tags": {Tok: azureDataSource(azureNetwork, "getServiceTags")},
"azurerm_express_route_circuit": {
Tok: azureDataSource(azureNetwork, "getExpressRouteCircuit"),
Fields: map[string]*tfbridge.SchemaInfo{
// Ensure "sku" is a singleton
"sku": {Name: "sku", MaxItemsOne: boolRef(true)},
},
},
"azurerm_sql_server": {Tok: azureDataSource(azureSQL, "getServer")},
"azurerm_sql_database": {Tok: azureDataSource(azureSQL, "getDatabase")},
"azurerm_virtual_network_gateway_connection": {Tok: azureDataSource(azureNetwork, "getGatewayConnection")},
"azurerm_firewall": {Tok: azureDataSource(azureNetwork, "getFirewall")},
"azurerm_subscription": {Tok: azureDataSource(azureCore, "getSubscription")},
"azurerm_policy_definition": {Tok: azureDataSource(azurePolicy, "getPolicyDefintion")},
"azurerm_policy_set_definition": {Tok: azureDataSource(azurePolicy, "getPolicySetDefinition")},
"azurerm_platform_image": {Tok: azureDataSource(azureCompute, "getPlatformImage")},
"azurerm_managed_disk": {Tok: azureDataSource(azureCompute, "getManagedDisk")},
"azurerm_shared_image_versions": {Tok: azureDataSource(azureCompute, "getSharedImageVersions")},
"azurerm_backup_policy_vm": {Tok: azureDataSource(azureBackup, "getPolicyVM")},
"azurerm_storage_account": {Tok: azureDataSource(azureStorage, "getAccount")},
"azurerm_storage_account_sas": {Tok: azureDataSource(azureStorage, "getAccountSAS")},
"azurerm_storage_account_blob_container_sas": {Tok: azureDataSource(azureStorage, "getAccountBlobContainerSAS")},
"azurerm_storage_management_policy": {Tok: azureDataSource(azureStorage, "getPolicy")},
"azurerm_virtual_machine": {Tok: azureDataSource(azureCompute, "getVirtualMachine")},
"azurerm_hdinsight_cluster": {Tok: azureDataSource(azureHdInsight, "getCluster")},
"azurerm_stream_analytics_job": {Tok: azureDataSource(azureStreamAnalytics, "getJob")},
"azurerm_proximity_placement_group": {Tok: azureDataSource(azureProximity, "getPlacementGroup")},
"azurerm_servicebus_namespace_authorization_rule": {Tok: azureDataSource(azureServiceBus, "getNamespaceAuthorizationRule")},
"azurerm_app_service_certificate": {Tok: azureDataSource(azureAppService, "getCertificate")},
"azurerm_app_service_certificate_order": {Tok: azureDataSource(azureAppService, "getCertificateOrder")},
"azurerm_data_factory": {Tok: azureDataSource(azureDataFactory, "getFactory")},
"azurerm_healthcare_service": {Tok: azureDataSource(azureHealthcare, "getService")},
"azurerm_postgresql_server": {Tok: azureDataSource(azurePostgresql, "getServer")},
"azurerm_resources": {Tok: azureDataSource(azureCore, "getResources")},
"azurerm_netapp_account": {Tok: azureDataSource(azureNetapp, "getAccount")},
"azurerm_netapp_pool": {Tok: azureDataSource(azureNetapp, "getPool")},
"azurerm_netapp_volume": {Tok: azureDataSource(azureNetapp, "getVolume")},
"azurerm_netapp_snapshot": {Tok: azureDataSource(azureNetapp, "getSnapshot")},
"azurerm_private_link_service": {Tok: azureDataSource(azurePrivateLink, "getService")},
"azurerm_private_endpoint_connection": {Tok: azureDataSource(azurePrivateLink, "getEndpointConnection")},
"azurerm_private_link_service_endpoint_connections": {
Tok: azureDataSource(azurePrivateLink, "getServiceEndpointConnections"),
},
"azurerm_nat_gateway": {Tok: azureDataSource(azureNetwork, "getNatGateway")},
"azurerm_virtual_hub": {Tok: azureDataSource(azureNetwork, "getVirtualHub")},
"azurerm_signalr_service": {Tok: azureDataSource(azureSignalr, "getService")},
"azurerm_storage_container": {Tok: azureDataSource(azureStorage, "getStorageContainer")},
"azurerm_iothub_shared_access_policy": {Tok: azureDataSource(azureIot, "getSharedAccessPolicy")},
"azurerm_iothub_dps": {Tok: azureDataSource(azureIot, "getDps")},
"azurerm_eventgrid_topic": {Tok: azureDataSource(azureEventGrid, "getTopic")},
"azurerm_disk_encryption_set": {Tok: azureDataSource(azureCompute, "getDiskEncryptionSet")},
"azurerm_dedicated_host_group": {Tok: azureDataSource(azureCompute, "getDedicatedHostGroup")},
"azurerm_dedicated_host": {Tok: azureDataSource(azureCompute, "getDedicatedHost")},
"azurerm_mariadb_server": {Tok: azureDataSource(azureMariaDB, "getMariaDbServer")},
"azurerm_eventhub_namespace_authorization_rule": {
Tok: azureDataSource(azureEventHub, "getNamespaceAuthorizationRule"),
},
"azurerm_eventhub_authorization_rule": {Tok: azureDataSource(azureEventHub, "getAuthorizationRule")},
"azurerm_eventhub_consumer_group": {Tok: azureDataSource(azureEventHub, "getConsumeGroup")},
"azurerm_function_app": {Tok: azureDataSource(azureAppService, "getFunctionApp")},
"azurerm_app_service_environment": {Tok: azureDataSource(azureAppService, "getAppServiceEnvironment")},
"azurerm_iothub_dps_shared_access_policy": {Tok: azureDataSource(azureIot, "getDpsSharedAccessPolicy")},
"azurerm_recovery_services_vault": {Tok: azureDataSource(azureRecoveryServices, "getVault")},
"azurerm_database_migration_project": {Tok: azureDataSource(azureDatabaseMigration, "getProject")},
"azurerm_database_migration_service": {Tok: azureDataSource(azureDatabaseMigration, "getService")},
"azurerm_kusto_cluster": {Tok: azureDataSource(azureKusto, "getCluster")},
"azurerm_servicebus_topic_authorization_rule": {
Tok: azureDataSource(azureServiceBus, "getTopicAuthorizationRule"),
},
"azurerm_app_configuration": {
Tok: azureDataSource(azureAppConfiguration, "getConfigurationStore"),
},
"azurerm_machine_learning_workspace": {Tok: azureDataSource(azureMachineLearning, "getWorkspace")},
"azurerm_managed_application_definition": {Tok: azureDataSource(azureManagedApplication, "getDefinition")},
"azurerm_spring_cloud_service": {Tok: azureDataSource(azureAppPlatform, "getSpringCloudService")},
"azurerm_private_dns_zone": {Tok: azureDataSource(azurePrivateDNS, "getDnsZone")},
"azurerm_sentinel_alert_rule": {Tok: azureDataSource(azureSentinel, "getAlertRule")},
"azurerm_maintenance_configuration": {Tok: azureDataSource(azureMaintenance, "getConfiguration")},
},
JavaScript: &tfbridge.JavaScriptInfo{
DevDependencies: map[string]string{
"@types/node": "^8.0.0", // so we can access strongly typed node definitions.
},
Dependencies: map[string]string{
"@pulumi/pulumi": "^2.0.0",
"azure-eventgrid": "^1.6.0",
"@azure/functions": "^1.0.3",
"@azure/ms-rest-azure-js": "^2.0.1",
"@azure/ms-rest-nodeauth": "^3.0.0",
"azure-functions-ts-essentials": "^1.3.2",
"moment": "2.24.0",
},
Overlay: &tfbridge.OverlayInfo{
Files: []string{},
DestFiles: []string{
"location.ts",
"util.ts",
},
Modules: map[string]*tfbridge.OverlayInfo{
"appservice": {
DestFiles: []string{
"kind.ts",
"zMixins.ts",
"zMixins_durable.ts",
"zMixins_http.ts",
"zMixins_timer.ts",
},
},
"core": {
DestFiles: []string{
"zMixins.ts",
},
},
"cosmosdb": {
DestFiles: []string{
"zMixins.ts",
},
},
"eventgrid": {
DestFiles: []string{
"zMixins.ts",
},
},
"eventhub": {
DestFiles: []string{
"zMixins.ts",
},
},
"iot": {
DestFiles: []string{
"zMixins.ts",
},
},
"servicebus": {
DestFiles: []string{
"zMixins.ts",
},
},
"storage": {
DestFiles: []string{
"zMixins.ts",
},
},
},
},
},
Python: &tfbridge.PythonInfo{
Requires: map[string]string{
"pulumi": ">=2.0.0,<3.0.0",
},
},
CSharp: &tfbridge.CSharpInfo{
PackageReferences: map[string]string{
"Pulumi": "2.*",
"System.Collections.Immutable": "1.6.0",
},
Overlay: &tfbridge.OverlayInfo{
Modules: map[string]*tfbridge.OverlayInfo{
"Storage": {
DestFiles: []string{
"SharedAccessSignature.cs",
},
},
},
},
Namespaces: namespaceMap,
},
}
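// The Rename* helpers below move resources and data sources from legacy modules to their new
// modules while keeping the old tokens usable as aliases, so that existing stacks keep working.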
// New Authorization Mod - this combines the old MSI and Role Modules
prov.RenameResourceWithAlias("azurerm_role_assignment", azureResource(azureLegacyRole, "Assignment"),
azureResource(azureAuthorization, "Assignment"), azureLegacyRole, azureAuthorization, &tfbridge.ResourceInfo{
Fields: map[string]*tfbridge.SchemaInfo{
// Suppress auto-naming of this field. It is autonamed to a GUID in the underlying provider.
azureName: {Name: azureName},
},
})
prov.RenameResourceWithAlias("azurerm_role_definition", azureResource(azureLegacyRole, "Definition"),
azureResource(azureAuthorization, "RoleDefinition"), azureLegacyRole, azureAuthorization, nil)
prov.RenameResourceWithAlias("azurerm_user_assigned_identity", azureResource(azureLegacyMSI, "UserAssignedIdentity"),
azureResource(azureAuthorization, "UserAssignedIdentity"), azureLegacyMSI, azureAuthorization, &tfbridge.ResourceInfo{
Docs: &tfbridge.DocInfo{
Source: "user_assigned_identity.markdown",
},
})
prov.RenameDataSource("azurerm_role_definition", azureDataSource(azureLegacyRole, "getRoleDefinition"),
azureDataSource(azureAuthorization, "getRoleDefinition"), azureLegacyRole, azureAuthorization, &tfbridge.DataSourceInfo{
Docs: &tfbridge.DocInfo{
Source: "role_definition.markdown",
},
})
prov.RenameDataSource("azurerm_user_assigned_identity", azureDataSource(azureCore, "getUserAssignedIdentity"),
azureDataSource(azureAuthorization, "getUserAssignedIdentity"), azureCore, azureAuthorization, nil)
// Migrate azureLegacyManagementGroups -> azureManagement
prov.RenameResourceWithAlias("azurerm_management_group", azureResource(azureLegacyManagementGroups, "ManagementGroup"),
azureResource(azureManagement, "Group"), azureLegacyManagementGroups, azureManagement, nil)
prov.RenameDataSource("azurerm_management_group", azureDataSource(azureLegacyManagementGroups, "getManagementGroup"),
azureDataSource(azureManagement, "getGroup"), azureLegacyManagementGroups, azureManagement, nil)
// Migrate azureLegacyMgmtResource -> azureManagement
prov.RenameResourceWithAlias("azurerm_management_lock", azureResource(azureLegacyMgmtResource, "ManangementLock"),
azureResource(azureManagement, "Lock"), azureLegacyMgmtResource, azureManagement, nil)
// Migrate `azurerm_event_grid_*` to new EventGrid Mod
prov.RenameResourceWithAlias("azurerm_eventgrid_domain", azureResource(azureEventHub, "Domain"),
azureResource(azureEventGrid, "Domain"), azureEventHub, azureEventGrid, nil)
prov.RenameResourceWithAlias("azurerm_eventgrid_event_subscription", azureResource(azureEventHub, "EventSubscription"),
azureResource(azureEventGrid, "EventSubscription"), azureEventHub, azureEventGrid, nil)
prov.RenameResourceWithAlias("azurerm_eventgrid_topic", azureResource(azureEventHub, "EventGridTopic"),
azureResource(azureEventGrid, "Topic"), azureEventHub, azureEventGrid, nil)
// Migrate `azurerm_servicebus_*` to new ServiceBus Mod
prov.RenameResourceWithAlias("azurerm_servicebus_namespace", azureResource(azureEventHub, "Namespace"),
azureResource(azureServiceBus, "Namespace"), azureEventHub, azureServiceBus, &tfbridge.ResourceInfo{
Fields: map[string]*tfbridge.SchemaInfo{
// https://docs.microsoft.com/en-us/rest/api/servicebus/create-namespace
// Max length of a Service Bus namespace is 50.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 50,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
})
prov.RenameResourceWithAlias("azurerm_servicebus_namespace_authorization_rule",
azureResource(azureEventHub, "NamespaceAuthorizationRule"),
azureResource(azureServiceBus, "NamespaceAuthorizationRule"),
azureEventHub, azureServiceBus, nil)
prov.RenameResourceWithAlias("azurerm_servicebus_queue", azureResource(azureEventHub, "Queue"),
azureResource(azureServiceBus, "Queue"), azureEventHub, azureServiceBus, &tfbridge.ResourceInfo{
Fields: map[string]*tfbridge.SchemaInfo{
// https://groups.google.com/forum/#!topic/particularsoftware/XuHp_8wZ09o
// Max length of a Service Bus queue is 260.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 260,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
})
prov.RenameResourceWithAlias("azurerm_servicebus_queue_authorization_rule",
azureResource(azureEventHub, "QueueAuthorizationRule"), azureResource(azureServiceBus, "QueueAuthorizationRule"),
azureEventHub, azureServiceBus, nil)
prov.RenameResourceWithAlias("azurerm_servicebus_subscription",
azureResource(azureEventHub, "Subscription"), azureResource(azureServiceBus, "Subscription"),
azureEventHub, azureServiceBus, nil)
prov.RenameResourceWithAlias("azurerm_servicebus_subscription_rule",
azureResource(azureEventHub, "SubscriptionRule"), azureResource(azureServiceBus, "SubscriptionRule"),
azureEventHub, azureServiceBus, nil)
prov.RenameResourceWithAlias("azurerm_servicebus_topic",
azureResource(azureEventHub, "Topic"), azureResource(azureServiceBus, "Topic"),
azureEventHub, azureServiceBus, &tfbridge.ResourceInfo{
Fields: map[string]*tfbridge.SchemaInfo{
// https://groups.google.com/forum/#!topic/particularsoftware/XuHp_8wZ09o
// Max length of a Service Bus topic is 260.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 260,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
})
prov.RenameResourceWithAlias("azurerm_servicebus_topic_authorization_rule",
azureResource(azureEventHub, "TopicAuthorizationRule"), azureResource(azureServiceBus, "TopicAuthorizationRule"),
azureEventHub, azureServiceBus, nil)
prov.RenameDataSource("azurerm_servicebus_namespace",
azureDataSource(azureEventHub, "getServiceBusNamespace"), azureDataSource(azureServiceBus, "getNamespace"),
azureEventHub, azureServiceBus, nil)
// Rename Eventhub Resources
prov.RenameResourceWithAlias("azurerm_eventhub_authorization_rule",
azureResource(azureEventHub, "EventHubAuthorizationRule"), azureResource(azureEventHub, "AuthorizationRule"),
azureEventHub, azureEventHub, nil)
prov.RenameResourceWithAlias("azurerm_eventhub_consumer_group",
azureResource(azureEventHub, "EventHubConsumerGroup"), azureResource(azureEventHub, "ConsumerGroup"),
azureEventHub, azureEventHub, nil)
prov.RenameDataSource("azurerm_eventhub_namespace",
azureDataSource(azureEventHub, "getEventhubNamespace"), azureDataSource(azureEventHub, "getNamespace"),
azureEventHub, azureEventHub, nil)
// Migrate `azurerm_traffic_manager_*` to network module
prov.RenameResourceWithAlias("azurerm_traffic_manager_endpoint",
azureResource(azureLegacyTrafficManager, "Endpoint"), azureResource(azureNetwork, "TrafficManagerEndpoint"),
azureLegacyTrafficManager, azureNetwork, nil)
prov.RenameResourceWithAlias("azurerm_traffic_manager_profile",
azureResource(azureLegacyTrafficManager, "Profile"), azureResource(azureNetwork, "TrafficManagerProfile"),
azureLegacyTrafficManager, azureNetwork, &tfbridge.ResourceInfo{
Fields: map[string]*tfbridge.SchemaInfo{
// Max length of a Traffic Manager Profile is 80.
// Source: https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#networking
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 80,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
}})
prov.RenameDataSource("azurerm_traffic_manager_geographical_location",
azureDataSource(azureLegacyTrafficManager, "getGeographicalLocation"),
azureDataSource(azureNetwork, "getTrafficManager"), azureLegacyTrafficManager, azureNetwork, nil)
// Fix the spelling on the KeyVault Certificate
prov.RenameResourceWithAlias("azurerm_key_vault_certificate",
azureResource(azureKeyVault, "Certifiate"), azureResource(azureKeyVault, "Certificate"),
azureKeyVault, azureKeyVault, &tfbridge.ResourceInfo{
Fields: map[string]*tfbridge.SchemaInfo{
"certificate": {
CSharpName: "KeyVaultCertificate",
},
}})
// Fix the spelling of ContainerService Webook to Webhook
prov.RenameResourceWithAlias("azurerm_container_registry_webhook",
azureResource(azureContainerService, "RegistryWebook"),
azureResource(azureContainerService, "RegistryWebhook"), azureContainerService, azureContainerService, nil)
// Deprecated, remove in 3.0.
prov.P.ResourcesMap["azurerm_storage_zipblob"] = prov.P.ResourcesMap["azurerm_storage_blob"]
prov.Resources["azurerm_storage_zipblob"] = &tfbridge.ResourceInfo{
Tok: azureResource(azureStorage, "ZipBlob"),
DeprecationMessage: "ZipBlob resource is deprecated in the 2.0 version of the provider. Use Blob resource instead.",
Fields: map[string]*tfbridge.SchemaInfo{
"source": {
Name: "content",
Asset: &tfbridge.AssetTranslation{
Kind: tfbridge.FileArchive,
Format: resource.ZIPArchive,
},
},
// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions#storage
// Max length of a blob name is 1024.
azureName: tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 1024,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
}),
},
}
// Provide default values for certain resource properties, to improve usability:
// 1) For all resources with `name` properties, we will add an auto-name property. Make sure to skip those
// that already have a name mapping entry, since those may have custom overrides set above (e.g., for length).
// 2) For all resources with `location` properties, default to the resource group's location to which the
// resource belongs. This ensures that each resource doesn't need to be given a location explicitly.
rgRegionMap := make(map[string]string)
for resname, res := range prov.Resources {
if schema := p.ResourcesMap[resname]; schema != nil {
// Only apply automatic values for input properties (Optional || Required) named `name`
if tfs, has := schema.Schema[azureName]; has && (tfs.Optional || tfs.Required) {
if _, hasfield := res.Fields[azureName]; !hasfield {
if res.Fields == nil {
res.Fields = make(map[string]*tfbridge.SchemaInfo)
}
// Use conservative options that apply broadly for Azure. See
// https://docs.microsoft.com/en-us/azure/architecture/best-practices/naming-conventions for
// details.
res.Fields[azureName] = tfbridge.AutoNameWithCustomOptions(azureName, tfbridge.AutoNameOptions{
Separator: "",
Maxlen: 24,
Randlen: 8,
Transform: func(name string) string {
return strings.ToLower(name)
},
})
}
}
if tfs, has := schema.Schema[azureLocation]; has && (tfs.Optional || tfs.Required) {
if _, hasfield := res.Fields[azureLocation]; !hasfield {
if res.Fields == nil {
res.Fields = make(map[string]*tfbridge.SchemaInfo)
}
res.Fields[azureLocation] = &tfbridge.SchemaInfo{
Name: azureLocation,
Default: &tfbridge.DefaultInfo{
From: func(res *tfbridge.PulumiResource) (interface{}, error) {
// Fetch the resource group property from this resource and use it to query the
// Azure API for the resource group's location. A small cache avoids querying the
// API repeatedly. Note that during previews the location is often unknown, so we
// use special logic to propagate likewise-unknown location values.
if rg, has := res.Properties["resourceGroupName"]; has {
if rg.IsComputed() || rg.IsOutput() {
return tfbridge.TerraformUnknownVariableValue, nil
}
if rg.IsString() {
rgName := rg.StringValue()
rgRegion, has := rgRegionMap[rgName]
if !has {
rgRes := p.ResourcesMap["azurerm_resource_group"]
contract.Assert(rgRes != nil)
rgData := rgRes.Data(&terraform.InstanceState{
// Mock up a URI with the relevant pieces, so that we can read back
// the resource group's location information.
ID: fmt.Sprintf("/subscriptions/_/resourceGroups/%s", rg.StringValue()),
})
if err := rgRes.Read(rgData, p.Meta()); err != nil {
return nil, err
}
if rgData.Id() == "" {
rgRegion = tfbridge.TerraformUnknownVariableValue
} else {
rgRegion = azure.NormalizeLocation(rgData.Get("location"))
}
rgRegionMap[rgName] = rgRegion // memoize the value.
}
return rgRegion, nil
}
}
return nil, nil
},
},
}
}
}
}
}
return prov
}
|
["\"MSI_ENDPOINT\""] | [] | ["MSI_ENDPOINT"] | [] | ["MSI_ENDPOINT"] | go | 1 | 0 | |
script/cloud_images_segmentation_utillity_script.py
|
# Dependencies
import os
import cv2
import math
import random
import shutil
import warnings
warnings.simplefilter('ignore')
import numpy as np
import pandas as pd
import seaborn as sns
import multiprocessing as mp
import albumentations as albu
import matplotlib.pyplot as plt
from tensorflow import set_random_seed
from sklearn.model_selection import train_test_split
from keras import optimizers
from keras import backend as K
from keras.utils import Sequence
from keras.losses import binary_crossentropy
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, Callback, CSVLogger
# Required repositories
#os.system('pip install segmentation-models')
#os.system('pip install keras-rectified-adam')
from keras_radam import RAdam
import segmentation_models as sm
# Misc
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
set_random_seed(seed)
# Segmentation related
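# Decode a Kaggle-style run-length string into a binary (0/1) mask of the given shape;
# pixels are ordered column-major, hence the order='F' reshape below.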
def rle_decode(mask_rle, shape=(1400, 2100)):
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 1
return img.reshape(shape, order='F') # Needed to align to RLE direction
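# Variant decoder that returns a 0/255 mask for an explicit height/width and treats
# an RLE value of -1 as "no mask".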
def rle_to_mask(rle_string, height, width):
rows, cols = height, width
if rle_string == -1:
return np.zeros((height, width))
else:
rle_numbers = [int(num_string) for num_string in rle_string.split(' ')]
rle_pairs = np.array(rle_numbers).reshape(-1,2)
img = np.zeros(rows*cols, dtype=np.uint8)
for index, length in rle_pairs:
index -= 1
img[index:index+length] = 255
img = img.reshape(cols,rows)
img = img.T
return img
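# Pixel area of the mask stored at df.loc[index][column_name]; a missing (NaN) RLE counts as area 0.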
def get_mask_area(df, index, column_name, shape=(1400, 2100)):
rle = df.loc[index][column_name]
try:
math.isnan(rle)
np_mask = np.zeros((shape[0], shape[1], 3))
except:
np_mask = rle_to_mask(rle, shape[0], shape[1])
np_mask = np.clip(np_mask, 0, 1)
return int(np.sum(np_mask))
def np_resize(img, input_shape):
"""
Resize a numpy array to input_shape=(height, width),
as opposed to the (width, height) ordering cv2.resize expects
"""
height, width = input_shape
return cv2.resize(img, (width, height))
def mask2rle(img):
'''
img: numpy array, 1 - mask, 0 - background
Returns the run-length encoding as a space-separated string
'''
pixels= img.T.flatten()
pixels = np.concatenate([[0], pixels, [0]])
runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
runs[1::2] -= runs[::2]
return ' '.join(str(x) for x in runs)
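# Encode each depth slice of an (H, W, depth) mask array as an RLE string, optionally resizing first.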
def build_rles(masks, reshape=None):
width, height, depth = masks.shape
rles = []
for i in range(depth):
mask = masks[:, :, i]
if reshape:
mask = mask.astype(np.float32)
mask = np_resize(mask, reshape).astype(np.int64)
rle = mask2rle(mask)
rles.append(rle)
return rles
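# Inverse of build_rles: decode each RLE string and stack the masks into an (H, W, depth) array,
# optionally resized to `reshape`.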
def build_masks(rles, input_shape, reshape=None):
depth = len(rles)
if reshape is None:
masks = np.zeros((*input_shape, depth))
else:
masks = np.zeros((*reshape, depth))
for i, rle in enumerate(rles):
if type(rle) is str:
if reshape is None:
masks[:, :, i] = rle2mask(rle, input_shape)
else:
mask = rle2mask(rle, input_shape)
reshaped_mask = np_resize(mask, reshape)
masks[:, :, i] = reshaped_mask
return masks
def rle2mask(rle, input_shape):
width, height = input_shape[:2]
mask = np.zeros( width*height ).astype(np.uint8)
array = np.asarray([int(x) for x in rle.split()])
starts = array[0::2]
lengths = array[1::2]
current_position = 0
for index, start in enumerate(starts):
mask[int(start):int(start+lengths[index])] = 1
current_position += lengths[index]
return mask.reshape(height, width).T
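# NumPy Dice coefficient used for offline evaluation; dice_coef below is the smoothed
# Keras-backend version used as a training metric.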
def dice_coefficient(y_true, y_pred):
y_true = np.asarray(y_true).astype(np.bool)
y_pred = np.asarray(y_pred).astype(np.bool)
intersection = np.logical_and(y_true, y_pred)
return (2. * intersection.sum()) / (y_true.sum() + y_pred.sum())
def dice_coef(y_true, y_pred, smooth=1):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
# Data pre-process
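# Read an image from base_path, convert BGR -> RGB, resize to (WIDTH, HEIGHT) and write it to save_path.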
def preprocess_image(image_id, base_path, save_path, HEIGHT, WIDTH):
image = cv2.imread(base_path + image_id)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (WIDTH, HEIGHT))
cv2.imwrite(save_path + image_id, image)
def pre_process_set(df, preprocess_fn):
#n_cpu = mp.cpu_count()
#df_n_cnt = df.shape[0]//n_cpu
#pool = mp.Pool(n_cpu)
#
#dfs = [df.iloc[df_n_cnt*i:df_n_cnt*(i+1)] for i in range(n_cpu)]
#dfs[-1] = df.iloc[df_n_cnt*(n_cpu-1):]
#res = pool.map(preprocess_fn, [x_df for x_df in dfs])
#pool.close()
res = preprocess_fn(df)
return res
# def preprocess_data(df, HEIGHT=HEIGHT, WIDTH=WIDTH):
# df = df.reset_index()
# for i in range(df.shape[0]):
# item = df.iloc[i]
# image_id = item['image']
# item_set = item['set']
# if item_set == 'train':
# preprocess_image(image_id, train_base_path, train_images_dest_path, HEIGHT, WIDTH)
# if item_set == 'validation':
# preprocess_image(image_id, train_base_path, validation_images_dest_path, HEIGHT, WIDTH)
# if item_set == 'test':
# preprocess_image(image_id, test_base_path, test_images_dest_path, HEIGHT, WIDTH)
# Model evaluation
def plot_metrics(history, metric_list=['loss', 'dice_coef'], figsize=(22, 14)):
fig, axes = plt.subplots(len(metric_list), 1, sharex='col', figsize=(22, len(metric_list)*4))
axes = axes.flatten()
for index, metric in enumerate(metric_list):
axes[index].plot(history[metric], label='Train %s' % metric)
axes[index].plot(history['val_%s' % metric], label='Validation %s' % metric)
axes[index].legend(loc='best')
axes[index].set_title(metric)
plt.xlabel('Epochs')
sns.despine()
plt.show()
# Model post process
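# Binarize a predicted probability map at `threshold` and keep only connected components
# larger than min_size pixels.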
def post_process(probability, threshold=0.5, min_size=10000):
mask = cv2.threshold(probability, threshold, 1, cv2.THRESH_BINARY)[1]
num_component, component = cv2.connectedComponents(mask.astype(np.uint8))
predictions = np.zeros(probability.shape, np.float32)
for c in range(1, num_component):
p = (component == c)
if p.sum() > min_size:
predictions[p] = 1
return predictions
# Prediction evaluation
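# Compute per-class Dice scores (raw and post-processed) over the dataframe in batches of 300
# images and return them as a summary dataframe indexed by class name.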
def get_metrics(model, target_df, df, df_images_dest_path, tresholds, min_mask_sizes, N_CLASSES=4, seed=0, preprocessing=None, set_name='Complete set'):
class_names = ['Fish', 'Flower', 'Gravel', 'Sugar']
metrics = []
for class_name in class_names:
metrics.append([class_name, 0, 0])
metrics_df = pd.DataFrame(metrics, columns=['Class', 'Dice', 'Dice Post'])
for i in range(0, df.shape[0], 300):
batch_idx = list(range(i, min(df.shape[0], i + 300)))
batch_set = df[batch_idx[0]: batch_idx[-1]+1]
ratio = len(batch_set) / len(df)
generator = DataGenerator(
directory=df_images_dest_path,
dataframe=batch_set,
target_df=target_df,
batch_size=len(batch_set),
target_size=model.input_shape[1:3],
n_channels=model.input_shape[3],
n_classes=N_CLASSES,
preprocessing=preprocessing,
seed=seed,
mode='fit',
shuffle=False)
x, y = generator.__getitem__(0)
preds = model.predict(x)
for class_index in range(N_CLASSES):
class_score = []
class_score_post = []
mask_class = y[..., class_index]
pred_class = preds[..., class_index]
for index in range(len(batch_idx)):
sample_mask = mask_class[index, ]
sample_pred = pred_class[index, ]
sample_pred_post = post_process(sample_pred, threshold=tresholds[class_index], min_size=min_mask_sizes[class_index])
if (sample_mask.sum() == 0) & (sample_pred.sum() == 0):
dice_score = 1.
else:
dice_score = dice_coefficient(sample_pred, sample_mask)
if (sample_mask.sum() == 0) & (sample_pred_post.sum() == 0):
dice_score_post = 1.
else:
dice_score_post = dice_coefficient(sample_pred_post, sample_mask)
class_score.append(dice_score)
class_score_post.append(dice_score_post)
metrics_df.loc[metrics_df['Class'] == class_names[class_index], 'Dice'] += np.mean(class_score) * ratio
metrics_df.loc[metrics_df['Class'] == class_names[class_index], 'Dice Post'] += np.mean(class_score_post) * ratio
metrics_df = metrics_df.append({'Class':set_name, 'Dice':np.mean(metrics_df['Dice'].values), 'Dice Post':np.mean(metrics_df['Dice Post'].values)}, ignore_index=True).set_index('Class')
return metrics_df
def inspect_predictions(df, image_ids, images_dest_path, pred_col=None, label_col='EncodedPixels', title_col='Image_Label', img_shape=(525, 350), figsize=(22, 6)):
if pred_col:
for sample in image_ids:
sample_df = df[df['image'] == sample]
fig, axes = plt.subplots(2, 5, figsize=figsize)
img = cv2.imread(images_dest_path + sample_df['image'].values[0])
img = cv2.resize(img, img_shape)
axes[0][0].imshow(img)
axes[1][0].imshow(img)
axes[0][0].set_title('Label', fontsize=16)
axes[1][0].set_title('Predicted', fontsize=16)
axes[0][0].axis('off')
axes[1][0].axis('off')
for i in range(4):
mask = sample_df[label_col].values[i]
try:
math.isnan(mask)
mask = np.zeros((img_shape[1], img_shape[0]))
except:
mask = rle_decode(mask)
axes[0][i+1].imshow(mask)
axes[1][i+1].imshow(rle2mask(sample_df[pred_col].values[i], img.shape))
axes[0][i+1].set_title(sample_df[title_col].values[i], fontsize=18)
axes[1][i+1].set_title(sample_df[title_col].values[i], fontsize=18)
axes[0][i+1].axis('off')
axes[1][i+1].axis('off')
plt.show()
else:
for sample in image_ids:
sample_df = df[df['image'] == sample]
fig, axes = plt.subplots(1, 5, figsize=figsize)
img = cv2.imread(images_dest_path + sample_df['image'].values[0])
img = cv2.resize(img, img_shape)
axes[0].imshow(img)
axes[0].set_title('Original', fontsize=16)
axes[0].axis('off')
for i in range(4):
axes[i+1].imshow(rle2mask(sample_df[label_col].values[i], img.shape))
axes[i+1].set_title(sample_df[title_col].values[i], fontsize=18)
axes[i+1].axis('off')
plt.show()
# Data generator
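# Keras Sequence that streams image batches from disk. In 'fit' mode it also builds the matching
# segmentation masks from the RLE labels in target_df; in 'predict' mode it yields images only.
# Illustrative usage (hypothetical names - paths and dataframes depend on the surrounding pipeline):
#   train_gen = DataGenerator(dataframe=train_df, directory=train_images_dest_path, target_df=mask_df,
#                             batch_size=32, n_channels=3, target_size=(320, 480), n_classes=4, mode='fit')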
class DataGenerator(Sequence):
def __init__(self, dataframe, directory, batch_size, n_channels, target_size, n_classes,
mode='fit', target_df=None, shuffle=True, preprocessing=None, augmentation=None, seed=0):
self.batch_size = batch_size
self.dataframe = dataframe
self.mode = mode
self.directory = directory
self.target_df = target_df
self.target_size = target_size
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.augmentation = augmentation
self.preprocessing = preprocessing
self.seed = seed
self.mask_shape = (1400, 2100)
self.list_IDs = self.dataframe.index
if self.seed is not None:
np.random.seed(self.seed)
self.on_epoch_end()
def __len__(self):
return len(self.list_IDs) // self.batch_size
def __getitem__(self, index):
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
list_IDs_batch = [self.list_IDs[k] for k in indexes]
X = self.__generate_X(list_IDs_batch)
if self.mode == 'fit':
Y = self.__generate_Y(list_IDs_batch)
if self.augmentation:
X, Y = self.__augment_batch(X, Y)
return X, Y
elif self.mode == 'predict':
return X
def on_epoch_end(self):
self.indexes = np.arange(len(self.list_IDs))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __generate_X(self, list_IDs_batch):
X = np.empty((self.batch_size, *self.target_size, self.n_channels))
for i, ID in enumerate(list_IDs_batch):
img_name = self.dataframe['image'].loc[ID]
img_path = self.directory + img_name
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.preprocessing:
img = self.preprocessing(img)
X[i,] = img
return X
def __generate_Y(self, list_IDs_batch):
Y = np.empty((self.batch_size, *self.target_size, self.n_classes), dtype=int)
for i, ID in enumerate(list_IDs_batch):
img_name = self.dataframe['image'].loc[ID]
image_df = self.target_df[self.target_df['image'] == img_name]
rles = image_df['EncodedPixels'].values
masks = build_masks(rles, input_shape=self.mask_shape, reshape=self.target_size)
Y[i, ] = masks
return Y
def __augment_batch(self, X_batch, Y_batch):
for i in range(X_batch.shape[0]):
X_batch[i, ], Y_batch[i, ] = self.__random_transform(X_batch[i, ], Y_batch[i, ])
return X_batch, Y_batch
def __random_transform(self, X, Y):
composed = self.augmentation(image=X, mask=Y)
X_aug = composed['image']
Y_aug = composed['mask']
return X_aug, Y_aug
# Learning rate schedulers
class CyclicLR(Callback):
"""This callback implements a cyclical learning rate policy (CLR).
The method cycles the learning rate between two boundaries with
some constant frequency.
# Arguments
base_lr: initial learning rate which is the
lower boundary in the cycle.
max_lr: upper boundary in the cycle. Functionally,
it defines the cycle amplitude (max_lr - base_lr).
The lr at any cycle is the sum of base_lr
and some scaling of the amplitude; therefore
max_lr may not actually be reached depending on
scaling function.
step_size: number of training iterations per
half cycle. The authors suggest setting step_size
to 2-8x the number of training iterations per epoch.
mode: one of {triangular, triangular2, exp_range}.
Default 'triangular'.
Values correspond to policies detailed above.
If scale_fn is not None, this argument is ignored.
gamma: constant in 'exp_range' scaling function:
gamma**(cycle iterations)
scale_fn: Custom scaling policy defined by a single
argument lambda function, where
0 <= scale_fn(x) <= 1 for all x >= 0.
the mode parameter is ignored
scale_mode: {'cycle', 'iterations'}.
Defines whether scale_fn is evaluated on
cycle number or cycle iterations (training
iterations since start of cycle). Default is 'cycle'.
The amplitude of the cycle can be scaled on a per-iteration or
per-cycle basis.
This class has three built-in policies, as put forth in the paper.
"triangular":
A basic triangular cycle w/ no amplitude scaling.
"triangular2":
A basic triangular cycle that scales initial amplitude by half each cycle.
"exp_range":
A cycle that scales initial amplitude by gamma**(cycle iterations) at each
cycle iteration.
For more detail, please see paper.
# Example for CIFAR-10 w/ batch size 100:
```python
clr = CyclicLR(base_lr=0.001, max_lr=0.006,
step_size=2000., mode='triangular')
model.fit(X_train, Y_train, callbacks=[clr])
```
Class also supports custom scaling functions:
```python
clr_fn = lambda x: 0.5*(1+np.sin(x*np.pi/2.))
clr = CyclicLR(base_lr=0.001, max_lr=0.006,
step_size=2000., scale_fn=clr_fn,
scale_mode='cycle')
model.fit(X_train, Y_train, callbacks=[clr])
```
# References
- [Cyclical Learning Rates for Training Neural Networks](
https://arxiv.org/abs/1506.01186)
"""
def __init__(self,
base_lr=0.001,
max_lr=0.006,
step_size=2000.,
mode='triangular',
gamma=1.,
scale_fn=None,
scale_mode='cycle'):
super(CyclicLR, self).__init__()
if mode not in ['triangular', 'triangular2', 'exp_range']:
raise KeyError("mode must be one of 'triangular', ""'triangular2', or 'exp_range'")
self.base_lr = base_lr
self.max_lr = max_lr
self.step_size = step_size
self.mode = mode
self.gamma = gamma
if scale_fn is None:
if self.mode == 'triangular':
self.scale_fn = lambda x: 1.
self.scale_mode = 'cycle'
elif self.mode == 'triangular2':
self.scale_fn = lambda x: 1 / (2.**(x - 1))
self.scale_mode = 'cycle'
elif self.mode == 'exp_range':
self.scale_fn = lambda x: gamma ** x
self.scale_mode = 'iterations'
else:
self.scale_fn = scale_fn
self.scale_mode = scale_mode
self.clr_iterations = 0.
self.trn_iterations = 0.
self.history = {}
self._reset()
def _reset(self, new_base_lr=None, new_max_lr=None,
new_step_size=None):
if new_base_lr is not None:
self.base_lr = new_base_lr
if new_max_lr is not None:
self.max_lr = new_max_lr
if new_step_size is not None:
self.step_size = new_step_size
self.clr_iterations = 0.
def clr(self):
cycle = np.floor(1 + self.clr_iterations / (2 * self.step_size))
x = np.abs(self.clr_iterations / self.step_size - 2 * cycle + 1)
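# Illustrative worked example (assumed numbers, not taken from this notebook):
# with base_lr=0.001, max_lr=0.006, step_size=2000 and clr_iterations=1000,
# cycle = floor(1 + 1000/4000) = 1 and x = |1000/2000 - 2 + 1| = 0.5, so the
# 'triangular' policy below gives 0.001 + (0.006 - 0.001) * (1 - 0.5) = 0.0035,
# i.e. halfway up the ramp.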
if self.scale_mode == 'cycle':
return self.base_lr + (self.max_lr - self.base_lr) * \
np.maximum(0, (1 - x)) * self.scale_fn(cycle)
else:
return self.base_lr + (self.max_lr - self.base_lr) * \
np.maximum(0, (1 - x)) * self.scale_fn(self.clr_iterations)
def on_train_begin(self, logs=None):
logs = logs or {}
if self.clr_iterations == 0:
K.set_value(self.model.optimizer.lr, self.base_lr)
else:
K.set_value(self.model.optimizer.lr, self.clr())
def on_batch_end(self, epoch, logs=None):
logs = logs or {}
self.trn_iterations += 1
self.clr_iterations += 1
K.set_value(self.model.optimizer.lr, self.clr())
self.history.setdefault(
'lr', []).append(
K.get_value(
self.model.optimizer.lr))
self.history.setdefault('iterations', []).append(self.trn_iterations)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def cosine_decay_with_warmup(global_step,
learning_rate_base,
total_steps,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0):
"""
Cosine decay schedule with warm up period.
In this schedule, the learning rate grows linearly from warmup_learning_rate
to learning_rate_base for warmup_steps, then transitions to a cosine decay
schedule.
:param global_step {int}: global step.
:param learning_rate_base {float}: base learning rate.
:param total_steps {int}: total number of training steps.
:param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
:param warmup_steps {int}: number of warmup steps. (default: {0}).
:param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
:return: a float representing the current learning rate.
:raises ValueError: if warmup_learning_rate is larger than learning_rate_base, or if warmup_steps is larger than total_steps.
"""
if total_steps < warmup_steps:
raise ValueError('total_steps must be larger than or equal to warmup_steps.')
learning_rate = 0.5 * learning_rate_base * (1 + np.cos(
np.pi *
(global_step - warmup_steps - hold_base_rate_steps
) / float(total_steps - warmup_steps - hold_base_rate_steps)))
if hold_base_rate_steps > 0:
learning_rate = np.where(global_step > warmup_steps + hold_base_rate_steps,
learning_rate, learning_rate_base)
if warmup_steps > 0:
if learning_rate_base < warmup_learning_rate:
raise ValueError('learning_rate_base must be larger than or equal to warmup_learning_rate.')
slope = (learning_rate_base - warmup_learning_rate) / warmup_steps
warmup_rate = slope * global_step + warmup_learning_rate
learning_rate = np.where(global_step < warmup_steps, warmup_rate,
learning_rate)
return np.where(global_step > total_steps, 0.0, learning_rate)
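# Illustrative check of the schedule above (assumed numbers, not from the
# original notebook): with learning_rate_base=1e-3, total_steps=1000,
# warmup_learning_rate=0.0, warmup_steps=100 and hold_base_rate_steps=0, the
# rate ramps linearly to 1e-3 at step 100, then follows
# 0.5e-3 * (1 + cos(pi * (step - 100) / 900)), passing 0.5e-3 at step 550 and
# reaching 0.0 at step 1000.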
class WarmUpCosineDecayScheduler(Callback):
"""Cosine decay with warmup learning rate scheduler"""
def __init__(self,
learning_rate_base,
total_steps,
global_step_init=0,
warmup_learning_rate=0.0,
warmup_steps=0,
hold_base_rate_steps=0,
verbose=0):
"""
Constructor for cosine decay with warmup learning rate scheduler.
:param learning_rate_base {float}: base learning rate.
:param total_steps {int}: total number of training steps.
:param global_step_init {int}: initial global step, e.g. from previous checkpoint.
:param warmup_learning_rate {float}: initial learning rate for warm up. (default: {0.0}).
:param warmup_steps {int}: number of warmup steps. (default: {0}).
:param hold_base_rate_steps {int}: Optional number of steps to hold base learning rate before decaying. (default: {0}).
:param verbose {int}: 0: quiet, 1: update messages. (default: {0}).
"""
super(WarmUpCosineDecayScheduler, self).__init__()
self.learning_rate_base = learning_rate_base
self.total_steps = total_steps
self.global_step = global_step_init
self.warmup_learning_rate = warmup_learning_rate
self.warmup_steps = warmup_steps
self.hold_base_rate_steps = hold_base_rate_steps
self.verbose = verbose
self.learning_rates = []
def on_batch_end(self, batch, logs=None):
self.global_step = self.global_step + 1
lr = K.get_value(self.model.optimizer.lr)
self.learning_rates.append(lr)
def on_batch_begin(self, batch, logs=None):
lr = cosine_decay_with_warmup(global_step=self.global_step,
learning_rate_base=self.learning_rate_base,
total_steps=self.total_steps,
warmup_learning_rate=self.warmup_learning_rate,
warmup_steps=self.warmup_steps,
hold_base_rate_steps=self.hold_base_rate_steps)
K.set_value(self.model.optimizer.lr, lr)
if self.verbose > 0:
print('\nBatch %02d: setting learning rate to %s.' % (self.global_step + 1, lr))
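# A minimal usage sketch for the scheduler above (added for illustration; the
# model, data and numbers here are assumptions, not values from this notebook).
# It shows how total_steps and warmup_steps are derived from the batch size and
# epoch count before handing the callback to model.fit().
def example_fit_with_warmup_cosine(model, X_train, Y_train, epochs=30, batch_size=16):
    steps_per_epoch = int(np.ceil(len(X_train) / batch_size))
    warm_up_lr = WarmUpCosineDecayScheduler(
        learning_rate_base=1e-3,
        total_steps=epochs * steps_per_epoch,   # must cover the whole run
        warmup_learning_rate=1e-5,
        warmup_steps=5 * steps_per_epoch,       # roughly 5 warmup epochs
        hold_base_rate_steps=0,
        verbose=0)
    # The callback recomputes the optimizer lr at the start of every batch.
    return model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,
                     callbacks=[warm_up_lr])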
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
cloud-control-manager/cloud-driver/drivers/openstack/resources/ImageHandler.go
|
package resources
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack/compute/v2/images"
"github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata"
imgsvc "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images"
call "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/call-log"
irs "github.com/cloud-barista/cb-spider/cloud-control-manager/cloud-driver/interfaces/resources"
)
const (
Image = "IMAGE"
)
type OpenStackImageHandler struct {
Client *gophercloud.ServiceClient
ImageClient *gophercloud.ServiceClient
}
func setterImage(image images.Image) *irs.ImageInfo {
imageInfo := &irs.ImageInfo{
IId: irs.IID{
NameId: image.Name,
SystemId: image.ID,
},
Status: image.Status,
}
// Register image metadata as a key/value list
var metadataList []irs.KeyValue
for key, val := range image.Metadata {
metadata := irs.KeyValue{
Key: key,
Value: val.(string),
}
metadataList = append(metadataList, metadata)
}
imageInfo.KeyValueList = metadataList
return imageInfo
}
func (imageHandler *OpenStackImageHandler) CreateImage(imageReqInfo irs.ImageReqInfo) (irs.ImageInfo, error) {
// log HisCall
hiscallInfo := GetCallLogScheme(imageHandler.Client.IdentityEndpoint, call.VMIMAGE, imageReqInfo.IId.NameId, "CreateImage()")
// @TODO: need to define the request parameters for image creation
type ImageReqInfo struct {
Name string
ContainerFormat string
DiskFormat string
}
reqInfo := ImageReqInfo{
Name: imageReqInfo.IId.NameId,
ContainerFormat: "bare",
DiskFormat: "iso",
}
createOpts := imgsvc.CreateOpts{
Name: reqInfo.Name,
ContainerFormat: reqInfo.ContainerFormat,
DiskFormat: reqInfo.DiskFormat,
}
// Check Image file exists
rootPath := os.Getenv("CBSPIDER_ROOT")
imageFilePath := fmt.Sprintf("%s/image/%s.iso", rootPath, reqInfo.Name)
if _, err := os.Stat(imageFilePath); os.IsNotExist(err) {
createErr := errors.New(fmt.Sprintf("image file does not exist at path %s", imageFilePath))
cblogger.Error(createErr.Error())
LoggingError(hiscallInfo, createErr)
return irs.ImageInfo{}, createErr
}
// Create Image
start := call.Start()
image, err := imgsvc.Create(imageHandler.ImageClient, createOpts).Extract()
if err != nil {
cblogger.Error(err.Error())
LoggingError(hiscallInfo, err)
return irs.ImageInfo{}, err
}
LoggingInfo(hiscallInfo, start)
// Upload Image file
imageBytes, err := ioutil.ReadFile(imageFilePath)
if err != nil {
cblogger.Error(err.Error())
LoggingError(hiscallInfo, err)
return irs.ImageInfo{}, err
}
result := imagedata.Upload(imageHandler.ImageClient, image.ID, bytes.NewReader(imageBytes))
if result.Err != nil {
cblogger.Error(result.Err.Error())
LoggingError(hiscallInfo, result.Err)
return irs.ImageInfo{}, result.Err
}
// Return the info of the created image
mappedImageInfo := images.Image{
ID: image.ID,
Created: image.CreatedAt.String(),
MinDisk: image.MinDiskGigabytes,
MinRAM: image.MinRAMMegabytes,
Name: image.Name,
Status: string(image.Status),
Updated: image.UpdatedAt.String(),
Metadata: image.Properties,
}
imageInfo := setterImage(mappedImageInfo)
return *imageInfo, nil
}
func (imageHandler *OpenStackImageHandler) ListImage() ([]*irs.ImageInfo, error) {
// log HisCall
hiscallInfo := GetCallLogScheme(imageHandler.Client.IdentityEndpoint, call.VMIMAGE, Image, "ListImage()")
start := call.Start()
pager, err := images.ListDetail(imageHandler.Client, images.ListOpts{}).AllPages()
if err != nil {
cblogger.Error(err.Error())
LoggingError(hiscallInfo, err)
return nil, err
}
LoggingInfo(hiscallInfo, start)
imageList, err := images.ExtractImages(pager)
if err != nil {
cblogger.Error(err.Error())
LoggingError(hiscallInfo, err)
return nil, err
}
imageInfoList := make([]*irs.ImageInfo, len(imageList))
for i, img := range imageList {
imageInfo := setterImage(img)
imageInfoList[i] = imageInfo
}
return imageInfoList, nil
}
func (imageHandler *OpenStackImageHandler) GetImage(imageIID irs.IID) (irs.ImageInfo, error) {
// log HisCall
hiscallInfo := GetCallLogScheme(imageHandler.Client.IdentityEndpoint, call.VMIMAGE, imageIID.NameId, "GetImage()")
imageId, err := imageHandler.IDFromName(imageHandler.Client, imageIID.NameId)
if err != nil {
cblogger.Error(err.Error())
LoggingError(hiscallInfo, err)
return irs.ImageInfo{}, err
}
start := call.Start()
image, err := images.Get(imageHandler.Client, imageId).Extract()
if err != nil {
cblogger.Error(err.Error())
LoggingError(hiscallInfo, err)
return irs.ImageInfo{}, err
}
LoggingInfo(hiscallInfo, start)
imageInfo := setterImage(*image)
return *imageInfo, nil
}
func (imageHandler *OpenStackImageHandler) DeleteImage(imageIID irs.IID) (bool, error) {
// log HisCall
hiscallInfo := GetCallLogScheme(imageHandler.Client.IdentityEndpoint, call.VMIMAGE, imageIID.NameId, "DeleteImage()")
imageId, err := imageHandler.IDFromName(imageHandler.Client, imageIID.NameId)
if err != nil {
cblogger.Error(err.Error())
LoggingError(hiscallInfo, err)
return false, err
}
start := call.Start()
err = images.Delete(imageHandler.Client, imageId).ExtractErr()
if err != nil {
cblogger.Error(err.Error())
LoggingError(hiscallInfo, err)
return false, err
}
LoggingInfo(hiscallInfo, start)
return true, nil
}
func (imageHandler *OpenStackImageHandler) IDFromName(serviceClient *gophercloud.ServiceClient, imageName string) (string, error) {
pager, err := images.ListDetail(serviceClient, images.ListOpts{Name: imageName}).AllPages()
if err != nil {
return "", err
}
imageList, err := images.ExtractImages(pager)
if err != nil {
return "", err
}
if len(imageList) > 1 {
return "", errors.New(fmt.Sprintf("found multiple images with name %s", imageName))
} else if len(imageList) == 0 {
return "", errors.New(fmt.Sprintf("could not found image with name %s", imageName))
}
return imageList[0].ID, nil
}
|
[
"\"CBSPIDER_ROOT\""
] |
[] |
[
"CBSPIDER_ROOT"
] |
[]
|
["CBSPIDER_ROOT"]
|
go
| 1 | 0 | |
util/getuccm.Src/uccm/GetUccm.java
|
package uccm;
import java.io.*;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.Enumeration;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
public class GetUccm {
public static void main(String[] argv) {
try {
URL uccm100Url = new URL("https://github.com/sudachen/uccm/archive/uccm100.zip");
System.out.println("it uses source "+uccm100Url.toString());
File uccmRepo;
if ( System.getenv("UCCM100REPO") != null ) {
uccmRepo = new File(System.getenv("UCCM100REPO"));
} else {
File appData = new File(System.getenv("LOCALAPPDATA"));
if ( !appData.exists() ) {
System.err.println("could not find local AppData");
System.exit(1);
}
uccmRepo = new File(appData,"uCcm100Repo");
}
if ( !uccmRepo.exists() )
uccmRepo.mkdir();
System.out.println("it uses UCCM repo "+uccmRepo.getAbsolutePath());
File uccm100Zip = File.createTempFile("uccm100", ".zip");
uccm100Zip.deleteOnExit();
OutputStream os = new FileOutputStream(uccm100Zip);
byte[] buffer = new byte[102400];
int totalBytesRead = 0;
int bytesRead = 0;
System.out.println("connecting...");
URLConnection connection = uccm100Url.openConnection();
InputStream is = connection.getInputStream();
System.out.println("getting ZIP-archive ...");
while ((bytesRead = is.read(buffer)) > 0) {
os.write(buffer, 0, bytesRead);
totalBytesRead += bytesRead;
}
os.close();
is.close();
System.out.println("unpacking ...");
ZipFile zf = new ZipFile(uccm100Zip);
for (Enumeration zfe = zf.entries(); zfe.hasMoreElements(); ) {
ZipEntry r = (ZipEntry)zfe.nextElement();
File f = new File(uccmRepo,r.getName());
if ( r.isDirectory() ) {
if (!f.exists()) f.mkdir();
} else {
os = new FileOutputStream(f);
is = zf.getInputStream(r);
int len = is.read(buffer);
while (len >= 0) {
os.write(buffer, 0, len);
len = is.read(buffer);
}
is.close();
os.close();
}
}
} catch (Exception e) {
e.printStackTrace();
System.exit(1);
}
}
}
|
[
"\"UCCM100REPO\"",
"\"UCCM100REPO\"",
"\"LOCALAPPDATA\""
] |
[] |
[
"UCCM100REPO",
"LOCALAPPDATA"
] |
[]
|
["UCCM100REPO", "LOCALAPPDATA"]
|
java
| 2 | 0 | |
scripts/test63.find_L_top_m_scale_m.py
|
#! python3
import os
import sys
import subprocess
if len(sys.argv) < 10:
print(f"{sys.argv[0]} <app> <data_dir> <data> <tag> <num_t> <L_low> <L_up> <M> <P_target> [<P_target> ...]")
exit()
app = sys.argv[1]
base_dir = sys.argv[2]
data = sys.argv[3]
tag = sys.argv[4]
num_t = int(sys.argv[5])
L_lower = int(sys.argv[6])
L_upper = int(sys.argv[7])
M = int(sys.argv[8])
base_loc_P_target = 9
targets = [sys.argv[i] for i in range(base_loc_P_target, len(sys.argv))]
P_level = " ".join(targets)
env_vars = os.environ
env_vars["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
bin = F"numactl -m 0 ./{app}"
if data == "sift1m":
data_dir = base_dir + "/sift1m"
data_name = "sift"
elif data == "gist1m":
data_dir = base_dir + "/gist1m"
data_name = "gist"
elif data == "deep10m":
data_dir = base_dir + "/deep1b"
data_name = "deep10M"
elif data == "sift100m":
data_dir = base_dir + "/sift1b"
data_name = "sift100M"
elif data == "deep100m":
data_dir = base_dir + "/deep1b"
data_name = "deep100M"
else:
print(F"Error: data {data} is unknown.")
exit()
label = F"{data}.{tag}"
raw_file = F"output.{label}.raw.txt"
subprocess.run(F':> {raw_file}', shell=True, check=True)
command = F"{bin} {data_dir}/{data_name}_base.fvecs {data_dir}/{data_name}_query.fvecs {data_dir}/{data_name}.nsg " \
F"{L_lower} 100 output.ivecs {M} {data_dir}/{data_name}.true-100_NN.v2.binary " \
F"{num_t} {L_upper} {P_level} " \
F"| tee -a {raw_file}"
subprocess.run(command, env=env_vars, shell=True, check=True)
rows_file = F"output.{label}.rows.txt"
table_file = F"output.{label}.table.txt"
subprocess.run(F"python3 ../scripts/output_surrounding.py {raw_file} {rows_file}", shell=True, check=True)
subprocess.run(F"python3 ../scripts/output_format.py {rows_file} {table_file} 0:14", shell=True, check=True)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
client-quickstart-java-master/src/main/java/com/twilio/Webapp.java
|
package com.twilio;
import static spark.Spark.get;
import static spark.Spark.post;
import static spark.Spark.staticFileLocation;
import static spark.Spark.afterAfter;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.apache.http.HttpStatus;
import java.util.HashMap;
import com.github.javafaker.Faker;
import com.google.gson.Gson;
// Token generation imports
import com.twilio.jwt.Jwt;
import com.twilio.jwt.client.ClientCapability;
import com.twilio.jwt.client.IncomingClientScope;
import com.twilio.jwt.client.OutgoingClientScope;
import com.twilio.jwt.client.Scope;
// TwiML generation imports
import com.twilio.twiml.VoiceResponse;
import com.twilio.twiml.Dial;
import com.twilio.twiml.Number;
import com.twilio.twiml.Client;
import com.twilio.twiml.Say;
public class Webapp {
static List<Agent> agents = new ArrayList<>();
public static void main(String[] args) {
// Serve static files from src/main/resources/public
staticFileLocation("/public");
// Create a Faker instance to generate a random username for the connecting user
Faker faker = new Faker();
agents.add(new Agent(1, "Anand", "991", true));
agents.add(new Agent(2, "Piyush", "992", true));
agents.add(new Agent(3, "Gorge", "993", true));
// Log all requests and responses
afterAfter(new LoggingFilter());
// Create a capability token using our Twilio credentials
get("/token", "application/json", (request, response) -> {
String acctSid = "-----------------------------";// System.getenv("TWILIO_ACCOUNT_SID");
String authToken = "---------------------------"; // System.getenv("TWILIO_AUTH_TOKEN");
String applicationSid = "------------------------"; //System.getenv("TWILIO_TWIML_APP_SID");
// Generate a random username for the connecting client
//String identity = faker.firstName() + faker.lastName() + faker.zipCode();
Optional<Agent> agent = agents.stream().filter(a->(a.isIdle())).findFirst();
if(!agent.isPresent()){
response.status(HttpStatus.SC_EXPECTATION_FAILED);
return null;
}
agent.get().setIdle(false);
String identity = agent.get().getName();
// Generate capability token
List<Scope> scopes = new ArrayList<>();
scopes.add(new IncomingClientScope(identity));
scopes.add(new OutgoingClientScope.Builder(applicationSid).build());
Jwt jwt = new ClientCapability.Builder(acctSid, authToken).scopes(scopes).build();
String token = jwt.toJwt();
// create JSON response payload
HashMap<String, String> json = new HashMap<>();
json.put("identity", identity);
json.put("token", token);
// Render JSON response
response.header("Content-Type", "application/json");
Gson gson = new Gson();
return gson.toJson(json);
});
// Create a capability token using our Twilio credentials
get("/release-token", "application/text", (request, response) -> {
String agentname = request.queryParams("agent");
agents.stream().filter(a->(agentname.equalsIgnoreCase(a.getName()))).forEach(a->a.setIdle(true));
response.status(HttpStatus.SC_OK);
return agentname + " is free now.";
});
// Generate voice TwiML
post("/voice", "application/x-www-form-urlencoded", (request, response) -> {
VoiceResponse voiceTwimlResponse;
String to = request.queryParams("To");
if (to != null) {
Dial.Builder dialBuilder = new Dial.Builder()
.callerId(System.getenv("TWILIO_CALLER_ID"));
// wrap the phone number or client name in the appropriate TwiML verb
// by checking if the number given has only digits and format symbols
if(to.matches("^[\\d\\+\\-\\(\\) ]+$")) {
dialBuilder = dialBuilder.number(new Number.Builder(to).build());
} else {
dialBuilder = dialBuilder.client(new Client.Builder(to).build());
}
voiceTwimlResponse = new VoiceResponse.Builder()
.dial(dialBuilder.build())
.build();
} else {
voiceTwimlResponse = new VoiceResponse.Builder()
.say(new Say.Builder("Thanks for calling!").build())
.build();
}
response.header("Content-Type", "text/xml");
return voiceTwimlResponse.toXml();
});
}
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\"",
"\"TWILIO_TWIML_APP_SID\"",
"\"TWILIO_CALLER_ID\""
] |
[] |
[
"TWILIO_TWIML_APP_SID",
"TWILIO_CALLER_ID",
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_TWIML_APP_SID", "TWILIO_CALLER_ID", "TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
java
| 4 | 0 | |
test/integration/test_interactivetools_api.py
|
"""Integration tests for realtime tools."""
import os
import tempfile
import pytest
import requests
from galaxy_test.base import api_asserts
from galaxy_test.base.populators import (
DatasetPopulator,
wait_on,
)
from galaxy_test.driver import integration_util
from .test_containerized_jobs import (
ContainerizedIntegrationTestCase,
disable_dependency_resolution,
DOCKERIZED_JOB_CONFIG_FILE,
)
from .test_kubernetes_staging import (
CONTAINERIZED_TEMPLATE,
job_config,
set_infrastucture_url,
)
SCRIPT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
EMBEDDED_PULSAR_JOB_CONFIG_FILE_DOCKER = os.path.join(SCRIPT_DIRECTORY, "embedded_pulsar_docker_job_conf.yml")
class BaseInteractiveToolsIntegrationTestCase(ContainerizedIntegrationTestCase):
framework_tool_and_types = True
container_type = "docker"
enable_realtime_mapping = True
def setUp(self):
super().setUp()
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
self.history_id = self.dataset_populator.new_history()
# Move helpers to populators.py
def wait_on_proxied_content(self, target):
def get_hosted_content():
try:
scheme, rest = target.split("://", 1)
prefix, host_and_port = rest.split(".interactivetool.")
faked_host = rest
if "/" in rest:
faked_host = rest.split("/", 1)[0]
url = f"{scheme}://{host_and_port}"
response = requests.get(url, timeout=1, headers={"Host": faked_host})
response.raise_for_status()
return response.text
except Exception as e:
print(e)
return None
content = wait_on(get_hosted_content, "realtime hosted content at %s" % target)
return content
def entry_point_target(self, entry_point_id):
entry_point_access_response = self._get("entry_points/%s/access" % entry_point_id)
api_asserts.assert_status_code_is(entry_point_access_response, 200)
access_json = entry_point_access_response.json()
api_asserts.assert_has_key(access_json, "target")
return access_json["target"]
def wait_on_entry_points_active(self, job_id, expected_num=1):
def active_entry_points():
entry_points = self.entry_points_for_job(job_id)
if len(entry_points) != expected_num:
return None
elif any(not e["active"] for e in entry_points):
job_json = self._get(f"jobs/{job_id}?full=true").json()
if job_json["state"] == "error":
raise Exception(f"Interactive tool job {job_id} failed: {job_json}")
return None
else:
return entry_points
# It currently takes at least 90 seconds until we can be sure the container monitor failed.
# Can be decreased when galaxy_ext/container_monitor/monitor.py changes
return wait_on(active_entry_points, "entry points to become active", timeout=120)
def entry_points_for_job(self, job_id):
entry_points_response = self._get("entry_points?job_id=%s" % job_id)
api_asserts.assert_status_code_is(entry_points_response, 200)
return entry_points_response.json()
class RunsInterativeToolTests:
def test_simple_execution(self):
response_dict = self.dataset_populator.run_tool("interactivetool_simple", {}, self.history_id)
assert "jobs" in response_dict, response_dict
jobs = response_dict["jobs"]
assert isinstance(jobs, list)
assert len(jobs) == 1
job0 = jobs[0]
entry_points = self.wait_on_entry_points_active(job0["id"])
assert len(entry_points) == 1
entry_point0 = entry_points[0]
target = self.entry_point_target(entry_point0["id"])
content = self.wait_on_proxied_content(target)
assert content == "moo cow\n", content
def test_multi_server_realtime_tool(self):
response_dict = self.dataset_populator.run_tool("interactivetool_two_entry_points", {}, self.history_id)
assert "jobs" in response_dict, response_dict
jobs = response_dict["jobs"]
assert isinstance(jobs, list)
assert len(jobs) == 1
job0 = jobs[0]
entry_points = self.wait_on_entry_points_active(job0["id"], expected_num=2)
entry_point0 = entry_points[0]
entry_point1 = entry_points[1]
target0 = self.entry_point_target(entry_point0["id"])
target1 = self.entry_point_target(entry_point1["id"])
assert target0 != target1
content0 = self.wait_on_proxied_content(target0)
assert content0 == "moo cow\n", content0
content1 = self.wait_on_proxied_content(target1)
assert content1 == "moo cow\n", content1
stop_response = self.dataset_populator._delete(f'entry_points/{entry_point0["id"]}')
stop_response.raise_for_status()
self.dataset_populator.wait_for_job(job0["id"], assert_ok=True)
job_details = self.dataset_populator.get_job_details(job0["id"], full=True)
job_details.raise_for_status()
job_details = job_details.json()
assert job_details["state"] == "ok"
it_output_details = self.dataset_populator.get_history_dataset_details_raw(
self.history_id, dataset_id=job_details["outputs"]["test_output"]["id"]
)
it_output_details.raise_for_status()
it_output_details = it_output_details.json()
assert it_output_details["state"] == "ok"
assert not it_output_details["deleted"]
class InteractiveToolsIntegrationTestCase(BaseInteractiveToolsIntegrationTestCase, RunsInterativeToolTests):
pass
class InteractiveToolsPulsarIntegrationTestCase(BaseInteractiveToolsIntegrationTestCase, RunsInterativeToolTests):
@classmethod
def handle_galaxy_config_kwds(cls, config):
config["job_config_file"] = EMBEDDED_PULSAR_JOB_CONFIG_FILE_DOCKER
config["galaxy_infrastructure_url"] = "http://localhost:$GALAXY_WEB_PORT"
disable_dependency_resolution(config)
class InteractiveToolsRemoteProxyIntegrationTestCase(BaseInteractiveToolsIntegrationTestCase, RunsInterativeToolTests):
"""
$ cd gx-it-proxy
$ ./lib/createdb.js --sessions $HOME/gxitexproxy.sqlite
$ ./lib/main.js --port 9001 --ip 0.0.0.0 --verbose --sessions $HOME/gxitexproxy.sqlite
$ # Need to create new DB for each test I think, duplicate IDs are the problem I think because each test starts at 1
$ GALAXY_TEST_EXTERNAL_PROXY_HOST="localhost:9001" GALAXY_TEST_EXTERNAL_PROXY_MAP="$HOME/gxitexproxy.sqlite" pytest -s test/integration/test_interactivetools_api.py::InteractiveToolsRemoteProxyIntegrationTestCase
"""
@classmethod
def handle_galaxy_config_kwds(cls, config):
interactivetools_map = os.environ.get("GALAXY_TEST_EXTERNAL_PROXY_MAP")
interactivetools_proxy_host = os.environ.get("GALAXY_TEST_EXTERNAL_PROXY_HOST")
if not interactivetools_map or not interactivetools_proxy_host:
pytest.skip(
f"External proxy not configured for test [map={interactivetools_map},host={interactivetools_proxy_host}]"
)
config["job_config_file"] = DOCKERIZED_JOB_CONFIG_FILE
config["interactivetools_proxy_host"] = interactivetools_proxy_host
config["interactivetools_map"] = interactivetools_map
disable_dependency_resolution(config)
@integration_util.skip_unless_kubernetes()
@integration_util.skip_unless_amqp()
@integration_util.skip_if_github_workflow()
class KubeInteractiveToolsRemoteProxyIntegrationTestCase(
BaseInteractiveToolsIntegrationTestCase, RunsInterativeToolTests
):
"""
$ git clone https://github.com/galaxyproject/gx-it-proxy.git $HOME/gx-it-proxy
$ cd $HOME/gx-it-proxy/docker/k8s
$ # Setup proxy inside K8 cluster with kubectl - including forwarding port 8910
$ bash run.sh
$ cd ../..  # back to the gx-it-proxy root
$ # Need new DB for every test.
$ rm -rf $HOME/gxitk8proxy.sqlite
$ ./lib/createdb.js --sessions $HOME/gxitk8proxy.sqlite
$ ./lib/main.js --port 9002 --ip 0.0.0.0 --verbose --sessions $HOME/gxitk8proxy.sqlite --forwardIP localhost --forwardPort 8910 &
$ cd back/to/galaxy
$ GALAXY_TEST_K8S_EXTERNAL_PROXY_HOST="localhost:9002" GALAXY_TEST_K8S_EXTERNAL_PROXY_MAP="$HOME/gxitk8proxy.sqlite" pytest -s test/integration/test_interactivetools_api.py::KubeInteractiveToolsRemoteProxyIntegrationTestCase
"""
@classmethod
def setUpClass(cls):
# realpath for docker deployed in a VM on Mac, also done in driver_util.
cls.jobs_directory = os.path.realpath(tempfile.mkdtemp())
super().setUpClass()
@classmethod
def handle_galaxy_config_kwds(cls, config):
interactivetools_map = os.environ.get("GALAXY_TEST_K8S_EXTERNAL_PROXY_MAP")
interactivetools_proxy_host = os.environ.get("GALAXY_TEST_K8S_EXTERNAL_PROXY_HOST")
if not interactivetools_map or not interactivetools_proxy_host:
pytest.skip(
f"External proxy not configured for test [map={interactivetools_map},host={interactivetools_proxy_host}]"
)
config["interactivetools_proxy_host"] = interactivetools_proxy_host
config["interactivetools_map"] = interactivetools_map
config["jobs_directory"] = cls.jobs_directory
config["file_path"] = cls.jobs_directory
config["job_config_file"] = job_config(CONTAINERIZED_TEMPLATE, cls.jobs_directory)
config["default_job_shell"] = "/bin/sh"
set_infrastucture_url(config)
disable_dependency_resolution(config)
|
[] |
[] |
[
"GALAXY_TEST_K8S_EXTERNAL_PROXY_MAP",
"GALAXY_TEST_K8S_EXTERNAL_PROXY_HOST",
"GALAXY_TEST_EXTERNAL_PROXY_MAP",
"GALAXY_TEST_EXTERNAL_PROXY_HOST"
] |
[]
|
["GALAXY_TEST_K8S_EXTERNAL_PROXY_MAP", "GALAXY_TEST_K8S_EXTERNAL_PROXY_HOST", "GALAXY_TEST_EXTERNAL_PROXY_MAP", "GALAXY_TEST_EXTERNAL_PROXY_HOST"]
|
python
| 4 | 0 | |
doc/build/conf.py
|
# -*- coding: utf-8 -*-
#
# SQLAlchemy documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 26 19:50:10 2008.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../../lib"))
sys.path.insert(0, os.path.abspath("../..")) # examples
sys.path.insert(0, os.path.abspath("."))
# -- General configuration --------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.5.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"zzzeeksphinx",
"changelog",
"sphinx_paramlinks",
]
needs_extensions = {"zzzeeksphinx": "1.2.1"}
# Add any paths that contain templates here, relative to this directory.
# not sure why abspath() is needed here, some users
# have reported this.
templates_path = [os.path.abspath("templates")]
nitpicky = False
# The suffix of source filenames.
source_suffix = ".rst"
# section names used by the changelog extension.
changelog_sections = [
"general",
"platform",
"orm",
"orm declarative",
"orm querying",
"orm configuration",
"engine",
"sql",
"schema",
"extensions",
"mypy",
"asyncio",
"postgresql",
"mysql",
"sqlite",
"mssql",
"oracle",
"firebird",
]
# tags to sort on inside of sections
changelog_inner_tag_sort = [
"feature",
"usecase",
"change",
"changed",
"performance",
"bug",
"deprecated",
"removed",
"renamed",
"moved",
]
# how to render changelog links
changelog_render_ticket = "https://www.sqlalchemy.org/trac/ticket/%s"
changelog_render_pullreq = {
"default": "https://github.com/sqlalchemy/sqlalchemy/pull/%s",
"github": "https://github.com/sqlalchemy/sqlalchemy/pull/%s",
}
changelog_render_changeset = "https://www.sqlalchemy.org/trac/changeset/%s"
exclude_patterns = ["build", "**/unreleased*/*", "*_include.rst"]
# zzzeeksphinx makes these conversions when it is rendering the
# docstrings classes, methods, and functions within the scope of
# Sphinx autodoc
autodocmods_convert_modname = {
"sqlalchemy.sql.sqltypes": "sqlalchemy.types",
"sqlalchemy.sql.type_api": "sqlalchemy.types",
"sqlalchemy.sql.schema": "sqlalchemy.schema",
"sqlalchemy.sql.elements": "sqlalchemy.sql.expression",
"sqlalchemy.sql.selectable": "sqlalchemy.sql.expression",
"sqlalchemy.sql.dml": "sqlalchemy.sql.expression",
"sqlalchemy.sql.ddl": "sqlalchemy.schema",
"sqlalchemy.sql.base": "sqlalchemy.sql.expression",
"sqlalchemy.sql.operators": "sqlalchemy.sql.expression",
"sqlalchemy.event.base": "sqlalchemy.event",
"sqlalchemy.engine.base": "sqlalchemy.engine",
"sqlalchemy.engine.url": "sqlalchemy.engine",
"sqlalchemy.engine.row": "sqlalchemy.engine",
"sqlalchemy.engine.cursor": "sqlalchemy.engine",
"sqlalchemy.engine.result": "sqlalchemy.engine",
"sqlalchemy.ext.asyncio.result": "sqlalchemy.ext.asyncio",
"sqlalchemy.ext.asyncio.engine": "sqlalchemy.ext.asyncio",
"sqlalchemy.ext.asyncio.session": "sqlalchemy.ext.asyncio",
"sqlalchemy.util._collections": "sqlalchemy.util",
"sqlalchemy.orm.attributes": "sqlalchemy.orm",
"sqlalchemy.orm.relationships": "sqlalchemy.orm",
"sqlalchemy.orm.interfaces": "sqlalchemy.orm",
"sqlalchemy.orm.query": "sqlalchemy.orm",
"sqlalchemy.orm.util": "sqlalchemy.orm",
}
autodocmods_convert_modname_w_class = {
("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine",
("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base",
}
# on the referencing side, a newer zzzeeksphinx extension
# applies shorthand symbols to references so that we can have short
# names that are still using absolute references.
zzzeeksphinx_module_prefixes = {
"_sa": "sqlalchemy",
"_engine": "sqlalchemy.engine",
"_url": "sqlalchemy.engine",
"_result": "sqlalchemy.engine",
"_row": "sqlalchemy.engine",
"_schema": "sqlalchemy.schema",
"_types": "sqlalchemy.types",
"_asyncio": "sqlalchemy.ext.asyncio",
"_expression": "sqlalchemy.sql.expression",
"_sql": "sqlalchemy.sql.expression",
"_dml": "sqlalchemy.sql.expression",
"_ddl": "sqlalchemy.schema",
"_functions": "sqlalchemy.sql.functions",
"_pool": "sqlalchemy.pool",
"_event": "sqlalchemy.event",
"_events": "sqlalchemy.events",
"_exc": "sqlalchemy.exc",
"_reflection": "sqlalchemy.engine.reflection",
"_orm": "sqlalchemy.orm",
"_query": "sqlalchemy.orm",
"_ormevent": "sqlalchemy.orm.event",
"_ormexc": "sqlalchemy.orm.exc",
"_roles": "sqlalchemy.sql.roles",
"_baked": "sqlalchemy.ext.baked",
"_horizontal": "sqlalchemy.ext.horizontal_shard",
"_associationproxy": "sqlalchemy.ext.associationproxy",
"_automap": "sqlalchemy.ext.automap",
"_hybrid": "sqlalchemy.ext.hybrid",
"_compilerext": "sqlalchemy.ext.compiler",
"_mutable": "sqlalchemy.ext.mutable",
"_declarative": "sqlalchemy.ext.declarative",
"_future": "sqlalchemy.future",
"_futureorm": "sqlalchemy.future.orm",
"_postgresql": "sqlalchemy.dialects.postgresql",
"_mysql": "sqlalchemy.dialects.mysql",
"_mssql": "sqlalchemy.dialects.mssql",
"_oracle": "sqlalchemy.dialects.oracle",
"_sqlite": "sqlalchemy.dialects.sqlite",
"_util": "sqlalchemy.util",
}
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "contents"
# General information about the project.
project = u"SQLAlchemy"
copyright = u"2007-2021, the SQLAlchemy authors and contributors" # noqa
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.4"
# The full version, including alpha/beta/rc tags.
release = "1.4.21"
release_date = "July 14, 2021"
site_base = os.environ.get("RTD_SITE_BASE", "https://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
site_adapter_py = "docs_adapter.py"
# arbitrary number recognized by builders.py, incrementing this
# will force a rebuild
build_number = "3"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# have the "gettext" build generate .pot for each individual
# .rst
gettext_compact = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "zzzeeksphinx"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = "default.css"
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s %s Documentation" % (project, version)
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = "%m/%d/%Y %H:%M:%S"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"notfound": "notfound.html"}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
# html_copy_source = True
html_copy_source = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "SQLAlchemydoc"
# autoclass_content = 'both'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
(
"contents",
"sqlalchemy_%s.tex" % release.replace(".", "_"),
"SQLAlchemy Documentation",
"Mike Bayer",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Additional stuff for the LaTeX preamble.
# sets TOC depth to 2.
latex_preamble = r"\setcounter{tocdepth}{3}"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# latex_elements = {
# 'papersize': 'letterpaper',
# 'pointsize': '10pt',
# }
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
"index",
"sqlalchemy",
u"SQLAlchemy Documentation",
[u"SQLAlchemy authors"],
1,
)
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u"SQLAlchemy"
epub_author = u"SQLAlchemy authors"
epub_publisher = u"SQLAlchemy authors"
epub_copyright = u"2007-2015, SQLAlchemy authors"
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
|
[] |
[] |
[
"RTD_SITE_BASE"
] |
[]
|
["RTD_SITE_BASE"]
|
python
| 1 | 0 | |
demo/cmd/bookstore/bookstore.go
|
package main
import (
"encoding/json"
"flag"
"fmt"
"html"
"html/template"
"math/rand"
"net/http"
"os"
"strings"
"sync/atomic"
"time"
"github.com/gorilla/mux"
"github.com/openservicemesh/osm/demo/cmd/common"
"github.com/openservicemesh/osm/pkg/logger"
)
var (
booksSold int64 = 0
log = logger.NewPretty("bookstore")
identity = flag.String("ident", "unidentified", "the identity of the container where this demo app is running (VM, K8s, etc)")
port = flag.Int("port", 80, "port on which this app is listening for incoming HTTP")
path = flag.String("path", ".", "path to the HTML template")
)
type handler struct {
path string
fn func(http.ResponseWriter, *http.Request)
method string
}
func getIdentity() string {
ident := os.Getenv("IDENTITY")
if ident == "" {
if identity != nil {
ident = *identity
}
}
return ident
}
func setHeaders(w http.ResponseWriter) {
w.Header().Set(common.BooksBoughtHeader, fmt.Sprintf("%d", booksSold))
w.Header().Set(common.IdentityHeader, getIdentity())
}
func renderTemplate(w http.ResponseWriter) {
tmpl, err := template.ParseFiles(fmt.Sprintf("%s/bookstore.html.template", *path))
if err != nil {
log.Fatal().Err(err).Msg("Failed to parse HTML template file")
}
err = tmpl.Execute(w, map[string]string{
"Identity": getIdentity(),
"BooksSold": fmt.Sprintf("%d", booksSold),
"Time": time.Now().Format("Mon, 02 Jan 2006 15:04:05 MST"),
})
if err != nil {
log.Fatal().Err(err).Msg("Could not render template")
}
}
func getBooksSold(w http.ResponseWriter, r *http.Request) {
setHeaders(w)
renderTemplate(w)
log.Info().Msgf("%s; URL: %q; Count: %d\n", getIdentity(), html.EscapeString(r.URL.Path), booksSold)
}
func getIndex(w http.ResponseWriter, r *http.Request) {
setHeaders(w)
renderTemplate(w)
log.Info().Msgf("%s; URL: %q; Count: %d\n", getIdentity(), html.EscapeString(r.URL.Path), booksSold)
}
// updateBooksSold updates the booksSold value to the one specified by the user
func updateBooksSold(w http.ResponseWriter, r *http.Request) {
var updatedBooksSold int64
err := json.NewDecoder(r.Body).Decode(&updatedBooksSold)
if err != nil {
log.Fatal().Err(err).Msg("Could not decode request body")
}
atomic.StoreInt64(&booksSold, updatedBooksSold)
setHeaders(w)
renderTemplate(w)
log.Info().Msgf("%s; URL: %q; %s: %d\n", getIdentity(), html.EscapeString(r.URL.Path), common.BooksBoughtHeader, booksSold)
}
// sellBook increments the value of the booksSold
func sellBook(w http.ResponseWriter, r *http.Request) {
fmt.Println("Selling a book!")
atomic.AddInt64(&booksSold, 1)
setHeaders(w)
renderTemplate(w)
log.Info().Msgf("%s; URL: %q; Count: %d\n", getIdentity(), html.EscapeString(r.URL.Path), booksSold)
// Loop through headers
for name, headers := range r.Header {
name = strings.ToLower(name)
for _, h := range headers {
log.Info().Msgf("%v: %v", name, h)
}
}
go common.RestockBooks(1) // make this async for a smoother demo
// Slow down the responses artificially.
maxNoiseMilliseconds := 750
minNoiseMilliseconds := 150
intNoise := rand.Intn(maxNoiseMilliseconds-minNoiseMilliseconds) + minNoiseMilliseconds // #nosec G404
pretendToBeBusy := time.Duration(intNoise) * time.Millisecond
log.Info().Msgf("Sleeping %+v", pretendToBeBusy)
time.Sleep(pretendToBeBusy)
}
func getHandlers() []handler {
return []handler{
{"/", getIndex, "GET"},
{"/books-bought", getBooksSold, "GET"},
{"/books-bought", updateBooksSold, "POST"},
{"/buy-a-book/new", sellBook, "GET"},
{"/reset", reset, "GET"},
}
}
func reset(w http.ResponseWriter, r *http.Request) {
atomic.StoreInt64(&booksSold, 0)
renderTemplate(w)
}
func main() {
flag.Parse()
router := mux.NewRouter()
for _, h := range getHandlers() {
router.HandleFunc(h.path, h.fn).Methods(h.method)
}
http.HandleFunc("/favicon.ico", func(w http.ResponseWriter, r *http.Request) {})
log.Info().Msgf("Bookstore running on port %d", *port)
err := http.ListenAndServe(fmt.Sprintf(":%d", *port), router)
log.Fatal().Err(err).Msgf("Failed to start HTTP server on port %d", *port)
}
|
[
"\"IDENTITY\""
] |
[] |
[
"IDENTITY"
] |
[]
|
["IDENTITY"]
|
go
| 1 | 0 | |
github_tracker/__init__.py
|
# Copyright (c) Microsoft Corporation. All Rights Reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the Software), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE
import logging
import datetime
import os
import azure.functions as func
from .traffic_tracker import Database, Repo, UserOrOrg
# Repos to collect metrics from.
# The first variable is a human readable name, the second needs to match the
# url extension for the repo exactly.
repos = {
"<REPO-NAME>": "<REPO-URL-EXTENSION>",
}
def main(mytimer: func.TimerRequest):
utc_timestamp = (
datetime.datetime.utcnow()
.replace(tzinfo=datetime.timezone.utc)
.isoformat()
)
if mytimer.past_due:
logging.info("The timer is past due!")
logging.info("Python timer trigger function ran at %s", utc_timestamp)
cosmos_db = Database(
os.getenv("CosmosDBConnectionString"),
"<YOUR-DATABASE-NAME>",
"<YOUR-CONTAINER-NAME>"
)
# Uncomment this to load the repos from GitHub each time
# user_or_org = UserOrOrg("<USER-OR-ORG>", os.getenv("GithubApiKey"))
# repos = user_or_org.repos()
for name, url in repos.items():
repo = Repo("<REPO-OWNER>", name, url, os.getenv("GithubApiKey"))
output = repo.metrics()
cosmos_db.upload(output)
|
[] |
[] |
[
"GithubApiKey",
"CosmosDBConnectionString"
] |
[]
|
["GithubApiKey", "CosmosDBConnectionString"]
|
python
| 2 | 0 | |
cmd/zoomer/main.go
|
package main
import (
"errors"
"flag"
"log"
"os"
"strconv"
"strings"
"github.com/chris124567/zoomer/pkg/zoom"
)
func main() {
var meetingNumber = flag.String("meetingNumber", "", "Meeting number")
var meetingPassword = flag.String("password", "", "Meeting password")
flag.Parse()
// get keys from environment
apiKey := os.Getenv("ZOOM_JWT_API_KEY")
apiSecret := os.Getenv("ZOOM_JWT_API_SECRET")
// create new session
// meetingNumber, meetingPassword, username, hardware uuid (can be random but should be relatively constant or it will appear to zoom that you have many many many devices), proxy url, jwt api key, jwt api secret)
session, err := zoom.NewZoomSession(*meetingNumber, *meetingPassword, "Bot", "ad8ffee7-d47c-4357-9ac8-965ed64e96fc", "", apiKey, apiSecret)
if err != nil {
log.Fatal(err)
}
// get the rwc token and other info needed to construct the websocket url for the meeting
meetingInfo, cookieString, err := session.GetMeetingInfoData()
if err != nil {
log.Fatal(err)
}
// get the url for the websocket connection. always pass false for the second parameter (its used internally to keep track of some parameters used for getting out of waiting rooms)
websocketUrl, err := session.GetWebsocketUrl(meetingInfo, false)
if err != nil {
log.Fatal(err)
}
log.Print(websocketUrl)
// The third argument is the "onmessage" function. It will be triggered every time the websocket client receives a message.
err = session.MakeWebsocketConnection(websocketUrl, cookieString, func(session *zoom.ZoomSession, message zoom.Message) error {
switch m := message.(type) {
case *zoom.ConferenceRosterIndication:
// if we get an indication that someone joined the meeting, welcome them
for _, person := range m.Add {
// don't welcome ourselves
if person.ID != session.JoinInfo.UserID {
// you could switch out EVERYONE_CHAT_ID with person.ID to private message them instead of sending the welcome to everyone
session.SendChatMessage(zoom.EVERYONE_CHAT_ID, "Welcome to the meeting, "+string(person.Dn2)+"!")
}
}
return nil
case *zoom.ConferenceChatIndication:
// respond to chats
return handleChatMessage(session, m, string(m.Text))
default:
return nil
}
})
if err != nil {
log.Fatal(err)
}
}
// only respond to messages with this prefix
const MESSAGE_PREFIX = "++"
func handleChatMessage(session *zoom.ZoomSession, body *zoom.ConferenceChatIndication, messageText string) error {
// takes commands of the form "++command argument1 argument2 ..."
if !strings.HasPrefix(messageText, MESSAGE_PREFIX) {
// this message is not for the bot
return nil
}
messageText = strings.TrimPrefix(messageText, MESSAGE_PREFIX)
words := strings.Fields(messageText)
wordsCount := len(words)
if wordsCount < 1 {
return errors.New("No command provided after prefix")
}
args := words[1:]
argsCount := len(args)
switch words[0] {
case "rename":
if argsCount > 0 {
session.RenameMe(strings.Join(args, " "))
}
case "mute":
// if we get no arguments or "on", turn mute on
if argsCount == 0 || args[0] == "on" {
session.SetAudioMuted(true)
session.SetVideoMuted(true)
} else if args[0] == "off" {
session.SetAudioMuted(false)
session.SetVideoMuted(false)
}
case "screenshare":
// if we get no arguments or "on", turn screenshare on
if argsCount == 0 || args[0] == "on" {
session.SetScreenShareMuted(false)
} else if args[0] == "off" {
session.SetScreenShareMuted(true)
}
case "chatlevel":
// take the first argument, convert to integer and try to use that to set the room chat level
if argsCount > 0 {
chatLevelInt, err := strconv.Atoi(args[0])
if err == nil {
session.SetChatLevel(chatLevelInt)
}
}
default:
// just echo the message it if its not code for anything
session.SendChatMessage(body.DestNodeID, "I don't understand this message so I am echoing it: "+string(body.Text))
}
return nil
}
|
[
"\"ZOOM_JWT_API_KEY\"",
"\"ZOOM_JWT_API_SECRET\""
] |
[] |
[
"ZOOM_JWT_API_KEY",
"ZOOM_JWT_API_SECRET"
] |
[]
|
["ZOOM_JWT_API_KEY", "ZOOM_JWT_API_SECRET"]
|
go
| 2 | 0 | |
subscriptions/models.py
|
from datetime import date, timedelta
from django.conf import settings
from django.contrib.auth.models import User
from django.db.models import (CASCADE, PROTECT, BigAutoField, CharField,
DateField, ForeignKey, ManyToManyField, Model,
OneToOneField, TextChoices, TextField)
from django.utils.translation import gettext_lazy as _
class Account(Model):
user = OneToOneField(User, on_delete=CASCADE)
subscription = DateField()
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
if not self.pk:
if self.subscription < date.today():
self.subscription = date.today()
self.subscription += timedelta(days=settings.DEFAULT_TRIAL_PERIOD)
super().save(force_insert, force_update, using, update_fields)
def __str__(self):
return self.user.get_full_name()
def is_active(self):
return self.subscription >= date.today()
def update_subscription(self, days):
if self.subscription < date.today():
self.subscription = date.today()
self.subscription += timedelta(days=days)
self.save()
return self.subscription
class Author(Model):
id = BigAutoField(primary_key=True)
name = CharField(max_length=50)
surname = CharField(max_length=50)
about = TextField(blank=True)
books = ManyToManyField(
'Book',
through='AuthorBook',
blank=True
)
def __str__(self):
return f'{self.name} {self.surname}'
class Book(Model):
class Type(TextChoices):
TEXT = 'TEXT', _('Text')
AUDIO = 'AUDIO', _('Audio')
class Genre(TextChoices):
COMEDY = 'COMEDY', _('Comedy')
TRAGEDY = 'TRAGEDY', _('Tragedy')
DRAMA = 'DRAMA', _('Drama')
HORROR = 'HORROR', _('Horror')
id = BigAutoField(primary_key=True)
title = CharField(max_length=50)
type = CharField(
max_length=50,
choices=Type.choices,
default=Type.TEXT
)
genre = CharField(
max_length=50,
choices=Genre.choices,
default=Genre.DRAMA
)
about = TextField(blank=True)
authors = ManyToManyField(
'Author',
through='AuthorBook',
blank=True
)
def __str__(self):
return self.title
class AuthorBook(Model):
author = ForeignKey(Author, on_delete=PROTECT)
book = ForeignKey(Book, on_delete=PROTECT)
class Meta:
auto_created = True
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
xds/src/main/java/io/grpc/xds/ClientXdsClient.java
|
/*
* Copyright 2020 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.xds;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static io.grpc.xds.Bootstrapper.XDSTP_SCHEME;
import com.github.udpa.udpa.type.v1.TypedStruct;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.base.Splitter;
import com.google.common.base.Stopwatch;
import com.google.common.base.Strings;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.SettableFuture;
import com.google.protobuf.Any;
import com.google.protobuf.Duration;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Message;
import com.google.protobuf.util.Durations;
import com.google.re2j.Pattern;
import com.google.re2j.PatternSyntaxException;
import io.envoyproxy.envoy.config.cluster.v3.CircuitBreakers.Thresholds;
import io.envoyproxy.envoy.config.cluster.v3.Cluster;
import io.envoyproxy.envoy.config.cluster.v3.Cluster.CustomClusterType;
import io.envoyproxy.envoy.config.cluster.v3.Cluster.DiscoveryType;
import io.envoyproxy.envoy.config.core.v3.HttpProtocolOptions;
import io.envoyproxy.envoy.config.core.v3.RoutingPriority;
import io.envoyproxy.envoy.config.core.v3.SocketAddress;
import io.envoyproxy.envoy.config.core.v3.SocketAddress.PortSpecifierCase;
import io.envoyproxy.envoy.config.core.v3.TrafficDirection;
import io.envoyproxy.envoy.config.core.v3.TypedExtensionConfig;
import io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment;
import io.envoyproxy.envoy.config.listener.v3.Listener;
import io.envoyproxy.envoy.config.route.v3.ClusterSpecifierPlugin;
import io.envoyproxy.envoy.config.route.v3.RetryPolicy.RetryBackOff;
import io.envoyproxy.envoy.config.route.v3.RouteConfiguration;
import io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager;
import io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.Rds;
import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CertificateValidationContext;
import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.CommonTlsContext;
import io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.DownstreamTlsContext;
import io.envoyproxy.envoy.service.discovery.v3.Resource;
import io.envoyproxy.envoy.type.v3.FractionalPercent;
import io.envoyproxy.envoy.type.v3.FractionalPercent.DenominatorType;
import io.grpc.ChannelCredentials;
import io.grpc.Context;
import io.grpc.EquivalentAddressGroup;
import io.grpc.Grpc;
import io.grpc.InternalLogId;
import io.grpc.LoadBalancerProvider;
import io.grpc.LoadBalancerRegistry;
import io.grpc.ManagedChannel;
import io.grpc.NameResolver;
import io.grpc.Status;
import io.grpc.Status.Code;
import io.grpc.SynchronizationContext;
import io.grpc.SynchronizationContext.ScheduledHandle;
import io.grpc.internal.BackoffPolicy;
import io.grpc.internal.ServiceConfigUtil;
import io.grpc.internal.ServiceConfigUtil.LbConfig;
import io.grpc.internal.ServiceConfigUtil.PolicySelection;
import io.grpc.internal.TimeProvider;
import io.grpc.xds.AbstractXdsClient.ResourceType;
import io.grpc.xds.Bootstrapper.AuthorityInfo;
import io.grpc.xds.Bootstrapper.ServerInfo;
import io.grpc.xds.ClusterSpecifierPlugin.NamedPluginConfig;
import io.grpc.xds.ClusterSpecifierPlugin.PluginConfig;
import io.grpc.xds.Endpoints.DropOverload;
import io.grpc.xds.Endpoints.LbEndpoint;
import io.grpc.xds.Endpoints.LocalityLbEndpoints;
import io.grpc.xds.EnvoyServerProtoData.CidrRange;
import io.grpc.xds.EnvoyServerProtoData.ConnectionSourceType;
import io.grpc.xds.EnvoyServerProtoData.FilterChain;
import io.grpc.xds.EnvoyServerProtoData.FilterChainMatch;
import io.grpc.xds.EnvoyServerProtoData.UpstreamTlsContext;
import io.grpc.xds.Filter.ClientInterceptorBuilder;
import io.grpc.xds.Filter.FilterConfig;
import io.grpc.xds.Filter.NamedFilterConfig;
import io.grpc.xds.Filter.ServerInterceptorBuilder;
import io.grpc.xds.LoadStatsManager2.ClusterDropStats;
import io.grpc.xds.LoadStatsManager2.ClusterLocalityStats;
import io.grpc.xds.VirtualHost.Route;
import io.grpc.xds.VirtualHost.Route.RouteAction;
import io.grpc.xds.VirtualHost.Route.RouteAction.ClusterWeight;
import io.grpc.xds.VirtualHost.Route.RouteAction.HashPolicy;
import io.grpc.xds.VirtualHost.Route.RouteAction.RetryPolicy;
import io.grpc.xds.VirtualHost.Route.RouteMatch;
import io.grpc.xds.VirtualHost.Route.RouteMatch.PathMatcher;
import io.grpc.xds.XdsClient.ResourceStore;
import io.grpc.xds.XdsClient.XdsResponseHandler;
import io.grpc.xds.XdsLogger.XdsLogLevel;
import io.grpc.xds.internal.Matchers.FractionMatcher;
import io.grpc.xds.internal.Matchers.HeaderMatcher;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import javax.annotation.Nullable;
/**
 * XdsClient implementation for client-side usage.
*/
final class ClientXdsClient extends XdsClient implements XdsResponseHandler, ResourceStore {
  // Longest time to wait, after subscribing to a resource, before concluding that it is absent.
@VisibleForTesting
static final int INITIAL_RESOURCE_FETCH_TIMEOUT_SEC = 15;
private static final String TRANSPORT_SOCKET_NAME_TLS = "envoy.transport_sockets.tls";
@VisibleForTesting
static final String AGGREGATE_CLUSTER_TYPE_NAME = "envoy.clusters.aggregate";
@VisibleForTesting
static final String HASH_POLICY_FILTER_STATE_KEY = "io.grpc.channel_id";
@VisibleForTesting
static boolean enableFaultInjection =
Strings.isNullOrEmpty(System.getenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION"))
|| Boolean.parseBoolean(System.getenv("GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION"));
@VisibleForTesting
static boolean enableRetry =
Strings.isNullOrEmpty(System.getenv("GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY"))
|| Boolean.parseBoolean(System.getenv("GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY"));
@VisibleForTesting
static boolean enableRbac =
Strings.isNullOrEmpty(System.getenv("GRPC_XDS_EXPERIMENTAL_RBAC"))
|| Boolean.parseBoolean(System.getenv("GRPC_XDS_EXPERIMENTAL_RBAC"));
@VisibleForTesting
static boolean enableRouteLookup =
!Strings.isNullOrEmpty(System.getenv("GRPC_EXPERIMENTAL_XDS_RLS_LB"))
&& Boolean.parseBoolean(System.getenv("GRPC_EXPERIMENTAL_XDS_RLS_LB"));
@VisibleForTesting
static boolean enableLeastRequest =
!Strings.isNullOrEmpty(System.getenv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST"))
? Boolean.parseBoolean(System.getenv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST"))
: Boolean.parseBoolean(System.getProperty("io.grpc.xds.experimentalEnableLeastRequest"));
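  // Feature-flag defaults (summary of the declarations above): fault injection, retry, and RBAC
  // are enabled unless their environment variable is set to a value parsed as false; route lookup
  // (RLS) and least_request are disabled unless explicitly enabled via env var / system property.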
private static final String TYPE_URL_HTTP_CONNECTION_MANAGER_V2 =
"type.googleapis.com/envoy.config.filter.network.http_connection_manager.v2"
+ ".HttpConnectionManager";
static final String TYPE_URL_HTTP_CONNECTION_MANAGER =
"type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3"
+ ".HttpConnectionManager";
private static final String TYPE_URL_UPSTREAM_TLS_CONTEXT =
"type.googleapis.com/envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext";
private static final String TYPE_URL_UPSTREAM_TLS_CONTEXT_V2 =
"type.googleapis.com/envoy.api.v2.auth.UpstreamTlsContext";
private static final String TYPE_URL_CLUSTER_CONFIG_V2 =
"type.googleapis.com/envoy.config.cluster.aggregate.v2alpha.ClusterConfig";
private static final String TYPE_URL_CLUSTER_CONFIG =
"type.googleapis.com/envoy.extensions.clusters.aggregate.v3.ClusterConfig";
private static final String TYPE_URL_TYPED_STRUCT_UDPA =
"type.googleapis.com/udpa.type.v1.TypedStruct";
private static final String TYPE_URL_TYPED_STRUCT =
"type.googleapis.com/xds.type.v3.TypedStruct";
private static final String TYPE_URL_FILTER_CONFIG =
"type.googleapis.com/envoy.config.route.v3.FilterConfig";
private static final String TYPE_URL_RESOURCE_V2 = "type.googleapis.com/envoy.api.v2.Resource";
private static final String TYPE_URL_RESOURCE_V3 =
"type.googleapis.com/envoy.service.discovery.v3.Resource";
// TODO(zdapeng): need to discuss how to handle unsupported values.
private static final Set<Code> SUPPORTED_RETRYABLE_CODES =
Collections.unmodifiableSet(EnumSet.of(
Code.CANCELLED, Code.DEADLINE_EXCEEDED, Code.INTERNAL, Code.RESOURCE_EXHAUSTED,
Code.UNAVAILABLE));
private final SynchronizationContext syncContext = new SynchronizationContext(
new Thread.UncaughtExceptionHandler() {
@Override
public void uncaughtException(Thread t, Throwable e) {
logger.log(
XdsLogLevel.ERROR,
"Uncaught exception in XdsClient SynchronizationContext. Panic!",
e);
// TODO(chengyuanzhang): better error handling.
throw new AssertionError(e);
}
});
private final FilterRegistry filterRegistry = FilterRegistry.getDefaultRegistry();
private final LoadBalancerRegistry loadBalancerRegistry
= LoadBalancerRegistry.getDefaultRegistry();
private final Map<ServerInfo, AbstractXdsClient> serverChannelMap = new HashMap<>();
private final Map<String, ResourceSubscriber> ldsResourceSubscribers = new HashMap<>();
private final Map<String, ResourceSubscriber> rdsResourceSubscribers = new HashMap<>();
private final Map<String, ResourceSubscriber> cdsResourceSubscribers = new HashMap<>();
private final Map<String, ResourceSubscriber> edsResourceSubscribers = new HashMap<>();
private final LoadStatsManager2 loadStatsManager;
private final Map<ServerInfo, LoadReportClient> serverLrsClientMap = new HashMap<>();
private final XdsChannelFactory xdsChannelFactory;
private final Bootstrapper.BootstrapInfo bootstrapInfo;
private final Context context;
private final ScheduledExecutorService timeService;
private final BackoffPolicy.Provider backoffPolicyProvider;
private final Supplier<Stopwatch> stopwatchSupplier;
private final TimeProvider timeProvider;
private boolean reportingLoad;
private final TlsContextManager tlsContextManager;
private final InternalLogId logId;
private final XdsLogger logger;
private volatile boolean isShutdown;
// TODO(zdapeng): rename to XdsClientImpl
ClientXdsClient(
XdsChannelFactory xdsChannelFactory,
Bootstrapper.BootstrapInfo bootstrapInfo,
Context context,
ScheduledExecutorService timeService,
BackoffPolicy.Provider backoffPolicyProvider,
Supplier<Stopwatch> stopwatchSupplier,
TimeProvider timeProvider,
TlsContextManager tlsContextManager) {
this.xdsChannelFactory = xdsChannelFactory;
this.bootstrapInfo = bootstrapInfo;
this.context = context;
this.timeService = timeService;
loadStatsManager = new LoadStatsManager2(stopwatchSupplier);
this.backoffPolicyProvider = backoffPolicyProvider;
this.stopwatchSupplier = stopwatchSupplier;
this.timeProvider = timeProvider;
this.tlsContextManager = checkNotNull(tlsContextManager, "tlsContextManager");
logId = InternalLogId.allocate("xds-client", null);
logger = XdsLogger.withLogId(logId);
logger.log(XdsLogLevel.INFO, "Created");
}
private void maybeCreateXdsChannelWithLrs(ServerInfo serverInfo) {
syncContext.throwIfNotInThisSynchronizationContext();
if (serverChannelMap.containsKey(serverInfo)) {
return;
}
AbstractXdsClient xdsChannel = new AbstractXdsClient(
xdsChannelFactory,
serverInfo,
bootstrapInfo.node(),
this,
this,
context,
timeService,
syncContext,
backoffPolicyProvider,
stopwatchSupplier);
LoadReportClient lrsClient = new LoadReportClient(
loadStatsManager, xdsChannel.channel(), context, serverInfo.useProtocolV3(),
bootstrapInfo.node(), syncContext, timeService, backoffPolicyProvider, stopwatchSupplier);
serverChannelMap.put(serverInfo, xdsChannel);
serverLrsClientMap.put(serverInfo, lrsClient);
}
private Any maybeUnwrapResources(Any resource)
throws InvalidProtocolBufferException {
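    // A resource may arrive either as the bare typed message or wrapped in a v2/v3 discovery
    // Resource envelope; if wrapped, extract the inner resource before any further unpacking.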
if (resource.getTypeUrl().equals(TYPE_URL_RESOURCE_V2)
|| resource.getTypeUrl().equals(TYPE_URL_RESOURCE_V3)) {
return unpackCompatibleType(resource, Resource.class, TYPE_URL_RESOURCE_V3,
TYPE_URL_RESOURCE_V2).getResource();
} else {
return resource;
}
}
@Override
public void handleLdsResponse(
ServerInfo serverInfo, String versionInfo, List<Any> resources, String nonce) {
syncContext.throwIfNotInThisSynchronizationContext();
Map<String, ParsedResource> parsedResources = new HashMap<>(resources.size());
Set<String> unpackedResources = new HashSet<>(resources.size());
Set<String> invalidResources = new HashSet<>();
List<String> errors = new ArrayList<>();
Set<String> retainedRdsResources = new HashSet<>();
for (int i = 0; i < resources.size(); i++) {
Any resource = resources.get(i);
boolean isResourceV3;
Listener listener;
try {
resource = maybeUnwrapResources(resource);
// Unpack the Listener.
isResourceV3 = resource.getTypeUrl().equals(ResourceType.LDS.typeUrl());
listener = unpackCompatibleType(resource, Listener.class, ResourceType.LDS.typeUrl(),
ResourceType.LDS.typeUrlV2());
} catch (InvalidProtocolBufferException e) {
errors.add("LDS response Resource index " + i + " - can't decode Listener: " + e);
continue;
}
if (!isResourceNameValid(listener.getName(), resource.getTypeUrl())) {
errors.add(
"Unsupported resource name: " + listener.getName() + " for type: " + ResourceType.LDS);
continue;
}
String listenerName = canonifyResourceName(listener.getName());
unpackedResources.add(listenerName);
// Process Listener into LdsUpdate.
LdsUpdate ldsUpdate;
try {
if (listener.hasApiListener()) {
ldsUpdate = processClientSideListener(
listener, retainedRdsResources, enableFaultInjection && isResourceV3);
} else {
ldsUpdate = processServerSideListener(
listener, retainedRdsResources, enableRbac && isResourceV3);
}
} catch (ResourceInvalidException e) {
errors.add(
"LDS response Listener '" + listenerName + "' validation error: " + e.getMessage());
invalidResources.add(listenerName);
continue;
}
// LdsUpdate parsed successfully.
parsedResources.put(listenerName, new ParsedResource(ldsUpdate, resource));
}
logger.log(XdsLogLevel.INFO,
"Received LDS Response version {0} nonce {1}. Parsed resources: {2}",
versionInfo, nonce, unpackedResources);
handleResourceUpdate(
serverInfo, ResourceType.LDS, parsedResources, invalidResources, retainedRdsResources,
versionInfo, nonce, errors);
}
private LdsUpdate processClientSideListener(
Listener listener, Set<String> rdsResources, boolean parseHttpFilter)
throws ResourceInvalidException {
// Unpack HttpConnectionManager from the Listener.
HttpConnectionManager hcm;
try {
hcm = unpackCompatibleType(
listener.getApiListener().getApiListener(), HttpConnectionManager.class,
TYPE_URL_HTTP_CONNECTION_MANAGER, TYPE_URL_HTTP_CONNECTION_MANAGER_V2);
} catch (InvalidProtocolBufferException e) {
throw new ResourceInvalidException(
"Could not parse HttpConnectionManager config from ApiListener", e);
}
return LdsUpdate.forApiListener(parseHttpConnectionManager(
hcm, rdsResources, filterRegistry, parseHttpFilter, true /* isForClient */));
}
private LdsUpdate processServerSideListener(
Listener proto, Set<String> rdsResources, boolean parseHttpFilter)
throws ResourceInvalidException {
Set<String> certProviderInstances = null;
if (getBootstrapInfo() != null && getBootstrapInfo().certProviders() != null) {
certProviderInstances = getBootstrapInfo().certProviders().keySet();
}
return LdsUpdate.forTcpListener(parseServerSideListener(
proto, rdsResources, tlsContextManager, filterRegistry, certProviderInstances,
parseHttpFilter));
}
@VisibleForTesting
static EnvoyServerProtoData.Listener parseServerSideListener(
Listener proto, Set<String> rdsResources, TlsContextManager tlsContextManager,
FilterRegistry filterRegistry, Set<String> certProviderInstances, boolean parseHttpFilter)
throws ResourceInvalidException {
if (!proto.getTrafficDirection().equals(TrafficDirection.INBOUND)) {
throw new ResourceInvalidException(
"Listener " + proto.getName() + " with invalid traffic direction: "
+ proto.getTrafficDirection());
}
if (!proto.getListenerFiltersList().isEmpty()) {
throw new ResourceInvalidException(
"Listener " + proto.getName() + " cannot have listener_filters");
}
if (proto.hasUseOriginalDst()) {
throw new ResourceInvalidException(
"Listener " + proto.getName() + " cannot have use_original_dst set to true");
}
String address = null;
if (proto.getAddress().hasSocketAddress()) {
SocketAddress socketAddress = proto.getAddress().getSocketAddress();
address = socketAddress.getAddress();
switch (socketAddress.getPortSpecifierCase()) {
case NAMED_PORT:
address = address + ":" + socketAddress.getNamedPort();
break;
case PORT_VALUE:
address = address + ":" + socketAddress.getPortValue();
break;
default:
// noop
}
}
ImmutableList.Builder<FilterChain> filterChains = ImmutableList.builder();
Set<FilterChainMatch> uniqueSet = new HashSet<>();
for (io.envoyproxy.envoy.config.listener.v3.FilterChain fc : proto.getFilterChainsList()) {
filterChains.add(
parseFilterChain(fc, rdsResources, tlsContextManager, filterRegistry, uniqueSet,
certProviderInstances, parseHttpFilter));
}
FilterChain defaultFilterChain = null;
if (proto.hasDefaultFilterChain()) {
defaultFilterChain = parseFilterChain(
proto.getDefaultFilterChain(), rdsResources, tlsContextManager, filterRegistry,
null, certProviderInstances, parseHttpFilter);
}
return EnvoyServerProtoData.Listener.create(
proto.getName(), address, filterChains.build(), defaultFilterChain);
}
@VisibleForTesting
static FilterChain parseFilterChain(
io.envoyproxy.envoy.config.listener.v3.FilterChain proto, Set<String> rdsResources,
TlsContextManager tlsContextManager, FilterRegistry filterRegistry,
Set<FilterChainMatch> uniqueSet, Set<String> certProviderInstances, boolean parseHttpFilters)
throws ResourceInvalidException {
if (proto.getFiltersCount() != 1) {
throw new ResourceInvalidException("FilterChain " + proto.getName()
+ " should contain exact one HttpConnectionManager filter");
}
io.envoyproxy.envoy.config.listener.v3.Filter filter = proto.getFiltersList().get(0);
if (!filter.hasTypedConfig()) {
throw new ResourceInvalidException(
"FilterChain " + proto.getName() + " contains filter " + filter.getName()
+ " without typed_config");
}
Any any = filter.getTypedConfig();
// HttpConnectionManager is the only supported network filter at the moment.
if (!any.getTypeUrl().equals(TYPE_URL_HTTP_CONNECTION_MANAGER)) {
throw new ResourceInvalidException(
"FilterChain " + proto.getName() + " contains filter " + filter.getName()
+ " with unsupported typed_config type " + any.getTypeUrl());
}
HttpConnectionManager hcmProto;
try {
hcmProto = any.unpack(HttpConnectionManager.class);
} catch (InvalidProtocolBufferException e) {
throw new ResourceInvalidException("FilterChain " + proto.getName() + " with filter "
+ filter.getName() + " failed to unpack message", e);
}
io.grpc.xds.HttpConnectionManager httpConnectionManager = parseHttpConnectionManager(
hcmProto, rdsResources, filterRegistry, parseHttpFilters, false /* isForClient */);
EnvoyServerProtoData.DownstreamTlsContext downstreamTlsContext = null;
if (proto.hasTransportSocket()) {
if (!TRANSPORT_SOCKET_NAME_TLS.equals(proto.getTransportSocket().getName())) {
throw new ResourceInvalidException("transport-socket with name "
+ proto.getTransportSocket().getName() + " not supported.");
}
DownstreamTlsContext downstreamTlsContextProto;
try {
downstreamTlsContextProto =
proto.getTransportSocket().getTypedConfig().unpack(DownstreamTlsContext.class);
} catch (InvalidProtocolBufferException e) {
throw new ResourceInvalidException("FilterChain " + proto.getName()
+ " failed to unpack message", e);
}
downstreamTlsContext =
EnvoyServerProtoData.DownstreamTlsContext.fromEnvoyProtoDownstreamTlsContext(
validateDownstreamTlsContext(downstreamTlsContextProto, certProviderInstances));
}
FilterChainMatch filterChainMatch = parseFilterChainMatch(proto.getFilterChainMatch());
checkForUniqueness(uniqueSet, filterChainMatch);
return FilterChain.create(
proto.getName(),
filterChainMatch,
httpConnectionManager,
downstreamTlsContext,
tlsContextManager
);
}
@VisibleForTesting
static DownstreamTlsContext validateDownstreamTlsContext(
DownstreamTlsContext downstreamTlsContext, Set<String> certProviderInstances)
throws ResourceInvalidException {
if (downstreamTlsContext.hasCommonTlsContext()) {
validateCommonTlsContext(downstreamTlsContext.getCommonTlsContext(), certProviderInstances,
true);
} else {
throw new ResourceInvalidException(
"common-tls-context is required in downstream-tls-context");
}
if (downstreamTlsContext.hasRequireSni()) {
throw new ResourceInvalidException(
"downstream-tls-context with require-sni is not supported");
}
DownstreamTlsContext.OcspStaplePolicy ocspStaplePolicy = downstreamTlsContext
.getOcspStaplePolicy();
if (ocspStaplePolicy != DownstreamTlsContext.OcspStaplePolicy.UNRECOGNIZED
&& ocspStaplePolicy != DownstreamTlsContext.OcspStaplePolicy.LENIENT_STAPLING) {
throw new ResourceInvalidException(
"downstream-tls-context with ocsp_staple_policy value " + ocspStaplePolicy.name()
+ " is not supported");
}
return downstreamTlsContext;
}
@VisibleForTesting
static io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext
validateUpstreamTlsContext(
io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext upstreamTlsContext,
Set<String> certProviderInstances)
throws ResourceInvalidException {
if (upstreamTlsContext.hasCommonTlsContext()) {
validateCommonTlsContext(upstreamTlsContext.getCommonTlsContext(), certProviderInstances,
false);
} else {
throw new ResourceInvalidException("common-tls-context is required in upstream-tls-context");
}
return upstreamTlsContext;
}
@VisibleForTesting
static void validateCommonTlsContext(
CommonTlsContext commonTlsContext, Set<String> certProviderInstances, boolean server)
throws ResourceInvalidException {
if (commonTlsContext.hasCustomHandshaker()) {
throw new ResourceInvalidException(
"common-tls-context with custom_handshaker is not supported");
}
if (commonTlsContext.hasTlsParams()) {
throw new ResourceInvalidException("common-tls-context with tls_params is not supported");
}
if (commonTlsContext.hasValidationContextSdsSecretConfig()) {
throw new ResourceInvalidException(
"common-tls-context with validation_context_sds_secret_config is not supported");
}
if (commonTlsContext.hasValidationContextCertificateProvider()) {
throw new ResourceInvalidException(
"common-tls-context with validation_context_certificate_provider is not supported");
}
if (commonTlsContext.hasValidationContextCertificateProviderInstance()) {
throw new ResourceInvalidException(
"common-tls-context with validation_context_certificate_provider_instance is not"
+ " supported");
}
String certInstanceName = getIdentityCertInstanceName(commonTlsContext);
if (certInstanceName == null) {
if (server) {
throw new ResourceInvalidException(
"tls_certificate_provider_instance is required in downstream-tls-context");
}
if (commonTlsContext.getTlsCertificatesCount() > 0) {
throw new ResourceInvalidException(
"tls_certificate_provider_instance is unset");
}
if (commonTlsContext.getTlsCertificateSdsSecretConfigsCount() > 0) {
throw new ResourceInvalidException(
"tls_certificate_provider_instance is unset");
}
if (commonTlsContext.hasTlsCertificateCertificateProvider()) {
throw new ResourceInvalidException(
"tls_certificate_provider_instance is unset");
}
} else if (certProviderInstances == null || !certProviderInstances.contains(certInstanceName)) {
throw new ResourceInvalidException(
"CertificateProvider instance name '" + certInstanceName
+ "' not defined in the bootstrap file.");
}
String rootCaInstanceName = getRootCertInstanceName(commonTlsContext);
if (rootCaInstanceName == null) {
if (!server) {
throw new ResourceInvalidException(
"ca_certificate_provider_instance is required in upstream-tls-context");
}
} else {
if (certProviderInstances == null || !certProviderInstances.contains(rootCaInstanceName)) {
throw new ResourceInvalidException(
"ca_certificate_provider_instance name '" + rootCaInstanceName
+ "' not defined in the bootstrap file.");
}
CertificateValidationContext certificateValidationContext = null;
if (commonTlsContext.hasValidationContext()) {
certificateValidationContext = commonTlsContext.getValidationContext();
} else if (commonTlsContext.hasCombinedValidationContext() && commonTlsContext
.getCombinedValidationContext().hasDefaultValidationContext()) {
certificateValidationContext = commonTlsContext.getCombinedValidationContext()
.getDefaultValidationContext();
}
if (certificateValidationContext != null) {
if (certificateValidationContext.getMatchSubjectAltNamesCount() > 0 && server) {
throw new ResourceInvalidException(
"match_subject_alt_names only allowed in upstream_tls_context");
}
if (certificateValidationContext.getVerifyCertificateSpkiCount() > 0) {
throw new ResourceInvalidException(
"verify_certificate_spki in default_validation_context is not supported");
}
if (certificateValidationContext.getVerifyCertificateHashCount() > 0) {
throw new ResourceInvalidException(
"verify_certificate_hash in default_validation_context is not supported");
}
if (certificateValidationContext.hasRequireSignedCertificateTimestamp()) {
throw new ResourceInvalidException(
"require_signed_certificate_timestamp in default_validation_context is not "
+ "supported");
}
if (certificateValidationContext.hasCrl()) {
throw new ResourceInvalidException("crl in default_validation_context is not supported");
}
if (certificateValidationContext.hasCustomValidatorConfig()) {
throw new ResourceInvalidException(
"custom_validator_config in default_validation_context is not supported");
}
}
}
}
private static String getIdentityCertInstanceName(CommonTlsContext commonTlsContext) {
if (commonTlsContext.hasTlsCertificateProviderInstance()) {
return commonTlsContext.getTlsCertificateProviderInstance().getInstanceName();
} else if (commonTlsContext.hasTlsCertificateCertificateProviderInstance()) {
return commonTlsContext.getTlsCertificateCertificateProviderInstance().getInstanceName();
}
return null;
}
private static String getRootCertInstanceName(CommonTlsContext commonTlsContext) {
if (commonTlsContext.hasValidationContext()) {
if (commonTlsContext.getValidationContext().hasCaCertificateProviderInstance()) {
return commonTlsContext.getValidationContext().getCaCertificateProviderInstance()
.getInstanceName();
}
} else if (commonTlsContext.hasCombinedValidationContext()) {
CommonTlsContext.CombinedCertificateValidationContext combinedCertificateValidationContext
= commonTlsContext.getCombinedValidationContext();
if (combinedCertificateValidationContext.hasDefaultValidationContext()
&& combinedCertificateValidationContext.getDefaultValidationContext()
.hasCaCertificateProviderInstance()) {
return combinedCertificateValidationContext.getDefaultValidationContext()
.getCaCertificateProviderInstance().getInstanceName();
} else if (combinedCertificateValidationContext
.hasValidationContextCertificateProviderInstance()) {
return combinedCertificateValidationContext
.getValidationContextCertificateProviderInstance().getInstanceName();
}
}
return null;
}
private static void checkForUniqueness(Set<FilterChainMatch> uniqueSet,
FilterChainMatch filterChainMatch) throws ResourceInvalidException {
if (uniqueSet != null) {
List<FilterChainMatch> crossProduct = getCrossProduct(filterChainMatch);
for (FilterChainMatch cur : crossProduct) {
if (!uniqueSet.add(cur)) {
throw new ResourceInvalidException("FilterChainMatch must be unique. "
+ "Found duplicate: " + cur);
}
}
}
}
private static List<FilterChainMatch> getCrossProduct(FilterChainMatch filterChainMatch) {
// repeating fields to process:
// prefixRanges, applicationProtocols, sourcePrefixRanges, sourcePorts, serverNames
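    // Example (hypothetical values): a match with prefixRanges = [A, B] and sourcePorts = [80, 443]
    // expands into the four singleton combinations (A, 80), (A, 443), (B, 80), (B, 443), so that
    // checkForUniqueness compares fully-expanded FilterChainMatch entries.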
List<FilterChainMatch> expandedList = expandOnPrefixRange(filterChainMatch);
expandedList = expandOnApplicationProtocols(expandedList);
expandedList = expandOnSourcePrefixRange(expandedList);
expandedList = expandOnSourcePorts(expandedList);
return expandOnServerNames(expandedList);
}
private static List<FilterChainMatch> expandOnPrefixRange(FilterChainMatch filterChainMatch) {
ArrayList<FilterChainMatch> expandedList = new ArrayList<>();
if (filterChainMatch.prefixRanges().isEmpty()) {
expandedList.add(filterChainMatch);
} else {
for (EnvoyServerProtoData.CidrRange cidrRange : filterChainMatch.prefixRanges()) {
expandedList.add(FilterChainMatch.create(filterChainMatch.destinationPort(),
ImmutableList.of(cidrRange),
filterChainMatch.applicationProtocols(),
filterChainMatch.sourcePrefixRanges(),
filterChainMatch.connectionSourceType(),
filterChainMatch.sourcePorts(),
filterChainMatch.serverNames(),
filterChainMatch.transportProtocol()));
}
}
return expandedList;
}
private static List<FilterChainMatch> expandOnApplicationProtocols(
Collection<FilterChainMatch> set) {
ArrayList<FilterChainMatch> expandedList = new ArrayList<>();
for (FilterChainMatch filterChainMatch : set) {
if (filterChainMatch.applicationProtocols().isEmpty()) {
expandedList.add(filterChainMatch);
} else {
for (String applicationProtocol : filterChainMatch.applicationProtocols()) {
expandedList.add(FilterChainMatch.create(filterChainMatch.destinationPort(),
filterChainMatch.prefixRanges(),
ImmutableList.of(applicationProtocol),
filterChainMatch.sourcePrefixRanges(),
filterChainMatch.connectionSourceType(),
filterChainMatch.sourcePorts(),
filterChainMatch.serverNames(),
filterChainMatch.transportProtocol()));
}
}
}
return expandedList;
}
private static List<FilterChainMatch> expandOnSourcePrefixRange(
Collection<FilterChainMatch> set) {
ArrayList<FilterChainMatch> expandedList = new ArrayList<>();
for (FilterChainMatch filterChainMatch : set) {
if (filterChainMatch.sourcePrefixRanges().isEmpty()) {
expandedList.add(filterChainMatch);
} else {
for (EnvoyServerProtoData.CidrRange cidrRange : filterChainMatch.sourcePrefixRanges()) {
expandedList.add(FilterChainMatch.create(filterChainMatch.destinationPort(),
filterChainMatch.prefixRanges(),
filterChainMatch.applicationProtocols(),
ImmutableList.of(cidrRange),
filterChainMatch.connectionSourceType(),
filterChainMatch.sourcePorts(),
filterChainMatch.serverNames(),
filterChainMatch.transportProtocol()));
}
}
}
return expandedList;
}
private static List<FilterChainMatch> expandOnSourcePorts(Collection<FilterChainMatch> set) {
ArrayList<FilterChainMatch> expandedList = new ArrayList<>();
for (FilterChainMatch filterChainMatch : set) {
if (filterChainMatch.sourcePorts().isEmpty()) {
expandedList.add(filterChainMatch);
} else {
for (Integer sourcePort : filterChainMatch.sourcePorts()) {
expandedList.add(FilterChainMatch.create(filterChainMatch.destinationPort(),
filterChainMatch.prefixRanges(),
filterChainMatch.applicationProtocols(),
filterChainMatch.sourcePrefixRanges(),
filterChainMatch.connectionSourceType(),
ImmutableList.of(sourcePort),
filterChainMatch.serverNames(),
filterChainMatch.transportProtocol()));
}
}
}
return expandedList;
}
private static List<FilterChainMatch> expandOnServerNames(Collection<FilterChainMatch> set) {
ArrayList<FilterChainMatch> expandedList = new ArrayList<>();
for (FilterChainMatch filterChainMatch : set) {
if (filterChainMatch.serverNames().isEmpty()) {
expandedList.add(filterChainMatch);
} else {
for (String serverName : filterChainMatch.serverNames()) {
expandedList.add(FilterChainMatch.create(filterChainMatch.destinationPort(),
filterChainMatch.prefixRanges(),
filterChainMatch.applicationProtocols(),
filterChainMatch.sourcePrefixRanges(),
filterChainMatch.connectionSourceType(),
filterChainMatch.sourcePorts(),
ImmutableList.of(serverName),
filterChainMatch.transportProtocol()));
}
}
}
return expandedList;
}
private static FilterChainMatch parseFilterChainMatch(
io.envoyproxy.envoy.config.listener.v3.FilterChainMatch proto)
throws ResourceInvalidException {
ImmutableList.Builder<CidrRange> prefixRanges = ImmutableList.builder();
ImmutableList.Builder<CidrRange> sourcePrefixRanges = ImmutableList.builder();
try {
for (io.envoyproxy.envoy.config.core.v3.CidrRange range : proto.getPrefixRangesList()) {
prefixRanges.add(
CidrRange.create(range.getAddressPrefix(), range.getPrefixLen().getValue()));
}
for (io.envoyproxy.envoy.config.core.v3.CidrRange range
: proto.getSourcePrefixRangesList()) {
sourcePrefixRanges.add(
CidrRange.create(range.getAddressPrefix(), range.getPrefixLen().getValue()));
}
} catch (UnknownHostException e) {
throw new ResourceInvalidException("Failed to create CidrRange", e);
}
ConnectionSourceType sourceType;
switch (proto.getSourceType()) {
case ANY:
sourceType = ConnectionSourceType.ANY;
break;
case EXTERNAL:
sourceType = ConnectionSourceType.EXTERNAL;
break;
case SAME_IP_OR_LOOPBACK:
sourceType = ConnectionSourceType.SAME_IP_OR_LOOPBACK;
break;
default:
throw new ResourceInvalidException("Unknown source-type: " + proto.getSourceType());
}
return FilterChainMatch.create(
proto.getDestinationPort().getValue(),
prefixRanges.build(),
ImmutableList.copyOf(proto.getApplicationProtocolsList()),
sourcePrefixRanges.build(),
sourceType,
ImmutableList.copyOf(proto.getSourcePortsList()),
ImmutableList.copyOf(proto.getServerNamesList()),
proto.getTransportProtocol());
}
@VisibleForTesting
static io.grpc.xds.HttpConnectionManager parseHttpConnectionManager(
HttpConnectionManager proto, Set<String> rdsResources, FilterRegistry filterRegistry,
boolean parseHttpFilter, boolean isForClient) throws ResourceInvalidException {
if (enableRbac && proto.getXffNumTrustedHops() != 0) {
throw new ResourceInvalidException(
"HttpConnectionManager with xff_num_trusted_hops unsupported");
}
if (enableRbac && !proto.getOriginalIpDetectionExtensionsList().isEmpty()) {
throw new ResourceInvalidException("HttpConnectionManager with "
+ "original_ip_detection_extensions unsupported");
}
// Obtain max_stream_duration from Http Protocol Options.
long maxStreamDuration = 0;
if (proto.hasCommonHttpProtocolOptions()) {
HttpProtocolOptions options = proto.getCommonHttpProtocolOptions();
if (options.hasMaxStreamDuration()) {
maxStreamDuration = Durations.toNanos(options.getMaxStreamDuration());
}
}
// Parse http filters.
List<NamedFilterConfig> filterConfigs = null;
if (parseHttpFilter) {
if (proto.getHttpFiltersList().isEmpty()) {
throw new ResourceInvalidException("Missing HttpFilter in HttpConnectionManager.");
}
filterConfigs = new ArrayList<>();
Set<String> names = new HashSet<>();
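      // Invariant enforced in the loop below: the router filter (the only terminal filter) must be
      // the last entry in http_filters, and no terminal filter may appear at an earlier position.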
for (int i = 0; i < proto.getHttpFiltersCount(); i++) {
io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter
httpFilter = proto.getHttpFiltersList().get(i);
String filterName = httpFilter.getName();
if (!names.add(filterName)) {
throw new ResourceInvalidException(
"HttpConnectionManager contains duplicate HttpFilter: " + filterName);
}
StructOrError<FilterConfig> filterConfig =
parseHttpFilter(httpFilter, filterRegistry, isForClient);
if ((i == proto.getHttpFiltersCount() - 1)
&& (filterConfig == null || !isTerminalFilter(filterConfig.struct))) {
throw new ResourceInvalidException("The last HttpFilter must be a terminal filter: "
+ filterName);
}
if (filterConfig == null) {
continue;
}
if (filterConfig.getErrorDetail() != null) {
throw new ResourceInvalidException(
"HttpConnectionManager contains invalid HttpFilter: "
+ filterConfig.getErrorDetail());
}
if ((i < proto.getHttpFiltersCount() - 1) && isTerminalFilter(filterConfig.getStruct())) {
throw new ResourceInvalidException("A terminal HttpFilter must be the last filter: "
+ filterName);
}
filterConfigs.add(new NamedFilterConfig(filterName, filterConfig.struct));
}
}
// Parse inlined RouteConfiguration or RDS.
if (proto.hasRouteConfig()) {
List<VirtualHost> virtualHosts = extractVirtualHosts(
proto.getRouteConfig(), filterRegistry, parseHttpFilter);
return io.grpc.xds.HttpConnectionManager.forVirtualHosts(
maxStreamDuration, virtualHosts, filterConfigs);
}
if (proto.hasRds()) {
Rds rds = proto.getRds();
if (!rds.hasConfigSource()) {
throw new ResourceInvalidException(
"HttpConnectionManager contains invalid RDS: missing config_source");
}
if (!rds.getConfigSource().hasAds() && !rds.getConfigSource().hasSelf()) {
throw new ResourceInvalidException(
"HttpConnectionManager contains invalid RDS: must specify ADS or self ConfigSource");
}
// Collect the RDS resource referenced by this HttpConnectionManager.
rdsResources.add(rds.getRouteConfigName());
return io.grpc.xds.HttpConnectionManager.forRdsName(
maxStreamDuration, rds.getRouteConfigName(), filterConfigs);
}
throw new ResourceInvalidException(
"HttpConnectionManager neither has inlined route_config nor RDS");
}
// hard-coded: currently router config is the only terminal filter.
private static boolean isTerminalFilter(FilterConfig filterConfig) {
return RouterFilter.ROUTER_CONFIG.equals(filterConfig);
}
@VisibleForTesting
@Nullable // Returns null if the filter is optional but not supported.
static StructOrError<FilterConfig> parseHttpFilter(
io.envoyproxy.envoy.extensions.filters.network.http_connection_manager.v3.HttpFilter
httpFilter, FilterRegistry filterRegistry, boolean isForClient) {
String filterName = httpFilter.getName();
boolean isOptional = httpFilter.getIsOptional();
if (!httpFilter.hasTypedConfig()) {
if (isOptional) {
return null;
} else {
return StructOrError.fromError(
"HttpFilter [" + filterName + "] is not optional and has no typed config");
}
}
Message rawConfig = httpFilter.getTypedConfig();
String typeUrl = httpFilter.getTypedConfig().getTypeUrl();
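    // If the config is wrapped in a udpa.type.v1 or xds.type.v3 TypedStruct, unwrap it so the
    // inner type URL selects the Filter implementation and the inner value is parsed as its config.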
try {
if (typeUrl.equals(TYPE_URL_TYPED_STRUCT_UDPA)) {
TypedStruct typedStruct = httpFilter.getTypedConfig().unpack(TypedStruct.class);
typeUrl = typedStruct.getTypeUrl();
rawConfig = typedStruct.getValue();
} else if (typeUrl.equals(TYPE_URL_TYPED_STRUCT)) {
com.github.xds.type.v3.TypedStruct newTypedStruct =
httpFilter.getTypedConfig().unpack(com.github.xds.type.v3.TypedStruct.class);
typeUrl = newTypedStruct.getTypeUrl();
rawConfig = newTypedStruct.getValue();
}
} catch (InvalidProtocolBufferException e) {
return StructOrError.fromError(
"HttpFilter [" + filterName + "] contains invalid proto: " + e);
}
Filter filter = filterRegistry.get(typeUrl);
if ((isForClient && !(filter instanceof ClientInterceptorBuilder))
|| (!isForClient && !(filter instanceof ServerInterceptorBuilder))) {
if (isOptional) {
return null;
} else {
return StructOrError.fromError(
"HttpFilter [" + filterName + "](" + typeUrl + ") is required but unsupported for "
+ (isForClient ? "client" : "server"));
}
}
ConfigOrError<? extends FilterConfig> filterConfig = filter.parseFilterConfig(rawConfig);
if (filterConfig.errorDetail != null) {
return StructOrError.fromError(
"Invalid filter config for HttpFilter [" + filterName + "]: " + filterConfig.errorDetail);
}
return StructOrError.fromStruct(filterConfig.config);
}
private static StructOrError<VirtualHost> parseVirtualHost(
io.envoyproxy.envoy.config.route.v3.VirtualHost proto, FilterRegistry filterRegistry,
boolean parseHttpFilter, Map<String, PluginConfig> pluginConfigMap) {
String name = proto.getName();
List<Route> routes = new ArrayList<>(proto.getRoutesCount());
for (io.envoyproxy.envoy.config.route.v3.Route routeProto : proto.getRoutesList()) {
StructOrError<Route> route = parseRoute(
routeProto, filterRegistry, parseHttpFilter, pluginConfigMap);
if (route == null) {
continue;
}
if (route.getErrorDetail() != null) {
return StructOrError.fromError(
"Virtual host [" + name + "] contains invalid route : " + route.getErrorDetail());
}
routes.add(route.getStruct());
}
if (!parseHttpFilter) {
return StructOrError.fromStruct(VirtualHost.create(
name, proto.getDomainsList(), routes, new HashMap<String, FilterConfig>()));
}
StructOrError<Map<String, FilterConfig>> overrideConfigs =
parseOverrideFilterConfigs(proto.getTypedPerFilterConfigMap(), filterRegistry);
if (overrideConfigs.errorDetail != null) {
return StructOrError.fromError(
"VirtualHost [" + proto.getName() + "] contains invalid HttpFilter config: "
+ overrideConfigs.errorDetail);
}
return StructOrError.fromStruct(VirtualHost.create(
name, proto.getDomainsList(), routes, overrideConfigs.struct));
}
@VisibleForTesting
static StructOrError<Map<String, FilterConfig>> parseOverrideFilterConfigs(
Map<String, Any> rawFilterConfigMap, FilterRegistry filterRegistry) {
Map<String, FilterConfig> overrideConfigs = new HashMap<>();
for (String name : rawFilterConfigMap.keySet()) {
Any anyConfig = rawFilterConfigMap.get(name);
String typeUrl = anyConfig.getTypeUrl();
boolean isOptional = false;
if (typeUrl.equals(TYPE_URL_FILTER_CONFIG)) {
io.envoyproxy.envoy.config.route.v3.FilterConfig filterConfig;
try {
filterConfig =
anyConfig.unpack(io.envoyproxy.envoy.config.route.v3.FilterConfig.class);
} catch (InvalidProtocolBufferException e) {
return StructOrError.fromError(
"FilterConfig [" + name + "] contains invalid proto: " + e);
}
isOptional = filterConfig.getIsOptional();
anyConfig = filterConfig.getConfig();
typeUrl = anyConfig.getTypeUrl();
}
Message rawConfig = anyConfig;
try {
if (typeUrl.equals(TYPE_URL_TYPED_STRUCT_UDPA)) {
TypedStruct typedStruct = anyConfig.unpack(TypedStruct.class);
typeUrl = typedStruct.getTypeUrl();
rawConfig = typedStruct.getValue();
} else if (typeUrl.equals(TYPE_URL_TYPED_STRUCT)) {
com.github.xds.type.v3.TypedStruct newTypedStruct =
anyConfig.unpack(com.github.xds.type.v3.TypedStruct.class);
typeUrl = newTypedStruct.getTypeUrl();
rawConfig = newTypedStruct.getValue();
}
} catch (InvalidProtocolBufferException e) {
return StructOrError.fromError(
"FilterConfig [" + name + "] contains invalid proto: " + e);
}
Filter filter = filterRegistry.get(typeUrl);
if (filter == null) {
if (isOptional) {
continue;
}
return StructOrError.fromError(
"HttpFilter [" + name + "](" + typeUrl + ") is required but unsupported");
}
ConfigOrError<? extends FilterConfig> filterConfig =
filter.parseFilterConfigOverride(rawConfig);
if (filterConfig.errorDetail != null) {
return StructOrError.fromError(
"Invalid filter config for HttpFilter [" + name + "]: " + filterConfig.errorDetail);
}
overrideConfigs.put(name, filterConfig.config);
}
return StructOrError.fromStruct(overrideConfigs);
}
@VisibleForTesting
@Nullable
static StructOrError<Route> parseRoute(
io.envoyproxy.envoy.config.route.v3.Route proto, FilterRegistry filterRegistry,
boolean parseHttpFilter, Map<String, PluginConfig> pluginConfigMap) {
StructOrError<RouteMatch> routeMatch = parseRouteMatch(proto.getMatch());
if (routeMatch == null) {
return null;
}
if (routeMatch.getErrorDetail() != null) {
return StructOrError.fromError(
"Route [" + proto.getName() + "] contains invalid RouteMatch: "
+ routeMatch.getErrorDetail());
}
Map<String, FilterConfig> overrideConfigs = Collections.emptyMap();
if (parseHttpFilter) {
StructOrError<Map<String, FilterConfig>> overrideConfigsOrError =
parseOverrideFilterConfigs(proto.getTypedPerFilterConfigMap(), filterRegistry);
if (overrideConfigsOrError.errorDetail != null) {
return StructOrError.fromError(
"Route [" + proto.getName() + "] contains invalid HttpFilter config: "
+ overrideConfigsOrError.errorDetail);
}
overrideConfigs = overrideConfigsOrError.struct;
}
switch (proto.getActionCase()) {
case ROUTE:
StructOrError<RouteAction> routeAction =
parseRouteAction(proto.getRoute(), filterRegistry, parseHttpFilter, pluginConfigMap);
if (routeAction == null) {
return null;
}
if (routeAction.errorDetail != null) {
return StructOrError.fromError(
"Route [" + proto.getName() + "] contains invalid RouteAction: "
+ routeAction.getErrorDetail());
}
return StructOrError.fromStruct(
Route.forAction(routeMatch.struct, routeAction.struct, overrideConfigs));
case NON_FORWARDING_ACTION:
return StructOrError.fromStruct(
Route.forNonForwardingAction(routeMatch.struct, overrideConfigs));
case REDIRECT:
case DIRECT_RESPONSE:
case FILTER_ACTION:
case ACTION_NOT_SET:
default:
return StructOrError.fromError(
"Route [" + proto.getName() + "] with unknown action type: " + proto.getActionCase());
}
}
@VisibleForTesting
@Nullable
static StructOrError<RouteMatch> parseRouteMatch(
io.envoyproxy.envoy.config.route.v3.RouteMatch proto) {
if (proto.getQueryParametersCount() != 0) {
return null;
}
StructOrError<PathMatcher> pathMatch = parsePathMatcher(proto);
if (pathMatch.getErrorDetail() != null) {
return StructOrError.fromError(pathMatch.getErrorDetail());
}
FractionMatcher fractionMatch = null;
if (proto.hasRuntimeFraction()) {
StructOrError<FractionMatcher> parsedFraction =
parseFractionMatcher(proto.getRuntimeFraction().getDefaultValue());
if (parsedFraction.getErrorDetail() != null) {
return StructOrError.fromError(parsedFraction.getErrorDetail());
}
fractionMatch = parsedFraction.getStruct();
}
List<HeaderMatcher> headerMatchers = new ArrayList<>();
for (io.envoyproxy.envoy.config.route.v3.HeaderMatcher hmProto : proto.getHeadersList()) {
StructOrError<HeaderMatcher> headerMatcher = parseHeaderMatcher(hmProto);
if (headerMatcher.getErrorDetail() != null) {
return StructOrError.fromError(headerMatcher.getErrorDetail());
}
headerMatchers.add(headerMatcher.getStruct());
}
return StructOrError.fromStruct(RouteMatch.create(
pathMatch.getStruct(), headerMatchers, fractionMatch));
}
@VisibleForTesting
static StructOrError<PathMatcher> parsePathMatcher(
io.envoyproxy.envoy.config.route.v3.RouteMatch proto) {
boolean caseSensitive = proto.getCaseSensitive().getValue();
switch (proto.getPathSpecifierCase()) {
case PREFIX:
return StructOrError.fromStruct(
PathMatcher.fromPrefix(proto.getPrefix(), caseSensitive));
case PATH:
return StructOrError.fromStruct(PathMatcher.fromPath(proto.getPath(), caseSensitive));
case SAFE_REGEX:
String rawPattern = proto.getSafeRegex().getRegex();
Pattern safeRegEx;
try {
safeRegEx = Pattern.compile(rawPattern);
} catch (PatternSyntaxException e) {
return StructOrError.fromError("Malformed safe regex pattern: " + e.getMessage());
}
return StructOrError.fromStruct(PathMatcher.fromRegEx(safeRegEx));
case PATHSPECIFIER_NOT_SET:
default:
return StructOrError.fromError("Unknown path match type");
}
}
private static StructOrError<FractionMatcher> parseFractionMatcher(FractionalPercent proto) {
int numerator = proto.getNumerator();
int denominator = 0;
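    // e.g. numerator = 500 with denominator TEN_THOUSAND represents 500 / 10,000 = 5%.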
switch (proto.getDenominator()) {
case HUNDRED:
denominator = 100;
break;
case TEN_THOUSAND:
denominator = 10_000;
break;
case MILLION:
denominator = 1_000_000;
break;
case UNRECOGNIZED:
default:
return StructOrError.fromError(
"Unrecognized fractional percent denominator: " + proto.getDenominator());
}
return StructOrError.fromStruct(FractionMatcher.create(numerator, denominator));
}
@VisibleForTesting
static StructOrError<HeaderMatcher> parseHeaderMatcher(
io.envoyproxy.envoy.config.route.v3.HeaderMatcher proto) {
switch (proto.getHeaderMatchSpecifierCase()) {
case EXACT_MATCH:
return StructOrError.fromStruct(HeaderMatcher.forExactValue(
proto.getName(), proto.getExactMatch(), proto.getInvertMatch()));
case SAFE_REGEX_MATCH:
String rawPattern = proto.getSafeRegexMatch().getRegex();
Pattern safeRegExMatch;
try {
safeRegExMatch = Pattern.compile(rawPattern);
} catch (PatternSyntaxException e) {
return StructOrError.fromError(
"HeaderMatcher [" + proto.getName() + "] contains malformed safe regex pattern: "
+ e.getMessage());
}
return StructOrError.fromStruct(HeaderMatcher.forSafeRegEx(
proto.getName(), safeRegExMatch, proto.getInvertMatch()));
case RANGE_MATCH:
HeaderMatcher.Range rangeMatch = HeaderMatcher.Range.create(
proto.getRangeMatch().getStart(), proto.getRangeMatch().getEnd());
return StructOrError.fromStruct(HeaderMatcher.forRange(
proto.getName(), rangeMatch, proto.getInvertMatch()));
case PRESENT_MATCH:
return StructOrError.fromStruct(HeaderMatcher.forPresent(
proto.getName(), proto.getPresentMatch(), proto.getInvertMatch()));
case PREFIX_MATCH:
return StructOrError.fromStruct(HeaderMatcher.forPrefix(
proto.getName(), proto.getPrefixMatch(), proto.getInvertMatch()));
case SUFFIX_MATCH:
return StructOrError.fromStruct(HeaderMatcher.forSuffix(
proto.getName(), proto.getSuffixMatch(), proto.getInvertMatch()));
case HEADERMATCHSPECIFIER_NOT_SET:
default:
return StructOrError.fromError("Unknown header matcher type");
}
}
/**
* Parses the RouteAction config. The returned result may contain a (parsed form)
* {@link RouteAction} or an error message. Returns {@code null} if the RouteAction
* should be ignored.
*/
@VisibleForTesting
@Nullable
static StructOrError<RouteAction> parseRouteAction(
io.envoyproxy.envoy.config.route.v3.RouteAction proto, FilterRegistry filterRegistry,
boolean parseHttpFilter, Map<String, PluginConfig> pluginConfigMap) {
Long timeoutNano = null;
if (proto.hasMaxStreamDuration()) {
io.envoyproxy.envoy.config.route.v3.RouteAction.MaxStreamDuration maxStreamDuration
= proto.getMaxStreamDuration();
if (maxStreamDuration.hasGrpcTimeoutHeaderMax()) {
timeoutNano = Durations.toNanos(maxStreamDuration.getGrpcTimeoutHeaderMax());
} else if (maxStreamDuration.hasMaxStreamDuration()) {
timeoutNano = Durations.toNanos(maxStreamDuration.getMaxStreamDuration());
}
}
RetryPolicy retryPolicy = null;
if (enableRetry && proto.hasRetryPolicy()) {
StructOrError<RetryPolicy> retryPolicyOrError = parseRetryPolicy(proto.getRetryPolicy());
if (retryPolicyOrError != null) {
if (retryPolicyOrError.errorDetail != null) {
return StructOrError.fromError(retryPolicyOrError.errorDetail);
}
retryPolicy = retryPolicyOrError.struct;
}
}
List<HashPolicy> hashPolicies = new ArrayList<>();
for (io.envoyproxy.envoy.config.route.v3.RouteAction.HashPolicy config
: proto.getHashPolicyList()) {
HashPolicy policy = null;
boolean terminal = config.getTerminal();
switch (config.getPolicySpecifierCase()) {
case HEADER:
io.envoyproxy.envoy.config.route.v3.RouteAction.HashPolicy.Header headerCfg =
config.getHeader();
Pattern regEx = null;
String regExSubstitute = null;
if (headerCfg.hasRegexRewrite() && headerCfg.getRegexRewrite().hasPattern()
&& headerCfg.getRegexRewrite().getPattern().hasGoogleRe2()) {
regEx = Pattern.compile(headerCfg.getRegexRewrite().getPattern().getRegex());
regExSubstitute = headerCfg.getRegexRewrite().getSubstitution();
}
policy = HashPolicy.forHeader(
terminal, headerCfg.getHeaderName(), regEx, regExSubstitute);
break;
case FILTER_STATE:
if (config.getFilterState().getKey().equals(HASH_POLICY_FILTER_STATE_KEY)) {
policy = HashPolicy.forChannelId(terminal);
}
break;
default:
// Ignore
}
if (policy != null) {
hashPolicies.add(policy);
}
}
switch (proto.getClusterSpecifierCase()) {
case CLUSTER:
return StructOrError.fromStruct(RouteAction.forCluster(
proto.getCluster(), hashPolicies, timeoutNano, retryPolicy));
case CLUSTER_HEADER:
return null;
case WEIGHTED_CLUSTERS:
List<io.envoyproxy.envoy.config.route.v3.WeightedCluster.ClusterWeight> clusterWeights
= proto.getWeightedClusters().getClustersList();
if (clusterWeights.isEmpty()) {
return StructOrError.fromError("No cluster found in weighted cluster list");
}
List<ClusterWeight> weightedClusters = new ArrayList<>();
for (io.envoyproxy.envoy.config.route.v3.WeightedCluster.ClusterWeight clusterWeight
: clusterWeights) {
StructOrError<ClusterWeight> clusterWeightOrError =
parseClusterWeight(clusterWeight, filterRegistry, parseHttpFilter);
if (clusterWeightOrError.getErrorDetail() != null) {
return StructOrError.fromError("RouteAction contains invalid ClusterWeight: "
+ clusterWeightOrError.getErrorDetail());
}
weightedClusters.add(clusterWeightOrError.getStruct());
}
// TODO(chengyuanzhang): validate if the sum of weights equals to total weight.
return StructOrError.fromStruct(RouteAction.forWeightedClusters(
weightedClusters, hashPolicies, timeoutNano, retryPolicy));
case CLUSTER_SPECIFIER_PLUGIN:
if (enableRouteLookup) {
String pluginName = proto.getClusterSpecifierPlugin();
PluginConfig pluginConfig = pluginConfigMap.get(pluginName);
if (pluginConfig == null) {
return StructOrError.fromError(
"ClusterSpecifierPlugin for [" + pluginName + "] not found");
}
NamedPluginConfig namedPluginConfig = NamedPluginConfig.create(pluginName, pluginConfig);
return StructOrError.fromStruct(RouteAction.forClusterSpecifierPlugin(
namedPluginConfig, hashPolicies, timeoutNano, retryPolicy));
} else {
return null;
}
case CLUSTERSPECIFIER_NOT_SET:
default:
return null;
}
}
@Nullable // Return null if we ignore the given policy.
private static StructOrError<RetryPolicy> parseRetryPolicy(
io.envoyproxy.envoy.config.route.v3.RetryPolicy retryPolicyProto) {
int maxAttempts = 2;
if (retryPolicyProto.hasNumRetries()) {
maxAttempts = retryPolicyProto.getNumRetries().getValue() + 1;
}
Duration initialBackoff = Durations.fromMillis(25);
Duration maxBackoff = Durations.fromMillis(250);
if (retryPolicyProto.hasRetryBackOff()) {
RetryBackOff retryBackOff = retryPolicyProto.getRetryBackOff();
if (!retryBackOff.hasBaseInterval()) {
return StructOrError.fromError("No base_interval specified in retry_backoff");
}
Duration originalInitialBackoff = initialBackoff = retryBackOff.getBaseInterval();
if (Durations.compare(initialBackoff, Durations.ZERO) <= 0) {
return StructOrError.fromError("base_interval in retry_backoff must be positive");
}
if (Durations.compare(initialBackoff, Durations.fromMillis(1)) < 0) {
initialBackoff = Durations.fromMillis(1);
}
if (retryBackOff.hasMaxInterval()) {
maxBackoff = retryPolicyProto.getRetryBackOff().getMaxInterval();
if (Durations.compare(maxBackoff, originalInitialBackoff) < 0) {
return StructOrError.fromError(
"max_interval in retry_backoff cannot be less than base_interval");
}
if (Durations.compare(maxBackoff, Durations.fromMillis(1)) < 0) {
maxBackoff = Durations.fromMillis(1);
}
} else {
maxBackoff = Durations.fromNanos(Durations.toNanos(initialBackoff) * 10);
}
}
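    // Worked example (hypothetical values): num_retries = 3, base_interval = 0.5ms and no
    // max_interval -> maxAttempts = 4, initialBackoff clamped up to 1ms, maxBackoff = 10 * 1ms.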
Iterable<String> retryOns =
Splitter.on(',').omitEmptyStrings().trimResults().split(retryPolicyProto.getRetryOn());
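    // e.g. retry_on = "cancelled, deadline-exceeded, 5xx" keeps CANCELLED and DEADLINE_EXCEEDED;
    // "5xx" does not map to a supported gRPC status code and is silently dropped.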
ImmutableList.Builder<Code> retryableStatusCodesBuilder = ImmutableList.builder();
for (String retryOn : retryOns) {
Code code;
try {
code = Code.valueOf(retryOn.toUpperCase(Locale.US).replace('-', '_'));
} catch (IllegalArgumentException e) {
// unsupported value, such as "5xx"
continue;
}
if (!SUPPORTED_RETRYABLE_CODES.contains(code)) {
// unsupported value
continue;
}
retryableStatusCodesBuilder.add(code);
}
List<Code> retryableStatusCodes = retryableStatusCodesBuilder.build();
return StructOrError.fromStruct(
RetryPolicy.create(
maxAttempts, retryableStatusCodes, initialBackoff, maxBackoff,
/* perAttemptRecvTimeout= */ null));
}
@VisibleForTesting
static StructOrError<ClusterWeight> parseClusterWeight(
io.envoyproxy.envoy.config.route.v3.WeightedCluster.ClusterWeight proto,
FilterRegistry filterRegistry, boolean parseHttpFilter) {
if (!parseHttpFilter) {
return StructOrError.fromStruct(ClusterWeight.create(
proto.getName(), proto.getWeight().getValue(), new HashMap<String, FilterConfig>()));
}
StructOrError<Map<String, FilterConfig>> overrideConfigs =
parseOverrideFilterConfigs(proto.getTypedPerFilterConfigMap(), filterRegistry);
if (overrideConfigs.errorDetail != null) {
return StructOrError.fromError(
"ClusterWeight [" + proto.getName() + "] contains invalid HttpFilter config: "
+ overrideConfigs.errorDetail);
}
return StructOrError.fromStruct(ClusterWeight.create(
proto.getName(), proto.getWeight().getValue(), overrideConfigs.struct));
}
@Override
public void handleRdsResponse(
ServerInfo serverInfo, String versionInfo, List<Any> resources, String nonce) {
syncContext.throwIfNotInThisSynchronizationContext();
Map<String, ParsedResource> parsedResources = new HashMap<>(resources.size());
Set<String> unpackedResources = new HashSet<>(resources.size());
Set<String> invalidResources = new HashSet<>();
List<String> errors = new ArrayList<>();
for (int i = 0; i < resources.size(); i++) {
Any resource = resources.get(i);
// Unpack the RouteConfiguration.
RouteConfiguration routeConfig;
try {
resource = maybeUnwrapResources(resource);
routeConfig = unpackCompatibleType(resource, RouteConfiguration.class,
ResourceType.RDS.typeUrl(), ResourceType.RDS.typeUrlV2());
} catch (InvalidProtocolBufferException e) {
errors.add("RDS response Resource index " + i + " - can't decode RouteConfiguration: " + e);
continue;
}
if (!isResourceNameValid(routeConfig.getName(), resource.getTypeUrl())) {
errors.add(
"Unsupported resource name: " + routeConfig.getName() + " for type: "
+ ResourceType.RDS);
continue;
}
String routeConfigName = canonifyResourceName(routeConfig.getName());
unpackedResources.add(routeConfigName);
// Process RouteConfiguration into RdsUpdate.
RdsUpdate rdsUpdate;
boolean isResourceV3 = resource.getTypeUrl().equals(ResourceType.RDS.typeUrl());
try {
rdsUpdate = processRouteConfiguration(
routeConfig, filterRegistry, enableFaultInjection && isResourceV3);
} catch (ResourceInvalidException e) {
errors.add(
"RDS response RouteConfiguration '" + routeConfigName + "' validation error: " + e
.getMessage());
invalidResources.add(routeConfigName);
continue;
}
parsedResources.put(routeConfigName, new ParsedResource(rdsUpdate, resource));
}
logger.log(XdsLogLevel.INFO,
"Received RDS Response version {0} nonce {1}. Parsed resources: {2}",
versionInfo, nonce, unpackedResources);
handleResourceUpdate(
serverInfo, ResourceType.RDS, parsedResources, invalidResources,
Collections.<String>emptySet(), versionInfo, nonce, errors);
}
private static RdsUpdate processRouteConfiguration(
RouteConfiguration routeConfig, FilterRegistry filterRegistry, boolean parseHttpFilter)
throws ResourceInvalidException {
return new RdsUpdate(extractVirtualHosts(routeConfig, filterRegistry, parseHttpFilter));
}
private static List<VirtualHost> extractVirtualHosts(
RouteConfiguration routeConfig, FilterRegistry filterRegistry, boolean parseHttpFilter)
throws ResourceInvalidException {
Map<String, PluginConfig> pluginConfigMap = new HashMap<>();
if (enableRouteLookup) {
List<ClusterSpecifierPlugin> plugins = routeConfig.getClusterSpecifierPluginsList();
for (ClusterSpecifierPlugin plugin : plugins) {
PluginConfig existing = pluginConfigMap.put(
plugin.getExtension().getName(), parseClusterSpecifierPlugin(plugin));
if (existing != null) {
throw new ResourceInvalidException(
"Multiple ClusterSpecifierPlugins with the same name: "
+ plugin.getExtension().getName());
}
}
}
List<VirtualHost> virtualHosts = new ArrayList<>(routeConfig.getVirtualHostsCount());
for (io.envoyproxy.envoy.config.route.v3.VirtualHost virtualHostProto
: routeConfig.getVirtualHostsList()) {
StructOrError<VirtualHost> virtualHost =
parseVirtualHost(virtualHostProto, filterRegistry, parseHttpFilter, pluginConfigMap);
if (virtualHost.getErrorDetail() != null) {
throw new ResourceInvalidException(
"RouteConfiguration contains invalid virtual host: " + virtualHost.getErrorDetail());
}
virtualHosts.add(virtualHost.getStruct());
}
return virtualHosts;
}
private static PluginConfig parseClusterSpecifierPlugin(ClusterSpecifierPlugin pluginProto)
throws ResourceInvalidException {
return parseClusterSpecifierPlugin(
pluginProto, ClusterSpecifierPluginRegistry.getDefaultRegistry());
}
@VisibleForTesting
static PluginConfig parseClusterSpecifierPlugin(
ClusterSpecifierPlugin pluginProto, ClusterSpecifierPluginRegistry registry)
throws ResourceInvalidException {
TypedExtensionConfig extension = pluginProto.getExtension();
String pluginName = extension.getName();
Any anyConfig = extension.getTypedConfig();
String typeUrl = anyConfig.getTypeUrl();
Message rawConfig = anyConfig;
if (typeUrl.equals(TYPE_URL_TYPED_STRUCT_UDPA) || typeUrl.equals(TYPE_URL_TYPED_STRUCT)) {
try {
TypedStruct typedStruct = unpackCompatibleType(
anyConfig, TypedStruct.class, TYPE_URL_TYPED_STRUCT_UDPA, TYPE_URL_TYPED_STRUCT);
typeUrl = typedStruct.getTypeUrl();
rawConfig = typedStruct.getValue();
} catch (InvalidProtocolBufferException e) {
throw new ResourceInvalidException(
"ClusterSpecifierPlugin [" + pluginName + "] contains invalid proto", e);
}
}
io.grpc.xds.ClusterSpecifierPlugin plugin = registry.get(typeUrl);
if (plugin == null) {
throw new ResourceInvalidException("Unsupported ClusterSpecifierPlugin type: " + typeUrl);
}
ConfigOrError<? extends PluginConfig> pluginConfigOrError = plugin.parsePlugin(rawConfig);
if (pluginConfigOrError.errorDetail != null) {
throw new ResourceInvalidException(pluginConfigOrError.errorDetail);
}
return pluginConfigOrError.config;
}
@Override
public void handleCdsResponse(
ServerInfo serverInfo, String versionInfo, List<Any> resources, String nonce) {
syncContext.throwIfNotInThisSynchronizationContext();
Map<String, ParsedResource> parsedResources = new HashMap<>(resources.size());
Set<String> unpackedResources = new HashSet<>(resources.size());
Set<String> invalidResources = new HashSet<>();
List<String> errors = new ArrayList<>();
Set<String> retainedEdsResources = new HashSet<>();
for (int i = 0; i < resources.size(); i++) {
Any resource = resources.get(i);
// Unpack the Cluster.
Cluster cluster;
try {
resource = maybeUnwrapResources(resource);
cluster = unpackCompatibleType(
resource, Cluster.class, ResourceType.CDS.typeUrl(), ResourceType.CDS.typeUrlV2());
} catch (InvalidProtocolBufferException e) {
errors.add("CDS response Resource index " + i + " - can't decode Cluster: " + e);
continue;
}
if (!isResourceNameValid(cluster.getName(), resource.getTypeUrl())) {
errors.add(
"Unsupported resource name: " + cluster.getName() + " for type: " + ResourceType.CDS);
continue;
}
String clusterName = canonifyResourceName(cluster.getName());
// Management server is required to always send newly requested resources, even if they
// may have been sent previously (proactively). Thus, the client does not need to cache
// unrequested resources.
if (!cdsResourceSubscribers.containsKey(clusterName)) {
continue;
}
unpackedResources.add(clusterName);
// Process Cluster into CdsUpdate.
CdsUpdate cdsUpdate;
try {
Set<String> certProviderInstances = null;
if (getBootstrapInfo() != null && getBootstrapInfo().certProviders() != null) {
certProviderInstances = getBootstrapInfo().certProviders().keySet();
}
cdsUpdate = processCluster(cluster, retainedEdsResources, certProviderInstances, serverInfo,
loadBalancerRegistry);
} catch (ResourceInvalidException e) {
errors.add(
"CDS response Cluster '" + clusterName + "' validation error: " + e.getMessage());
invalidResources.add(clusterName);
continue;
}
parsedResources.put(clusterName, new ParsedResource(cdsUpdate, resource));
}
logger.log(XdsLogLevel.INFO,
"Received CDS Response version {0} nonce {1}. Parsed resources: {2}",
versionInfo, nonce, unpackedResources);
handleResourceUpdate(
serverInfo, ResourceType.CDS, parsedResources, invalidResources, retainedEdsResources,
versionInfo, nonce, errors);
}
@VisibleForTesting
static CdsUpdate processCluster(Cluster cluster, Set<String> retainedEdsResources,
Set<String> certProviderInstances, ServerInfo serverInfo,
LoadBalancerRegistry loadBalancerRegistry)
throws ResourceInvalidException {
StructOrError<CdsUpdate.Builder> structOrError;
switch (cluster.getClusterDiscoveryTypeCase()) {
case TYPE:
structOrError = parseNonAggregateCluster(cluster, retainedEdsResources,
certProviderInstances, serverInfo);
break;
case CLUSTER_TYPE:
structOrError = parseAggregateCluster(cluster);
break;
case CLUSTERDISCOVERYTYPE_NOT_SET:
default:
throw new ResourceInvalidException(
"Cluster " + cluster.getName() + ": unspecified cluster discovery type");
}
if (structOrError.getErrorDetail() != null) {
throw new ResourceInvalidException(structOrError.getErrorDetail());
}
CdsUpdate.Builder updateBuilder = structOrError.getStruct();
LbConfig lbConfig = ServiceConfigUtil.unwrapLoadBalancingConfig(
LegacyLoadBalancerConfigFactory.newConfig(cluster, enableLeastRequest));
LoadBalancerProvider lbProvider = loadBalancerRegistry.getProvider(lbConfig.getPolicyName());
NameResolver.ConfigOrError configOrError = lbProvider
.parseLoadBalancingPolicyConfig(lbConfig.getRawConfigValue());
if (configOrError.getError() != null) {
throw new ResourceInvalidException(configOrError.getError().getDescription());
}
updateBuilder.lbPolicySelection(new PolicySelection(lbProvider, configOrError.getConfig()));
return updateBuilder.build();
}
private static StructOrError<CdsUpdate.Builder> parseAggregateCluster(Cluster cluster) {
String clusterName = cluster.getName();
CustomClusterType customType = cluster.getClusterType();
String typeName = customType.getName();
if (!typeName.equals(AGGREGATE_CLUSTER_TYPE_NAME)) {
return StructOrError.fromError(
"Cluster " + clusterName + ": unsupported custom cluster type: " + typeName);
}
io.envoyproxy.envoy.extensions.clusters.aggregate.v3.ClusterConfig clusterConfig;
try {
clusterConfig = unpackCompatibleType(customType.getTypedConfig(),
io.envoyproxy.envoy.extensions.clusters.aggregate.v3.ClusterConfig.class,
TYPE_URL_CLUSTER_CONFIG, TYPE_URL_CLUSTER_CONFIG_V2);
} catch (InvalidProtocolBufferException e) {
return StructOrError.fromError("Cluster " + clusterName + ": malformed ClusterConfig: " + e);
}
return StructOrError.fromStruct(CdsUpdate.forAggregate(
clusterName, clusterConfig.getClustersList()));
}
private static StructOrError<CdsUpdate.Builder> parseNonAggregateCluster(
Cluster cluster, Set<String> edsResources, Set<String> certProviderInstances,
ServerInfo serverInfo) {
String clusterName = cluster.getName();
ServerInfo lrsServerInfo = null;
Long maxConcurrentRequests = null;
UpstreamTlsContext upstreamTlsContext = null;
if (cluster.hasLrsServer()) {
if (!cluster.getLrsServer().hasSelf()) {
return StructOrError.fromError(
"Cluster " + clusterName + ": only support LRS for the same management server");
}
lrsServerInfo = serverInfo;
}
if (cluster.hasCircuitBreakers()) {
List<Thresholds> thresholds = cluster.getCircuitBreakers().getThresholdsList();
for (Thresholds threshold : thresholds) {
if (threshold.getPriority() != RoutingPriority.DEFAULT) {
continue;
}
if (threshold.hasMaxRequests()) {
maxConcurrentRequests = (long) threshold.getMaxRequests().getValue();
}
}
}
if (cluster.getTransportSocketMatchesCount() > 0) {
return StructOrError.fromError("Cluster " + clusterName
+ ": transport-socket-matches not supported.");
}
if (cluster.hasTransportSocket()) {
if (!TRANSPORT_SOCKET_NAME_TLS.equals(cluster.getTransportSocket().getName())) {
return StructOrError.fromError("transport-socket with name "
+ cluster.getTransportSocket().getName() + " not supported.");
}
try {
upstreamTlsContext = UpstreamTlsContext.fromEnvoyProtoUpstreamTlsContext(
validateUpstreamTlsContext(
unpackCompatibleType(cluster.getTransportSocket().getTypedConfig(),
io.envoyproxy.envoy.extensions.transport_sockets.tls.v3.UpstreamTlsContext.class,
TYPE_URL_UPSTREAM_TLS_CONTEXT, TYPE_URL_UPSTREAM_TLS_CONTEXT_V2),
certProviderInstances));
} catch (InvalidProtocolBufferException | ResourceInvalidException e) {
return StructOrError.fromError(
"Cluster " + clusterName + ": malformed UpstreamTlsContext: " + e);
}
}
DiscoveryType type = cluster.getType();
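// Only the EDS and LOGICAL_DNS built-in discovery types are handled below; any other type is rejected.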
if (type == DiscoveryType.EDS) {
String edsServiceName = null;
io.envoyproxy.envoy.config.cluster.v3.Cluster.EdsClusterConfig edsClusterConfig =
cluster.getEdsClusterConfig();
if (!edsClusterConfig.getEdsConfig().hasAds()
&& !edsClusterConfig.getEdsConfig().hasSelf()) {
return StructOrError.fromError(
"Cluster " + clusterName + ": field eds_cluster_config must be set to indicate to use"
+ " EDS over ADS or self ConfigSource");
}
// If the service_name field is set, that value will be used for the EDS request.
if (!edsClusterConfig.getServiceName().isEmpty()) {
edsServiceName = edsClusterConfig.getServiceName();
edsResources.add(edsServiceName);
} else {
edsResources.add(clusterName);
}
return StructOrError.fromStruct(CdsUpdate.forEds(
clusterName, edsServiceName, lrsServerInfo, maxConcurrentRequests, upstreamTlsContext));
} else if (type.equals(DiscoveryType.LOGICAL_DNS)) {
if (!cluster.hasLoadAssignment()) {
return StructOrError.fromError(
"Cluster " + clusterName + ": LOGICAL_DNS clusters must have a single host");
}
ClusterLoadAssignment assignment = cluster.getLoadAssignment();
if (assignment.getEndpointsCount() != 1
|| assignment.getEndpoints(0).getLbEndpointsCount() != 1) {
return StructOrError.fromError(
"Cluster " + clusterName + ": LOGICAL_DNS clusters must have a single "
+ "locality_lb_endpoint and a single lb_endpoint");
}
io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint lbEndpoint =
assignment.getEndpoints(0).getLbEndpoints(0);
if (!lbEndpoint.hasEndpoint() || !lbEndpoint.getEndpoint().hasAddress()
|| !lbEndpoint.getEndpoint().getAddress().hasSocketAddress()) {
return StructOrError.fromError(
"Cluster " + clusterName
+ ": LOGICAL_DNS clusters must have an endpoint with address and socket_address");
}
SocketAddress socketAddress = lbEndpoint.getEndpoint().getAddress().getSocketAddress();
if (!socketAddress.getResolverName().isEmpty()) {
return StructOrError.fromError(
"Cluster " + clusterName
+ ": LOGICAL DNS clusters must NOT have a custom resolver name set");
}
if (socketAddress.getPortSpecifierCase() != PortSpecifierCase.PORT_VALUE) {
return StructOrError.fromError(
"Cluster " + clusterName
+ ": LOGICAL DNS clusters socket_address must have port_value");
}
String dnsHostName =
String.format("%s:%d", socketAddress.getAddress(), socketAddress.getPortValue());
return StructOrError.fromStruct(CdsUpdate.forLogicalDns(
clusterName, dnsHostName, lrsServerInfo, maxConcurrentRequests, upstreamTlsContext));
}
return StructOrError.fromError(
"Cluster " + clusterName + ": unsupported built-in discovery type: " + type);
}
@Override
public void handleEdsResponse(
ServerInfo serverInfo, String versionInfo, List<Any> resources, String nonce) {
syncContext.throwIfNotInThisSynchronizationContext();
Map<String, ParsedResource> parsedResources = new HashMap<>(resources.size());
Set<String> unpackedResources = new HashSet<>(resources.size());
Set<String> invalidResources = new HashSet<>();
List<String> errors = new ArrayList<>();
for (int i = 0; i < resources.size(); i++) {
Any resource = resources.get(i);
// Unpack the ClusterLoadAssignment.
ClusterLoadAssignment assignment;
try {
resource = maybeUnwrapResources(resource);
assignment =
unpackCompatibleType(resource, ClusterLoadAssignment.class, ResourceType.EDS.typeUrl(),
ResourceType.EDS.typeUrlV2());
} catch (InvalidProtocolBufferException e) {
errors.add(
"EDS response Resource index " + i + " - can't decode ClusterLoadAssignment: " + e);
continue;
}
if (!isResourceNameValid(assignment.getClusterName(), resource.getTypeUrl())) {
errors.add("Unsupported resource name: " + assignment.getClusterName() + " for type: "
+ ResourceType.EDS);
continue;
}
String clusterName = canonifyResourceName(assignment.getClusterName());
// Skip information for clusters not requested.
// Management server is required to always send newly requested resources, even if they
// may have been sent previously (proactively). Thus, the client does not need to cache
// unrequested resources.
if (!edsResourceSubscribers.containsKey(clusterName)) {
continue;
}
unpackedResources.add(clusterName);
// Process ClusterLoadAssignment into EdsUpdate.
EdsUpdate edsUpdate;
try {
edsUpdate = processClusterLoadAssignment(assignment);
} catch (ResourceInvalidException e) {
errors.add("EDS response ClusterLoadAssignment '" + clusterName
+ "' validation error: " + e.getMessage());
invalidResources.add(clusterName);
continue;
}
parsedResources.put(clusterName, new ParsedResource(edsUpdate, resource));
}
logger.log(
XdsLogLevel.INFO, "Received EDS Response version {0} nonce {1}. Parsed resources: {2}",
versionInfo, nonce, unpackedResources);
handleResourceUpdate(
serverInfo, ResourceType.EDS, parsedResources, invalidResources,
Collections.<String>emptySet(), versionInfo, nonce, errors);
}
private static EdsUpdate processClusterLoadAssignment(ClusterLoadAssignment assignment)
throws ResourceInvalidException {
Set<Integer> priorities = new HashSet<>();
Map<Locality, LocalityLbEndpoints> localityLbEndpointsMap = new LinkedHashMap<>();
List<DropOverload> dropOverloads = new ArrayList<>();
int maxPriority = -1;
for (io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints localityLbEndpointsProto
: assignment.getEndpointsList()) {
StructOrError<LocalityLbEndpoints> structOrError =
parseLocalityLbEndpoints(localityLbEndpointsProto);
if (structOrError == null) {
continue;
}
if (structOrError.getErrorDetail() != null) {
throw new ResourceInvalidException(structOrError.getErrorDetail());
}
LocalityLbEndpoints localityLbEndpoints = structOrError.getStruct();
maxPriority = Math.max(maxPriority, localityLbEndpoints.priority());
priorities.add(localityLbEndpoints.priority());
// Note endpoints with health status other than HEALTHY and UNKNOWN are still
// handed over to watching parties. It is watching parties' responsibility to
// filter out unhealthy endpoints. See EnvoyProtoData.LbEndpoint#isHealthy().
localityLbEndpointsMap.put(
parseLocality(localityLbEndpointsProto.getLocality()),
localityLbEndpoints);
}
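// Priorities must form a contiguous range starting at 0; otherwise the assignment is rejected as sparse.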
if (priorities.size() != maxPriority + 1) {
throw new ResourceInvalidException("ClusterLoadAssignment has sparse priorities");
}
for (ClusterLoadAssignment.Policy.DropOverload dropOverloadProto
: assignment.getPolicy().getDropOverloadsList()) {
dropOverloads.add(parseDropOverload(dropOverloadProto));
}
return new EdsUpdate(assignment.getClusterName(), localityLbEndpointsMap, dropOverloads);
}
private static Locality parseLocality(io.envoyproxy.envoy.config.core.v3.Locality proto) {
return Locality.create(proto.getRegion(), proto.getZone(), proto.getSubZone());
}
private static DropOverload parseDropOverload(
io.envoyproxy.envoy.config.endpoint.v3.ClusterLoadAssignment.Policy.DropOverload proto) {
return DropOverload.create(proto.getCategory(), getRatePerMillion(proto.getDropPercentage()));
}
@VisibleForTesting
@Nullable
static StructOrError<LocalityLbEndpoints> parseLocalityLbEndpoints(
io.envoyproxy.envoy.config.endpoint.v3.LocalityLbEndpoints proto) {
// Filter out localities with no load-balancing weight or a weight of 0.
if (!proto.hasLoadBalancingWeight() || proto.getLoadBalancingWeight().getValue() < 1) {
return null;
}
if (proto.getPriority() < 0) {
return StructOrError.fromError("negative priority");
}
List<LbEndpoint> endpoints = new ArrayList<>(proto.getLbEndpointsCount());
for (io.envoyproxy.envoy.config.endpoint.v3.LbEndpoint endpoint : proto.getLbEndpointsList()) {
// The endpoint field of each lb_endpoints entry must be set,
// and within it the address field must be set.
if (!endpoint.hasEndpoint() || !endpoint.getEndpoint().hasAddress()) {
return StructOrError.fromError("LbEndpoint with no endpoint/address");
}
io.envoyproxy.envoy.config.core.v3.SocketAddress socketAddress =
endpoint.getEndpoint().getAddress().getSocketAddress();
InetSocketAddress addr =
new InetSocketAddress(socketAddress.getAddress(), socketAddress.getPortValue());
boolean isHealthy =
endpoint.getHealthStatus() == io.envoyproxy.envoy.config.core.v3.HealthStatus.HEALTHY
|| endpoint.getHealthStatus()
== io.envoyproxy.envoy.config.core.v3.HealthStatus.UNKNOWN;
endpoints.add(LbEndpoint.create(
new EquivalentAddressGroup(ImmutableList.<java.net.SocketAddress>of(addr)),
endpoint.getLoadBalancingWeight().getValue(), isHealthy));
}
return StructOrError.fromStruct(LocalityLbEndpoints.create(
endpoints, proto.getLoadBalancingWeight().getValue(), proto.getPriority()));
}
/**
* Helper method to unpack serialized {@link com.google.protobuf.Any} message, while replacing
* Type URL {@code compatibleTypeUrl} with {@code typeUrl}.
*
* @param <T> The type of unpacked message
* @param any serialized message to unpack
* @param clazz the class to unpack the message to
* @param typeUrl type URL to replace message Type URL, when it's compatible
* @param compatibleTypeUrl compatible Type URL to be replaced with {@code typeUrl}
* @return Unpacked message
* @throws InvalidProtocolBufferException if the message couldn't be unpacked
*/
private static <T extends com.google.protobuf.Message> T unpackCompatibleType(
Any any, Class<T> clazz, String typeUrl, String compatibleTypeUrl)
throws InvalidProtocolBufferException {
if (any.getTypeUrl().equals(compatibleTypeUrl)) {
any = any.toBuilder().setTypeUrl(typeUrl).build();
}
return any.unpack(clazz);
}
private static int getRatePerMillion(FractionalPercent percent) {
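// Normalize the fraction to a per-million rate (HUNDRED scales by 10,000, TEN_THOUSAND by 100);
// results outside [0, 1,000,000] are coerced to 1,000,000.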
int numerator = percent.getNumerator();
DenominatorType type = percent.getDenominator();
switch (type) {
case TEN_THOUSAND:
numerator *= 100;
break;
case HUNDRED:
numerator *= 10_000;
break;
case MILLION:
break;
case UNRECOGNIZED:
default:
throw new IllegalArgumentException("Unknown denominator type of " + percent);
}
if (numerator > 1_000_000 || numerator < 0) {
numerator = 1_000_000;
}
return numerator;
}
@Override
public void handleStreamClosed(Status error) {
syncContext.throwIfNotInThisSynchronizationContext();
cleanUpResourceTimers();
for (ResourceSubscriber subscriber : ldsResourceSubscribers.values()) {
subscriber.onError(error);
}
for (ResourceSubscriber subscriber : rdsResourceSubscribers.values()) {
subscriber.onError(error);
}
for (ResourceSubscriber subscriber : cdsResourceSubscribers.values()) {
subscriber.onError(error);
}
for (ResourceSubscriber subscriber : edsResourceSubscribers.values()) {
subscriber.onError(error);
}
}
@Override
public void handleStreamRestarted(ServerInfo serverInfo) {
syncContext.throwIfNotInThisSynchronizationContext();
for (ResourceSubscriber subscriber : ldsResourceSubscribers.values()) {
if (subscriber.serverInfo.equals(serverInfo)) {
subscriber.restartTimer();
}
}
for (ResourceSubscriber subscriber : rdsResourceSubscribers.values()) {
if (subscriber.serverInfo.equals(serverInfo)) {
subscriber.restartTimer();
}
}
for (ResourceSubscriber subscriber : cdsResourceSubscribers.values()) {
if (subscriber.serverInfo.equals(serverInfo)) {
subscriber.restartTimer();
}
}
for (ResourceSubscriber subscriber : edsResourceSubscribers.values()) {
if (subscriber.serverInfo.equals(serverInfo)) {
subscriber.restartTimer();
}
}
}
@Override
void shutdown() {
syncContext.execute(
new Runnable() {
@Override
public void run() {
if (isShutdown) {
return;
}
isShutdown = true;
for (AbstractXdsClient xdsChannel : serverChannelMap.values()) {
xdsChannel.shutdown();
}
if (reportingLoad) {
for (final LoadReportClient lrsClient : serverLrsClientMap.values()) {
lrsClient.stopLoadReporting();
}
}
cleanUpResourceTimers();
}
});
}
@Override
boolean isShutDown() {
return isShutdown;
}
private Map<String, ResourceSubscriber> getSubscribedResourcesMap(ResourceType type) {
switch (type) {
case LDS:
return ldsResourceSubscribers;
case RDS:
return rdsResourceSubscribers;
case CDS:
return cdsResourceSubscribers;
case EDS:
return edsResourceSubscribers;
case UNKNOWN:
default:
throw new AssertionError("Unknown resource type");
}
}
@Nullable
@Override
public Collection<String> getSubscribedResources(ServerInfo serverInfo, ResourceType type) {
Map<String, ResourceSubscriber> resources = getSubscribedResourcesMap(type);
ImmutableSet.Builder<String> builder = ImmutableSet.builder();
for (String key : resources.keySet()) {
if (resources.get(key).serverInfo.equals(serverInfo)) {
builder.add(key);
}
}
Collection<String> retVal = builder.build();
return retVal.isEmpty() ? null : retVal;
}
@Override
ListenableFuture<Map<ResourceType, Map<String, ResourceMetadata>>>
getSubscribedResourcesMetadataSnapshot() {
final SettableFuture<Map<ResourceType, Map<String, ResourceMetadata>>> future =
SettableFuture.create();
syncContext.execute(new Runnable() {
@Override
public void run() {
// A map from a "resource type" to a map ("resource name": "resource metadata")
ImmutableMap.Builder<ResourceType, Map<String, ResourceMetadata>> metadataSnapshot =
ImmutableMap.builder();
for (ResourceType type : ResourceType.values()) {
if (type == ResourceType.UNKNOWN) {
continue;
}
ImmutableMap.Builder<String, ResourceMetadata> metadataMap = ImmutableMap.builder();
for (Map.Entry<String, ResourceSubscriber> resourceEntry
: getSubscribedResourcesMap(type).entrySet()) {
metadataMap.put(resourceEntry.getKey(), resourceEntry.getValue().metadata);
}
metadataSnapshot.put(type, metadataMap.build());
}
future.set(metadataSnapshot.build());
}
});
return future;
}
@Override
TlsContextManager getTlsContextManager() {
return tlsContextManager;
}
@Override
void watchLdsResource(final String resourceName, final LdsResourceWatcher watcher) {
syncContext.execute(new Runnable() {
@Override
public void run() {
ResourceSubscriber subscriber = ldsResourceSubscribers.get(resourceName);
if (subscriber == null) {
logger.log(XdsLogLevel.INFO, "Subscribe LDS resource {0}", resourceName);
subscriber = new ResourceSubscriber(ResourceType.LDS, resourceName);
ldsResourceSubscribers.put(resourceName, subscriber);
subscriber.xdsChannel.adjustResourceSubscription(ResourceType.LDS);
}
subscriber.addWatcher(watcher);
}
});
}
@Override
void cancelLdsResourceWatch(final String resourceName, final LdsResourceWatcher watcher) {
syncContext.execute(new Runnable() {
@Override
public void run() {
ResourceSubscriber subscriber = ldsResourceSubscribers.get(resourceName);
subscriber.removeWatcher(watcher);
if (!subscriber.isWatched()) {
subscriber.stopTimer();
logger.log(XdsLogLevel.INFO, "Unsubscribe LDS resource {0}", resourceName);
ldsResourceSubscribers.remove(resourceName);
subscriber.xdsChannel.adjustResourceSubscription(ResourceType.LDS);
}
}
});
}
@Override
void watchRdsResource(final String resourceName, final RdsResourceWatcher watcher) {
syncContext.execute(new Runnable() {
@Override
public void run() {
ResourceSubscriber subscriber = rdsResourceSubscribers.get(resourceName);
if (subscriber == null) {
logger.log(XdsLogLevel.INFO, "Subscribe RDS resource {0}", resourceName);
subscriber = new ResourceSubscriber(ResourceType.RDS, resourceName);
rdsResourceSubscribers.put(resourceName, subscriber);
subscriber.xdsChannel.adjustResourceSubscription(ResourceType.RDS);
}
subscriber.addWatcher(watcher);
}
});
}
@Override
void cancelRdsResourceWatch(final String resourceName, final RdsResourceWatcher watcher) {
syncContext.execute(new Runnable() {
@Override
public void run() {
ResourceSubscriber subscriber = rdsResourceSubscribers.get(resourceName);
subscriber.removeWatcher(watcher);
if (!subscriber.isWatched()) {
subscriber.stopTimer();
logger.log(XdsLogLevel.INFO, "Unsubscribe RDS resource {0}", resourceName);
rdsResourceSubscribers.remove(resourceName);
subscriber.xdsChannel.adjustResourceSubscription(ResourceType.RDS);
}
}
});
}
@Override
void watchCdsResource(final String resourceName, final CdsResourceWatcher watcher) {
syncContext.execute(new Runnable() {
@Override
public void run() {
ResourceSubscriber subscriber = cdsResourceSubscribers.get(resourceName);
if (subscriber == null) {
logger.log(XdsLogLevel.INFO, "Subscribe CDS resource {0}", resourceName);
subscriber = new ResourceSubscriber(ResourceType.CDS, resourceName);
cdsResourceSubscribers.put(resourceName, subscriber);
subscriber.xdsChannel.adjustResourceSubscription(ResourceType.CDS);
}
subscriber.addWatcher(watcher);
}
});
}
@Override
void cancelCdsResourceWatch(final String resourceName, final CdsResourceWatcher watcher) {
syncContext.execute(new Runnable() {
@Override
public void run() {
ResourceSubscriber subscriber = cdsResourceSubscribers.get(resourceName);
subscriber.removeWatcher(watcher);
if (!subscriber.isWatched()) {
subscriber.stopTimer();
logger.log(XdsLogLevel.INFO, "Unsubscribe CDS resource {0}", resourceName);
cdsResourceSubscribers.remove(resourceName);
subscriber.xdsChannel.adjustResourceSubscription(ResourceType.CDS);
}
}
});
}
@Override
void watchEdsResource(final String resourceName, final EdsResourceWatcher watcher) {
syncContext.execute(new Runnable() {
@Override
public void run() {
ResourceSubscriber subscriber = edsResourceSubscribers.get(resourceName);
if (subscriber == null) {
logger.log(XdsLogLevel.INFO, "Subscribe EDS resource {0}", resourceName);
subscriber = new ResourceSubscriber(ResourceType.EDS, resourceName);
edsResourceSubscribers.put(resourceName, subscriber);
subscriber.xdsChannel.adjustResourceSubscription(ResourceType.EDS);
}
subscriber.addWatcher(watcher);
}
});
}
@Override
void cancelEdsResourceWatch(final String resourceName, final EdsResourceWatcher watcher) {
syncContext.execute(new Runnable() {
@Override
public void run() {
ResourceSubscriber subscriber = edsResourceSubscribers.get(resourceName);
subscriber.removeWatcher(watcher);
if (!subscriber.isWatched()) {
subscriber.stopTimer();
logger.log(XdsLogLevel.INFO, "Unsubscribe EDS resource {0}", resourceName);
edsResourceSubscribers.remove(resourceName);
subscriber.xdsChannel.adjustResourceSubscription(ResourceType.EDS);
}
}
});
}
@Override
ClusterDropStats addClusterDropStats(
final ServerInfo serverInfo, String clusterName, @Nullable String edsServiceName) {
ClusterDropStats dropCounter =
loadStatsManager.getClusterDropStats(clusterName, edsServiceName);
syncContext.execute(new Runnable() {
@Override
public void run() {
if (!reportingLoad) {
serverLrsClientMap.get(serverInfo).startLoadReporting();
reportingLoad = true;
}
}
});
return dropCounter;
}
@Override
ClusterLocalityStats addClusterLocalityStats(
final ServerInfo serverInfo, String clusterName, @Nullable String edsServiceName,
Locality locality) {
ClusterLocalityStats loadCounter =
loadStatsManager.getClusterLocalityStats(clusterName, edsServiceName, locality);
syncContext.execute(new Runnable() {
@Override
public void run() {
if (!reportingLoad) {
serverLrsClientMap.get(serverInfo).startLoadReporting();
reportingLoad = true;
}
}
});
return loadCounter;
}
@Override
Bootstrapper.BootstrapInfo getBootstrapInfo() {
return bootstrapInfo;
}
@Override
public String toString() {
return logId.toString();
}
private void cleanUpResourceTimers() {
for (ResourceSubscriber subscriber : ldsResourceSubscribers.values()) {
subscriber.stopTimer();
}
for (ResourceSubscriber subscriber : rdsResourceSubscribers.values()) {
subscriber.stopTimer();
}
for (ResourceSubscriber subscriber : cdsResourceSubscribers.values()) {
subscriber.stopTimer();
}
for (ResourceSubscriber subscriber : edsResourceSubscribers.values()) {
subscriber.stopTimer();
}
}
private void handleResourceUpdate(
ServerInfo serverInfo, ResourceType type, Map<String, ParsedResource> parsedResources,
Set<String> invalidResources, Set<String> retainedResources, String version, String nonce,
List<String> errors) {
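// ACK the response if every resource parsed cleanly; otherwise NACK it with the accumulated error details.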
String errorDetail = null;
if (errors.isEmpty()) {
checkArgument(invalidResources.isEmpty(), "found invalid resources but missing errors");
serverChannelMap.get(serverInfo).ackResponse(type, version, nonce);
} else {
errorDetail = Joiner.on('\n').join(errors);
logger.log(XdsLogLevel.WARNING,
"Failed processing {0} Response version {1} nonce {2}. Errors:\n{3}",
type, version, nonce, errorDetail);
serverChannelMap.get(serverInfo).nackResponse(type, nonce, errorDetail);
}
long updateTime = timeProvider.currentTimeNanos();
for (Map.Entry<String, ResourceSubscriber> entry : getSubscribedResourcesMap(type).entrySet()) {
String resourceName = entry.getKey();
ResourceSubscriber subscriber = entry.getValue();
// Attach error details to the subscribed resources that are included in the ADS update.
if (invalidResources.contains(resourceName)) {
subscriber.onRejected(version, updateTime, errorDetail);
}
// Notify the watchers.
if (parsedResources.containsKey(resourceName)) {
subscriber.onData(parsedResources.get(resourceName), version, updateTime);
} else if (type == ResourceType.LDS || type == ResourceType.CDS) {
if (subscriber.data != null && invalidResources.contains(resourceName)) {
// Update is rejected but keep using the cached data.
if (type == ResourceType.LDS) {
LdsUpdate ldsUpdate = (LdsUpdate) subscriber.data;
io.grpc.xds.HttpConnectionManager hcm = ldsUpdate.httpConnectionManager();
if (hcm != null) {
String rdsName = hcm.rdsName();
if (rdsName != null) {
retainedResources.add(rdsName);
}
}
} else {
CdsUpdate cdsUpdate = (CdsUpdate) subscriber.data;
String edsName = cdsUpdate.edsServiceName();
if (edsName == null) {
edsName = cdsUpdate.clusterName();
}
retainedResources.add(edsName);
}
} else if (invalidResources.contains(resourceName)) {
subscriber.onError(Status.UNAVAILABLE.withDescription(errorDetail));
} else {
// For State of the World services, notify watchers when their watched resource is missing
// from the ADS update.
subscriber.onAbsent();
}
}
}
// LDS/CDS responses represent the state of the world; RDS/EDS resources not referenced in
// LDS/CDS resources should be deleted.
if (type == ResourceType.LDS || type == ResourceType.CDS) {
Map<String, ResourceSubscriber> dependentSubscribers =
type == ResourceType.LDS ? rdsResourceSubscribers : edsResourceSubscribers;
for (String resource : dependentSubscribers.keySet()) {
if (!retainedResources.contains(resource)) {
dependentSubscribers.get(resource).onAbsent();
}
}
}
}
private static final class ParsedResource {
private final ResourceUpdate resourceUpdate;
private final Any rawResource;
private ParsedResource(ResourceUpdate resourceUpdate, Any rawResource) {
this.resourceUpdate = checkNotNull(resourceUpdate, "resourceUpdate");
this.rawResource = checkNotNull(rawResource, "rawResource");
}
private ResourceUpdate getResourceUpdate() {
return resourceUpdate;
}
private Any getRawResource() {
return rawResource;
}
}
/**
* Tracks a single subscribed resource.
*/
private final class ResourceSubscriber {
private final ServerInfo serverInfo;
private final AbstractXdsClient xdsChannel;
private final ResourceType type;
private final String resource;
private final Set<ResourceWatcher> watchers = new HashSet<>();
private ResourceUpdate data;
private boolean absent;
private ScheduledHandle respTimer;
private ResourceMetadata metadata;
ResourceSubscriber(ResourceType type, String resource) {
syncContext.throwIfNotInThisSynchronizationContext();
this.type = type;
this.resource = resource;
this.serverInfo = getServerInfo(resource);
// Initialize metadata in UNKNOWN state to cover the case when the resource subscriber
// is created but not yet requested because the client is in backoff.
this.metadata = ResourceMetadata.newResourceMetadataUnknown();
maybeCreateXdsChannelWithLrs(serverInfo);
this.xdsChannel = serverChannelMap.get(serverInfo);
if (xdsChannel.isInBackoff()) {
return;
}
restartTimer();
}
private ServerInfo getServerInfo(String resource) {
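// xdstp:// resource names are routed to the server configured for their authority; all other names use the first bootstrap server.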
if (resource.startsWith(XDSTP_SCHEME)) {
URI uri = URI.create(resource);
String authority = uri.getAuthority();
if (authority == null) {
authority = "";
}
AuthorityInfo authorityInfo = bootstrapInfo.authorities().get(authority);
return authorityInfo.xdsServers().get(0);
}
return bootstrapInfo.servers().get(0); // use first server
}
void addWatcher(ResourceWatcher watcher) {
checkArgument(!watchers.contains(watcher), "watcher %s already registered", watcher);
watchers.add(watcher);
if (data != null) {
notifyWatcher(watcher, data);
} else if (absent) {
watcher.onResourceDoesNotExist(resource);
}
}
void removeWatcher(ResourceWatcher watcher) {
checkArgument(watchers.contains(watcher), "watcher %s not registered", watcher);
watchers.remove(watcher);
}
void restartTimer() {
if (data != null || absent) { // resource already resolved
return;
}
class ResourceNotFound implements Runnable {
@Override
public void run() {
logger.log(XdsLogLevel.INFO, "{0} resource {1} initial fetch timeout",
type, resource);
respTimer = null;
onAbsent();
}
@Override
public String toString() {
return type + this.getClass().getSimpleName();
}
}
// Initial fetch scheduled or rescheduled; transition metadata state to REQUESTED.
metadata = ResourceMetadata.newResourceMetadataRequested();
respTimer = syncContext.schedule(
new ResourceNotFound(), INITIAL_RESOURCE_FETCH_TIMEOUT_SEC, TimeUnit.SECONDS,
timeService);
}
void stopTimer() {
if (respTimer != null && respTimer.isPending()) {
respTimer.cancel();
respTimer = null;
}
}
boolean isWatched() {
return !watchers.isEmpty();
}
void onData(ParsedResource parsedResource, String version, long updateTime) {
if (respTimer != null && respTimer.isPending()) {
respTimer.cancel();
respTimer = null;
}
this.metadata = ResourceMetadata
.newResourceMetadataAcked(parsedResource.getRawResource(), version, updateTime);
ResourceUpdate oldData = this.data;
this.data = parsedResource.getResourceUpdate();
absent = false;
if (!Objects.equals(oldData, data)) {
for (ResourceWatcher watcher : watchers) {
notifyWatcher(watcher, data);
}
}
}
void onAbsent() {
if (respTimer != null && respTimer.isPending()) { // too early to conclude absence
return;
}
logger.log(XdsLogLevel.INFO, "Conclude {0} resource {1} not exist", type, resource);
if (!absent) {
data = null;
absent = true;
metadata = ResourceMetadata.newResourceMetadataDoesNotExist();
for (ResourceWatcher watcher : watchers) {
watcher.onResourceDoesNotExist(resource);
}
}
}
void onError(Status error) {
if (respTimer != null && respTimer.isPending()) {
respTimer.cancel();
respTimer = null;
}
for (ResourceWatcher watcher : watchers) {
watcher.onError(error);
}
}
void onRejected(String rejectedVersion, long rejectedTime, String rejectedDetails) {
metadata = ResourceMetadata
.newResourceMetadataNacked(metadata, rejectedVersion, rejectedTime, rejectedDetails);
}
private void notifyWatcher(ResourceWatcher watcher, ResourceUpdate update) {
switch (type) {
case LDS:
((LdsResourceWatcher) watcher).onChanged((LdsUpdate) update);
break;
case RDS:
((RdsResourceWatcher) watcher).onChanged((RdsUpdate) update);
break;
case CDS:
((CdsResourceWatcher) watcher).onChanged((CdsUpdate) update);
break;
case EDS:
((EdsResourceWatcher) watcher).onChanged((EdsUpdate) update);
break;
case UNKNOWN:
default:
throw new AssertionError("should never be here");
}
}
}
@VisibleForTesting
static final class ResourceInvalidException extends Exception {
private static final long serialVersionUID = 0L;
ResourceInvalidException(String message) {
super(message, null, false, false);
}
ResourceInvalidException(String message, Throwable cause) {
super(cause != null ? message + ": " + cause.getMessage() : message, cause, false, false);
}
}
@VisibleForTesting
static final class StructOrError<T> {
/**
* Returns a {@link StructOrError} for the successfully converted data object.
*/
private static <T> StructOrError<T> fromStruct(T struct) {
return new StructOrError<>(struct);
}
/**
* Returns a {@link StructOrError} for the failure to convert the data object.
*/
private static <T> StructOrError<T> fromError(String errorDetail) {
return new StructOrError<>(errorDetail);
}
private final String errorDetail;
private final T struct;
private StructOrError(T struct) {
this.struct = checkNotNull(struct, "struct");
this.errorDetail = null;
}
private StructOrError(String errorDetail) {
this.struct = null;
this.errorDetail = checkNotNull(errorDetail, "errorDetail");
}
/**
* Returns struct if exists, otherwise null.
*/
@VisibleForTesting
@Nullable
T getStruct() {
return struct;
}
/**
* Returns error detail if exists, otherwise null.
*/
@VisibleForTesting
@Nullable
String getErrorDetail() {
return errorDetail;
}
}
abstract static class XdsChannelFactory {
static final XdsChannelFactory DEFAULT_XDS_CHANNEL_FACTORY = new XdsChannelFactory() {
@Override
ManagedChannel create(ServerInfo serverInfo) {
String target = serverInfo.target();
ChannelCredentials channelCredentials = serverInfo.channelCredentials();
return Grpc.newChannelBuilder(target, channelCredentials)
.keepAliveTime(5, TimeUnit.MINUTES)
.build();
}
};
abstract ManagedChannel create(ServerInfo serverInfo);
}
}
|
[
"\"GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION\"",
"\"GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION\"",
"\"GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY\"",
"\"GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY\"",
"\"GRPC_XDS_EXPERIMENTAL_RBAC\"",
"\"GRPC_XDS_EXPERIMENTAL_RBAC\"",
"\"GRPC_EXPERIMENTAL_XDS_RLS_LB\"",
"\"GRPC_EXPERIMENTAL_XDS_RLS_LB\"",
"\"GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST\"",
"\"GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST\""
] |
[] |
[
"GRPC_EXPERIMENTAL_XDS_RLS_LB",
"GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY",
"GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION",
"GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST",
"GRPC_XDS_EXPERIMENTAL_RBAC"
] |
[]
|
["GRPC_EXPERIMENTAL_XDS_RLS_LB", "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY", "GRPC_XDS_EXPERIMENTAL_FAULT_INJECTION", "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", "GRPC_XDS_EXPERIMENTAL_RBAC"]
|
java
| 5 | 0 | |
fpga/mqnic_tdma/ExaNIC_X10/fpga/tb/fpga_core/test_fpga_core.py
|
"""
Copyright 2020, The Regents of the University of California.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE REGENTS OF THE UNIVERSITY OF CALIFORNIA ''AS
IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of The Regents of the University of California.
"""
import logging
import os
import sys
import scapy.utils
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
import cocotb_test.simulator
import cocotb
from cocotb.log import SimLog
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, Timer
from cocotbext.axi import AxiStreamBus
from cocotbext.eth import XgmiiSource, XgmiiSink
from cocotbext.pcie.core import RootComplex
from cocotbext.pcie.xilinx.us import UltraScalePcieDevice
try:
import mqnic
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
import mqnic
finally:
del sys.path[0]
class TB(object):
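# Testbench wrapper: instantiates the PCIe root complex and UltraScale PCIe hard IP models, XGMII sources/sinks for both SFP ports, and the mqnic driver.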
def __init__(self, dut):
self.dut = dut
self.BAR0_APERTURE = int(os.getenv("PARAM_BAR0_APERTURE"))
self.log = SimLog("cocotb.tb")
self.log.setLevel(logging.DEBUG)
# PCIe
self.rc = RootComplex()
self.rc.max_payload_size = 0x1 # 256 bytes
self.rc.max_read_request_size = 0x2 # 512 bytes
self.dev = UltraScalePcieDevice(
# configuration options
pcie_generation=3,
pcie_link_width=8,
user_clk_frequency=250e6,
alignment="dword",
straddle=False,
enable_pf1=False,
enable_client_tag=True,
enable_extended_tag=True,
enable_parity=False,
enable_rx_msg_interface=False,
enable_sriov=False,
enable_extended_configuration=False,
enable_pf0_msi=True,
enable_pf1_msi=False,
# signals
# Clock and Reset Interface
user_clk=dut.clk_250mhz,
user_reset=dut.rst_250mhz,
# user_lnk_up
# sys_clk
# sys_clk_gt
# sys_reset
# phy_rdy_out
# Requester reQuest Interface
rq_bus=AxiStreamBus.from_prefix(dut, "m_axis_rq"),
pcie_rq_seq_num=dut.s_axis_rq_seq_num,
pcie_rq_seq_num_vld=dut.s_axis_rq_seq_num_valid,
# pcie_rq_tag
# pcie_rq_tag_av
# pcie_rq_tag_vld
# Requester Completion Interface
rc_bus=AxiStreamBus.from_prefix(dut, "s_axis_rc"),
# Completer reQuest Interface
cq_bus=AxiStreamBus.from_prefix(dut, "s_axis_cq"),
# pcie_cq_np_req
# pcie_cq_np_req_count
# Completer Completion Interface
cc_bus=AxiStreamBus.from_prefix(dut, "m_axis_cc"),
# Transmit Flow Control Interface
# pcie_tfc_nph_av=dut.pcie_tfc_nph_av,
# pcie_tfc_npd_av=dut.pcie_tfc_npd_av,
# Configuration Management Interface
cfg_mgmt_addr=dut.cfg_mgmt_addr,
cfg_mgmt_write=dut.cfg_mgmt_write,
cfg_mgmt_write_data=dut.cfg_mgmt_write_data,
cfg_mgmt_byte_enable=dut.cfg_mgmt_byte_enable,
cfg_mgmt_read=dut.cfg_mgmt_read,
cfg_mgmt_read_data=dut.cfg_mgmt_read_data,
cfg_mgmt_read_write_done=dut.cfg_mgmt_read_write_done,
# cfg_mgmt_debug_access
# Configuration Status Interface
# cfg_phy_link_down
# cfg_phy_link_status
# cfg_negotiated_width
# cfg_current_speed
cfg_max_payload=dut.cfg_max_payload,
cfg_max_read_req=dut.cfg_max_read_req,
# cfg_function_status
# cfg_vf_status
# cfg_function_power_state
# cfg_vf_power_state
# cfg_link_power_state
# cfg_err_cor_out
# cfg_err_nonfatal_out
# cfg_err_fatal_out
# cfg_local_error_out
# cfg_local_error_valid
# cfg_rx_pm_state
# cfg_tx_pm_state
# cfg_ltssm_state
# cfg_rcb_status
# cfg_obff_enable
# cfg_pl_status_change
# cfg_tph_requester_enable
# cfg_tph_st_mode
# cfg_vf_tph_requester_enable
# cfg_vf_tph_st_mode
# Configuration Received Message Interface
# cfg_msg_received
# cfg_msg_received_data
# cfg_msg_received_type
# Configuration Transmit Message Interface
# cfg_msg_transmit
# cfg_msg_transmit_type
# cfg_msg_transmit_data
# cfg_msg_transmit_done
# Configuration Flow Control Interface
cfg_fc_ph=dut.cfg_fc_ph,
cfg_fc_pd=dut.cfg_fc_pd,
cfg_fc_nph=dut.cfg_fc_nph,
cfg_fc_npd=dut.cfg_fc_npd,
cfg_fc_cplh=dut.cfg_fc_cplh,
cfg_fc_cpld=dut.cfg_fc_cpld,
cfg_fc_sel=dut.cfg_fc_sel,
# Configuration Control Interface
# cfg_hot_reset_in
# cfg_hot_reset_out
# cfg_config_space_enable
# cfg_dsn
# cfg_bus_number
# cfg_ds_port_number
# cfg_ds_bus_number
# cfg_ds_device_number
# cfg_ds_function_number
# cfg_power_state_change_ack
# cfg_power_state_change_interrupt
cfg_err_cor_in=dut.status_error_cor,
cfg_err_uncor_in=dut.status_error_uncor,
# cfg_flr_in_process
# cfg_flr_done
# cfg_vf_flr_in_process
# cfg_vf_flr_func_num
# cfg_vf_flr_done
# cfg_pm_aspm_l1_entry_reject
# cfg_pm_aspm_tx_l0s_entry_disable
# cfg_req_pm_transition_l23_ready
# cfg_link_training_enable
# Configuration Interrupt Controller Interface
# cfg_interrupt_int
# cfg_interrupt_sent
# cfg_interrupt_pending
cfg_interrupt_msi_enable=dut.cfg_interrupt_msi_enable,
cfg_interrupt_msi_vf_enable=dut.cfg_interrupt_msi_vf_enable,
cfg_interrupt_msi_mmenable=dut.cfg_interrupt_msi_mmenable,
cfg_interrupt_msi_mask_update=dut.cfg_interrupt_msi_mask_update,
cfg_interrupt_msi_data=dut.cfg_interrupt_msi_data,
cfg_interrupt_msi_select=dut.cfg_interrupt_msi_select,
cfg_interrupt_msi_int=dut.cfg_interrupt_msi_int,
cfg_interrupt_msi_pending_status=dut.cfg_interrupt_msi_pending_status,
cfg_interrupt_msi_pending_status_data_enable=dut.cfg_interrupt_msi_pending_status_data_enable,
cfg_interrupt_msi_pending_status_function_num=dut.cfg_interrupt_msi_pending_status_function_num,
cfg_interrupt_msi_sent=dut.cfg_interrupt_msi_sent,
cfg_interrupt_msi_fail=dut.cfg_interrupt_msi_fail,
# cfg_interrupt_msix_enable
# cfg_interrupt_msix_mask
# cfg_interrupt_msix_vf_enable
# cfg_interrupt_msix_vf_mask
# cfg_interrupt_msix_address
# cfg_interrupt_msix_data
# cfg_interrupt_msix_int
# cfg_interrupt_msix_vec_pending
# cfg_interrupt_msix_vec_pending_status
cfg_interrupt_msi_attr=dut.cfg_interrupt_msi_attr,
cfg_interrupt_msi_tph_present=dut.cfg_interrupt_msi_tph_present,
cfg_interrupt_msi_tph_type=dut.cfg_interrupt_msi_tph_type,
# cfg_interrupt_msi_tph_st_tag=dut.cfg_interrupt_msi_tph_st_tag,
# cfg_interrupt_msi_function_number=dut.cfg_interrupt_msi_function_number,
# Configuration Extend Interface
# cfg_ext_read_received
# cfg_ext_write_received
# cfg_ext_register_number
# cfg_ext_function_number
# cfg_ext_write_data
# cfg_ext_write_byte_enable
# cfg_ext_read_data
# cfg_ext_read_data_valid
)
# self.dev.log.setLevel(logging.DEBUG)
self.rc.make_port().connect(self.dev)
self.driver = mqnic.Driver(self.rc)
self.dev.functions[0].msi_multiple_message_capable = 5
self.dev.functions[0].configure_bar(0, 2**self.BAR0_APERTURE, ext=True, prefetch=True)
# Ethernet
cocotb.fork(Clock(dut.sfp_1_rx_clk, 6.4, units="ns").start())
self.sfp_1_source = XgmiiSource(dut.sfp_1_rxd, dut.sfp_1_rxc, dut.sfp_1_rx_clk, dut.sfp_1_rx_rst)
cocotb.fork(Clock(dut.sfp_1_tx_clk, 6.4, units="ns").start())
self.sfp_1_sink = XgmiiSink(dut.sfp_1_txd, dut.sfp_1_txc, dut.sfp_1_tx_clk, dut.sfp_1_tx_rst)
cocotb.fork(Clock(dut.sfp_2_rx_clk, 6.4, units="ns").start())
self.sfp_2_source = XgmiiSource(dut.sfp_2_rxd, dut.sfp_2_rxc, dut.sfp_2_rx_clk, dut.sfp_2_rx_rst)
cocotb.fork(Clock(dut.sfp_2_tx_clk, 6.4, units="ns").start())
self.sfp_2_sink = XgmiiSink(dut.sfp_2_txd, dut.sfp_2_txc, dut.sfp_2_tx_clk, dut.sfp_2_tx_rst)
dut.sfp_1_npres.setimmediatevalue(0)
dut.sfp_2_npres.setimmediatevalue(0)
dut.sfp_1_los.setimmediatevalue(0)
dut.sfp_2_los.setimmediatevalue(0)
dut.sma_in.setimmediatevalue(0)
dut.sfp_i2c_scl_i.setimmediatevalue(1)
dut.sfp_1_i2c_sda_i.setimmediatevalue(1)
dut.sfp_2_i2c_sda_i.setimmediatevalue(1)
dut.eeprom_i2c_scl_i.setimmediatevalue(1)
dut.eeprom_i2c_sda_i.setimmediatevalue(1)
dut.flash_dq_i.setimmediatevalue(0)
self.loopback_enable = False
cocotb.fork(self._run_loopback())
async def init(self):
self.dut.sfp_1_rx_rst.setimmediatevalue(0)
self.dut.sfp_1_tx_rst.setimmediatevalue(0)
self.dut.sfp_2_rx_rst.setimmediatevalue(0)
self.dut.sfp_2_tx_rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.sfp_1_rx_rst.setimmediatevalue(1)
self.dut.sfp_1_tx_rst.setimmediatevalue(1)
self.dut.sfp_2_rx_rst.setimmediatevalue(1)
self.dut.sfp_2_tx_rst.setimmediatevalue(1)
await FallingEdge(self.dut.rst_250mhz)
await Timer(100, 'ns')
await RisingEdge(self.dut.clk_250mhz)
await RisingEdge(self.dut.clk_250mhz)
self.dut.sfp_1_rx_rst.setimmediatevalue(0)
self.dut.sfp_1_tx_rst.setimmediatevalue(0)
self.dut.sfp_2_rx_rst.setimmediatevalue(0)
self.dut.sfp_2_tx_rst.setimmediatevalue(0)
await self.rc.enumerate(enable_bus_mastering=True, configure_msi=True)
async def _run_loopback(self):
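# Feed frames received on each SFP sink back into the matching source whenever loopback is enabled.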
while True:
await RisingEdge(self.dut.clk_250mhz)
if self.loopback_enable:
if not self.sfp_1_sink.empty():
await self.sfp_1_source.send(await self.sfp_1_sink.recv())
if not self.sfp_2_sink.empty():
await self.sfp_2_source.send(await self.sfp_2_sink.recv())
@cocotb.test()
async def run_test_nic(dut):
tb = TB(dut)
await tb.init()
tb.log.info("Init driver")
await tb.driver.init_dev(tb.dev.functions[0].pcie_id)
await tb.driver.interfaces[0].open()
# await driver.interfaces[1].open()
# enable queues
tb.log.info("Enable queues")
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_SCHED_ENABLE, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[0].hw_addr+4*k, 0x00000003)
# wait for all writes to complete
await tb.rc.mem_read(tb.driver.hw_addr, 4)
tb.log.info("Init complete")
tb.log.info("Send and receive single packet")
data = bytearray([x % 256 for x in range(1024)])
await tb.driver.interfaces[0].start_xmit(data, 0)
pkt = await tb.sfp_1_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.sfp_1_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
# await tb.driver.interfaces[1].start_xmit(data, 0)
# pkt = await tb.sfp_2_sink.recv()
# tb.log.info("Packet: %s", pkt)
# await tb.sfp_2_source.send(pkt)
# pkt = await tb.driver.interfaces[1].recv()
# tb.log.info("Packet: %s", pkt)
# assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.log.info("RX and TX checksum tests")
payload = bytes([x % 256 for x in range(256)])
eth = Ether(src='5A:51:52:53:54:55', dst='DA:D1:D2:D3:D4:D5')
ip = IP(src='192.168.1.100', dst='192.168.1.101')
udp = UDP(sport=1, dport=2)
test_pkt = eth / ip / udp / payload
test_pkt2 = test_pkt.copy()
test_pkt2[UDP].chksum = scapy.utils.checksum(bytes(test_pkt2[UDP]))
await tb.driver.interfaces[0].start_xmit(test_pkt2.build(), 0, 34, 6)
pkt = await tb.sfp_1_sink.recv()
tb.log.info("Packet: %s", pkt)
await tb.sfp_1_source.send(pkt)
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
assert Ether(pkt.data).build() == test_pkt.build()
tb.log.info("Multiple small packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(60)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
tb.log.info("Multiple large packets")
count = 64
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
for p in pkts:
await tb.driver.interfaces[0].start_xmit(p, 0)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
await Timer(1000, 'ns')
tb.log.info("TDMA")
count = 16
pkts = [bytearray([(x+k) % 256 for x in range(1514)]) for k in range(count)]
tb.loopback_enable = True
# configure TDMA scheduler
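# 40 us schedule period divided into 10 us timeslots (4 slots), each with a 5 us active period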
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_SCHED_PERIOD_FNS, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_SCHED_PERIOD_NS, 40000)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_SCHED_PERIOD_SEC_L, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_SCHED_PERIOD_SEC_H, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_TIMESLOT_PERIOD_FNS, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_TIMESLOT_PERIOD_NS, 10000)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_TIMESLOT_PERIOD_SEC_L, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_TIMESLOT_PERIOD_SEC_H, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_ACTIVE_PERIOD_FNS, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_ACTIVE_PERIOD_NS, 5000)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_ACTIVE_PERIOD_SEC_L, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_ACTIVE_PERIOD_SEC_H, 0)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_TDMA_CTRL, 0x00000001)
# enable queues with global enable off
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].hw_addr+mqnic.MQNIC_PORT_REG_SCHED_ENABLE, 0x00000001)
for k in range(tb.driver.interfaces[0].tx_queue_count):
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[0].hw_addr+4*k, 0x00000001)
# configure slots
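# one-hot queue enable mask per timeslot (slot k -> TX queue k)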
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[1].hw_addr+8*0, 0x00000001)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[1].hw_addr+8*1, 0x00000002)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[1].hw_addr+8*2, 0x00000004)
await tb.rc.mem_write_dword(tb.driver.interfaces[0].ports[0].schedulers[1].hw_addr+8*3, 0x00000008)
await tb.rc.mem_read(tb.driver.hw_addr, 4) # wait for all writes to complete
# send packets
for k in range(count):
await tb.driver.interfaces[0].start_xmit(pkts[k], k % 4)
for k in range(count):
pkt = await tb.driver.interfaces[0].recv()
tb.log.info("Packet: %s", pkt)
# assert pkt.data == pkts[k]
assert pkt.rx_checksum == ~scapy.utils.checksum(bytes(pkt.data[14:])) & 0xffff
tb.loopback_enable = False
await RisingEdge(dut.clk_250mhz)
await RisingEdge(dut.clk_250mhz)
# cocotb-test
tests_dir = os.path.dirname(__file__)
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axi_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axi', 'rtl'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
eth_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'eth', 'rtl'))
pcie_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'pcie', 'rtl'))
def test_fpga_core(request):
dut = "fpga_core"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
os.path.join(rtl_dir, "common", "mqnic_interface.v"),
os.path.join(rtl_dir, "common", "mqnic_port.v"),
os.path.join(rtl_dir, "common", "cpl_write.v"),
os.path.join(rtl_dir, "common", "cpl_op_mux.v"),
os.path.join(rtl_dir, "common", "desc_fetch.v"),
os.path.join(rtl_dir, "common", "desc_op_mux.v"),
os.path.join(rtl_dir, "common", "queue_manager.v"),
os.path.join(rtl_dir, "common", "cpl_queue_manager.v"),
os.path.join(rtl_dir, "common", "tx_engine.v"),
os.path.join(rtl_dir, "common", "rx_engine.v"),
os.path.join(rtl_dir, "common", "tx_checksum.v"),
os.path.join(rtl_dir, "common", "rx_hash.v"),
os.path.join(rtl_dir, "common", "rx_checksum.v"),
os.path.join(rtl_dir, "common", "tx_scheduler_rr.v"),
os.path.join(rtl_dir, "common", "tx_scheduler_ctrl_tdma.v"),
os.path.join(rtl_dir, "common", "event_mux.v"),
os.path.join(rtl_dir, "common", "tdma_scheduler.v"),
os.path.join(rtl_dir, "common", "tdma_ber.v"),
os.path.join(rtl_dir, "common", "tdma_ber_ch.v"),
os.path.join(eth_rtl_dir, "eth_mac_10g_fifo.v"),
os.path.join(eth_rtl_dir, "eth_mac_10g.v"),
os.path.join(eth_rtl_dir, "axis_xgmii_rx_64.v"),
os.path.join(eth_rtl_dir, "axis_xgmii_tx_64.v"),
os.path.join(eth_rtl_dir, "lfsr.v"),
os.path.join(eth_rtl_dir, "ptp_clock.v"),
os.path.join(eth_rtl_dir, "ptp_clock_cdc.v"),
os.path.join(eth_rtl_dir, "ptp_perout.v"),
os.path.join(eth_rtl_dir, "ptp_ts_extract.v"),
os.path.join(axi_rtl_dir, "axil_interconnect.v"),
os.path.join(axi_rtl_dir, "arbiter.v"),
os.path.join(axi_rtl_dir, "priority_encoder.v"),
os.path.join(axis_rtl_dir, "axis_adapter.v"),
os.path.join(axis_rtl_dir, "axis_arb_mux.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo.v"),
os.path.join(axis_rtl_dir, "axis_async_fifo_adapter.v"),
os.path.join(axis_rtl_dir, "axis_fifo.v"),
os.path.join(axis_rtl_dir, "axis_register.v"),
os.path.join(pcie_rtl_dir, "pcie_us_axil_master.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_pcie_us_wr.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_rd.v"),
os.path.join(pcie_rtl_dir, "dma_if_mux_wr.v"),
os.path.join(pcie_rtl_dir, "dma_psdpram.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_sink.v"),
os.path.join(pcie_rtl_dir, "dma_client_axis_source.v"),
os.path.join(pcie_rtl_dir, "pcie_us_cfg.v"),
os.path.join(pcie_rtl_dir, "pcie_us_msi.v"),
os.path.join(pcie_rtl_dir, "pulse_merge.v"),
]
parameters = {}
parameters['AXIS_PCIE_DATA_WIDTH'] = 256
parameters['AXIS_PCIE_KEEP_WIDTH'] = parameters['AXIS_PCIE_DATA_WIDTH'] // 32
parameters['AXIS_PCIE_RQ_USER_WIDTH'] = 60
parameters['AXIS_PCIE_RC_USER_WIDTH'] = 75
parameters['AXIS_PCIE_CQ_USER_WIDTH'] = 85
parameters['AXIS_PCIE_CC_USER_WIDTH'] = 33
parameters['RQ_SEQ_NUM_WIDTH'] = 4
parameters['BAR0_APERTURE'] = 24
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
|
[] |
[] |
[
"PARAM_BAR0_APERTURE"
] |
[]
|
["PARAM_BAR0_APERTURE"]
|
python
| 1 | 0 | |
pgcli/main.py
|
#!/usr/bin/env python
from __future__ import unicode_literals
from __future__ import print_function
import os
import sys
import traceback
import logging
from time import time
import click
import sqlparse
from prompt_toolkit import CommandLineInterface, AbortAction, Exit
from prompt_toolkit.document import Document
from prompt_toolkit.layout import Layout
from prompt_toolkit.layout.prompt import DefaultPrompt
from prompt_toolkit.layout.menus import CompletionsMenu
from prompt_toolkit.history import FileHistory
from pygments.lexers.sql import PostgresLexer
from .packages.tabulate import tabulate
from .packages.expanded import expanded_table
from .packages.pgspecial import (CASE_SENSITIVE_COMMANDS,
NON_CASE_SENSITIVE_COMMANDS, is_expanded_output)
import pgcli.packages.pgspecial as pgspecial
from .pgcompleter import PGCompleter
from .pgtoolbar import PGToolbar
from .pgstyle import style_factory
from .pgexecute import PGExecute
from .pgbuffer import PGBuffer
from .config import write_default_config, load_config
from .key_bindings import pgcli_bindings
from .encodingutils import utf8tounicode
from .__init__ import __version__
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from getpass import getuser
from psycopg2 import OperationalError
from collections import namedtuple
# Query tuples are used for maintaining history
Query = namedtuple('Query', ['query', 'successful', 'mutating'])
class PGCli(object):
def __init__(self, force_passwd_prompt=False, never_passwd_prompt=False,
pgexecute=None):
self.force_passwd_prompt = force_passwd_prompt
self.never_passwd_prompt = never_passwd_prompt
self.pgexecute = pgexecute
from pgcli import __file__ as package_root
package_root = os.path.dirname(package_root)
default_config = os.path.join(package_root, 'pgclirc')
write_default_config(default_config, '~/.pgclirc')
# Load config.
c = self.config = load_config('~/.pgclirc', default_config)
self.multi_line = c.getboolean('main', 'multi_line')
self.vi_mode = c.getboolean('main', 'vi')
pgspecial.TIMING_ENABLED = c.getboolean('main', 'timing')
self.table_format = c.get('main', 'table_format')
self.syntax_style = c.get('main', 'syntax_style')
self.logger = logging.getLogger(__name__)
self.initialize_logging()
self.query_history = []
# Initialize completer
smart_completion = c.getboolean('main', 'smart_completion')
completer = PGCompleter(smart_completion)
completer.extend_special_commands(CASE_SENSITIVE_COMMANDS.keys())
completer.extend_special_commands(NON_CASE_SENSITIVE_COMMANDS.keys())
self.completer = completer
def initialize_logging(self):
log_file = self.config.get('main', 'log_file')
log_level = self.config.get('main', 'log_level')
level_map = {'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG
}
handler = logging.FileHandler(os.path.expanduser(log_file))
formatter = logging.Formatter(
'%(asctime)s (%(process)d/%(threadName)s) '
'%(name)s %(levelname)s - %(message)s')
handler.setFormatter(formatter)
root_logger = logging.getLogger('pgcli')
root_logger.addHandler(handler)
root_logger.setLevel(level_map[log_level.upper()])
root_logger.debug('Initializing pgcli logging.')
root_logger.debug('Log file %r.', log_file)
def connect_uri(self, uri):
uri = urlparse(uri)
database = uri.path[1:] # ignore the leading fwd slash
self.connect(database, uri.hostname, uri.username,
uri.port, uri.password)
def connect(self, database='', host='', user='', port='', passwd=''):
# Connect to the database.
if not database:
if user:
database = user
else:
# default to current OS username just like psql
database = user = getuser()
# Prompt for a password immediately if requested via the -W flag. This
# avoids wasting time trying to connect to the database and catching a
# no-password exception.
# If we successfully parsed a password from a URI, there's no need to
# prompt for it, even with the -W flag
if self.force_passwd_prompt and not passwd:
passwd = click.prompt('Password', hide_input=True,
show_default=False, type=str)
# Prompt for a password after 1st attempt to connect without a password
# fails. Don't prompt if the -w flag is supplied
auto_passwd_prompt = not passwd and not self.never_passwd_prompt
# Attempt to connect to the database.
# Note that passwd may be empty on the first attempt. If connection
# fails because of a missing password, but we're allowed to prompt for
# a password (no -w flag), prompt for a passwd and try again.
try:
try:
pgexecute = PGExecute(database, user, passwd, host, port)
except OperationalError as e:
if ('no password supplied' in utf8tounicode(e.args[0]) and
auto_passwd_prompt):
passwd = click.prompt('Password', hide_input=True,
show_default=False, type=str)
pgexecute = PGExecute(database, user, passwd, host, port)
else:
raise e
except Exception as e: # Connecting to a database could fail.
self.logger.debug('Database connection failed: %r.', e)
self.logger.error("traceback: %r", traceback.format_exc())
click.secho(str(e), err=True, fg='red')
exit(1)
self.pgexecute = pgexecute
def run_cli(self):
pgexecute = self.pgexecute
prompt = '%s> ' % pgexecute.dbname
logger = self.logger
original_less_opts = self.adjust_less_opts()
completer = self.completer
self.refresh_completions()
key_binding_manager = pgcli_bindings(self.vi_mode)
print('Version:', __version__)
print('Chat: https://gitter.im/amjith/pgcli')
print('Mail: https://groups.google.com/forum/#!forum/pgcli')
print('Home: http://pgcli.com')
layout = Layout(before_input=DefaultPrompt(prompt),
menus=[CompletionsMenu(max_height=10)],
lexer=PostgresLexer,
bottom_toolbars=[PGToolbar(key_binding_manager)])
buf = PGBuffer(always_multiline=self.multi_line, completer=completer,
history=FileHistory(os.path.expanduser('~/.pgcli-history')))
cli = CommandLineInterface(style=style_factory(self.syntax_style),
layout=layout, buffer=buf,
key_bindings_registry=key_binding_manager.registry)
try:
while True:
cli.layout.before_input = DefaultPrompt(prompt)
document = cli.read_input(on_exit=AbortAction.RAISE_EXCEPTION)
# The reason we check here instead of inside the pgexecute is
# because we want to raise the Exit exception which will be
# caught by the try/except block that wraps the pgexecute.run()
# statement.
if quit_command(document.text):
raise Exit
# Keep track of whether or not the query is mutating. In case
# of a multi-statement query, the overall query is considered
# mutating if any one of the component statements is mutating
mutating = False
try:
logger.debug('sql: %r', document.text)
successful = False
# Initialized to [] because res might never get initialized
# if an exception occurs in pgexecute.run(), which would
# cause the finally clause to fail.
res = []
start = time()
# Run the query.
res = pgexecute.run(document.text)
duration = time() - start
successful = True
output = []
total = 0
for title, cur, headers, status in res:
logger.debug("headers: %r", headers)
logger.debug("rows: %r", cur)
logger.debug("status: %r", status)
start = time()
threshold = 1000
if (is_select(status) and
cur and cur.rowcount > threshold):
click.secho('The result set has more than %s rows.'
% threshold, fg='red')
if not click.confirm('Do you want to continue?'):
click.secho("Aborted!", err=True, fg='red')
break
output.extend(format_output(title, cur, headers,
status, self.table_format))
end = time()
total += end - start
mutating = mutating or is_mutating(status)
except KeyboardInterrupt:
# Restart connection to the database
pgexecute.connect()
logger.debug("cancelled query, sql: %r", document.text)
click.secho("cancelled query", err=True, fg='red')
except Exception as e:
logger.error("sql: %r, error: %r", document.text, e)
logger.error("traceback: %r", traceback.format_exc())
click.secho(str(e), err=True, fg='red')
else:
click.echo_via_pager('\n'.join(output))
if pgspecial.TIMING_ENABLED:
print('Command Time:', duration)
print('Format Time:', total)
finally:
for _, cur, _, _ in res:
if hasattr(cur, 'close'):
cur.close()
# Refresh the table names and column names if necessary.
if need_completion_refresh(document.text):
prompt = '%s> ' % pgexecute.dbname
self.refresh_completions()
# Refresh search_path to set default schema.
if need_search_path_refresh(document.text):
logger.debug('Refreshing search path')
completer.set_search_path(pgexecute.search_path())
logger.debug('Search path: %r', completer.search_path)
query = Query(document.text, successful, mutating)
self.query_history.append(query)
except Exit:
print('Goodbye!')
finally: # Reset the less opts back to original.
logger.debug('Restoring env var LESS to %r.', original_less_opts)
os.environ['LESS'] = original_less_opts
def adjust_less_opts(self):
less_opts = os.environ.get('LESS', '')
self.logger.debug('Original value for LESS env var: %r', less_opts)
os.environ['LESS'] = '-RXF'
#if 'X' not in less_opts:
#os.environ['LESS'] += 'X'
#if 'F' not in less_opts:
#os.environ['LESS'] += 'F'
return less_opts
def refresh_completions(self):
completer = self.completer
completer.reset_completions()
pgexecute = self.pgexecute
completer.set_search_path(pgexecute.search_path())
completer.extend_schemata(pgexecute.schemata())
completer.extend_tables(pgexecute.tables())
completer.extend_columns(pgexecute.columns())
completer.extend_functions(pgexecute.functions())
completer.extend_database_names(pgexecute.databases())
def get_completions(self, text, cursor_position):
return self.completer.get_completions(
Document(text=text, cursor_position=cursor_position), None)
@click.command()
# Default host is '' so psycopg2 can default to either localhost or unix socket
@click.option('-h', '--host', default='', envvar='PGHOST',
help='Host address of the postgres database.')
@click.option('-p', '--port', default=5432, help='Port number at which the '
'postgres instance is listening.', envvar='PGPORT')
@click.option('-U', '--user', envvar='PGUSER', help='User name to '
'connect to the postgres database.')
@click.option('-W', '--password', 'prompt_passwd', is_flag=True, default=False,
help='Force password prompt.')
@click.option('-w', '--no-password', 'never_prompt', is_flag=True,
default=False, help='Never prompt for password.')
@click.option('-v', '--version', is_flag=True, help='Version of pgcli.')
@click.option('-d', '--dbname', default='', envvar='PGDATABASE',
help='database name to connect to.')
@click.argument('database', default=lambda: None, envvar='PGDATABASE', nargs=1)
@click.argument('username', default=lambda: None, envvar='PGUSER', nargs=1)
def cli(database, user, host, port, prompt_passwd, never_prompt, dbname,
username, version):
if version:
print('Version:', __version__)
sys.exit(0)
pgcli = PGCli(prompt_passwd, never_prompt)
# Choose whichever one has a valid value.
database = database or dbname
user = username or user
if '://' in database:
pgcli.connect_uri(database)
else:
pgcli.connect(database, host, user, port)
pgcli.logger.debug('Launch Params: \n'
'\tdatabase: %r'
'\tuser: %r'
'\thost: %r'
'\tport: %r', database, user, host, port)
pgcli.run_cli()
def format_output(title, cur, headers, status, table_format):
output = []
if title: # Only print the title if it's not None.
output.append(title)
if cur:
headers = [utf8tounicode(x) for x in headers]
if is_expanded_output():
output.append(expanded_table(cur, headers))
else:
output.append(tabulate(cur, headers, tablefmt=table_format,
missingval='<null>'))
if status: # Only print the status if it's not None.
output.append(status)
return output
def need_completion_refresh(queries):
"""Determines if the completion needs a refresh by checking if the sql
statement is an alter, create, drop or change db."""
for query in sqlparse.split(queries):
try:
first_token = query.split()[0]
return first_token.lower() in ('alter', 'create', 'use', '\\c',
'\\connect', 'drop')
except Exception:
return False
def need_search_path_refresh(sql):
"""Determines if the search_path should be refreshed by checking if the
sql has 'set search_path'."""
return 'set search_path' in sql.lower()
def is_mutating(status):
"""Determines if the statement is mutating based on the status."""
if not status:
return False
mutating = set(['insert', 'update', 'delete', 'alter', 'create', 'drop'])
return status.split(None, 1)[0].lower() in mutating
def is_select(status):
"""Returns true if the first word in status is 'select'."""
if not status:
return False
return status.split(None, 1)[0].lower() == 'select'
def quit_command(sql):
return (sql.strip().lower() == 'exit'
or sql.strip().lower() == 'quit'
or sql.strip() == '\q'
or sql.strip() == ':q')
if __name__ == "__main__":
cli()
|
[] |
[] |
[
"LESS"
] |
[]
|
["LESS"]
|
python
| 1 | 0 | |
cmd/frontend/internal/cli/config.go
|
package cli
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"os/user"
"strings"
"sync"
"github.com/inconshreveable/log15"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db"
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
"github.com/sourcegraph/sourcegraph/cmd/frontend/types"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/db/confdb"
"github.com/sourcegraph/sourcegraph/internal/db/dbutil"
"github.com/sourcegraph/sourcegraph/internal/jsonc"
)
func printConfigValidation() {
messages, err := conf.Validate(globals.ConfigurationServerFrontendOnly.Raw())
if err != nil {
log.Printf("Warning: Unable to validate Sourcegraph site configuration: %s", err)
return
}
if len(messages) > 0 {
log15.Warn("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
log15.Warn("⚠️ Warnings related to the Sourcegraph site configuration:")
for _, verr := range messages {
log15.Warn(verr.String())
}
log15.Warn("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
}
}
// handleConfigOverrides allows dev environments to forcibly override the
// configuration in the database upon startup. This is used to e.g. ensure
// dev environments have a consistent configuration and to load secrets from a
// separate private repository.
//
// As this method writes to the configuration DB, it should be invoked before
// the configuration server is started but after PostgreSQL is connected.
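//
// Illustration only (hypothetical invocation, not part of the original file): in a dev
// environment one might start the frontend with, for example,
//	SITE_CONFIG_FILE=dev/site.json EXTSVC_CONFIG_FILE=dev/extsvc.json ./frontend
// so that the named files are read below and written into the configuration database on startup.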
func handleConfigOverrides() error {
ctx := context.Background()
overrideCriticalConfig := os.Getenv("CRITICAL_CONFIG_FILE")
overrideSiteConfig := os.Getenv("SITE_CONFIG_FILE")
overrideExtSvcConfig := os.Getenv("EXTSVC_CONFIG_FILE")
overrideGlobalSettings := os.Getenv("GLOBAL_SETTINGS_FILE")
overrideAny := overrideCriticalConfig != "" || overrideSiteConfig != "" || overrideExtSvcConfig != "" || overrideGlobalSettings != ""
if overrideAny || conf.IsDev(conf.DeployType()) {
raw, err := (&configurationSource{}).Read(ctx)
if err != nil {
return errors.Wrap(err, "reading existing config for applying overrides")
}
if overrideCriticalConfig != "" {
critical, err := ioutil.ReadFile(overrideCriticalConfig)
if err != nil {
return errors.Wrap(err, "reading CRITICAL_CONFIG_FILE")
}
raw.Critical = string(critical)
}
if overrideSiteConfig != "" {
site, err := ioutil.ReadFile(overrideSiteConfig)
if err != nil {
return errors.Wrap(err, "reading SITE_CONFIG_FILE")
}
raw.Site = string(site)
}
if overrideCriticalConfig != "" || overrideSiteConfig != "" {
err := (&configurationSource{}).Write(ctx, raw)
if err != nil {
return errors.Wrap(err, "writing critical/site config overrides to database")
}
}
if overrideGlobalSettings != "" {
globalSettingsBytes, err := ioutil.ReadFile(overrideGlobalSettings)
if err != nil {
return errors.Wrap(err, "reading GLOBAL_SETTINGS_FILE")
}
currentSettings, err := db.Settings.GetLatest(ctx, api.SettingsSubject{Site: true})
if err != nil {
return errors.Wrap(err, "could not fetch current settings")
}
// Only overwrite the settings if the current settings don't exist, differ, or were
// created by a human user; this prevents creating unnecessary rows in the DB.
globalSettings := string(globalSettingsBytes)
if currentSettings == nil || currentSettings.AuthorUserID != nil || currentSettings.Contents != globalSettings {
var lastID *int32 = nil
if currentSettings != nil {
lastID = &currentSettings.ID
}
_, err = db.Settings.CreateIfUpToDate(ctx, api.SettingsSubject{Site: true}, lastID, nil, globalSettings)
if err != nil {
return errors.Wrap(err, "writing global setting override to database")
}
}
}
if overrideExtSvcConfig != "" {
parsed, err := conf.ParseConfig(raw)
if err != nil {
return errors.Wrap(err, "parsing critical/site config")
}
confGet := func() *conf.Unified { return parsed }
extsvc, err := ioutil.ReadFile(overrideExtSvcConfig)
if err != nil {
return errors.Wrap(err, "reading EXTSVC_CONFIG_FILE")
}
var rawConfigs map[string][]*json.RawMessage
if err := jsonc.Unmarshal(string(extsvc), &rawConfigs); err != nil {
return errors.Wrap(err, "parsing EXTSVC_CONFIG_FILE")
}
if len(rawConfigs) == 0 {
log15.Warn("EXTSVC_CONFIG_FILE contains zero external service configurations")
}
existing, err := db.ExternalServices.List(ctx, db.ExternalServicesListOptions{})
if err != nil {
return errors.Wrap(err, "ExternalServices.List")
}
// Perform delta update for external services. We don't want to
// just delete all external services and re-add all of them,
// because that would cause repo-updater to need to update
// repositories and reassociate them with external services each
// time the frontend restarts.
//
// Start out by assuming we will remove all and re-add all.
var (
toAdd = make(map[*types.ExternalService]bool)
toRemove = make(map[*types.ExternalService]bool)
toUpdate = make(map[int64]*types.ExternalService)
)
for _, existing := range existing {
toRemove[existing] = true
}
for key, cfgs := range rawConfigs {
for i, cfg := range cfgs {
marshaledCfg, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
return errors.Wrap(err, fmt.Sprintf("marshaling extsvc config ([%v][%v])", key, i))
}
toAdd[&types.ExternalService{
Kind: key,
DisplayName: fmt.Sprintf("%s #%d", key, i+1),
Config: string(marshaledCfg),
}] = true
}
}
// Now eliminate operations from toAdd/toRemove where the config
// file and DB describe an equivalent external service.
isEquiv := func(a, b *types.ExternalService) bool {
return a.Kind == b.Kind && a.DisplayName == b.DisplayName && a.Config == b.Config
}
shouldUpdate := func(a, b *types.ExternalService) bool {
return a.Kind == b.Kind && a.DisplayName == b.DisplayName && a.Config != b.Config
}
for a := range toAdd {
for b := range toRemove {
if isEquiv(a, b) { // Nothing changed
delete(toAdd, a)
delete(toRemove, b)
} else if shouldUpdate(a, b) {
delete(toAdd, a)
delete(toRemove, b)
toUpdate[b.ID] = a
}
}
}
// Apply the delta update.
for extSvc := range toRemove {
log15.Debug("Deleting external service", "id", extSvc.ID, "displayName", extSvc.DisplayName)
err := db.ExternalServices.Delete(ctx, extSvc.ID)
if err != nil {
return errors.Wrap(err, "ExternalServices.Delete")
}
}
for extSvc := range toAdd {
log15.Debug("Adding external service", "displayName", extSvc.DisplayName)
if err := db.ExternalServices.Create(ctx, confGet, extSvc); err != nil {
return errors.Wrap(err, "ExternalServices.Create")
}
}
ps := confGet().AuthProviders
for id, extSvc := range toUpdate {
log15.Debug("Updating external service", "id", id, "displayName", extSvc.DisplayName)
update := &db.ExternalServiceUpdate{DisplayName: &extSvc.DisplayName, Config: &extSvc.Config}
if err := db.ExternalServices.Update(ctx, ps, id, update); err != nil {
return errors.Wrap(err, "ExternalServices.Update")
}
}
}
}
return nil
}
type configurationSource struct{}
func (c configurationSource) Read(ctx context.Context) (conftypes.RawUnified, error) {
critical, err := confdb.CriticalGetLatest(ctx)
if err != nil {
return conftypes.RawUnified{}, errors.Wrap(err, "confdb.CriticalGetLatest")
}
site, err := confdb.SiteGetLatest(ctx)
if err != nil {
return conftypes.RawUnified{}, errors.Wrap(err, "confdb.SiteGetLatest")
}
return conftypes.RawUnified{
Critical: critical.Contents,
Site: site.Contents,
ServiceConnections: serviceConnections(),
}, nil
}
func (c configurationSource) Write(ctx context.Context, input conftypes.RawUnified) error {
// TODO(slimsag): future: pass lastID through for race prevention
critical, err := confdb.CriticalGetLatest(ctx)
if err != nil {
return errors.Wrap(err, "confdb.CriticalGetLatest")
}
site, err := confdb.SiteGetLatest(ctx)
if err != nil {
return errors.Wrap(err, "confdb.SiteGetLatest")
}
_, err = confdb.CriticalCreateIfUpToDate(ctx, &critical.ID, input.Critical)
if err != nil {
return errors.Wrap(err, "confdb.CriticalCreateIfUpToDate")
}
_, err = confdb.SiteCreateIfUpToDate(ctx, &site.ID, input.Site)
if err != nil {
return errors.Wrap(err, "confdb.SiteCreateIfUpToDate")
}
return nil
}
var (
serviceConnectionsVal conftypes.ServiceConnections
serviceConnectionsOnce sync.Once
)
func serviceConnections() conftypes.ServiceConnections {
serviceConnectionsOnce.Do(func() {
username := ""
if user, err := user.Current(); err == nil {
username = user.Username
}
serviceConnectionsVal = conftypes.ServiceConnections{
GitServers: gitServers(),
PostgresDSN: dbutil.PostgresDSN(username, os.Getenv),
}
})
return serviceConnectionsVal
}
func gitServers() []string {
v := os.Getenv("SRC_GIT_SERVERS")
if v == "" {
// Detect 'go test' and set up default addresses in that case.
p, err := os.Executable()
if err == nil && strings.HasSuffix(p, ".test") {
v = "gitserver:3178"
}
}
return strings.Fields(v)
}
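// Illustration only (hypothetical value): setting SRC_GIT_SERVERS="gitserver-0:3178 gitserver-1:3178"
// would make gitServers return both addresses, since strings.Fields splits the value on whitespace.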
|
[
"\"CRITICAL_CONFIG_FILE\"",
"\"SITE_CONFIG_FILE\"",
"\"EXTSVC_CONFIG_FILE\"",
"\"GLOBAL_SETTINGS_FILE\"",
"\"SRC_GIT_SERVERS\""
] |
[] |
[
"EXTSVC_CONFIG_FILE",
"SITE_CONFIG_FILE",
"GLOBAL_SETTINGS_FILE",
"CRITICAL_CONFIG_FILE",
"SRC_GIT_SERVERS"
] |
[]
|
["EXTSVC_CONFIG_FILE", "SITE_CONFIG_FILE", "GLOBAL_SETTINGS_FILE", "CRITICAL_CONFIG_FILE", "SRC_GIT_SERVERS"]
|
go
| 5 | 0 | |
martini.go
|
// Package martini is a powerful package for quickly writing modular web applications/services in Golang.
//
// For a full guide visit http://github.com/codegangsta/martini
//
// package main
//
// import "github.com/codegangsta/martini"
//
// func main() {
// m := martini.Classic()
//
// m.Get("/", func() string {
// return "Hello world!"
// })
//
// m.Run()
// }
package martini
import (
"github.com/beatrichartz/inject"
"log"
"net/http"
"os"
"reflect"
)
// Martini represents the top level web application. inject.Injector methods can be invoked to map services on a global level.
type Martini struct {
inject.Injector
handlers []Handler
action Handler
logger *log.Logger
}
// New creates a bare bones Martini instance. Use this method if you want to have full control over the middleware that is used.
func New() *Martini {
m := &Martini{inject.New(), []Handler{}, func() {}, log.New(os.Stdout, "[martini] ", 0)}
m.Map(m.logger)
m.Map(defaultReturnHandler())
return m
}
// Handlers sets the entire middleware stack with the given Handlers. This will clear any current middleware handlers.
// Will panic if any of the handlers is not a callable function
func (m *Martini) Handlers(handlers ...Handler) {
m.handlers = make([]Handler, 0)
for _, handler := range handlers {
m.Use(handler)
}
}
// Action sets the handler that will be called after all the middleware has been invoked. This is set to martini.Router in a martini.Classic().
func (m *Martini) Action(handler Handler) {
validateHandler(handler)
m.action = handler
}
// Use adds a middleware Handler to the stack. Will panic if the handler is not a callable func. Middleware Handlers are invoked in the order that they are added.
func (m *Martini) Use(handler Handler) {
validateHandler(handler)
m.handlers = append(m.handlers, handler)
}
// ServeHTTP is the HTTP Entry point for a Martini instance. Useful if you want to control your own HTTP server.
func (m *Martini) ServeHTTP(res http.ResponseWriter, req *http.Request) {
m.createContext(res, req).run()
}
// Run the http server. Listens on os.Getenv("HOST") and os.Getenv("PORT"), defaulting to ":3000".
func (m *Martini) Run() {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "3000"
}
host := os.Getenv("HOST")
if len(host) == 0 {
host = ""
}
m.logger.Println("listening on host:port " + host + ":" + port)
m.logger.Fatalln(http.ListenAndServe(host+":"+port, m))
}
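// Illustration only (hypothetical invocation, not part of the original file): running the
// binary as PORT=8080 HOST=127.0.0.1 ./app makes Run listen on 127.0.0.1:8080 instead of
// the default ":3000".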
func (m *Martini) createContext(res http.ResponseWriter, req *http.Request) *context {
c := &context{inject.New(), append(m.handlers, m.action), NewResponseWriter(res), 0}
c.SetParent(m)
c.MapTo(c, (*Context)(nil))
c.MapTo(c.rw, (*http.ResponseWriter)(nil))
c.Map(req)
return c
}
// ClassicMartini represents a Martini with some reasonable defaults. Embeds the router functions for convenience.
type ClassicMartini struct {
*Martini
Router
}
// Classic creates a classic Martini with some basic default middleware - martini.Logger, martini.Recovery, and martini.Static.
func Classic() *ClassicMartini {
r := NewRouter()
m := New()
m.Use(Logger())
m.Use(Recovery())
m.Use(Static("public"))
m.Action(r.Handle)
return &ClassicMartini{m, r}
}
// Handler can be any callable function. Martini attempts to inject services into the handler's argument list.
// Martini will panic if an argument could not be fulfilled via dependency injection.
type Handler interface{}
func validateHandler(handler Handler) {
if reflect.TypeOf(handler).Kind() != reflect.Func {
panic("martini handler must be a callable func")
}
}
// Context represents a request context. Services can be mapped on the request level from this interface.
type Context interface {
inject.Injector
// Next is an optional function that Middleware Handlers can call to yield until after
// the other Handlers have been executed. This works really well for any operations that must
// happen after an http request.
Next()
// Written returns whether or not the response for this context has been written.
Written() bool
}
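// A minimal, hypothetical middleware sketch (not part of the original file) showing how a
// handler can call Next to yield and then run code after the downstream handlers finish:
//	m.Use(func(c martini.Context, log *log.Logger) {
//		c.Next()                                  // run the remaining handlers first
//		log.Println("response written:", c.Written())
//	})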
type context struct {
inject.Injector
handlers []Handler
rw ResponseWriter
index int
}
func (c *context) Next() {
c.index += 1
c.run()
}
func (c *context) Written() bool {
return c.rw.Written()
}
func (c *context) run() {
for c.index < len(c.handlers) {
_, err := c.Invoke(c.handlers[c.index])
if err != nil {
panic(err)
}
c.index += 1
if c.Written() {
return
}
}
}
|
[
"\"PORT\"",
"\"HOST\""
] |
[] |
[
"PORT",
"HOST"
] |
[]
|
["PORT", "HOST"]
|
go
| 2 | 0 | |
frameworks/TPOT/exec.py
|
import logging
import os
import pprint
import sys
import tempfile as tmp
if sys.platform == 'darwin':
os.environ['OBJC_DISABLE_INITIALIZE_FORK_SAFETY'] = 'YES'
os.environ['JOBLIB_TEMP_FOLDER'] = tmp.gettempdir()
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['MKL_NUM_THREADS'] = '1'
from tpot import TPOTClassifier, TPOTRegressor
from amlb.benchmark import TaskConfig
from amlb.data import Dataset
from amlb.datautils import Encoder, impute
from amlb.results import save_predictions_to_file
from amlb.utils import Timer, touch
log = logging.getLogger(__name__)
def run(dataset: Dataset, config: TaskConfig):
log.info("\n**** TPOT ****\n")
is_classification = config.type == 'classification'
# Mapping of benchmark metrics to TPOT metrics
metrics_mapping = dict(
acc='accuracy',
auc='roc_auc',
f1='f1',
logloss='neg_log_loss',
mae='neg_mean_absolute_error',
mse='neg_mean_squared_error',
msle='neg_mean_squared_log_error',
r2='r2'
)
scoring_metric = metrics_mapping[config.metric] if config.metric in metrics_mapping else None
if scoring_metric is None:
raise ValueError("Performance metric {} not supported.".format(config.metric))
X_train, X_test = impute(dataset.train.X_enc, dataset.test.X_enc)
y_train, y_test = dataset.train.y_enc, dataset.test.y_enc
training_params = {k: v for k, v in config.framework_params.items() if not k.startswith('_')}
n_jobs = config.framework_params.get('_n_jobs', config.cores) # useful to disable multicore, regardless of the dataset config
log.info('Running TPOT with a maximum time of %ss on %s cores, optimizing %s.',
config.max_runtime_seconds, n_jobs, scoring_metric)
runtime_min = (config.max_runtime_seconds/60)
estimator = TPOTClassifier if is_classification else TPOTRegressor
tpot = estimator(n_jobs=n_jobs,
max_time_mins=runtime_min,
scoring=scoring_metric,
random_state=config.seed,
**training_params)
with Timer() as training:
tpot.fit(X_train, y_train)
log.info('Predicting on the test set.')
predictions = tpot.predict(X_test)
try:
probabilities = tpot.predict_proba(X_test) if is_classification else None
except RuntimeError:
# TPOT throws a RuntimeError if the optimized pipeline does not support `predict_proba`.
target_values_enc = dataset.target.label_encoder.transform(dataset.target.values)
probabilities = Encoder('one-hot', target=False, encoded_type=float).fit(target_values_enc).transform(predictions)
save_predictions_to_file(dataset=dataset,
output_file=config.output_predictions_file,
probabilities=probabilities,
predictions=predictions,
truth=y_test,
target_is_encoded=is_classification)
save_artifacts(tpot, config)
return dict(
models_count=len(tpot.evaluated_individuals_),
training_duration=training.duration
)
def make_subdir(name, config):
subdir = os.path.join(config.output_dir, name, config.name, str(config.fold))
touch(subdir, as_dir=True)
return subdir
def save_artifacts(estimator, config):
try:
log.debug("All individuals :\n%s", list(estimator.evaluated_individuals_.items()))
models = estimator.pareto_front_fitted_pipelines_
hall_of_fame = list(zip(reversed(estimator._pareto_front.keys), estimator._pareto_front.items))
artifacts = config.framework_params.get('_save_artifacts', [])
if 'models' in artifacts:
models_file = os.path.join(make_subdir('models', config), 'models.txt')
with open(models_file, 'w') as f:
for m in hall_of_fame:
pprint.pprint(dict(
fitness=str(m[0]),
model=str(m[1]),
pipeline=models[str(m[1])],
), stream=f)
except:
log.debug("Error when saving artifacts.", exc_info=True)
|
[] |
[] |
[
"JOBLIB_TEMP_FOLDER",
"MKL_NUM_THREADS",
"OPENBLAS_NUM_THREADS",
"OBJC_DISABLE_INITIALIZE_FORK_SAFETY",
"OMP_NUM_THREADS"
] |
[]
|
["JOBLIB_TEMP_FOLDER", "MKL_NUM_THREADS", "OPENBLAS_NUM_THREADS", "OBJC_DISABLE_INITIALIZE_FORK_SAFETY", "OMP_NUM_THREADS"]
|
python
| 5 | 0 | |
test/test_db.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright Toolkit Authors
from typing import Optional
from pydtk.db import V4DBHandler, V4MetaDBHandler, V4DatabaseIDDBHandler
import pytest
db_args = 'db_engine,db_host,db_username,db_password'
db_list = [
('tinydb', 'test/test_v4.json', None, None),
('tinymongo', 'test/test_v4', None, None),
('montydb', 'test/test_v4', None, None),
# ('mongodb', 'host', 'username', 'password')
]
default_db_parameter = db_list[0]
def _add_data_to_db(handler: V4DBHandler):
from pydtk.models import MetaDataModel
paths = [
'test/records/sample/data/records.bag.json',
'test/records/csv_model_test/data/test.csv.json',
'test/records/json_model_test/json_test.json.json',
'test/records/forecast_model_test/forecast_test.csv.json',
'test/records/annotation_model_test/annotation_test.csv.json'
]
# Load metadata and add to DB
record_ids = set()
for path in paths:
metadata = MetaDataModel()
metadata.load(path)
record_ids.add(metadata.data['record_id'])
handler.add_data(metadata.data)
# Get DF
df = handler.df
assert len(df) == len(handler) and len(df) > 0
# Save
handler.save()
def _load_data_from_db(handler: V4DBHandler):
assert handler.count_total > 0
assert len(handler) > 0
try:
for sample in handler:
assert 'contents' in sample.keys()
assert isinstance(sample['contents'], dict)
assert len(sample['contents'].keys()) == 1
except (EOFError, StopIteration):
pass
@pytest.mark.parametrize(db_args, db_list)
def test_create_db(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Create a DB from the records directory.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
_add_data_to_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_load_db(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Load DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
_load_data_from_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_load_database_id(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Load DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='database_id',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
handler.read()
assert isinstance(handler, V4DatabaseIDDBHandler)
assert len(handler.df) == 1
assert next(handler)['database_id'] == 'default'
@pytest.mark.parametrize(db_args, db_list)
def test_update_configs_db(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Update DB configs.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
try:
handler.config.update({'_df_name': 'aaa'})
handler.config['_df_name'] = ''
raise AssertionError
except KeyError:
pass
handler.config['columns'].append({'name': 'test', 'dtype': 'str'})
handler.save()
del handler
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents'
)
handler.read()
assert handler.config['columns'][-1]['name'] == 'test'
del handler.config['columns'][-1]
handler.save()
@pytest.mark.parametrize(db_args, db_list)
def test_delete_records(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Delete records from DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='record_id'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
assert len(handler) == handler.count_total
num_data = len(handler)
# Remove one record without saving
handler.remove_data(next(handler))
assert len(handler) == num_data - 1
handler.read()
assert len(handler) == num_data
# Remove all data and save
try:
for sample in handler:
handler.remove_data(sample)
num_data -= 1
assert len(handler) == num_data
except (EOFError, StopIteration):
pass
assert len(handler) == 0
handler.save()
# Rollback data
_add_data_to_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_delete_collection(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Delete a collection from DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='database_id',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
handler.read()
assert isinstance(handler, V4DatabaseIDDBHandler)
num_databases_original = len(handler)
database = next(handler)
handler.remove_data(database)
handler.save()
assert len(handler) == num_databases_original - 1
handler.read()
assert len(handler) == num_databases_original - 1
if db_engine not in ['tinydb', 'tinymongo']:
# Check if the corresponding table is deleted
meta_handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
assert isinstance(meta_handler, V4MetaDBHandler)
assert len(meta_handler) == 0
@pytest.mark.parametrize(db_args, db_list)
def test_create_db_with_env_var(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Create a DB from the records directory.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
import os
# Set environment variables
if db_engine is not None:
os.environ['PYDTK_META_DB_ENGINE'] = db_engine
if db_host is not None:
os.environ['PYDTK_META_DB_HOST'] = db_host
if db_username is not None:
os.environ['PYDTK_META_DB_USERNAME'] = db_username
if db_password is not None:
os.environ['PYDTK_META_DB_PASSWORD'] = db_password
handler = V4DBHandler(
db_class='meta',
base_dir_path='/opt/pydtk/test'
)
handler.read()
assert isinstance(handler, V4MetaDBHandler)
_add_data_to_db(handler)
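# Hypothetical illustration (not part of the original tests): the same settings could instead be
# exported from a shell before running pytest, e.g.
#   PYDTK_META_DB_ENGINE=tinydb PYDTK_META_DB_HOST=test/test_v4.json pytest test/test_db.py
# in which case V4DBHandler(db_class='meta') reads the engine and host from the environment.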
@pytest.mark.parametrize(db_args, db_list)
def test_load_db_with_env_var(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Load DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
import os
# Set environment variables
if db_engine is not None:
os.environ['PYDTK_META_DB_ENGINE'] = db_engine
if db_host is not None:
os.environ['PYDTK_META_DB_HOST'] = db_host
if db_username is not None:
os.environ['PYDTK_META_DB_USERNAME'] = db_username
if db_password is not None:
os.environ['PYDTK_META_DB_PASSWORD'] = db_password
handler = V4DBHandler(db_class='meta')
handler.read()
assert isinstance(handler, V4MetaDBHandler)
_load_data_from_db(handler)
@pytest.mark.parametrize(db_args, db_list)
def test_merge(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test merging dicts.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents',
read_on_init=False
)
data_1 = {
'record_id': 'aaa',
'string': 'test123',
'dict': {
'aaa': 'aaa'
},
'list': [
'aaa'
]
}
data_2 = {
'record_id': 'aaa',
'string': 'test123',
'dict': {
'bbb': 'bbb'
},
'list': [
'bbb'
]
}
data_merged = {
'record_id': 'aaa',
'string': 'test123',
'dict': {
'aaa': 'aaa',
'bbb': 'bbb'
},
'list': [
'aaa',
'bbb'
]
}
handler.add_data(data_1, strategy='merge')
handler.add_data(data_2, strategy='merge')
data = handler.data[0]
assert len(handler) == 1
assert all([set(data[key]) == set(data_merged[key]) for key in data_merged.keys()])
@pytest.mark.parametrize(db_args, list(filter(lambda d: d[0] in ['tinydb'], db_list)))
def test_search_tinydb(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Search on TinyDB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
from tinydb import where
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents',
read_on_init=False
)
handler.read(query=where('record_id') == 'test')
assert len(handler) > 0
handler.read(query=where('start_timestamp') < 1489728492.0)
assert len(handler) > 0
@pytest.mark.parametrize(
db_args,
list(filter(lambda d: d[0] in ['tinymongo', 'mongodb', 'montydb'], db_list))
)
def test_search_mongo(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Search on MongoDB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents',
read_on_init=False
)
# MongoDB-like query
handler.read(query={'record_id': 'test'})
assert len(handler) > 0
handler.read(query={'record_id': {'$regex': '016'}})
assert len(handler) > 0
handler.read(query={'record_id': {'$regex': '^016.*'}})
assert len(handler) > 0
handler.read(query={
'$and': [
{'record_id': {'$regex': '.*'}},
{'start_timestamp': {'$lt': 1489728492.0}}
]
})
assert len(handler) > 0
# Python-Query-Language (PQL)
handler.read(pql="record_id == 'test'")
assert len(handler) > 0
handler.read(pql="record_id == regex('test.*')")
assert len(handler) > 0
handler.read(query={'contents./points_concat_downsampled': {'$exists': True}})
assert len(handler) > 0
handler.read(pql='"contents./points_concat_downsampled" == exists(True)')
assert len(handler) > 0
handler.read(pql="start_timestamp > 1500000000.0")
assert len(handler) > 0
handler.read(
pql='start_timestamp > 1400000000.0 '
'and "contents./points_concat_downsampled" == exists(True)'
)
assert len(handler) > 0
@pytest.mark.parametrize(db_args, list(filter(lambda d: d[0] in ['mongodb'], db_list)))
def test_group_by_mongo(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Evaluate Group-by on MongoDB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents',
read_on_init=False
)
handler.read()
group_keys = ['database_id', 'record_id', 'content_type', 'data_type']
all = {k: [data[k] for data in handler.data] for k in group_keys}
for key in group_keys:
handler.read(group_by=key)
grouped = [data[key] for data in handler.data]
assert len(grouped) == len(set(all[key])), 'AssertionError: group_key: {}'.format(key)
@pytest.mark.parametrize(
db_args,
list(filter(lambda d: d[0] in ['mongodb', 'montydb'], db_list))
)
def test_limit_mongo(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test for limit.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='file',
read_on_init=False
)
handler.read(limit=1)
assert len(handler) == 1
handler.read(limit=2)
assert len(handler) == 2
@pytest.mark.parametrize(db_args, db_list)
def test_add_columns(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Add columns to DB.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='contents'
)
assert isinstance(handler, V4MetaDBHandler)
data = {
'key-int': int(0),
'key-float': float(0.0),
'key-str': 'str',
'key-dict': {
'abc': 'def'
}
}
handler.add_data(data)
for key in ['key-int', 'key-float', 'key-str', 'key-dict']:
assert key in [c['name'] for c in handler.config['columns']]
assert next(filter(lambda c: c['name'] == key, handler.config['columns']))['dtype'] \
== type(data[key]).__name__ # noqa: E721
handler.save()
handler.read()
for key in ['key-int', 'key-float', 'key-str', 'key-dict']:
assert key in [c['name'] for c in handler.config['columns']]
assert next(filter(lambda c: c['name'] == key, handler.config['columns']))['dtype'] \
== type(data[key]).__name__ # noqa: E721
handler.remove_data(data)
handler.save()
@pytest.mark.parametrize(db_args, db_list)
def test_display_name(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test for display_name in configs.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='path'
)
assert isinstance(handler, V4MetaDBHandler)
reserved_names = ['_id', '_uuid', '_creation_time']
names = [c for c in handler.columns if c not in reserved_names]
display_names = [c for c in handler.df.columns.tolist() if c not in reserved_names]
assert all([n in [c['name'] for c in handler.config['columns']] for n in names])
assert all([n in [c['display_name'] for c in handler.config['columns']] for n in display_names])
@pytest.mark.parametrize(db_args, db_list)
def test_read_with_offset(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test for reading database with offset.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
handler = V4DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
orient='path'
)
assert isinstance(handler, V4MetaDBHandler)
handler.read(offset=0)
assert handler.df.index[0] == 0
handler.read(offset=1)
assert handler.df.index[0] == 1
handler.read(offset=1, limit=1)
assert handler.df.index[0] == 1
@pytest.mark.parametrize(db_args, db_list)
def test_db_handler_dtype(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test for checking data-types handled by DBHandler.
Args:
db_engine (str): DB engine (e.g., 'tinydb')
db_host (str): Host or path of DB
db_username (str): Username
db_password (str): Password
"""
from pydtk.db import DBHandler
handler = DBHandler(db_class='meta')
handler.add_data({
'record_id': 1,
'path': 'abc',
'contents': {},
'new_column_str': '',
'new_column_int': 1,
'new_column_float': 1.234,
'new_column_list': [],
'new_column_dict': {}
})
assert isinstance(handler.data[0]['record_id'], str)
assert isinstance(handler.data[0]['path'], str)
assert isinstance(handler.data[0]['contents'], dict)
assert isinstance(handler.data[0]['new_column_str'], str)
assert isinstance(handler.data[0]['new_column_int'], int)
assert isinstance(handler.data[0]['new_column_float'], float)
assert isinstance(handler.data[0]['new_column_list'], list)
assert isinstance(handler.data[0]['new_column_dict'], dict)
handler.save()
handler = DBHandler(db_class='meta')
handler.read(pql='"record_id" == regex(".*")')
assert len(handler) > 0
@pytest.mark.parametrize(
db_args, list(filter(lambda d: d[0] in ['mongodb', 'montydb'], db_list))
)
def test_remove_database_id(
db_engine: str,
db_host: str,
db_username: Optional[str],
db_password: Optional[str]
):
"""Test `drop_table` function."""
from pydtk.db import DBHandler
# Create a database with database-id 'pytest'
handler = DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
database_id='pytest'
)
_add_data_to_db(handler)
# Load database-id handler
handler = DBHandler(
db_class='database_id',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
handler.read()
assert len(list(filter(lambda x: x['database_id'] == 'pytest', handler.data))) > 0
# Remove database-id 'pytest' (in-memory)
database_info_to_remove = next(filter(lambda x: x['database_id'] == 'pytest', handler.data))
handler.remove_data(database_info_to_remove)
# Make sure that no resources are changed on the remote DB
_handler = DBHandler(
db_class='database_id',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
)
_handler.read()
assert len(list(filter(lambda x: x['database_id'] == 'pytest', _handler.data))) > 0
_metadata_handler = DBHandler(
db_class='meta',
db_engine=db_engine,
db_host=db_host,
db_username=db_username,
db_password=db_password,
base_dir_path='/opt/pydtk/test',
database_id='pytest'
)
_metadata_handler.read()
assert len(_handler) > 0
# Reflect the removal of database-id 'pytest' to the remote DB
handler.save()
# Confirm that the resources are removed on the remote DB
_handler.read()
assert len(list(filter(lambda x: x['database_id'] == 'pytest', _handler.data))) == 0
_metadata_handler.read()
assert len(_metadata_handler) == 0
if __name__ == '__main__':
test_create_db(*default_db_parameter)
test_load_db(*default_db_parameter)
test_create_db_with_env_var(*default_db_parameter)
test_load_db_with_env_var(*default_db_parameter)
test_merge(*default_db_parameter)
test_search_tinydb(*default_db_parameter)
test_search_mongo(*next(filter(lambda d: d[0] in ['tinymongo'], db_list)))
|
[] |
[] |
[
"PYDTK_META_DB_PASSWORD",
"PYDTK_META_DB_USERNAME",
"PYDTK_META_DB_HOST",
"PYDTK_META_DB_ENGINE"
] |
[]
|
["PYDTK_META_DB_PASSWORD", "PYDTK_META_DB_USERNAME", "PYDTK_META_DB_HOST", "PYDTK_META_DB_ENGINE"]
|
python
| 4 | 0 | |
pkg/exec/cli.go
|
package exec
import (
"errors"
"fmt"
"os"
"os/exec"
"os/user"
"path"
"runtime"
"strconv"
"strings"
"syscall"
"time"
"github.com/fatih/color"
"github.com/mitchellh/go-ps"
"github.com/pyroscope-io/pyroscope/pkg/agent"
"github.com/pyroscope-io/pyroscope/pkg/agent/spy"
"github.com/pyroscope-io/pyroscope/pkg/agent/upstream/remote"
"github.com/pyroscope-io/pyroscope/pkg/config"
"github.com/pyroscope-io/pyroscope/pkg/util/atexit"
"github.com/pyroscope-io/pyroscope/pkg/util/names"
"github.com/sirupsen/logrus"
)
func Cli(cfg *config.Config, args []string) error {
if len(args) == 0 {
return errors.New("no arguments passed")
}
spyName := cfg.Exec.SpyName
if spyName == "auto" {
baseName := path.Base(args[0])
spyName = spy.ResolveAutoName(baseName)
if spyName == "" {
supportedSpies := spy.SupportedExecSpies()
suggestedCommand := fmt.Sprintf("pyroscope exec -spy-name %s %s", supportedSpies[0], strings.Join(args, " "))
return fmt.Errorf(
"could not automatically find a spy for program \"%s\". Pass spy name via %s argument, for example: \n %s\n\nAvailable spies are: %s\n%s\nIf you believe this is a mistake, please submit an issue at %s",
baseName,
color.YellowString("-spy-name"),
color.YellowString(suggestedCommand),
strings.Join(supportedSpies, ","),
armMessage(),
color.BlueString("https://github.com/pyroscope-io/pyroscope/issues"),
)
}
}
logrus.Info("to disable logging from pyroscope, pass " + color.YellowString("-no-logging") + " argument to pyroscope exec")
if err := performChecks(spyName); err != nil {
return err
}
if cfg.Exec.ApplicationName == "" {
logrus.Infof("we recommend specifying application name via %s flag or env variable %s", color.YellowString("-application-name"), color.YellowString("PYROSCOPE_APPLICATION_NAME"))
cfg.Exec.ApplicationName = spyName + "." + names.GetRandomName(generateSeed(args))
logrus.Infof("for now we chose the name for you and it's \"%s\"", color.BlueString(cfg.Exec.ApplicationName))
}
logrus.WithFields(logrus.Fields{
"args": fmt.Sprintf("%q", args),
}).Debug("starting command")
cmd := exec.Command(args[0], args[1:]...)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
cmd.Stdin = os.Stdin
cmd.SysProcAttr = &syscall.SysProcAttr{}
// permissions drop
if isRoot() && !cfg.Exec.NoRootDrop && os.Getenv("SUDO_UID") != "" && os.Getenv("SUDO_GID") != "" {
creds, err := generateCredentialsDrop()
if err != nil {
logrus.Errorf("failed to drop permissions, %q", err)
} else {
cmd.SysProcAttr.Credential = creds
}
}
cmd.SysProcAttr.Setpgid = true
err := cmd.Start()
if err != nil {
return err
}
u := remote.New(remote.RemoteConfig{
UpstreamAddress: cfg.Exec.ServerAddress,
UpstreamThreads: cfg.Exec.UpstreamThreads,
UpstreamRequestTimeout: cfg.Exec.UpstreamRequestTimeout,
})
defer u.Stop()
logrus.WithFields(logrus.Fields{
"app-name": cfg.Exec.ApplicationName,
"spy-name": spyName,
"pid": cmd.Process.Pid,
"detect-subprocesses": cfg.Exec.DetectSubprocesses,
}).Debug("starting agent session")
// TODO: add sample rate, make it configurable
sess := agent.NewSession(u, cfg.Exec.ApplicationName, spyName, 100, cmd.Process.Pid, cfg.Exec.DetectSubprocesses)
err = sess.Start()
if err != nil {
logrus.Errorf("error when starting session: %q", err)
}
defer sess.Stop()
waitForProcessToExit(cmd)
return nil
}
// TODO: very hacky, at some point we'll need to make `cmd.Wait()` work
// Currently the issue is that on Linux it often thinks the process exited when it did not.
func waitForProcessToExit(cmd *exec.Cmd) {
sigc := make(chan struct{})
go func() {
cmd.Wait()
}()
atexit.Register(func() {
sigc <- struct{}{}
})
t := time.NewTicker(time.Second)
for {
select {
case <-sigc:
logrus.Debug("received a signal, killing subprocess")
cmd.Process.Kill()
return
case <-t.C:
p, err := ps.FindProcess(cmd.Process.Pid)
if p == nil || err != nil {
logrus.WithField("err", err).Debug("could not find subprocess, it might be dead")
return
}
}
}
}
func performChecks(spyName string) error {
if spyName == "gospy" {
return fmt.Errorf("gospy can not profile other processes. See our documentation on using gospy: %s", color.BlueString("https://pyroscope.io/docs/"))
}
if runtime.GOOS == "darwin" {
if !isRoot() {
logrus.Fatal("on macOS you're required to run the agent with sudo")
}
}
if !stringsContains(spy.SupportedSpies, spyName) {
supportedSpies := spy.SupportedExecSpies()
return fmt.Errorf(
"Spy \"%s\" is not supported. Available spies are: %s\n%s",
color.BlueString(spyName),
strings.Join(supportedSpies, ","),
armMessage(),
)
}
return nil
}
func stringsContains(arr []string, element string) bool {
for _, v := range arr {
if v == element {
return true
}
}
return false
}
func isRoot() bool {
u, err := user.Current()
return err == nil && u.Username == "root"
}
func armMessage() string {
if runtime.GOARCH == "arm64" {
return "Note that rbspy is not available on arm64 platform"
}
return ""
}
func generateSeed(args []string) string {
path, err := os.Getwd()
if err != nil {
path = "<unknown>"
}
return path + "|" + strings.Join(args, "&")
}
func generateCredentialsDrop() (*syscall.Credential, error) {
sudoUser := os.Getenv("SUDO_USER")
sudoUid := os.Getenv("SUDO_UID")
sudoGid := os.Getenv("SUDO_GID")
logrus.Infof("dropping permissions, running command as %q (%s/%s)", sudoUser, sudoUid, sudoGid)
uid, err := strconv.Atoi(sudoUid)
if err != nil {
return nil, err
}
gid, err := strconv.Atoi(sudoGid)
if err != nil {
return nil, err
}
return &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)}, nil
}
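// Illustration only (hypothetical values): under "sudo pyroscope exec ..." the environment
// typically carries SUDO_USER=alice SUDO_UID=1000 SUDO_GID=1000, which the function above
// parses into the syscall.Credential used to drop privileges for the child process.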
|
[
"\"SUDO_UID\"",
"\"SUDO_GID\"",
"\"SUDO_USER\"",
"\"SUDO_UID\"",
"\"SUDO_GID\""
] |
[] |
[
"SUDO_USER",
"SUDO_GID",
"SUDO_UID"
] |
[]
|
["SUDO_USER", "SUDO_GID", "SUDO_UID"]
|
go
| 3 | 0 | |
pytorch/pytorch-engine/src/main/java/ai/djl/pytorch/jni/LibUtils.java
|
/*
* Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
* with the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0/
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
* OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package ai.djl.pytorch.jni;
import ai.djl.util.Platform;
import ai.djl.util.Utils;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;
import java.util.Arrays;
import java.util.Collections;
import java.util.Enumeration;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.util.stream.Stream;
import java.util.zip.GZIPInputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Utilities for finding the PyTorch Engine binary on the System.
*
* <p>The Engine will be searched for in a variety of locations in the following order:
*
* <ol>
* <li>In the path specified by the PYTORCH_LIBRARY_PATH environment variable
<li>In a jar file located in the classpath. These jars can be created with the pytorch-native
* module.
* </ol>
*/
@SuppressWarnings("MissingJavadocMethod")
public final class LibUtils {
private static final Logger logger = LoggerFactory.getLogger(LibUtils.class);
private static final String LIB_NAME = "djl_torch";
private static final String NATIVE_LIB_NAME = "torch";
private static final Pattern VERSION_PATTERN =
Pattern.compile("(\\d+\\.\\d+\\.\\d+(-\\w)?)(-SNAPSHOT)?(-\\d+)?");
private LibUtils() {}
public static void loadLibrary() {
// TODO: workaround to make it work on Android Studio;
// it should search several places to find the native library
if (System.getProperty("java.vendor.url").equals("http://www.android.com/")) {
System.loadLibrary(LIB_NAME); // NOPMD
return;
}
String libName = findOverrideLibrary();
if (libName == null) {
AtomicBoolean fallback = new AtomicBoolean(false);
String nativeLibDir = findNativeLibrary(fallback);
if (nativeLibDir != null) {
libName = copyJniLibraryFromClasspath(Paths.get(nativeLibDir), fallback.get());
} else {
throw new IllegalStateException("Native library not found");
}
}
logger.debug("Loading pytorch library from: {}", libName);
if (System.getProperty("os.name").startsWith("Win")) {
loadWinDependencies(libName);
}
System.load(libName); // NOPMD
}
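// On Windows the Torch DLLs must be loaded explicitly and in dependency order before djl_torch.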
private static void loadWinDependencies(String libName) {
Path libDir = Paths.get(libName).getParent();
if (libDir == null) {
throw new IllegalArgumentException("Invalid library path!");
}
try (Stream<Path> paths = Files.walk(libDir)) {
paths.filter(
path -> {
String name = path.getFileName().toString();
return !"c10_cuda.dll".equals(name)
&& !"torch.dll".equals(name)
&& !"torch_cpu.dll".equals(name)
&& !"torch_cuda.dll".equals(name)
&& !"fbgemm.dll".equals(name)
&& Files.isRegularFile(path)
&& !name.endsWith("djl_torch.dll");
})
.map(path -> path.toAbsolutePath().toString())
.forEach(System::load);
System.load(libDir.resolve("fbgemm.dll").toAbsolutePath().toString());
System.load(libDir.resolve("torch_cpu.dll").toAbsolutePath().toString());
if (Files.exists(libDir.resolve("c10_cuda.dll"))) {
// Windows System.load is global load
System.load(libDir.resolve("c10_cuda.dll").toAbsolutePath().toString());
System.load(libDir.resolve("torch_cuda.dll").toAbsolutePath().toString());
}
System.load(libDir.resolve("torch.dll").toAbsolutePath().toString());
} catch (IOException e) {
throw new IllegalArgumentException("Folder not exist! " + libDir, e);
}
}
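// Looks for a user-provided djl_torch library on PYTORCH_LIBRARY_PATH, then on java.library.path.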
private static String findOverrideLibrary() {
String libPath = System.getenv("PYTORCH_LIBRARY_PATH");
if (libPath != null) {
String libName = findLibraryInPath(libPath);
if (libName != null) {
return libName;
}
}
libPath = System.getProperty("java.library.path");
if (libPath != null) {
return findLibraryInPath(libPath);
}
return null;
}
private static String findLibraryInPath(String libPath) {
String[] paths = libPath.split(File.pathSeparator);
List<String> mappedLibNames;
mappedLibNames = Collections.singletonList(System.mapLibraryName(LIB_NAME));
for (String path : paths) {
File p = new File(path);
if (!p.exists()) {
continue;
}
for (String name : mappedLibNames) {
if (p.isFile() && p.getName().endsWith(name)) {
return p.getAbsolutePath();
}
File file = new File(path, name);
if (file.exists() && file.isFile()) {
return file.getAbsolutePath();
}
}
}
return null;
}
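// Extracts the JNI binding for the detected platform and flavor from the classpath into the
// native library directory, reusing a previously extracted copy when one is present.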
private static String copyJniLibraryFromClasspath(Path nativeDir, boolean fallback) {
String name = System.mapLibraryName(LIB_NAME);
Platform platform = Platform.fromSystem();
String classifier = platform.getClassifier();
String flavor = platform.getFlavor();
if (fallback || flavor.isEmpty()) {
flavor = "cpu";
}
Path precxx11Lib = nativeDir.resolve("libstdc++.so.6");
if (Files.exists(precxx11Lib)) {
flavor += "-precxx11"; // NOPMD
}
Properties prop = new Properties();
try (InputStream stream =
LibUtils.class.getResourceAsStream("/jnilib/pytorch.properties")) {
prop.load(stream);
} catch (IOException e) {
throw new IllegalStateException("Cannot find pytorch property file", e);
}
String version = prop.getProperty("version");
Path path = nativeDir.resolve(version + '-' + flavor + '-' + name);
if (Files.exists(path)) {
return path.toAbsolutePath().toString();
}
Path tmp = null;
try (InputStream stream =
LibUtils.class.getResourceAsStream(
"/jnilib/" + classifier + '/' + flavor + '/' + name)) {
tmp = Files.createTempFile(nativeDir, "jni", "tmp");
Files.copy(stream, tmp, StandardCopyOption.REPLACE_EXISTING);
Utils.moveQuietly(tmp, path);
return path.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Cannot copy jni files", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
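// Scans the classpath for native pytorch.properties entries, preferring a jar that matches the
// current platform and falling back to downloading when only a placeholder jar is found.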
private static synchronized String findNativeLibrary(AtomicBoolean fallback) {
Enumeration<URL> urls;
try {
urls =
Thread.currentThread()
.getContextClassLoader()
.getResources("native/lib/pytorch.properties");
} catch (IOException e) {
logger.warn("", e);
return null;
}
// No native jars
if (!urls.hasMoreElements()) {
return null;
}
Platform systemPlatform = Platform.fromSystem();
try {
Platform matching = null;
Platform placeholder = null;
while (urls.hasMoreElements()) {
URL url = urls.nextElement();
Platform platform = Platform.fromUrl(url);
if (platform.isPlaceholder()) {
placeholder = platform;
} else if (platform.matches(systemPlatform)) {
matching = platform;
break;
}
}
if (matching != null) {
return copyNativeLibraryFromClasspath(matching);
}
if (placeholder != null) {
try {
return downloadPyTorch(placeholder, fallback);
} catch (IOException e) {
throw new IllegalStateException("Failed to download PyTorch native library", e);
}
}
} catch (IOException e) {
throw new IllegalStateException(
"Failed to read PyTorch native library jar properties", e);
}
throw new IllegalStateException(
"Your PyTorch native library jar does not match your operating system. Make sure the Maven Dependency Classifier matches your system type.");
}
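// Copies the native PyTorch libraries from the classpath jar into the cache directory and returns that directory.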
private static String copyNativeLibraryFromClasspath(Platform platform) {
Path tmp = null;
String version = platform.getVersion();
String flavor = platform.getFlavor();
// TODO: include precxx11 into native jar's flavor property
if (Arrays.asList(platform.getLibraries()).contains("libstdc++.so.6")) {
flavor += "-precxx11"; // NOPMD
}
String classifier = platform.getClassifier();
try {
String libName = System.mapLibraryName(NATIVE_LIB_NAME);
Path cacheDir = getCacheDir();
logger.debug("Using cache dir: {}", cacheDir);
Path dir = cacheDir.resolve(version + '-' + flavor + '-' + classifier);
Path path = dir.resolve(libName);
if (Files.exists(path)) {
return dir.toAbsolutePath().toString();
}
Files.createDirectories(cacheDir);
tmp = Files.createTempDirectory(cacheDir, "tmp");
for (String file : platform.getLibraries()) {
String libPath = "/native/lib/" + file;
try (InputStream is = LibUtils.class.getResourceAsStream(libPath)) {
Files.copy(is, tmp.resolve(file), StandardCopyOption.REPLACE_EXISTING);
}
}
Utils.moveQuietly(tmp, dir);
return dir.toAbsolutePath().toString();
} catch (IOException e) {
throw new IllegalStateException("Failed to extract PyTorch native library", e);
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
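// Downloads the native PyTorch libraries for the requested flavor from the DJL S3 bucket,
// falling back to the CPU build when no matching CUDA flavor is published.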
private static String downloadPyTorch(Platform platform, AtomicBoolean fallback)
throws IOException {
String version = platform.getVersion();
String flavor = platform.getFlavor();
if (flavor.isEmpty()) {
flavor = "cpu";
}
String classifier = platform.getClassifier();
String os = platform.getOsPrefix();
String libName = System.mapLibraryName(NATIVE_LIB_NAME);
Path cacheDir = getCacheDir();
logger.debug("Using cache dir: {}", cacheDir);
Path dir = cacheDir.resolve(version + '-' + flavor + '-' + classifier);
Path path = dir.resolve(libName);
if (Files.exists(path)) {
return dir.toAbsolutePath().toString();
}
// the files were not found in the cache; download them
Files.createDirectories(cacheDir);
Matcher matcher = VERSION_PATTERN.matcher(version);
if (!matcher.matches()) {
throw new IllegalArgumentException("Unexpected version: " + version);
}
String link = "https://djl-ai.s3.amazonaws.com/publish/pytorch-" + matcher.group(1);
Path tmp = null;
try (InputStream is = new URL(link + "/files.txt").openStream()) {
List<String> lines = Utils.readLines(is);
if (flavor.startsWith("cu")
&& !lines.contains(flavor + '/' + os + "/native/lib/" + libName + ".gz")) {
logger.warn("No matching cuda flavor for {} found: {}.", os, flavor);
// fallback to CPU
flavor = "cpu";
fallback.set(true);
// check again
dir = cacheDir.resolve(version + '-' + flavor + '-' + classifier);
path = dir.resolve(libName);
if (Files.exists(path)) {
return dir.toAbsolutePath().toString();
}
}
tmp = Files.createTempDirectory(cacheDir, "tmp");
for (String line : lines) {
if (line.startsWith(flavor + '/' + os + '/')) {
URL url = new URL(link + '/' + line);
String fileName = line.substring(line.lastIndexOf('/') + 1, line.length() - 3);
logger.info("Downloading {} ...", url);
try (InputStream fis = new GZIPInputStream(url.openStream())) {
Files.copy(fis, tmp.resolve(fileName), StandardCopyOption.REPLACE_EXISTING);
}
}
}
Utils.moveQuietly(tmp, dir);
return dir.toAbsolutePath().toString();
} finally {
if (tmp != null) {
Utils.deleteQuietly(tmp);
}
}
}
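// Resolves the cache directory: ENGINE_CACHE_DIR (system property, then env var), then
// DJL_CACHE_DIR (system property, then env var), defaulting to ~/.djl.ai/pytorch.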
private static Path getCacheDir() {
String cacheDir = System.getProperty("ENGINE_CACHE_DIR");
if (cacheDir == null || cacheDir.isEmpty()) {
cacheDir = System.getenv("ENGINE_CACHE_DIR");
if (cacheDir == null || cacheDir.isEmpty()) {
cacheDir = System.getProperty("DJL_CACHE_DIR");
if (cacheDir == null || cacheDir.isEmpty()) {
cacheDir = System.getenv("DJL_CACHE_DIR");
if (cacheDir == null || cacheDir.isEmpty()) {
String userHome = System.getProperty("user.home");
return Paths.get(userHome, ".djl.ai").resolve("pytorch");
}
}
}
}
return Paths.get(cacheDir, "pytorch");
}
}
|
[
"\"PYTORCH_LIBRARY_PATH\"",
"\"ENGINE_CACHE_DIR\"",
"\"DJL_CACHE_DIR\""
] |
[] |
[
"DJL_CACHE_DIR",
"ENGINE_CACHE_DIR",
"PYTORCH_LIBRARY_PATH"
] |
[]
|
["DJL_CACHE_DIR", "ENGINE_CACHE_DIR", "PYTORCH_LIBRARY_PATH"]
|
java
| 3 | 0 | |
pytorch_lightning/utilities/imports.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utilities"""
import importlib
import operator
import os
import platform
import sys
from importlib.util import find_spec
import torch
from packaging.version import Version
from pkg_resources import DistributionNotFound
def _module_available(module_path: str) -> bool:
"""
Check if a path is available in your environment
>>> _module_available('os')
True
>>> _module_available('bla.bla')
False
"""
try:
return find_spec(module_path) is not None
except AttributeError:
# Python 3.6
return False
except ModuleNotFoundError:
# Python 3.7+
return False
def _compare_version(package: str, op, version) -> bool:
"""
Compare package version with some requirements
>>> _compare_version("torch", operator.ge, "0.1")
True
"""
try:
pkg = importlib.import_module(package)
except (ModuleNotFoundError, DistributionNotFound):
return False
try:
pkg_version = Version(pkg.__version__)
except TypeError:
# this is mocked by sphinx, so it shall return True to generate all summaries
return True
return op(pkg_version, Version(version))
_IS_WINDOWS = platform.system() == "Windows"
_IS_INTERACTIVE = hasattr(sys, "ps1") # https://stackoverflow.com/a/64523765
_TORCH_GREATER_EQUAL_1_7 = _compare_version("torch", operator.ge, "1.7.0")
_TORCH_GREATER_EQUAL_1_8 = _compare_version("torch", operator.ge, "1.8.0")
_TORCH_GREATER_EQUAL_1_8_1 = _compare_version("torch", operator.ge, "1.8.1")
_TORCH_GREATER_EQUAL_1_9 = _compare_version("torch", operator.ge, "1.9.0")
_TORCH_GREATER_EQUAL_1_10 = _compare_version("torch", operator.ge, "1.10.0")
_APEX_AVAILABLE = _module_available("apex.amp")
_BOLTS_AVAILABLE = _module_available("pl_bolts")
_DEEPSPEED_AVAILABLE = _module_available("deepspeed")
_FAIRSCALE_AVAILABLE = not _IS_WINDOWS and _module_available("fairscale.nn")
_FAIRSCALE_OSS_FP16_BROADCAST_AVAILABLE = _FAIRSCALE_AVAILABLE and _compare_version("fairscale", operator.ge, "0.3.3")
_FAIRSCALE_FULLY_SHARDED_AVAILABLE = _FAIRSCALE_AVAILABLE and _compare_version("fairscale", operator.ge, "0.3.4")
_GROUP_AVAILABLE = not _IS_WINDOWS and _module_available("torch.distributed.group")
_HOROVOD_AVAILABLE = _module_available("horovod.torch")
_HYDRA_AVAILABLE = _module_available("hydra")
_HYDRA_EXPERIMENTAL_AVAILABLE = _module_available("hydra.experimental")
_JSONARGPARSE_AVAILABLE = _module_available("jsonargparse")
_KINETO_AVAILABLE = _TORCH_GREATER_EQUAL_1_8_1 and torch.profiler.kineto_available()
_NATIVE_AMP_AVAILABLE = _module_available("torch.cuda.amp") and hasattr(torch.cuda.amp, "autocast")
_OMEGACONF_AVAILABLE = _module_available("omegaconf")
_POPTORCH_AVAILABLE = _module_available("poptorch")
_RICH_AVAILABLE = _module_available("rich")
_TORCH_CPU_AMP_AVAILABLE = _compare_version(
"torch", operator.ge, "1.10.0dev20210501"
) # todo: swap to 1.10.0 once released
_TORCH_BFLOAT_AVAILABLE = _compare_version(
"torch", operator.ge, "1.10.0.dev20210820"
) # todo: swap to 1.10.0 once released
_TORCH_QUANTIZE_AVAILABLE = bool([eg for eg in torch.backends.quantized.supported_engines if eg != "none"])
_TORCH_SHARDED_TENSOR_AVAILABLE = _compare_version(
"torch", operator.ge, "1.10.0.dev20210809"
) # todo: swap to 1.10.0 once released
_TORCHTEXT_AVAILABLE = _module_available("torchtext")
_TORCHVISION_AVAILABLE = _module_available("torchvision")
_TORCHMETRICS_LOWER_THAN_0_3 = _compare_version("torchmetrics", operator.lt, "0.3.0")
_TORCHMETRICS_GREATER_EQUAL_0_3 = _compare_version("torchmetrics", operator.ge, "0.3.0")
_XLA_AVAILABLE: bool = _module_available("torch_xla")
from pytorch_lightning.utilities.xla_device import XLADeviceUtils # noqa: E402
_TPU_AVAILABLE = XLADeviceUtils.tpu_device_exists()
if _POPTORCH_AVAILABLE:
import poptorch
_IPU_AVAILABLE = poptorch.ipuHardwareIsAvailable()
else:
_IPU_AVAILABLE = False
# Experimental feature within PyTorch Lightning: enabled via the PL_FAULT_TOLERANT_TRAINING environment variable (requires torch >= 1.7).
def _fault_tolerant_training() -> bool:
return _TORCH_GREATER_EQUAL_1_7 and int(os.getenv("PL_FAULT_TOLERANT_TRAINING", 0))
|
[] |
[] |
[
"PL_FAULT_TOLERANT_TRAINING"
] |
[]
|
["PL_FAULT_TOLERANT_TRAINING"]
|
python
| 1 | 0 | |
Model/NFM.py
|
'''
Created on Dec 18, 2018
Tensorflow Implementation of the Baseline model, NFM, in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang ([email protected])
'''
import tensorflow.compat.v1 as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class NFM(object):
def __init__(self, data_config, pretrain_data, args):
self._parse_args(data_config, pretrain_data, args)
self._build_inputs()
self.weights = self._build_weights()
self._build_model()
self._build_loss()
self._statistics_params()
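# Unpacks dataset sizes, embedding/MLP hyper-parameters and regularisation settings from the config and CLI args.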
def _parse_args(self, data_config, pretrain_data, args):
if args.model_type == 'nfm':
self.model_type = 'nfm'
else:
self.model_type = 'fm'
self.pretrain_data = pretrain_data
self.n_users = data_config['n_users']
self.n_items = data_config['n_items']
self.n_entities = data_config['n_entities']
self.n_features = data_config['n_users'] + data_config['n_entities']
self.lr = args.lr
# settings for CF part.
self.emb_dim = args.embed_size
self.batch_size = args.batch_size
# settings for neural CF part.
if args.model_type == 'nfm':
self.weight_size = eval(args.layer_size)
self.n_layers = len(self.weight_size)
self.model_type += '_l%d' % self.n_layers
else:
self.weight_size = []
self.n_layers = 0
self.regs = eval(args.regs)
self.verbose = args.verbose
def _build_inputs(self):
self.pos_indices = tf.placeholder(tf.int64, shape=[None, 2], name='pos_indices')
self.pos_values = tf.placeholder(tf.float32, shape=[None], name='pos_values')
self.pos_shape = tf.placeholder(tf.int64, shape=[2], name='pos_shape')
self.neg_indices = tf.placeholder(tf.int64, shape=[None, 2], name='neg_indices')
self.neg_values = tf.placeholder(tf.float32, shape=[None], name='neg_values')
self.neg_shape = tf.placeholder(tf.int64, shape=[2], name='neg_shape')
self.mess_dropout = tf.placeholder(tf.float32, shape=[None], name='mess_dropout')
# Input positive features, shape=(batch_size * feature_dim)
self.sp_pos_feats = tf.SparseTensor(self.pos_indices, self.pos_values, self.pos_shape)
# Input negative features, shape=(batch_size * feature_dim)
self.sp_neg_feats = tf.SparseTensor(self.neg_indices, self.neg_values, self.neg_shape)
def _build_weights(self):
all_weights = dict()
initializer = tf.initializers.glorot_uniform()
all_weights['var_linear'] = tf.Variable(initializer([self.n_features, 1]), name='var_linear')
# model parameters for FM.
if self.pretrain_data is None:
all_weights['var_factor'] = tf.Variable(initializer([self.n_features, self.emb_dim]), name='var_factor')
print('using xavier initialization')
else:
user_embed = self.pretrain_data['user_embed']
item_embed = self.pretrain_data['item_embed']
other_embed = initializer([self.n_entities - self.n_items, self.emb_dim])
all_weights['var_factor'] = tf.Variable(initial_value=tf.concat([user_embed, item_embed, other_embed], 0),
trainable=True, name='var_factor', dtype=tf.float32)
# user_embed = tf.Variable(initial_value=self.pretrain_data['user_embed'], trainable=True, dtype=tf.float32)
# item_embed = tf.Variable(initial_value=self.pretrain_data['item_embed'], trainable=True, dtype=tf.float32)
# other_embed = tf.Variable(initializer([self.n_entities - self.n_items, self.emb_dim]))
#
# all_weights['var_factor'] = tf.concat([user_embed, item_embed, other_embed], 0, name='var_factor')
print('using pretrained initialization')
# model parameters for NFM.
self.weight_size_list = [self.emb_dim] + self.weight_size
for i in range(self.n_layers):
all_weights['W_%d' %i] = tf.Variable(
initializer([self.weight_size_list[i], self.weight_size_list[i+1]]), name='W_%d' %i)
all_weights['b_%d' %i] = tf.Variable(
initializer([1, self.weight_size_list[i+1]]), name='b_%d' %i)
if self.model_type == 'fm':
all_weights['h'] = tf.constant(1., tf.float32, [self.emb_dim, 1])
else:
all_weights['h'] = tf.Variable(initializer([self.weight_size_list[-1], 1]), name='h')
return all_weights
def _build_model(self):
self.batch_predictions = self._get_bi_pooling_predictions(self.sp_pos_feats)
def _build_loss(self):
pos_scores = self._get_bi_pooling_predictions(self.sp_pos_feats)
neg_scores = self._get_bi_pooling_predictions(self.sp_neg_feats)
maxi = tf.log(1e-10 + tf.nn.sigmoid(pos_scores - neg_scores))
cf_loss = tf.negative(tf.reduce_mean(maxi))
self.base_loss = cf_loss
self.reg_loss = self.regs[0] * tf.nn.l2_loss(self.weights['h'])
# self.reg_loss = self.regs[0] * tf.nn.l2_loss(self.weights['var_factor']) + \
# self.regs[1] * tf.nn.l2_loss(self.weights['h'])
#
# for k in range(self.n_layers):
# self.reg_loss += self.regs[-1] * (tf.nn.l2_loss(self.weights['W_%d' % k]))
self.kge_loss = tf.constant(0.0, tf.float32, [1])
self.loss = self.base_loss + self.kge_loss + self.reg_loss
# Optimization process.
self.opt = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)
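# Bi-interaction pooling (NFM Eq. 3): 0.5 * ((sum_i v_i)^2 - sum_i v_i^2), followed by the MLP and prediction layer (Eq. 7).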
def _get_bi_pooling_predictions(self, feats):
# Linear terms: batch_size * 1
term0 = tf.sparse_tensor_dense_matmul(feats, self.weights['var_linear'])
# Interaction terms w.r.t. first sum then square: batch_size * emb_size.
# e.g., sum_{k from 1 to K}{(v1k+v2k)**2}
sum_emb = tf.sparse_tensor_dense_matmul(feats, self.weights['var_factor'])
term1 = tf.square(sum_emb)
# Interaction terms w.r.t. first square then sum: batch_size * emb_size.
# e.g., sum_{k from 1 to K}{v1k**2 + v2k**2}
square_emb = tf.sparse_tensor_dense_matmul(tf.square(feats), tf.square(self.weights['var_factor']))
term2 = square_emb
# "neural factorization machine", Equation 3, the result of bi-interaction pooling: batch_size * emb_size
term3 = 0.5 * (term1 - term2)
# "neural factorization machine", Equation 7, the result of MLP: batch_size * 1
z = [term3]
for i in range(self.n_layers):
temp = tf.nn.relu(tf.matmul(z[i], self.weights['W_%d' % i]) + self.weights['b_%d' % i])
temp = tf.nn.dropout(temp, 1 - self.mess_dropout[i])
z.append(temp)
preds = term0 + tf.matmul(z[-1], self.weights['h'])
return preds
def _statistics_params(self):
# number of params
total_parameters = 0
for variable in self.weights.values():
shape = variable.get_shape() # shape is an array of tf.Dimension
variable_parameters = 1
for dim in shape:
variable_parameters *= dim.value
total_parameters += variable_parameters
if self.verbose > 0:
print("#params: %d" % total_parameters)
def train(self, sess, feed_dict):
return sess.run([self.opt, self.loss, self.base_loss, self.kge_loss, self.reg_loss], feed_dict)
def eval(self, sess, feed_dict):
batch_predictions = sess.run(self.batch_predictions, feed_dict)
return batch_predictions
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
enterprise/dev/ci/ci/helpers.go
|
package ci
import (
"fmt"
"os"
"os/exec"
"strconv"
"strings"
"time"
"github.com/hashicorp/go-multierror"
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"
)
// Config is the set of configuration parameters that determine the structure of the CI build. These
// parameters are extracted from the build environment (branch name, commit hash, timestamp, etc.)
type Config struct {
now time.Time
branch string
version string
commit string
// mustIncludeCommit, if non-empty, is a list of commits at least one of which must be present
// in the branch. If empty, then no check is enforced.
mustIncludeCommit []string
taggedRelease bool
releaseBranch bool
isBextReleaseBranch bool
patch bool
patchNoTest bool
isQuick bool
}
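// ComputeConfig derives the CI build configuration from the Buildkite environment
// (branch, tag, commit, build number) and from branch naming conventions.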
func ComputeConfig() Config {
now := time.Now()
branch := os.Getenv("BUILDKITE_BRANCH")
version := os.Getenv("BUILDKITE_TAG")
commit := os.Getenv("BUILDKITE_COMMIT")
if commit == "" {
commit = "1234567890123456789012345678901234567890" // for testing
}
taggedRelease := true // true if this is a tagged release
switch {
case strings.HasPrefix(version, "v"):
// The Git tag "v1.2.3" should map to the Docker image "1.2.3" (without v prefix).
version = strings.TrimPrefix(version, "v")
default:
taggedRelease = false
buildNum, _ := strconv.Atoi(os.Getenv("BUILDKITE_BUILD_NUMBER"))
version = fmt.Sprintf("%05d_%s_%.7s", buildNum, now.Format("2006-01-02"), commit)
}
patchNoTest := strings.HasPrefix(branch, "docker-images-patch-notest/")
patch := strings.HasPrefix(branch, "docker-images-patch/")
if patchNoTest || patch {
version = version + "_patch"
}
isQuick := strings.HasPrefix(branch, "quick/")
var mustIncludeCommits []string
if rawMustIncludeCommit := os.Getenv("MUST_INCLUDE_COMMIT"); rawMustIncludeCommit != "" {
mustIncludeCommits = strings.Split(rawMustIncludeCommit, ",")
for i := range mustIncludeCommits {
mustIncludeCommits[i] = strings.TrimSpace(mustIncludeCommits[i])
}
}
return Config{
now: now,
branch: branch,
version: version,
commit: commit,
mustIncludeCommit: mustIncludeCommits,
taggedRelease: taggedRelease,
releaseBranch: lazyregexp.New(`^[0-9]+\.[0-9]+$`).MatchString(branch),
isBextReleaseBranch: branch == "bext/release",
patch: patch,
patchNoTest: patchNoTest,
isQuick: isQuick,
}
}
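// ensureCommit verifies that at least one of the mustIncludeCommit hashes is an ancestor of HEAD.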
func (c Config) ensureCommit() error {
if len(c.mustIncludeCommit) == 0 {
return nil
}
found := false
var errs error
for _, mustIncludeCommit := range c.mustIncludeCommit {
output, err := exec.Command("git", "merge-base", "--is-ancestor", mustIncludeCommit, "HEAD").CombinedOutput()
if err == nil {
found = true
break
}
errs = multierror.Append(errs, fmt.Errorf("%v | Output: %q", err, string(output)))
}
if !found {
fmt.Printf("This branch %q at commit %s does not include any of these commits: %s.\n", c.branch, c.commit, strings.Join(c.mustIncludeCommit, ", "))
fmt.Println("Rebase onto the latest master to get the latest CI fixes.")
fmt.Printf("Errors from `git merge-base --is-ancestor $COMMIT HEAD`: %s", errs.Error())
return errs
}
return nil
}
func (c Config) isPR() bool {
return !c.isBextReleaseBranch &&
!c.releaseBranch &&
!c.taggedRelease &&
c.branch != "master" &&
!strings.HasPrefix(c.branch, "master-dry-run/") &&
!strings.HasPrefix(c.branch, "docker-images-patch/")
}
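// isDocsOnly reports whether the diff against origin/master touches only documentation files and the changelog.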
func isDocsOnly() bool {
output, err := exec.Command("git", "diff", "--name-only", "origin/master...").Output()
if err != nil {
panic(err)
}
for _, line := range strings.Split(strings.TrimSpace(string(output)), "\n") {
if !strings.HasPrefix(line, "doc") && line != "CHANGELOG.md" {
return false
}
}
return true
}
|
[
"\"BUILDKITE_BRANCH\"",
"\"BUILDKITE_TAG\"",
"\"BUILDKITE_COMMIT\"",
"\"BUILDKITE_BUILD_NUMBER\"",
"\"MUST_INCLUDE_COMMIT\""
] |
[] |
[
"BUILDKITE_COMMIT",
"BUILDKITE_BRANCH",
"MUST_INCLUDE_COMMIT",
"BUILDKITE_BUILD_NUMBER",
"BUILDKITE_TAG"
] |
[]
|
["BUILDKITE_COMMIT", "BUILDKITE_BRANCH", "MUST_INCLUDE_COMMIT", "BUILDKITE_BUILD_NUMBER", "BUILDKITE_TAG"]
|
go
| 5 | 0 |