| filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses, 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses, 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses, 1 value) |
|---|---|---|---|---|---|---|---|---|---|---|
docs_en/conf.py
|
# -*- coding: utf-8 -*-
#
# Test documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 7 20:09:23 2015.
#
# This file is execfile() with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
#sys.path.insert(0, os.path.abspath('..'))
import sphinx_rtd_theme
from recommonmark.transform import AutoStructify
DOC_SOURCES_DIR = os.path.dirname(os.path.abspath(__file__))
PROJECT_ROOT_DIR = os.path.dirname(os.path.abspath(DOC_SOURCES_DIR))
sys.path.insert(0, DOC_SOURCES_DIR)
print('PROJECT_ROOT_DIR:', PROJECT_ROOT_DIR)
print('DOC_SOURCES_DIR:', DOC_SOURCES_DIR)
# Check whether we are running in the ReadTheDocs environment
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Hack for ReadTheDocs' missing git-lfs support
# if on_rtd:
# print('Fetching files with git_lfs')
# import git_lfs
# try:
# from urllib.error import HTTPError
# except ImportError:
# from urllib2 import HTTPError
# _fetch_urls = git_lfs.fetch_urls
# def _patched_fetch_urls(lfs_url, oid_list):
# """Hack git_lfs library that sometimes makes too big requests"""
# objects = []
# try:
# objects.extend(_fetch_urls(lfs_url, oid_list))
# except HTTPError as err:
# if err.code != 413:
# raise
# print("LFS: request entity too large, splitting in half")
# objects.extend(_patched_fetch_urls(lfs_url, oid_list[:len(oid_list) // 2]))
# objects.extend(_patched_fetch_urls(lfs_url, oid_list[len(oid_list) // 2:]))
# return objects
# git_lfs.fetch_urls = _patched_fetch_urls
# git_lfs.fetch(PROJECT_ROOT_DIR)
# The suffix of source filenames.
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx_copybutton',
'sphinxcontrib.mermaid',
'sphinx.ext.mathjax',
'sphinx_markdown_tables',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WeBASE EN'
copyright = u'© 2020. All rights reserved.'
author = u'WeBankFinTech'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.4'
# The full version, including alpha/beta/rc tags.
release = 'v1.4.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#language = None
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_options = {'navigation_depth': 4,}
using_rtd_theme = True
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
todo_include_todos = True
# VCS options: https://docs.readthedocs.io/en/latest/vcs.html#github
html_context = {
"display_github": True, # Integrate GitHub
"github_repo": "WeBASE-DOC", # Repo name
"github_version": "master", # Version
"conf_py_path": "/", # Path in the checkout to the docs root
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'WeBASE_en_doc'
# -- Options for LaTeX output ---------------------------------------------
latex_engine = 'pdflatex'
latex_use_xindy = False
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
'preamble': r'''
\hypersetup{unicode=true}
\usepackage{CJKutf8}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
\DeclareUnicodeCharacter{2203}{\ensuremath{\exists}}
\DeclareUnicodeCharacter{2200}{\ensuremath{\forall}}
\DeclareUnicodeCharacter{2286}{\ensuremath{\subseteq}}
\DeclareUnicodeCharacter{2713}{x}
\DeclareUnicodeCharacter{27FA}{\ensuremath{\Longleftrightarrow}}
\DeclareUnicodeCharacter{221A}{\ensuremath{\sqrt{}}}
\DeclareUnicodeCharacter{221B}{\ensuremath{\sqrt[3]{}}}
\DeclareUnicodeCharacter{2295}{\ensuremath{\oplus}}
\DeclareUnicodeCharacter{2297}{\ensuremath{\otimes}}
\begin{CJK}{UTF8}{gbsn}
\AtEndDocument{\end{CJK}}
''',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'WeBASE_en.tex', u'WeBASE Documentation',
u'WeBankFinTech', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'WeBASE', u'WeBASE EN Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'WeBASE', u'WeBASE EN Documentation',
author, 'WeBankFinTech', 'english documents of WeBASE',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Markdown support
github_doc_root = 'https://github.com/rtfd/recommonmark/tree/master/doc/'
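# Illustrative note (not part of the original file): with the url_resolver set
# in setup() below, a relative Markdown link such as 'install.md' would be
# rewritten to github_doc_root + 'install.md' by recommonmark's AutoStructify.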
def setup(app):
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: github_doc_root + url,
'auto_toc_tree_section': 'Contents',
'enable_eval_rst': True,
'enable_auto_doc_ref': True,
}, True)
app.add_transform(AutoStructify)
app.add_stylesheet('css/custom.css')
app.add_javascript('../_static/js/en-readthedocs-analytics.js')
| [] | [] | ["READTHEDOCS"] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
pkg/plugin/plugin.go
|
// Copyright 2018 The kubecfg authors
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/spf13/afero"
yaml "gopkg.in/yaml.v2"
)
// Config is configuration for a Plugin.
type Config struct {
// Name is the name of the plugin.
Name string `yaml:"name,omitempty"`
// Version is the version of the plugin.
Version string `yaml:"version,omitempty"`
// Description is the plugin description.
Description string `yaml:"description,omitempty"`
// IgnoreFlags is set if this plugin will ignore flags.
IgnoreFlags bool `yaml:"ignore_flags,omitempty"`
// Command is the command that needs to be called to invoke the plugin.
Command string `yaml:"command,omitempty"`
}
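// Illustrative example (values are hypothetical, not from the original source):
// a plugin.yaml such as the following would be unmarshalled into Config by
// readConfig below:
//
//	name: hello
//	version: 0.1.0
//	description: prints a greeting
//	ignore_flags: false
//	command: $KS_PLUGIN_DIR/hello.sh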
func readConfig(fs afero.Fs, path string) (Config, error) {
b, err := afero.ReadFile(fs, path)
if err != nil {
return Config{}, err
}
var config Config
if err := yaml.Unmarshal(b, &config); err != nil {
return Config{}, err
}
return config, nil
}
// Plugin is a ksonnet plugin.
type Plugin struct {
// RootDir is the root directory for the plugin.
RootDir string
// Config is configuration for the plugin.
Config Config
}
// BuildRunCmd builds a command that runs the plugin.
func (p *Plugin) BuildRunCmd(env, args []string) *exec.Cmd {
bin := strings.Replace(p.Config.Command, "$KS_PLUGIN_DIR", p.RootDir, 1)
cmd := exec.Command(bin, args...)
cmd.Env = env
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
return cmd
}
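// For example (hypothetical values): with Config.Command set to
// "$KS_PLUGIN_DIR/hello.sh" and RootDir "/home/user/.config/ksonnet/plugins/hello",
// BuildRunCmd invokes "/home/user/.config/ksonnet/plugins/hello/hello.sh" with the
// supplied env and args, wired to the caller's stdout and stderr.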
// importPlugin creates a new Plugin given a path.
func importPlugin(fs afero.Fs, path string) (Plugin, error) {
configPath := filepath.Join(path, "plugin.yaml")
exists, err := afero.Exists(fs, configPath)
if err != nil {
return Plugin{}, err
}
if !exists {
return Plugin{}, errors.Errorf("plugin in %q doesn't have a configuration", path)
}
config, err := readConfig(fs, configPath)
if err != nil {
return Plugin{}, errors.Wrapf(err, "read plugin configuration %q", configPath)
}
plugin := Plugin{
RootDir: path,
Config: config,
}
return plugin, nil
}
// Find finds a plugin by name.
func Find(fs afero.Fs, name string) (Plugin, error) {
plugins, err := List(fs)
if err != nil {
return Plugin{}, err
}
for _, plugin := range plugins {
if plugin.Config.Name == name {
return plugin, nil
}
}
return Plugin{}, errors.Errorf("%s is not a known plugin", name)
}
// List plugins
func List(fs afero.Fs) ([]Plugin, error) {
rootPath, err := pluginDir()
if err != nil {
return []Plugin{}, err
}
exist, err := afero.Exists(fs, rootPath)
if err != nil {
return nil, err
}
if !exist {
return []Plugin{}, nil
}
fis, err := afero.ReadDir(fs, rootPath)
if err != nil {
return nil, err
}
plugins := make([]Plugin, 0)
for _, fi := range fis {
if fi.IsDir() {
path := filepath.Join(rootPath, fi.Name())
plugin, err := importPlugin(fs, path)
if err != nil {
return nil, err
}
plugins = append(plugins, plugin)
}
}
return plugins, nil
}
// TODO: make this work with windows
func pluginDir() (string, error) {
homeDir := os.Getenv("HOME")
if homeDir == "" {
return "", errors.New("could not find home directory")
}
return filepath.Join(homeDir, ".config", "ksonnet", "plugins"), nil
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
lib/googlecloudsdk/core/cache/resource_cache.py
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Cloud SDK resource cache.
A resource is an object maintained by a service. Each resource has a
corresponding URI. A URI is composed of one or more parameters. A
service-specific resource parser extracts the parameter tuple from a URI. A
corresponding resource formatter reconstructs the URI from the parameter tuple.
Each service has an API List request that returns the list of resource URIs
visible to the caller. Some APIs are aggregated and return the list of all URIs
for all parameter values. Other APIs are not aggregated and require one or more
of the parsed parameter tuple values to be specified in the list request. This
means that getting the list of all URIs for a non-aggregated resource requires
multiple List requests, ranging over the combination of all values for all
aggregate parameters.
A collection is a list of resource URIs in a service visible to the caller. The
collection name uniquely identifies the collection and the service.
A resource cache is a persistent cache that stores parsed resource parameter
tuples for multiple collections. The data for a collection is in one or more
tables.
+---------------------------+
| resource cache |
| +-----------------------+ |
| | collection | |
| | +-------------------+ | |
| | | table | | |
| | | (key,...,col,...) | | |
| | | ... | | |
| | +-------------------+ | |
| | ... | |
| +-----------------------+ |
| ... |
+---------------------------+
A resource cache is implemented as a ResourceCache object that contains
Collection objects. A Collection is a virtual table that contains one or more
persistent cache tables. Each Collection is also an Updater that handles
resource parsing and updates. Updates are typically done by service List or
Query requests that populate the tables.
The Updater objects make this module resource agnostic. For example, there
could be updater objects that are not associated with a URI. The ResourceCache
doesn't care.
If the List request API for a collection aggregates then its parsed parameter
tuples are contained in one table. Otherwise the collection is stored in
multiple tables. The total number of tables is determined by the number of
aggregate parameters for the List API, and the number of values each aggregate
parameter can take on.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import abc
import os
from googlecloudsdk.core import config
from googlecloudsdk.core import log
from googlecloudsdk.core import module_util
from googlecloudsdk.core import properties
from googlecloudsdk.core.cache import exceptions
from googlecloudsdk.core.cache import file_cache
from googlecloudsdk.core.util import encoding
from googlecloudsdk.core.util import files
import six
# Rollout hedge just in case a cache implementation causes problems.
try:
from googlecloudsdk.core.cache import sqlite_cache # pylint: disable=g-import-not-at-top, sqlite3 is not ubiquitous
except ImportError:
sqlite_cache = None
if (sqlite_cache and
'sql' in encoding.GetEncodedValue(
os.environ, 'CLOUDSDK_CACHE_IMPLEMENTATION', 'sqlite')):
PERSISTENT_CACHE_IMPLEMENTATION = sqlite_cache
else:
PERSISTENT_CACHE_IMPLEMENTATION = file_cache
DEFAULT_TIMEOUT = 1*60*60
VERSION = 'googlecloudsdk.resource-1.0'
class ParameterInfo(object):
"""An object for accessing parameter values in the program state.
"program state" is defined by this class. It could include parsed command
line arguments and properties. The class can also map between resource
and program parameter names.
Attributes:
_additional_params: The list of parameter names not in the parsed resource.
_updaters: A parameter_name => (Updater, aggregator) dict.
"""
def __init__(self, additional_params=None, updaters=None):
self._additional_params = additional_params or []
self._updaters = updaters or {}
def GetValue(self, parameter_name, check_properties=True):
"""Returns the program state string value for parameter_name.
Args:
parameter_name: The Parameter name.
check_properties: Check the property value if True.
Returns:
The parameter value from the program state.
"""
del parameter_name, check_properties
return None
def GetAdditionalParams(self):
"""Return the list of parameter names not in the parsed resource.
These names are associated with the resource but not a specific parameter
in the resource. For example a global resource might not have a global
Boolean parameter in the parsed resource, but its command line specification
might require a --global flag to completely qualify the resource.
Returns:
The list of parameter names not in the parsed resource.
"""
return self._additional_params
def GetUpdater(self, parameter_name):
"""Returns the updater and aggregator property for parameter_name.
Args:
parameter_name: The Parameter name.
Returns:
An (updater, aggregator) tuple where updater is the Updater class and
aggregator is True if this updater must be used to aggregate all resource
values.
"""
return self._updaters.get(parameter_name, (None, None))
class Parameter(object):
"""A parsed resource tuple parameter descriptor.
A parameter tuple has one or more columns. Each has a Parameter descriptor.
Attributes:
column: The parameter tuple column index.
name: The parameter name.
"""
def __init__(self, column=0, name=None):
self.column = column
self.name = name
class _RuntimeParameter(Parameter):
"""A runtime Parameter.
Attributes:
aggregator: True if parameter is an aggregator (not aggregated by updater).
generate: True if values must be generated for this parameter.
updater_class: The updater class.
value: A default value from the program state.
"""
def __init__(self, parameter, updater_class, value, aggregator):
super(_RuntimeParameter, self).__init__(
parameter.column, name=parameter.name)
self.generate = False
self.updater_class = updater_class
self.value = value
self.aggregator = aggregator
class BaseUpdater(object):
"""A base object for thin updater wrappers."""
@six.add_metaclass(abc.ABCMeta)
class Updater(BaseUpdater):
"""A resource cache table updater.
An updater returns a list of parsed parameter tuples that replaces the rows in
one cache table. It can also adjust the table timeout.
The parameters may have their own updaters. These objects are organized as a
tree with one resource at the root.
Attributes:
cache: The persistent cache object.
collection: The resource collection name.
columns: The number of columns in the parsed resource parameter tuple.
parameters: A list of Parameter objects.
timeout: The resource table timeout in seconds, 0 for no timeout (0 is easy
to represent in a persistent cache tuple which holds strings and numbers).
"""
def __init__(self, cache=None, collection=None, columns=0, column=0,
parameters=None, timeout=DEFAULT_TIMEOUT):
"""Updater constructor.
Args:
cache: The persistent cache object.
collection: The resource collection name that (1) uniquely names the
table(s) for the parsed resource parameters (2) is the lookup name of
the resource URI parser. Resource collection names are unique by
definition. Non-resource collection names must not clash with resource
collections names. Prepending a '.' to non-resource collections names
will avoid the clash.
columns: The number of columns in the parsed resource parameter tuple.
Must be >= 1.
column: If this is an updater for an aggregate parameter then the updater
produces a table of aggregate_resource tuples. The parent collection
copies aggregate_resource[column] to a column in its own resource
parameter tuple.
parameters: A list of Parameter objects.
timeout: The resource table timeout in seconds, 0 for no timeout.
"""
super(Updater, self).__init__()
self.cache = cache
self.collection = collection
self.columns = columns if collection else 1
self.column = column
self.parameters = parameters or []
self.timeout = timeout or 0
def _GetTableName(self):
"""Returns the table name [prefix], the module path if no collection."""
if self.collection:
return self.collection
return module_util.GetModulePath(self)
def _GetRuntimeParameters(self, parameter_info):
"""Constructs and returns the _RuntimeParameter list.
This method constructs a mutable shadow of self.parameters with updater_class
and table instantiations. Each runtime parameter can be:
(1) A static value derived from parameter_info.
(2) A parameter with its own updater_class. The updater is used to list
all of the possible values for the parameter.
(3) An unknown value (None). The possible values are contained in the
resource cache for self.
The Select method combines the caller supplied row template and the runtime
parameters to filter the list of parsed resources in the resource cache.
Args:
parameter_info: A ParameterInfo object for accessing parameter values in
the program state.
Returns:
The runtime parameters shadow of the immutable self.parameters.
"""
runtime_parameters = []
for parameter in self.parameters:
updater_class, aggregator = parameter_info.GetUpdater(parameter.name)
value = parameter_info.GetValue(
parameter.name, check_properties=aggregator)
runtime_parameter = _RuntimeParameter(
parameter, updater_class, value, aggregator)
runtime_parameters.append(runtime_parameter)
return runtime_parameters
def ParameterInfo(self):
"""Returns the parameter info object."""
return ParameterInfo()
def SelectTable(self, table, row_template, parameter_info, aggregations=None):
"""Returns the list of rows matching row_template in table.
Refreshes expired tables by calling the updater.
Args:
table: The persistent table object.
row_template: A row template to match in Select().
parameter_info: A ParameterInfo object for accessing parameter values in
the program state.
aggregations: A list of aggregation Parameter objects.
Returns:
The list of rows matching row_template in table.
"""
if not aggregations:
aggregations = []
log.info('cache table=%s aggregations=[%s]',
table.name,
' '.join(['{}={}'.format(x.name, x.value) for x in aggregations]))
try:
return table.Select(row_template)
except exceptions.CacheTableExpired:
rows = self.Update(parameter_info, aggregations)
table.DeleteRows()
table.AddRows(rows)
table.Validate()
return table.Select(row_template, ignore_expiration=True)
def Select(self, row_template, parameter_info=None):
"""Returns the list of rows matching row_template in the collection.
All tables in the collection are in play. The row matching done by the
cache layer conveniently prunes the number of tables accessed.
Args:
row_template: A row template tuple. The number of columns in the template
must match the number of columns in the collection. A column with value
None means match all values for the column. Each column may contain
these wildcard characters:
* - match any string of zero or more characters
? - match any character
The matching is anchored on the left.
parameter_info: A ParameterInfo object for accessing parameter values in
the program state.
Returns:
The list of rows that match the template row.
"""
template = list(row_template)
if self.columns > len(template):
template += [None] * (self.columns - len(template))
log.info(
'cache template=[%s]', ', '.join(["'{}'".format(t) for t in template]))
# Values keeps track of all valid permutations of values to select from
# cache tables. The nth item in each permutation corresponds to the nth
# parameter for which generate is True. The list of aggregations (which is
# a list of runtime parameters that are aggregators) must also be the same
# length as these permutations.
values = [[]]
aggregations = []
parameters = self._GetRuntimeParameters(parameter_info)
for i, parameter in enumerate(parameters):
parameter.generate = False
if parameter.value and template[parameter.column] in (None, '*'):
template[parameter.column] = parameter.value
log.info('cache parameter=%s column=%s value=%s aggregate=%s',
parameter.name, parameter.column, parameter.value,
parameter.aggregator)
if parameter.aggregator:
aggregations.append(parameter)
parameter.generate = True
for v in values:
v.append(parameter.value)
elif parameter.aggregator:
aggregations.append(parameter)
parameter.generate = True
log.info('cache parameter=%s column=%s value=%s aggregate=%s',
parameter.name, parameter.column, parameter.value,
parameter.aggregator)
# Updater object instantiation is on demand so they don't have to be
# instantiated at import time in the static CLI tree. It also makes it
# easier to serialize in the static CLI tree JSON object.
updater = parameter.updater_class(cache=self.cache)
sub_template = [None] * updater.columns
sub_template[updater.column] = template[parameter.column]
log.info('cache parameter=%s column=%s aggregate=%s',
parameter.name, parameter.column, parameter.aggregator)
new_values = []
for perm, selected in updater.YieldSelectTableFromPermutations(
parameters[:i], values, sub_template, parameter_info):
updater.ExtendValues(new_values, perm, selected)
values = new_values
if not values:
aggregation_values = [x.value for x in aggregations]
# Given that values is essentially a reduced crossproduct of all results
# from the parameter updaters, it collapses to [] if any intermediate
# update finds no results. We only want to keep going here if no
# aggregators needed to be updated in the first place.
if None in aggregation_values:
return []
table_name = '.'.join([self._GetTableName()] + aggregation_values)
table = self.cache.Table(
table_name,
columns=self.columns,
keys=self.columns,
timeout=self.timeout)
return self.SelectTable(table, template, parameter_info, aggregations)
rows = []
for _, selected in self.YieldSelectTableFromPermutations(
parameters, values, template, parameter_info):
rows.extend(selected)
log.info('cache rows=%s' % rows)
return rows
def _GetParameterColumn(self, parameter_info, parameter_name):
"""Get this updater's column number for a certain parameter."""
updater_parameters = self._GetRuntimeParameters(parameter_info)
for parameter in updater_parameters:
if parameter.name == parameter_name:
return parameter.column
return None
def ExtendValues(self, values, perm, selected):
"""Add selected values to a template and extend the selected rows."""
vals = [row[self.column] for row in selected]
log.info('cache collection={} adding values={}'.format(
self.collection, vals))
v = [perm + [val] for val in vals]
values.extend(v)
def YieldSelectTableFromPermutations(self, parameters, values, template,
parameter_info):
"""Selects completions from tables using multiple permutations of values.
For each vector in values, e.g. ['my-project', 'my-zone'], this method
selects rows matching the template from a leaf table corresponding to the
vector (e.g. 'my.collection.my-project.my-zone') and yields a 2-tuple
containing that vector and the selected rows.
Args:
parameters: [Parameter], the list of parameters up through the
current updater belonging to the parent. These will be used to iterate
through each permutation contained in values.
values: list(list()), a list of lists of valid values. Each item in values
corresponds to a single permutation of values for which item[n] is a
possible value for the nth generator in parent_parameters.
template: list(str), the template to use to select new values.
parameter_info: ParameterInfo, the object that is used to get runtime
values.
Yields:
(perm, list(list)): a 2-tuple where the first value is the permutation
currently being used to select values and the second value is the result
of selecting to match the permutation.
"""
for perm in values:
temp_perm = [val for val in perm]
table = self.cache.Table(
'.'.join([self._GetTableName()] + perm),
columns=self.columns,
keys=self.columns,
timeout=self.timeout)
aggregations = []
for parameter in parameters:
if parameter.generate:
# Find the matching parameter from current updater. If the parameter
# isn't found, the value is discarded.
column = self._GetParameterColumn(parameter_info, parameter.name)
if column is None:
continue
template[column] = temp_perm.pop(0)
parameter.value = template[column]
if parameter.value:
aggregations.append(parameter)
selected = self.SelectTable(table, template, parameter_info, aggregations)
yield perm, selected
def GetTableForRow(self, row, parameter_info=None, create=True):
"""Returns the table for row.
Args:
row: The fully populated resource row.
parameter_info: A ParameterInfo object for accessing parameter values in
the program state.
create: Create the table if it doesn't exist if True.
Returns:
The table for row.
"""
parameters = self._GetRuntimeParameters(parameter_info)
values = [row[p.column] for p in parameters if p.aggregator]
return self.cache.Table(
'.'.join([self._GetTableName()] + values),
columns=self.columns,
keys=self.columns,
timeout=self.timeout,
create=create)
@abc.abstractmethod
def Update(self, parameter_info=None, aggregations=None):
"""Returns the list of all current parsed resource parameters."""
del parameter_info, aggregations
class ResourceCache(PERSISTENT_CACHE_IMPLEMENTATION.Cache):
"""A resource cache object."""
def __init__(self, name=None, create=True):
"""ResourceCache constructor.
Args:
name: The persistent cache object name. If None then a default name
conditioned on the account name is used.
<GLOBAL_CONFIG_DIR>/cache/<ACCOUNT>/resource.cache
create: Create the cache if it doesn't exist if True.
"""
if not name:
name = self.GetDefaultName()
super(ResourceCache, self).__init__(
name=name, create=create, version=VERSION)
@staticmethod
def GetDefaultName():
"""Returns the default resource cache name."""
path = [config.Paths().cache_dir]
account = properties.VALUES.core.account.Get(required=False)
if account:
path.append(account)
files.MakeDir(os.path.join(*path))
path.append('resource.cache')
return os.path.join(*path)
def DeleteDeprecatedCache():
"""Silently deletes the deprecated resource completion cache if it exists."""
cache_dir = config.Paths().completion_cache_dir
if os.path.isdir(cache_dir):
files.RmTree(cache_dir)
def Delete(name=None):
"""Deletes the current persistent resource cache however it's implemented."""
if not name:
name = ResourceCache.GetDefaultName()
# Keep trying implementations until the cache is not found or a matching cache is found.
for implementation in (sqlite_cache, file_cache):
if not implementation:
continue
try:
implementation.Cache(name=name, create=False, version=VERSION).Delete()
return
except exceptions.CacheInvalid:
continue
| [] | [] | [] | [] | [] | python | 0 | 0 | |
py/desisurvey/config.py
|
"""Manage survey planning and schedule configuration data.
The normal usage is::
>>> config = Configuration()
>>> config.programs.BRIGHT.max_sun_altitude()
<Quantity -13.0 deg>
Use dot notation to specify nodes in the configuration hierarchy and
function call notation to access terminal node values.
Terminal node values are first converted according to YAML rules. Strings
containing a number followed by valid astropy units are subsequently converted
to astropy quantities. Strings of the form YYYY-MM-DD are converted to
datetime.date objects.
To change a value after the configuration has been loaded into memory use,
for example::
>>> config.full_moon_nights.set_value(5)
Assigned values must have the appropriate converted types, for example::
>>> import datetime
>>> config.last_day.set_value(datetime.date(2024, 1, 1))
>>> import astropy.units as u
>>> config.location.temperature.set_value(-5 * u.deg_C)
The configuration is implemented as a singleton so the YAML file is only
loaded and parsed the first time a Configuration() is built. Subsequent
calls to Configuration() always return the same object.
"""
from __future__ import print_function, division
import os
import re
import yaml
import astropy.units
import astropy.utils.data
# Extract a number from a string with optional leading and
# trailing whitespace.
_float_pattern = re.compile(
r'\s*([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*')
class Node(object):
"""A single node of a configuration data structure.
The purpose of this class is to allow nested dictionaries to be
accessed using attribute dot notation, and to implement automatic
conversion of strings of the form "<value> <units>" into corresponding
astropy quantities.
"""
def __init__(self, value, path=[]):
self._path = path
if isinstance(value, dict):
# Remember our keys.
self._keys = value.keys()
# Recursively add sub-dictionaries as new child attributes.
for name in self._keys:
child_path = path + [name]
self.__dict__[name] = Node(value[name], child_path)
else:
# Define the value of a leaf node.
try:
# Try to interpret as an astropy quantity with units.
found_number = _float_pattern.match(value)
if found_number:
number = float(found_number.group(1))
unit = value[found_number.end():]
try:
self._value = astropy.units.Quantity(number, unit)
except ValueError:
raise ValueError(
'Invalid unit for {0}: {1}'
.format('.'.join(self._path), unit))
else:
self._value = value
except TypeError:
self._value = value
@property
def path(self):
"""Return the full path to this node using dot notation.
"""
return '.'.join(self._path)
@property
def keys(self):
"""Return the list of keys for a non-leaf node or raise a RuntimeError
for a terminal node.
"""
try:
return self._keys
except AttributeError:
raise RuntimeError(
'{0} is a terminal config node.'.format(self.path))
def __call__(self):
"""Return a terminal node's value or raise a RuntimeError for
a non-terminal node.
"""
try:
return self._value
except AttributeError:
raise RuntimeError(
'{0} is a non-terminal config node.'.format(self.path))
def set_value(self, new_value):
"""Set a terminal node's value or raise a RuntimeError for
a non-terminal node.
"""
try:
old_value = self._value
if not isinstance(new_value, type(old_value)):
raise RuntimeError(
'new type ({}) does not match old type ({}).'
.format(type(new_value), type(old_value)))
self._value = new_value
except AttributeError:
raise RuntimeError(
'{0} is a non-terminal config node.'.format(self.path))
class Configuration(Node):
"""Top-level configuration data node.
"""
__instance = None
@staticmethod
def reset():
"""Forget our singleton instance. Mainly intended for unit tests."""
Configuration.__instance = None
def __new__(cls, file_name=None):
"""Implement a singleton access pattern.
"""
if Configuration.__instance is None:
Configuration.__instance = object.__new__(cls)
Configuration.__instance._initialize(file_name)
elif file_name is not None and file_name != Configuration.__instance.file_name:
raise RuntimeError('Configuration already loaded from {0}'
.format(Configuration.__instance.file_name))
return Configuration.__instance
def __init__(self, file_name='config.yaml'):
"""Return the unique configuration object for this session.
The configuration will be loaded from the specified file when this
constructor is called for the first time. Subsequent calls with
a different file name will result in a RuntimeError.
Parameters
----------
file_name : string
Name of a YAML file including a valid YAML extension. The file
is assumed to be under this package's data/ directory unless
a path is included (either relative or absolute).
"""
pass
def _initialize(self, file_name=None):
"""Initialize a configuration data structure from a YAML file.
"""
if file_name is None:
file_name = 'config.yaml'
# Remember the file name since it is not allowed to change.
self.file_name = file_name
# Locate the config file in our pkg data/ directory if no path is given.
if os.path.split(file_name)[0] == '':
full_path = astropy.utils.data._find_pkg_data_path(
os.path.join('data', file_name))
else:
full_path = file_name
# Validate that all mapping keys are valid python identifiers
# and that there are no embedded sequences.
valid_key = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z')
with open(full_path) as f:
next_value_is_key = False
for token in yaml.scan(f):
if isinstance(
token,
(yaml.BlockSequenceStartToken,
yaml.FlowSequenceStartToken)):
raise RuntimeError('Config sequences not supported.')
if next_value_is_key:
if not isinstance(token, yaml.ScalarToken):
raise RuntimeError(
'Invalid config key type: {0}'.format(token))
if not valid_key.match(token.value):
raise RuntimeError(
'Invalid config key name: {0}'.format(token.value))
next_value_is_key = isinstance(token, yaml.KeyToken)
# Load the config data into memory.
with open(full_path) as f:
Node.__init__(self, yaml.safe_load(f))
# Output path is not set until it is first used.
self._output_path = None
def set_output_path(self, output_path):
"""Set the output directory for relative paths.
The path must exist when this method is called. Called by
:meth:`get_path` for a non-absolute path. This method updates the
configuration output_path value.
Parameters
----------
output_path : str
A path possibly including environment variables enclosed in {...}
that will be substituted from the current environment.
Raises
------
ValueError
Path uses undefined environment variable or does not exist.
"""
try:
self._output_path = output_path.format(**os.environ)
except KeyError as e:
raise ValueError(
'Environment variable not set for output_path: {0}'.format(e))
if not os.path.isdir(self._output_path):
raise ValueError(
'Non-existent output_path: {0}'.format(self._output_path))
# Update our config node.
self.output_path._value = output_path
def get_path(self, name):
"""Prepend this configuration's output_path to non-absolute paths.
Configured by the ``output_path`` node and :meth:`set_output_path`.
An absolute path is returned immediately so an environment variable
used in output_path only needs to be defined if relative paths
are used.
Parameters
----------
name : str
Absolute or relative path name, which does not need to exist yet.
Returns
-------
str
Path name to use. Relative path names will have our output_path
prepended. Absolute path names will be unchanged.
"""
if os.path.isabs(name):
return name
if self._output_path is None:
self.set_output_path(self.output_path())
return os.path.join(self._output_path, name)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
covid19analysis/asgi.py
|
"""
ASGI config for covid19analysis project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'covid19analysis.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
src/cmd/compile/internal/ssa/config.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
import (
"cmd/internal/obj"
"cmd/internal/src"
"os"
"strconv"
)
// A Config holds readonly compilation information.
// It is created once, early during compilation,
// and shared across all compilations.
type Config struct {
arch string // "amd64", etc.
IntSize int64 // 4 or 8
PtrSize int64 // 4 or 8
RegSize int64 // 4 or 8
Types Types
lowerBlock blockRewriter // lowering function
lowerValue valueRewriter // lowering function
registers []Register // machine registers
gpRegMask regMask // general purpose integer register mask
fpRegMask regMask // floating point register mask
specialRegMask regMask // special register mask
FPReg int8 // register number of frame pointer, -1 if not used
LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
hasGReg bool // has hardware g register
ctxt *obj.Link // Generic arch information
optimize bool // Do optimization
noDuffDevice bool // Don't use Duff's device
nacl bool // GOOS=nacl
use387 bool // GO386=387
NeedsFpScratch bool // No direct move between GP and FP register sets
BigEndian bool //
sparsePhiCutoff uint64 // Sparse phi location algorithm used above this #blocks*#variables score
}
type (
blockRewriter func(*Block) bool
valueRewriter func(*Value) bool
)
type Types struct {
Bool Type
Int8 Type
Int16 Type
Int32 Type
Int64 Type
UInt8 Type
UInt16 Type
UInt32 Type
UInt64 Type
Int Type
Float32 Type
Float64 Type
Uintptr Type
String Type
BytePtr Type // TODO: use unsafe.Pointer instead?
Int32Ptr Type
UInt32Ptr Type
IntPtr Type
UintptrPtr Type
Float32Ptr Type
Float64Ptr Type
BytePtrPtr Type
}
type Logger interface {
// Logf logs a message from the compiler.
Logf(string, ...interface{})
// Log returns true if logging is not a no-op
// some logging calls account for more than a few heap allocations.
Log() bool
// Fatal reports a compiler error and exits.
Fatalf(pos src.XPos, msg string, args ...interface{})
// Error reports a compiler error but keeps going.
Error(pos src.XPos, msg string, args ...interface{})
// Warnl writes compiler messages in the form expected by "errorcheck" tests
Warnl(pos src.XPos, fmt_ string, args ...interface{})
// Forwards the Debug flags from gc
Debug_checknil() bool
Debug_wb() bool
}
type Frontend interface {
CanSSA(t Type) bool
Logger
// StringData returns a symbol pointing to the given string's contents.
StringData(string) interface{} // returns *gc.Sym
// Auto returns a Node for an auto variable of the given type.
// The SSA compiler uses this function to allocate space for spills.
Auto(src.XPos, Type) GCNode
// Given the name for a compound type, returns the name we should use
// for the parts of that compound type.
SplitString(LocalSlot) (LocalSlot, LocalSlot)
SplitInterface(LocalSlot) (LocalSlot, LocalSlot)
SplitSlice(LocalSlot) (LocalSlot, LocalSlot, LocalSlot)
SplitComplex(LocalSlot) (LocalSlot, LocalSlot)
SplitStruct(LocalSlot, int) LocalSlot
SplitArray(LocalSlot) LocalSlot // array must be length 1
SplitInt64(LocalSlot) (LocalSlot, LocalSlot) // returns (hi, lo)
// DerefItab dereferences an itab function
// entry, given the symbol of the itab and
// the byte offset of the function pointer.
// It may return nil.
DerefItab(sym *obj.LSym, offset int64) *obj.LSym
// Line returns a string describing the given position.
Line(src.XPos) string
// AllocFrame assigns frame offsets to all live auto variables.
AllocFrame(f *Func)
// Syslook returns a symbol of the runtime function/variable with the
// given name.
Syslook(string) *obj.LSym
// UseWriteBarrier returns whether write barrier is enabled
UseWriteBarrier() bool
}
// interface used to hold *gc.Node. We'd use *gc.Node directly but
// that would lead to an import cycle.
type GCNode interface {
Typ() Type
String() string
}
// NewConfig returns a new configuration object for the given architecture.
func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config {
c := &Config{arch: arch, Types: types}
switch arch {
case "amd64":
c.IntSize = 8
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64
c.registers = registersAMD64[:]
c.gpRegMask = gpRegMaskAMD64
c.fpRegMask = fpRegMaskAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = false
case "amd64p32":
c.IntSize = 4
c.PtrSize = 4
c.RegSize = 8
c.lowerBlock = rewriteBlockAMD64
c.lowerValue = rewriteValueAMD64
c.registers = registersAMD64[:]
c.gpRegMask = gpRegMaskAMD64
c.fpRegMask = fpRegMaskAMD64
c.FPReg = framepointerRegAMD64
c.LinkReg = linkRegAMD64
c.hasGReg = false
c.noDuffDevice = true
case "386":
c.IntSize = 4
c.PtrSize = 4
c.RegSize = 4
c.lowerBlock = rewriteBlock386
c.lowerValue = rewriteValue386
c.registers = registers386[:]
c.gpRegMask = gpRegMask386
c.fpRegMask = fpRegMask386
c.FPReg = framepointerReg386
c.LinkReg = linkReg386
c.hasGReg = false
case "arm":
c.IntSize = 4
c.PtrSize = 4
c.RegSize = 4
c.lowerBlock = rewriteBlockARM
c.lowerValue = rewriteValueARM
c.registers = registersARM[:]
c.gpRegMask = gpRegMaskARM
c.fpRegMask = fpRegMaskARM
c.FPReg = framepointerRegARM
c.LinkReg = linkRegARM
c.hasGReg = true
case "arm64":
c.IntSize = 8
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockARM64
c.lowerValue = rewriteValueARM64
c.registers = registersARM64[:]
c.gpRegMask = gpRegMaskARM64
c.fpRegMask = fpRegMaskARM64
c.FPReg = framepointerRegARM64
c.LinkReg = linkRegARM64
c.hasGReg = true
c.noDuffDevice = obj.GOOS == "darwin" // darwin linker cannot handle BR26 reloc with non-zero addend
case "ppc64":
c.BigEndian = true
fallthrough
case "ppc64le":
c.IntSize = 8
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockPPC64
c.lowerValue = rewriteValuePPC64
c.registers = registersPPC64[:]
c.gpRegMask = gpRegMaskPPC64
c.fpRegMask = fpRegMaskPPC64
c.FPReg = framepointerRegPPC64
c.LinkReg = linkRegPPC64
c.noDuffDevice = true // TODO: Resolve PPC64 DuffDevice (has zero, but not copy)
c.hasGReg = true
case "mips64":
c.BigEndian = true
fallthrough
case "mips64le":
c.IntSize = 8
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockMIPS64
c.lowerValue = rewriteValueMIPS64
c.registers = registersMIPS64[:]
c.gpRegMask = gpRegMaskMIPS64
c.fpRegMask = fpRegMaskMIPS64
c.specialRegMask = specialRegMaskMIPS64
c.FPReg = framepointerRegMIPS64
c.LinkReg = linkRegMIPS64
c.hasGReg = true
case "s390x":
c.IntSize = 8
c.PtrSize = 8
c.RegSize = 8
c.lowerBlock = rewriteBlockS390X
c.lowerValue = rewriteValueS390X
c.registers = registersS390X[:]
c.gpRegMask = gpRegMaskS390X
c.fpRegMask = fpRegMaskS390X
c.FPReg = framepointerRegS390X
c.LinkReg = linkRegS390X
c.hasGReg = true
c.noDuffDevice = true
c.BigEndian = true
case "mips":
c.BigEndian = true
fallthrough
case "mipsle":
c.IntSize = 4
c.PtrSize = 4
c.RegSize = 4
c.lowerBlock = rewriteBlockMIPS
c.lowerValue = rewriteValueMIPS
c.registers = registersMIPS[:]
c.gpRegMask = gpRegMaskMIPS
c.fpRegMask = fpRegMaskMIPS
c.specialRegMask = specialRegMaskMIPS
c.FPReg = framepointerRegMIPS
c.LinkReg = linkRegMIPS
c.hasGReg = true
c.noDuffDevice = true
default:
ctxt.Diag("arch %s not implemented", arch)
}
c.ctxt = ctxt
c.optimize = optimize
c.nacl = obj.GOOS == "nacl"
// Don't use Duff's device on Plan 9 AMD64, because floating
// point operations are not allowed in note handler.
if obj.GOOS == "plan9" && arch == "amd64" {
c.noDuffDevice = true
}
if c.nacl {
c.noDuffDevice = true // Don't use Duff's device on NaCl
// runtime call clobber R12 on nacl
opcodeTable[OpARMCALLudiv].reg.clobbers |= 1 << 12 // R12
}
// cutoff is compared with product of numblocks and numvalues,
// if product is smaller than cutoff, use old non-sparse method.
// cutoff == 0 implies all sparse.
// cutoff == -1 implies none sparse.
// Good cutoff values seem to be O(million) depending on constant factor cost of sparse.
// TODO: get this from a flag, not an environment variable
c.sparsePhiCutoff = 2500000 // 0 for testing. // 2500000 determined with crude experiments w/ make.bash
ev := os.Getenv("GO_SSA_PHI_LOC_CUTOFF")
if ev != "" {
v, err := strconv.ParseInt(ev, 10, 64)
if err != nil {
ctxt.Diag("Environment variable GO_SSA_PHI_LOC_CUTOFF (value '%s') did not parse as a number", ev)
}
c.sparsePhiCutoff = uint64(v) // convert -1 to maxint, for never use sparse
}
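// Illustrative (derived from the comments above): GO_SSA_PHI_LOC_CUTOFF=0
// makes every function use the sparse algorithm, while -1 wraps to the
// maximum uint64 and effectively disables it.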
return c
}
func (c *Config) Set387(b bool) {
c.NeedsFpScratch = b
c.use387 = b
}
func (c *Config) SparsePhiCutoff() uint64 { return c.sparsePhiCutoff }
func (c *Config) Ctxt() *obj.Link { return c.ctxt }
| ["\"GO_SSA_PHI_LOC_CUTOFF\""] | [] | ["GO_SSA_PHI_LOC_CUTOFF"] | [] | ["GO_SSA_PHI_LOC_CUTOFF"] | go | 1 | 0 | |
derp/derphttp/derphttp_client.go
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package derphttp implements DERP-over-HTTP.
//
// This makes DERP look exactly like WebSockets.
// A server can implement DERP over HTTPS and even if the TLS connection
// is intercepted using a fake root CA, unless the interceptor knows how to
// detect DERP packets, it will look like a web socket.
package derphttp
import (
"bufio"
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"strings"
"sync"
"time"
"go4.org/mem"
"inet.af/netaddr"
"tailscale.com/derp"
"tailscale.com/net/dnscache"
"tailscale.com/net/netns"
"tailscale.com/net/tlsdial"
"tailscale.com/net/tshttpproxy"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
"tailscale.com/types/logger"
)
// Client is a DERP-over-HTTP client.
//
// It automatically reconnects on error retry. That is, a failed Send or
// Recv will report the error and not retry, but subsequent calls to
// Send/Recv will completely re-establish the connection (unless Close
// has been called).
type Client struct {
TLSConfig *tls.Config // optional; nil means default
DNSCache *dnscache.Resolver // optional; nil means no caching
MeshKey string // optional; for trusted clients
IsProber bool // optional; for probers to optionally declare themselves as such
privateKey key.Private
logf logger.Logf
dialer func(ctx context.Context, network, addr string) (net.Conn, error)
// Either url or getRegion is non-nil:
url *url.URL
getRegion func() *tailcfg.DERPRegion
ctx context.Context // closed via cancelCtx in Client.Close
cancelCtx context.CancelFunc
mu sync.Mutex
preferred bool
canAckPings bool
closed bool
netConn io.Closer
client *derp.Client
connGen int // incremented once per new connection; valid values are >0
serverPubKey key.Public
}
// NewRegionClient returns a new DERP-over-HTTP client. It connects lazily.
// To trigger a connection, use Connect.
func NewRegionClient(privateKey key.Private, logf logger.Logf, getRegion func() *tailcfg.DERPRegion) *Client {
ctx, cancel := context.WithCancel(context.Background())
c := &Client{
privateKey: privateKey,
logf: logf,
getRegion: getRegion,
ctx: ctx,
cancelCtx: cancel,
}
return c
}
// NewNetcheckClient returns a Client that's only able to have its DialRegion method called.
// It's used by the netcheck package.
func NewNetcheckClient(logf logger.Logf) *Client {
return &Client{logf: logf}
}
// NewClient returns a new DERP-over-HTTP client. It connects lazily.
// To trigger a connection, use Connect.
func NewClient(privateKey key.Private, serverURL string, logf logger.Logf) (*Client, error) {
u, err := url.Parse(serverURL)
if err != nil {
return nil, fmt.Errorf("derphttp.NewClient: %v", err)
}
if urlPort(u) == "" {
return nil, fmt.Errorf("derphttp.NewClient: invalid URL scheme %q", u.Scheme)
}
ctx, cancel := context.WithCancel(context.Background())
c := &Client{
privateKey: privateKey,
logf: logf,
url: u,
ctx: ctx,
cancelCtx: cancel,
}
return c, nil
}
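// Illustrative usage (hypothetical values, not from the original source):
//
//	c, err := NewClient(privateKey, "https://derp.example.com/derp", log.Printf)
//	if err != nil {
//		// handle error
//	}
//	if err := c.Connect(ctx); err != nil {
//		// handle error
//	}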
// Connect connects or reconnects to the server, unless already connected.
// It returns nil if there was already a good connection, or if one was made.
func (c *Client) Connect(ctx context.Context) error {
_, _, err := c.connect(ctx, "derphttp.Client.Connect")
return err
}
// ServerPublicKey returns the server's public key.
//
// It only returns a non-zero value once a connection has succeeded
// from an earlier call.
func (c *Client) ServerPublicKey() key.Public {
c.mu.Lock()
defer c.mu.Unlock()
return c.serverPubKey
}
// SelfPublicKey returns our own public key.
func (c *Client) SelfPublicKey() key.Public {
return c.privateKey.Public()
}
func urlPort(u *url.URL) string {
if p := u.Port(); p != "" {
return p
}
switch u.Scheme {
case "https":
return "443"
case "http":
return "80"
}
return ""
}
func (c *Client) targetString(reg *tailcfg.DERPRegion) string {
if c.url != nil {
return c.url.String()
}
return fmt.Sprintf("region %d (%v)", reg.RegionID, reg.RegionCode)
}
func (c *Client) useHTTPS() bool {
if c.url != nil && c.url.Scheme == "http" {
return false
}
return true
}
// tlsServerName returns the tls.Config.ServerName value (for the TLS ClientHello).
func (c *Client) tlsServerName(node *tailcfg.DERPNode) string {
if c.url != nil {
return c.url.Host
}
return node.HostName
}
func (c *Client) urlString(node *tailcfg.DERPNode) string {
if c.url != nil {
return c.url.String()
}
return fmt.Sprintf("https://%s/derp", node.HostName)
}
func (c *Client) connect(ctx context.Context, caller string) (client *derp.Client, connGen int, err error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return nil, 0, ErrClientClosed
}
if c.client != nil {
return c.client, c.connGen, nil
}
// timeout is the fallback maximum time (if ctx doesn't limit
// it further) to do all of: DNS + TCP + TLS + HTTP Upgrade +
// DERP upgrade.
const timeout = 10 * time.Second
ctx, cancel := context.WithTimeout(ctx, timeout)
go func() {
select {
case <-ctx.Done():
// Either timeout fired (handled below), or
// we're returning via the defer cancel()
// below.
case <-c.ctx.Done():
// Propagate a Client.Close call into
// cancelling this context.
cancel()
}
}()
defer cancel()
var reg *tailcfg.DERPRegion // nil when using c.url to dial
if c.getRegion != nil {
reg = c.getRegion()
if reg == nil {
return nil, 0, errors.New("DERP region not available")
}
}
var tcpConn net.Conn
defer func() {
if err != nil {
if ctx.Err() != nil {
err = fmt.Errorf("%v: %v", ctx.Err(), err)
}
err = fmt.Errorf("%s connect to %v: %v", caller, c.targetString(reg), err)
if tcpConn != nil {
go tcpConn.Close()
}
}
}()
var node *tailcfg.DERPNode // nil when using c.url to dial
if c.url != nil {
c.logf("%s: connecting to %v", caller, c.url)
tcpConn, err = c.dialURL(ctx)
} else {
c.logf("%s: connecting to derp-%d (%v)", caller, reg.RegionID, reg.RegionCode)
tcpConn, node, err = c.dialRegion(ctx, reg)
}
if err != nil {
return nil, 0, err
}
// Now that we have a TCP connection, force close it if the
// TLS handshake + DERP setup takes too long.
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-done:
// Normal path. Upgrade occurred in time.
case <-ctx.Done():
select {
case <-done:
// Normal path. Upgrade occurred in time.
// But the ctx.Done() is also done because
// the "defer cancel()" above scheduled
// before this goroutine.
default:
// The TLS or HTTP or DERP exchanges didn't complete
// in time. Force close the TCP connection to force
// them to fail quickly.
tcpConn.Close()
}
}
}()
var httpConn net.Conn // a TCP conn or a TLS conn; what we speak HTTP to
var serverPub key.Public // or zero if unknown (if not using TLS, or a TLS middlebox eats it)
var serverProtoVersion int
if c.useHTTPS() {
tlsConn := c.tlsClient(tcpConn, node)
httpConn = tlsConn
// Force a handshake now (instead of waiting for it to
// be done implicitly on read/write) so we can check
// the ConnectionState.
if err := tlsConn.Handshake(); err != nil {
return nil, 0, err
}
// We expect to be using TLS 1.3 to our own servers, and only
// starting at TLS 1.3 are the server's returned certificates
// encrypted, so only look for and use our "meta cert" if we're
// using TLS 1.3. If we're not using TLS 1.3, it might be a user
// running cmd/derper themselves with a different configuration,
// in which case we can avoid this fast-start optimization.
// (If a corporate proxy is MITM'ing TLS 1.3 connections with
// corp-mandated TLS root certs, then all bets are off anyway.)
// Note that we're not specifically concerned about TLS downgrade
// attacks. TLS handles that fine:
// https://blog.gypsyengineer.com/en/security/how-does-tls-1-3-protect-against-downgrade-attacks.html
connState := tlsConn.ConnectionState()
if connState.Version >= tls.VersionTLS13 {
serverPub, serverProtoVersion = parseMetaCert(connState.PeerCertificates)
}
} else {
httpConn = tcpConn
}
brw := bufio.NewReadWriter(bufio.NewReader(httpConn), bufio.NewWriter(httpConn))
var derpClient *derp.Client
req, err := http.NewRequest("GET", c.urlString(node), nil)
if err != nil {
return nil, 0, err
}
req.Header.Set("Upgrade", "DERP")
req.Header.Set("Connection", "Upgrade")
if !serverPub.IsZero() && serverProtoVersion != 0 {
// parseMetaCert found the server's public key (no TLS
// middlebox was in the way), so skip the HTTP upgrade
// exchange. See https://github.com/tailscale/tailscale/issues/693
// for an overview. We still send the HTTP request
// just to get routed into the server's HTTP Handler so it
// can Hijack the request, but we signal with a special header
// that we don't want to deal with its HTTP response.
req.Header.Set(fastStartHeader, "1") // suppresses the server's HTTP response
if err := req.Write(brw); err != nil {
return nil, 0, err
}
// No need to flush the HTTP request; the derp.Client's initial
// client auth frame will flush it.
} else {
if err := req.Write(brw); err != nil {
return nil, 0, err
}
if err := brw.Flush(); err != nil {
return nil, 0, err
}
resp, err := http.ReadResponse(brw.Reader, req)
if err != nil {
return nil, 0, err
}
if resp.StatusCode != http.StatusSwitchingProtocols {
b, _ := ioutil.ReadAll(resp.Body)
resp.Body.Close()
return nil, 0, fmt.Errorf("GET failed: %v: %s", err, b)
}
}
derpClient, err = derp.NewClient(c.privateKey, httpConn, brw, c.logf,
derp.MeshKey(c.MeshKey),
derp.ServerPublicKey(serverPub),
derp.CanAckPings(c.canAckPings),
derp.IsProber(c.IsProber),
)
if err != nil {
return nil, 0, err
}
if c.preferred {
if err := derpClient.NotePreferred(true); err != nil {
go httpConn.Close()
return nil, 0, err
}
}
c.serverPubKey = derpClient.ServerPublicKey()
c.client = derpClient
c.netConn = tcpConn
c.connGen++
return c.client, c.connGen, nil
}
// SetURLDialer sets the dialer to use for dialing URLs.
// This dialer is only used for clients created with NewClient, not NewRegionClient.
// If unset or nil, the default dialer is used.
//
// The primary use for this is the derper mesh mode to connect to each
// other over a VPC network.
func (c *Client) SetURLDialer(dialer func(ctx context.Context, network, addr string) (net.Conn, error)) {
c.dialer = dialer
}
func (c *Client) dialURL(ctx context.Context) (net.Conn, error) {
host := c.url.Hostname()
if c.dialer != nil {
return c.dialer(ctx, "tcp", net.JoinHostPort(host, urlPort(c.url)))
}
hostOrIP := host
dialer := netns.NewDialer()
if c.DNSCache != nil {
ip, _, _, err := c.DNSCache.LookupIP(ctx, host)
if err == nil {
hostOrIP = ip.String()
}
if err != nil && netns.IsSOCKSDialer(dialer) {
// Return an error if we're not using a dial
// proxy that can do DNS lookups for us.
return nil, err
}
}
tcpConn, err := dialer.DialContext(ctx, "tcp", net.JoinHostPort(hostOrIP, urlPort(c.url)))
if err != nil {
return nil, fmt.Errorf("dial of %v: %v", host, err)
}
return tcpConn, nil
}
// dialRegion returns a TCP connection to the provided region, trying
// each node in order (with dialNode) until one connects or ctx is
// done.
func (c *Client) dialRegion(ctx context.Context, reg *tailcfg.DERPRegion) (net.Conn, *tailcfg.DERPNode, error) {
if len(reg.Nodes) == 0 {
return nil, nil, fmt.Errorf("no nodes for %s", c.targetString(reg))
}
var firstErr error
for _, n := range reg.Nodes {
if n.STUNOnly {
if firstErr == nil {
firstErr = fmt.Errorf("no non-STUNOnly nodes for %s", c.targetString(reg))
}
continue
}
c, err := c.dialNode(ctx, n)
if err == nil {
return c, n, nil
}
if firstErr == nil {
firstErr = err
}
}
return nil, nil, firstErr
}
func (c *Client) tlsClient(nc net.Conn, node *tailcfg.DERPNode) *tls.Conn {
tlsConf := tlsdial.Config(c.tlsServerName(node), c.TLSConfig)
if node != nil {
tlsConf.InsecureSkipVerify = node.InsecureForTests
if node.CertName != "" {
tlsdial.SetConfigExpectedCert(tlsConf, node.CertName)
}
}
if n := os.Getenv("SSLKEYLOGFILE"); n != "" {
f, err := os.OpenFile(n, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
if err != nil {
log.Fatal(err)
}
log.Printf("WARNING: writing to SSLKEYLOGFILE %v", n)
tlsConf.KeyLogWriter = f
}
return tls.Client(nc, tlsConf)
}
func (c *Client) DialRegionTLS(ctx context.Context, reg *tailcfg.DERPRegion) (tlsConn *tls.Conn, connClose io.Closer, err error) {
tcpConn, node, err := c.dialRegion(ctx, reg)
if err != nil {
return nil, nil, err
}
done := make(chan bool) // unbuffered
defer close(done)
tlsConn = c.tlsClient(tcpConn, node)
go func() {
select {
case <-done:
case <-ctx.Done():
tcpConn.Close()
}
}()
err = tlsConn.Handshake()
if err != nil {
return nil, nil, err
}
select {
case done <- true:
return tlsConn, tcpConn, nil
case <-ctx.Done():
return nil, nil, ctx.Err()
}
}
func (c *Client) dialContext(ctx context.Context, proto, addr string) (net.Conn, error) {
return netns.NewDialer().DialContext(ctx, proto, addr)
}
// shouldDialProto reports whether an explicitly provided IPv4 or IPv6
// address (given in s) is valid. An empty value means to dial, but to
// use DNS. The predicate function reports whether the non-empty
// string s contained a valid IP address of the right family.
func shouldDialProto(s string, pred func(netaddr.IP) bool) bool {
if s == "" {
return true
}
ip, _ := netaddr.ParseIP(s)
return pred(ip)
}
const dialNodeTimeout = 1500 * time.Millisecond
// dialNode returns a TCP connection to node n, racing IPv4 and IPv6
// (both as applicable) against each other.
// A node is only given dialNodeTimeout to connect.
//
// TODO(bradfitz): longer if no options remain perhaps? ... Or longer
// overall but have dialRegion start overlapping races?
func (c *Client) dialNode(ctx context.Context, n *tailcfg.DERPNode) (net.Conn, error) {
// First see if we need to use an HTTP proxy.
proxyReq := &http.Request{
Method: "GET", // doesn't really matter
URL: &url.URL{
Scheme: "https",
Host: c.tlsServerName(n),
Path: "/", // unused
},
}
if proxyURL, err := tshttpproxy.ProxyFromEnvironment(proxyReq); err == nil && proxyURL != nil {
return c.dialNodeUsingProxy(ctx, n, proxyURL)
}
type res struct {
c net.Conn
err error
}
resc := make(chan res) // must be unbuffered
ctx, cancel := context.WithTimeout(ctx, dialNodeTimeout)
defer cancel()
nwait := 0
startDial := func(dstPrimary, proto string) {
nwait++
go func() {
dst := dstPrimary
if dst == "" {
dst = n.HostName
}
port := "443"
if n.DERPPort != 0 {
port = fmt.Sprint(n.DERPPort)
}
c, err := c.dialContext(ctx, proto, net.JoinHostPort(dst, port))
select {
case resc <- res{c, err}:
case <-ctx.Done():
if c != nil {
c.Close()
}
}
}()
}
if shouldDialProto(n.IPv4, netaddr.IP.Is4) {
startDial(n.IPv4, "tcp4")
}
if shouldDialProto(n.IPv6, netaddr.IP.Is6) {
startDial(n.IPv6, "tcp6")
}
if nwait == 0 {
return nil, errors.New("both IPv4 and IPv6 are explicitly disabled for node")
}
var firstErr error
for {
select {
case res := <-resc:
nwait--
if res.err == nil {
return res.c, nil
}
if firstErr == nil {
firstErr = res.err
}
if nwait == 0 {
return nil, firstErr
}
case <-ctx.Done():
return nil, ctx.Err()
}
}
}
func firstStr(a, b string) string {
if a != "" {
return a
}
return b
}
// dialNodeUsingProxy connects to n using a CONNECT to the HTTP(s) proxy in proxyURL.
func (c *Client) dialNodeUsingProxy(ctx context.Context, n *tailcfg.DERPNode, proxyURL *url.URL) (proxyConn net.Conn, err error) {
pu := proxyURL
if pu.Scheme == "https" {
var d tls.Dialer
proxyConn, err = d.DialContext(ctx, "tcp", net.JoinHostPort(pu.Hostname(), firstStr(pu.Port(), "443")))
} else {
var d net.Dialer
proxyConn, err = d.DialContext(ctx, "tcp", net.JoinHostPort(pu.Hostname(), firstStr(pu.Port(), "80")))
}
defer func() {
if err != nil && proxyConn != nil {
// In a goroutine in case it's a *tls.Conn (that can block on Close)
// TODO(bradfitz): track the underlying tcp.Conn and just close that instead.
go proxyConn.Close()
}
}()
if err != nil {
return nil, err
}
done := make(chan struct{})
defer close(done)
go func() {
select {
case <-done:
return
case <-ctx.Done():
proxyConn.Close()
}
}()
target := net.JoinHostPort(n.HostName, "443")
var authHeader string
if v, err := tshttpproxy.GetAuthHeader(pu); err != nil {
c.logf("derphttp: error getting proxy auth header for %v: %v", proxyURL, err)
} else if v != "" {
authHeader = fmt.Sprintf("Proxy-Authorization: %s\r\n", v)
}
if _, err := fmt.Fprintf(proxyConn, "CONNECT %s HTTP/1.1\r\nHost: %s\r\n%s\r\n", target, pu.Hostname(), authHeader); err != nil {
if ctx.Err() != nil {
return nil, ctx.Err()
}
return nil, err
}
br := bufio.NewReader(proxyConn)
res, err := http.ReadResponse(br, nil)
if err != nil {
if ctx.Err() != nil {
return nil, ctx.Err()
}
c.logf("derphttp: CONNECT dial to %s: %v", target, err)
return nil, err
}
c.logf("derphttp: CONNECT dial to %s: %v", target, res.Status)
if res.StatusCode != 200 {
return nil, fmt.Errorf("invalid response status from HTTP proxy %s on CONNECT to %s: %v", pu, target, res.Status)
}
return proxyConn, nil
}
func (c *Client) Send(dstKey key.Public, b []byte) error {
client, _, err := c.connect(context.TODO(), "derphttp.Client.Send")
if err != nil {
return err
}
if err := client.Send(dstKey, b); err != nil {
c.closeForReconnect(client)
}
return err
}
func (c *Client) ForwardPacket(from, to key.Public, b []byte) error {
client, _, err := c.connect(context.TODO(), "derphttp.Client.ForwardPacket")
if err != nil {
return err
}
if err := client.ForwardPacket(from, to, b); err != nil {
c.closeForReconnect(client)
}
return err
}
// SendPong sends a reply to a ping, with the ping's provided
// challenge/identifier data.
//
// Unlike other send methods, SendPong makes no attempt to connect or
// reconnect to the peer. It's best effort. If there's a connection
// problem, the server will choose to hang up on us if we're not
// replying.
func (c *Client) SendPong(data [8]byte) error {
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return ErrClientClosed
}
if c.client == nil {
c.mu.Unlock()
return errors.New("not connected")
}
dc := c.client
c.mu.Unlock()
return dc.SendPong(data)
}
// SetCanAckPings sets whether this client will reply to ping requests from the server.
//
// This only affects future connections.
func (c *Client) SetCanAckPings(v bool) {
c.mu.Lock()
defer c.mu.Unlock()
c.canAckPings = v
}
// NotePreferred notes whether this Client is the caller's preferred
// (home) DERP node. It's only used for stats.
func (c *Client) NotePreferred(v bool) {
c.mu.Lock()
if c.preferred == v {
c.mu.Unlock()
return
}
c.preferred = v
client := c.client
c.mu.Unlock()
if client != nil {
if err := client.NotePreferred(v); err != nil {
c.closeForReconnect(client)
}
}
}
// WatchConnectionChanges sends a request to subscribe to
// notifications about clients connecting & disconnecting.
//
// Only trusted connections (using MeshKey) are allowed to use this.
func (c *Client) WatchConnectionChanges() error {
client, _, err := c.connect(context.TODO(), "derphttp.Client.WatchConnectionChanges")
if err != nil {
return err
}
err = client.WatchConnectionChanges()
if err != nil {
c.closeForReconnect(client)
}
return err
}
// ClosePeer asks the server to close target's TCP connection.
//
// Only trusted connections (using MeshKey) are allowed to use this.
func (c *Client) ClosePeer(target key.Public) error {
client, _, err := c.connect(context.TODO(), "derphttp.Client.ClosePeer")
if err != nil {
return err
}
err = client.ClosePeer(target)
if err != nil {
c.closeForReconnect(client)
}
return err
}
// Recv reads a message from c. The returned message may alias memory from Client.
// The message should only be used until the next Client call.
func (c *Client) Recv() (derp.ReceivedMessage, error) {
m, _, err := c.RecvDetail()
return m, err
}
// RecvDetail is like Recv, but additionally returns the connection generation on each message.
// The connGen value is incremented every time the derphttp.Client reconnects to the server.
func (c *Client) RecvDetail() (m derp.ReceivedMessage, connGen int, err error) {
client, connGen, err := c.connect(context.TODO(), "derphttp.Client.Recv")
if err != nil {
return nil, 0, err
}
m, err = client.Recv()
if err != nil {
c.closeForReconnect(client)
if c.isClosed() {
err = ErrClientClosed
}
}
return m, connGen, err
}
func (c *Client) isClosed() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.closed
}
// Close closes the client. It will not automatically reconnect after
// being closed.
func (c *Client) Close() error {
c.cancelCtx() // not in lock, so it can cancel Connect, which holds mu
c.mu.Lock()
defer c.mu.Unlock()
if c.closed {
return ErrClientClosed
}
c.closed = true
if c.netConn != nil {
c.netConn.Close()
}
return nil
}
// closeForReconnect closes the underlying network connection and
// zeros out the client field so future calls to Connect will
// reconnect.
//
// The provided brokenClient is the client to forget. If the current
// client is not brokenClient, closeForReconnect does nothing. (This
// prevents a send goroutine and a receive goroutine that fail at roughly
// the same time from both calling closeForReconnect, and their caller
// goroutines then calling closeForReconnect endlessly in lockstep;
// https://github.com/tailscale/tailscale/pull/264)
func (c *Client) closeForReconnect(brokenClient *derp.Client) {
c.mu.Lock()
defer c.mu.Unlock()
if c.client != brokenClient {
return
}
if c.netConn != nil {
c.netConn.Close()
c.netConn = nil
}
c.client = nil
}
var ErrClientClosed = errors.New("derphttp.Client closed")
func parseMetaCert(certs []*x509.Certificate) (serverPub key.Public, serverProtoVersion int) {
for _, cert := range certs {
if cn := cert.Subject.CommonName; strings.HasPrefix(cn, "derpkey") {
var err error
serverPub, err = key.NewPublicFromHexMem(mem.S(strings.TrimPrefix(cn, "derpkey")))
if err == nil && cert.SerialNumber.BitLen() <= 8 { // supports up to version 255
return serverPub, int(cert.SerialNumber.Int64())
}
}
}
return key.Public{}, 0
}
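// Illustrative sketch (not part of the derphttp code above): a minimal,
// self-contained program showing the certificate shape that parseMetaCert
// looks for, i.e. Subject.CommonName = "derpkey"+<hex public key> with the
// certificate SerialNumber reused as the DERP protocol version. The key bytes
// and version value below are made-up assumptions for the example; only the
// field layout is taken from the parsing code above.
package main

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"strings"
)

func main() {
	// Fake 32-byte public key, hex encoded (64 hex characters).
	pubHex := strings.Repeat("ab", 32)
	cert := &x509.Certificate{
		Subject:      pkix.Name{CommonName: "derpkey" + pubHex},
		SerialNumber: big.NewInt(2), // assumed protocol version for the example
	}
	// Mirror of the checks in parseMetaCert: the "derpkey" name prefix plus
	// a serial number small enough (<= 8 bits) to be a protocol version.
	if cn := cert.Subject.CommonName; strings.HasPrefix(cn, "derpkey") &&
		cert.SerialNumber.BitLen() <= 8 {
		fmt.Printf("server key %s..., proto %d\n",
			strings.TrimPrefix(cn, "derpkey")[:8], cert.SerialNumber.Int64())
	}
}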
|
[
"\"SSLKEYLOGFILE\""
] |
[] |
[
"SSLKEYLOGFILE"
] |
[]
|
["SSLKEYLOGFILE"]
|
go
| 1 | 0 | |
backend/nastyeagle/asgi.py
|
"""
ASGI config for nastyeagle project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nastyeagle.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
libpod/healthcheck.go
|
package libpod
import (
"bufio"
"bytes"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/containers/libpod/libpod/define"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// HealthCheckStatus represents the current state of a container
type HealthCheckStatus int
const (
// HealthCheckSuccess means the health check succeeded
HealthCheckSuccess HealthCheckStatus = iota
// HealthCheckFailure means the health check ran and failed
HealthCheckFailure HealthCheckStatus = iota
// HealthCheckContainerStopped means the health check cannot
// be run because the container is stopped
HealthCheckContainerStopped HealthCheckStatus = iota
// HealthCheckContainerNotFound means the container could
// not be found in local store
HealthCheckContainerNotFound HealthCheckStatus = iota
// HealthCheckNotDefined means the container has no health
// check defined in it
HealthCheckNotDefined HealthCheckStatus = iota
// HealthCheckInternalError means something failed while obtaining or running
// a given health check
HealthCheckInternalError HealthCheckStatus = iota
// HealthCheckDefined means the healthcheck was found on the container
HealthCheckDefined HealthCheckStatus = iota
// MaxHealthCheckNumberLogs is the maximum number of attempts we keep
// in the healthcheck history file
MaxHealthCheckNumberLogs int = 5
// MaxHealthCheckLogLength in characters
MaxHealthCheckLogLength = 500
// HealthCheckHealthy describes a healthy container
HealthCheckHealthy string = "healthy"
// HealthCheckUnhealthy describes an unhealthy container
HealthCheckUnhealthy string = "unhealthy"
// HealthCheckStarting describes the time between when the container starts
// and the start-period (time allowed for the container to start and application
// to be running) expires.
HealthCheckStarting string = "starting"
)
// HealthCheckResults describes the results/logs from a healthcheck
type HealthCheckResults struct {
// Status healthy or unhealthy
Status string `json:"Status"`
// FailingStreak is the number of consecutive failed healthchecks
FailingStreak int `json:"FailingStreak"`
// Log describes healthcheck attempts and results
Log []HealthCheckLog `json:"Log"`
}
// HealthCheckLog describes the results of a single healthcheck
type HealthCheckLog struct {
// Start time as string
Start string `json:"Start"`
// End time as a string
End string `json:"End"`
// ExitCode is the exit code of the healthcheck command
ExitCode int `json:"ExitCode"`
// Output is the stdout/stderr from the healthcheck command
Output string `json:"Output"`
}
// hcWriteCloser allows us to use bufio as a WriteCloser
type hcWriteCloser struct {
*bufio.Writer
}
// Used to add a closer to bufio
func (hcwc hcWriteCloser) Close() error {
return nil
}
// HealthCheck verifies the state and validity of the healthcheck configuration
// on the container and then executes the healthcheck
func (r *Runtime) HealthCheck(name string) (HealthCheckStatus, error) {
container, err := r.LookupContainer(name)
if err != nil {
return HealthCheckContainerNotFound, errors.Wrapf(err, "unable to lookup %s to perform a health check", name)
}
hcStatus, err := checkHealthCheckCanBeRun(container)
if err == nil {
return container.runHealthCheck()
}
return hcStatus, err
}
// runHealthCheck runs the health check as defined by the container
func (c *Container) runHealthCheck() (HealthCheckStatus, error) {
var (
newCommand []string
returnCode int
capture bytes.Buffer
inStartPeriod bool
)
hcCommand := c.HealthCheckConfig().Test
if len(hcCommand) < 1 {
return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
switch hcCommand[0] {
case "", "NONE":
return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
case "CMD":
newCommand = hcCommand[1:]
case "CMD-SHELL":
// TODO: SHELL command from image not available in Container - use Docker default
newCommand = []string{"/bin/sh", "-c", strings.Join(hcCommand[1:], " ")}
default:
// command supplied on command line - pass as-is
newCommand = hcCommand
}
if len(newCommand) < 1 || newCommand[0] == "" {
return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
captureBuffer := bufio.NewWriter(&capture)
hcw := hcWriteCloser{
captureBuffer,
}
streams := new(AttachStreams)
streams.OutputStream = hcw
streams.ErrorStream = hcw
streams.InputStream = os.Stdin
streams.AttachOutput = true
streams.AttachError = true
streams.AttachInput = true
logrus.Debugf("executing health check command %s for %s", strings.Join(newCommand, " "), c.ID())
timeStart := time.Now()
hcResult := HealthCheckSuccess
_, hcErr := c.Exec(false, false, []string{}, newCommand, "", "", streams, 0, nil, "")
if hcErr != nil {
errCause := errors.Cause(hcErr)
hcResult = HealthCheckFailure
if errCause == define.ErrOCIRuntimeNotFound ||
errCause == define.ErrOCIRuntimePermissionDenied ||
errCause == define.ErrOCIRuntime {
returnCode = 1
hcErr = nil
} else {
returnCode = 125
}
}
timeEnd := time.Now()
if c.HealthCheckConfig().StartPeriod > 0 {
// there is a start-period we need to honor; we add startPeriod to container start time
startPeriodTime := c.state.StartedTime.Add(c.HealthCheckConfig().StartPeriod)
if timeStart.Before(startPeriodTime) {
// we are still in the start period, flip the inStartPeriod bool
inStartPeriod = true
logrus.Debugf("healthcheck for %s being run in start-period", c.ID())
}
}
eventLog := capture.String()
if len(eventLog) > MaxHealthCheckLogLength {
eventLog = eventLog[:MaxHealthCheckLogLength]
}
if timeEnd.Sub(timeStart) > c.HealthCheckConfig().Timeout {
returnCode = -1
hcResult = HealthCheckFailure
hcErr = errors.Errorf("healthcheck command exceeded timeout of %s", c.HealthCheckConfig().Timeout.String())
}
hcl := newHealthCheckLog(timeStart, timeEnd, returnCode, eventLog)
if err := c.updateHealthCheckLog(hcl, inStartPeriod); err != nil {
return hcResult, errors.Wrapf(err, "unable to update health check log %s for %s", c.healthCheckLogPath(), c.ID())
}
return hcResult, hcErr
}
func checkHealthCheckCanBeRun(c *Container) (HealthCheckStatus, error) {
cstate, err := c.State()
if err != nil {
return HealthCheckInternalError, err
}
if cstate != define.ContainerStateRunning {
return HealthCheckContainerStopped, errors.Errorf("container %s is not running", c.ID())
}
if !c.HasHealthCheck() {
return HealthCheckNotDefined, errors.Errorf("container %s has no defined healthcheck", c.ID())
}
return HealthCheckDefined, nil
}
func newHealthCheckLog(start, end time.Time, exitCode int, log string) HealthCheckLog {
return HealthCheckLog{
Start: start.Format(time.RFC3339Nano),
End: end.Format(time.RFC3339Nano),
ExitCode: exitCode,
Output: log,
}
}
// updateHealthStatus updates the health status of the container
// in the healthcheck log
func (c *Container) updateHealthStatus(status string) error {
healthCheck, err := c.GetHealthCheckLog()
if err != nil {
return err
}
healthCheck.Status = status
newResults, err := json.Marshal(healthCheck)
if err != nil {
return errors.Wrapf(err, "unable to marshall healthchecks for writing status")
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
// updateHealthCheckLog parses the health check results and writes the log
func (c *Container) updateHealthCheckLog(hcl HealthCheckLog, inStartPeriod bool) error {
healthCheck, err := c.GetHealthCheckLog()
if err != nil {
return err
}
if hcl.ExitCode == 0 {
// set status to healthy, reset failing state to 0
healthCheck.Status = HealthCheckHealthy
healthCheck.FailingStreak = 0
} else {
if len(healthCheck.Status) < 1 {
healthCheck.Status = HealthCheckHealthy
}
if !inStartPeriod {
// increment failing streak
healthCheck.FailingStreak = healthCheck.FailingStreak + 1
// if failing streak >= retries, then set status to unhealthy
if healthCheck.FailingStreak >= c.HealthCheckConfig().Retries {
healthCheck.Status = HealthCheckUnhealthy
}
}
}
healthCheck.Log = append(healthCheck.Log, hcl)
if len(healthCheck.Log) > MaxHealthCheckNumberLogs {
healthCheck.Log = healthCheck.Log[1:]
}
newResults, err := json.Marshal(healthCheck)
if err != nil {
return errors.Wrapf(err, "unable to marshall healthchecks for writing")
}
return ioutil.WriteFile(c.healthCheckLogPath(), newResults, 0700)
}
// healthCheckLogPath returns the path where the health check log is stored
func (c *Container) healthCheckLogPath() string {
return filepath.Join(filepath.Dir(c.LogPath()), "healthcheck.log")
}
// GetHealthCheckLog returns HealthCheck results by reading the container's
// health check log file. If the health check log file does not exist, then
// an empty healthcheck struct is returned
func (c *Container) GetHealthCheckLog() (HealthCheckResults, error) {
var healthCheck HealthCheckResults
if _, err := os.Stat(c.healthCheckLogPath()); os.IsNotExist(err) {
return healthCheck, nil
}
b, err := ioutil.ReadFile(c.healthCheckLogPath())
if err != nil {
return healthCheck, errors.Wrapf(err, "failed to read health check log file %s", c.healthCheckLogPath())
}
if err := json.Unmarshal(b, &healthCheck); err != nil {
return healthCheck, errors.Wrapf(err, "failed to unmarshal existing healthcheck results in %s", c.healthCheckLogPath())
}
return healthCheck, nil
}
// HealthCheckStatus returns the current state of a container with a healthcheck
func (c *Container) HealthCheckStatus() (string, error) {
if !c.HasHealthCheck() {
return "", errors.Errorf("container %s has no defined healthcheck", c.ID())
}
results, err := c.GetHealthCheckLog()
if err != nil {
return "", errors.Wrapf(err, "unable to get healthcheck log for %s", c.ID())
}
return results.Status, nil
}
func (c *Container) disableHealthCheckSystemd() bool {
if os.Getenv("DISABLE_HC_SYSTEMD") == "true" {
return true
}
if c.config.HealthCheckConfig.Interval == 0 {
return true
}
return false
}
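// Illustrative sketch (not part of the libpod code above): a self-contained
// program reproducing the bookkeeping that updateHealthCheckLog performs:
// reset the failing streak on success, grow it on failures outside the start
// period, flip the status to unhealthy once the streak reaches the retry
// limit, and keep only the most recent MaxHealthCheckNumberLogs entries.
// The struct, retry count, and exit codes below are simplified assumptions
// for the example; the rules themselves come from the function above.
package main

import "fmt"

type result struct {
	Status        string
	FailingStreak int
	Log           []int // exit codes only, for brevity
}

func update(r *result, exitCode, retries, maxLogs int, inStartPeriod bool) {
	if exitCode == 0 {
		r.Status, r.FailingStreak = "healthy", 0
	} else if !inStartPeriod {
		r.FailingStreak++
		if r.FailingStreak >= retries {
			r.Status = "unhealthy"
		}
	}
	r.Log = append(r.Log, exitCode)
	if len(r.Log) > maxLogs {
		r.Log = r.Log[1:] // rotate out the oldest entry
	}
}

func main() {
	r := &result{Status: "healthy"}
	for _, code := range []int{0, 1, 1, 1} {
		update(r, code, 3, 5, false)
	}
	fmt.Println(r.Status, r.FailingStreak, r.Log) // unhealthy 3 [0 1 1 1]
}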
|
[
"\"DISABLE_HC_SYSTEMD\""
] |
[] |
[
"DISABLE_HC_SYSTEMD"
] |
[]
|
["DISABLE_HC_SYSTEMD"]
|
go
| 1 | 0 | |
iot/api-client/mqtt_example/cloudiot_mqtt_image_test.py
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
from google.cloud import pubsub
import pytest
# Add manager as library
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'manager')) # noqa
import cloudiot_mqtt_image
import manager
cloud_region = 'us-central1'
device_id_template = 'test-device-{}'
ca_cert_path = 'resources/roots.pem'
rsa_cert_path = 'resources/rsa_cert.pem'
rsa_private_path = 'resources/rsa_private.pem'
topic_id = 'test-device-events-{}'.format(int(time.time()))
subscription_name = 'test-device-images-{}'.format(int(time.time()))
project_id = os.environ['GCLOUD_PROJECT']
service_account_json = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
pubsub_topic = 'projects/{}/topics/{}'.format(project_id, topic_id)
registry_id = 'test-registry-{}'.format(int(time.time()))
image_path = './resources/owlister_hootie.png'
mqtt_bridge_hostname = 'mqtt.googleapis.com'
mqtt_bridge_port = 443
@pytest.fixture(scope='module')
def test_topic():
topic = manager.create_iot_topic(project_id, topic_id)
yield topic
pubsub_client = pubsub.PublisherClient()
topic_path = pubsub_client.topic_path(project_id, topic_id)
pubsub_client.delete_topic(topic_path)
def test_image(test_topic, capsys):
"""Send an inage to a device registry"""
device_id = device_id_template.format('RSA256')
manager.open_registry(
service_account_json, project_id, cloud_region, pubsub_topic,
registry_id)
manager.create_rs256_device(
service_account_json, project_id, cloud_region, registry_id,
device_id, rsa_cert_path)
manager.get_device(
service_account_json, project_id, cloud_region, registry_id,
device_id)
cloudiot_mqtt_image.transmit_image(
cloud_region, registry_id, device_id, rsa_private_path, ca_cert_path,
image_path, project_id, service_account_json)
# Clean up
manager.delete_device(
service_account_json, project_id, cloud_region, registry_id,
device_id)
manager.delete_registry(
service_account_json, project_id, cloud_region, registry_id)
out, _ = capsys.readouterr()
assert 'on_publish' in out
def test_image_recv(test_topic, capsys):
"""Transmit an image with IoT Core and receive it from PubSub"""
subscriber = pubsub.SubscriberClient()
topic_path = subscriber.topic_path(project_id, topic_id)
subscription_path = subscriber.subscription_path(
project_id, subscription_name)
subscriber.create_subscription(subscription_path, topic_path)
time.sleep(10)
device_id = device_id_template.format('RSA256')
manager.open_registry(
service_account_json, project_id, cloud_region, pubsub_topic,
registry_id)
manager.create_rs256_device(
service_account_json, project_id, cloud_region, registry_id,
device_id, rsa_cert_path)
manager.get_device(
service_account_json, project_id, cloud_region, registry_id,
device_id)
cloudiot_mqtt_image.transmit_image(
cloud_region, registry_id, device_id, rsa_private_path, ca_cert_path,
image_path, project_id, service_account_json)
time.sleep(10)
cloudiot_mqtt_image.receive_image(
project_id, subscription_name, 'test', 'png', 30)
# Clean up
subscriber.delete_subscription(subscription_path)
manager.delete_device(
service_account_json, project_id, cloud_region, registry_id,
device_id)
manager.delete_registry(
service_account_json, project_id, cloud_region, registry_id)
out, _ = capsys.readouterr()
assert 'Received image' in out
|
[] |
[] |
[
"GCLOUD_PROJECT",
"GOOGLE_APPLICATION_CREDENTIALS"
] |
[]
|
["GCLOUD_PROJECT", "GOOGLE_APPLICATION_CREDENTIALS"]
|
python
| 2 | 0 | |
launcher/src/main/java/io/bdeploy/launcher/cli/ui/MessageDialogs.java
|
package io.bdeploy.launcher.cli.ui;
import java.awt.Dimension;
import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Date;
import java.util.Map;
import java.util.TreeMap;
import javax.swing.JOptionPane;
import javax.swing.JScrollPane;
import javax.swing.JTextArea;
import io.bdeploy.common.util.FormatHelper;
import io.bdeploy.common.util.OsHelper;
import io.bdeploy.common.util.OsHelper.OperatingSystem;
import io.bdeploy.common.util.ProcessHelper;
import io.bdeploy.common.util.VersionHelper;
import io.bdeploy.interfaces.descriptor.client.ClickAndStartDescriptor;
import io.bdeploy.launcher.cli.LauncherTool;
import io.bdeploy.launcher.cli.SoftwareUpdateException;
/**
* Provides static helpers to display a {@linkplain MessageDialog}
*/
public class MessageDialogs {
private MessageDialogs() {
}
/**
* Opens the dialog to show that a required update is available but cannot be installed.
*/
public static void showUpdateRequired(ClickAndStartDescriptor config, SoftwareUpdateException ex) {
MessageDialog dialog = new MessageDialog("Software Update");
dialog.setHeaderIcon(WindowHelper.loadIcon("/update.png", 32, 32));
dialog.setHeaderText("Software Update Required");
dialog.setSummary("<html>A required software update is available " + //
"but cannot be installed due to insufficient permissions. " + //
"Contact the system administrator.</html>");
dialog.setDetails(getDetailedErrorMessage(config, ex));
dialog.setVisible(true);
dialog.waitForExit();
}
/**
* Opens the dialog to show that launching the given application failed.
*/
public static void showLaunchFailed(ClickAndStartDescriptor config, Throwable ex) {
MessageDialog dialog = new MessageDialog("Error");
dialog.setHeaderIcon(WindowHelper.loadIcon("/error.png", 32, 32));
dialog.setHeaderText("Application could not be launched");
dialog.setSummary("<html>Unexpected error occurred while launching the application. " + //
"If the problem persists, contact the system administrator.</html>");
dialog.setDetails(getDetailedErrorMessage(config, ex));
dialog.setVisible(true);
dialog.waitForExit();
}
/**
* Opens the dialog to show uninstallation failed.
*/
public static void showUninstallationFailed(ClickAndStartDescriptor config, Throwable ex) {
MessageDialog dialog = new MessageDialog("Error");
dialog.setHeaderIcon(WindowHelper.loadIcon("/error.png", 32, 32));
dialog.setHeaderText("Application could not be uninstalled");
dialog.setSummary("<html>Unexpected error occurred while uninstalling the application. " + //
"If the problem persists, contact the system administrator.</html>");
dialog.setDetails(getDetailedErrorMessage(config, ex));
dialog.setVisible(true);
}
/**
* Opens a dialog to show the given multi-line result.
*/
public static void showDetailedMessage(String message) {
JTextArea textArea = new JTextArea(message);
textArea.setEditable(false);
JScrollPane scrollPane = new JScrollPane(textArea);
scrollPane.setPreferredSize(new Dimension(480, 320));
JOptionPane.showMessageDialog(null, scrollPane, "Result", JOptionPane.INFORMATION_MESSAGE);
}
/** Returns the detailed error message to be displayed */
private static String getDetailedErrorMessage(ClickAndStartDescriptor config, Throwable ex) {
StringBuilder builder = new StringBuilder();
builder.append("*** Date: ").append(FormatHelper.format(new Date())).append("\n");
builder.append("\n");
StringWriter writer = new StringWriter();
ex.printStackTrace(new PrintWriter(writer));
builder.append("*** Stacktrace: \n").append(writer);
builder.append("\n");
builder.append("*** BDeploy properties: \n");
builder.append("LauncherVersion=").append(VersionHelper.getVersion()).append("\n");
if (config != null) {
builder.append("ServerVersion=").append(getServerVersion(config)).append("\n");
builder.append("ApplicationId=").append(config.applicationId).append("\n");
builder.append("GroupId=").append(config.groupId).append("\n");
builder.append("InstanceId=").append(config.instanceId).append("\n");
builder.append("Host=").append(config.host.getUri()).append("\n");
builder.append("Token=").append(config.host.getAuthPack()).append("\n");
}
builder.append("\n");
builder.append("*** System properties: \n");
Map<Object, Object> properties = new TreeMap<>(System.getProperties());
properties.forEach((k, v) -> builder.append(k).append("=").append(v).append("\n"));
builder.append("\n");
builder.append("*** System environment variables: \n");
Map<String, String> env = new TreeMap<>(System.getenv());
env.forEach((k, v) -> builder.append(k).append("=").append(v).append("\n"));
builder.append("\n");
String osDetails = getOsDetails();
if (osDetails != null) {
builder.append("*** Operating system: \n");
builder.append(osDetails);
}
return builder.toString();
}
/** Returns a string containing details about the running OS. */
private static String getOsDetails() {
// Windows: Return full version including build number
if (OsHelper.getRunningOs() == OperatingSystem.WINDOWS) {
return ProcessHelper.launch(new ProcessBuilder("cmd.exe", "/c", "ver"));
}
// No specific information to display
return null;
}
/** Returns the version of the remote BDeploy server */
private static String getServerVersion(ClickAndStartDescriptor config) {
try {
return LauncherTool.getServerVersion(config).toString();
} catch (Exception ex) {
return ex.getMessage();
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
conary/build/action.py
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Provides superclasses for build and policy.
"""
from conary.lib import fixedfnmatch
import itertools
import os
import re
import shlex
import sys
import string
import traceback
from conary.lib import debugger, log, util
from conary.local import database
TARGET_LINUX = 'linux'
TARGET_WINDOWS = 'windows'
# build.py and policy.py need some common definitions
def checkUse(use):
"""
Determines whether to take an action, based on system configuration
@param use: Flags telling whether to take action
@type use: None, boolean, or tuple of booleans
"""
if use is None:
return True
if type(use) is not tuple:
use = (use,)
for usevar in use:
if not usevar:
return False
return True
class _AnyDict(dict):
"""A dictionary that returns None for any key that is accessed. Used
internally to verify dictionary format string expansion"""
def __getitem__(self, key):
return None
class Action:
"""
Pure virtual base class for all actions -- classes which are
instantiated with data, and later asked to take an action based
on that data.
@cvar keywords: The keywords and default values accepted by the class
"""
keywords = { 'debug' : False }
supported_targets = (TARGET_LINUX, )
def __init__(self, *args, **keywords):
assert(self.__class__ is not Action)
# keywords will be in the class object, not the instance
if not hasattr(self.__class__, 'keywords'):
self.keywords = {}
self._applyDefaults()
self.addArgs(*args, **keywords)
# verify that there are not broken format strings
d = _AnyDict()
for arg in args:
if type(arg) is str and '%' in arg:
try:
arg % d
except ValueError, msg:
log.error('invalid macro substitution in "%s", missing "s"?' %arg)
raise
def doAction(self):
if not self._isSupportedTarget():
log.warning('Action %s not supported for target OS %s'
% (self.__class__.__name__, self._getTarget()))
return
if self.debug:
debugger.set_trace()
self.do()
def do(self):
pass
def _getTarget(self):
target = None
if 'targetos' in self.recipe.macros:
target = self.recipe.macros.targetos
if not target or 'linux' in target:
target = TARGET_LINUX
return target
def _isSupportedTarget(self):
if not hasattr(self, 'recipe'):
return True
target = self._getTarget()
return target in self.supported_targets
def _applyDefaults(self):
"""
Traverse the class hierarchy, picking up default keywords. We
ascend to the topmost class and pick up the keywords as we work
back to our class, to allow proper overriding.
"""
baselist = [self.__class__]
bases = list(self.__class__.__bases__)
while bases:
parent = bases.pop()
bases.extend(list(parent.__bases__))
baselist.append(parent)
baselist.reverse()
for base in baselist:
if 'keywords' in base.__dict__:
self.__dict__.update(base.__dict__['keywords'])
def addArgs(self, *args, **keywords):
# check to make sure that we don't get a keyword we don't expect
for key in keywords.keys():
# XXX this is not the best test, but otherwise we have to
# keep a dictionary of all of the keywords (including the parent
# keywords)
if key not in self.__dict__.keys():
raise TypeError, ("%s.__init__() got an unexpected keyword argument "
"'%s'" % (self.__class__.__name__, key))
# copy the keywords into our dict, overwriting the defaults
self.__dict__.update(keywords)
def genExcepthook(self):
def excepthook(type, exc_msg, tb):
cfg = self.recipe.cfg
sys.excepthook = sys.__excepthook__
if cfg.debugRecipeExceptions:
lines = traceback.format_exception(type, exc_msg, tb)
print string.joinfields(lines, "")
if self.linenum is not None:
prefix = "%s:%s:" % (self.file, self.linenum)
prefix_len = len(prefix)
if str(exc_msg)[:prefix_len] != prefix:
exc_message = "%s:%s: %s: %s" % (self.file, self.linenum,
type.__name__, exc_msg)
print exc_message
if self.recipe.buildinfo:
try:
buildinfo = self.recipe.buildinfo
buildinfo.error = exc_message
buildinfo.file = self.file
buildinfo.lastline = self.linenum
buildinfo.stop()
except:
log.warning("could not write out to buildinfo")
if cfg.debugRecipeExceptions and self.recipe.isatty():
debugger.post_mortem(tb, type, exc_msg)
else:
sys.exit(1)
return excepthook
class RecipeAction(Action):
"""
Action class which accepts the use= keyword to control execution,
and which assumes that the action is being called from within a recipe.
The action stores the line in the recipe file which calls it, in order
to allow for that line number to be reported when raising an exception.
"""
keywords = {
'use': None
}
# define which types of recipe an action is available for
_packageAction = True
_groupAction = False
# using an action may suggest the addition of a build requirement
# (like r.Make requiring make)
_actionTroveBuildRequires = set([])
_actionPathBuildRequires = set([])
def __init__(self, recipe, *args, **keywords):
assert(self.__class__ is not RecipeAction)
self._getLineNum()
Action.__init__(self, *args, **keywords)
self.recipe = recipe
# change self.use to be a simple flag
self.use = checkUse(self.use)
def _addActionPathBuildRequires(self, buildRequires):
# We do not want dynamically added requirements to modify the class
if id(self._actionPathBuildRequires) == \
id(self.__class__._actionPathBuildRequires):
self._actionPathBuildRequires = set(self._actionPathBuildRequires)
self._actionPathBuildRequires.update(buildRequires)
def _addActionTroveBuildRequires(self, buildRequires):
# We do not want dynamically added requirements to modify the class
if id(self._actionTroveBuildRequires) == \
id(self.__class__._actionTroveBuildRequires):
self._actionTroveBuildRequires = set(self._actionTroveBuildRequires)
self._actionTroveBuildRequires.update(buildRequires)
# virtual method for actually executing the action
def doAction(self):
if not self._isSupportedTarget():
log.warning('Action %s not supported for target OS'
% self.__class__.__name__)
return
if self.use:
try:
if self.linenum is None:
self.do()
else:
oldexcepthook = sys.excepthook
sys.excepthook = self.genExcepthook()
if self.recipe.buildinfo:
self.recipe.buildinfo.lastline = self.linenum
self.do()
sys.excepthook = oldexcepthook
finally:
# we need to provide suggestions even in the failure case
self.doSuggestAutoBuildReqs()
else:
# any invariant suggestions should be provided even if not self.use
self.doSuggestAutoBuildReqs()
def doSuggestAutoBuildReqs(self):
if not hasattr(self.recipe, "buildRequires"):
# Most likely group recipe
return
if hasattr(self.recipe, "getRepos"):
repos = self.recipe.getRepos()
else:
repos = None
paths = []
buildRequires = self.recipe._getTransitiveBuildRequiresNames()
for cmd in self._actionPathBuildRequires:
# Catch the case "python setup.py", as well as
# 'ENV="a b" somecommand'
cmdarr = shlex.split(cmd)
# Try to catch the command "ENVVAR=val make": skip all words that
# have an equal sign in them
c = cmd
for x in cmdarr:
if '=' not in x:
c = x
break
# If the above for loop didn't find anything remotely resembling a
# command, use the original one
c = c % self.recipe.macros
fullPath = util.checkPath(c)
if (not fullPath) and repos:
if not c.startswith('/'):
candidatePaths = [os.path.join(x, c)
for x in os.getenv('PATH', '').split(os.path.pathsep)]
else:
candidatePaths = [c]
foundProvider = False
for label in self.recipe.cfg.installLabelPath:
trvDict = repos.getTroveVersionsByPath(candidatePaths,
label)
trvs = [x for x in trvDict.values() if x]
if trvs:
foundProvider = True
self._addActionTroveBuildRequires([trvs[0][0][0]])
break
if not foundProvider:
log.warning('Failed to find possible build requirement'
' for path "%s"' % c)
continue
paths.append(fullPath)
if not hasattr(self.recipe, '_pathLookupCache'):
pathCache = self.recipe._pathLookupCache = _pathLookupCache()
else:
pathCache = self.recipe._pathLookupCache
suggestsMap = pathCache.getTrovesByPaths(self._getDb(), paths)
suggests = set()
for k, v in suggestsMap.items():
suggests.update(v)
# Add the trove requirements
suggests.update(self._actionTroveBuildRequires)
# Tell reportExcessBuildRequires that all these are necessary
if not hasattr(self.recipe, 'reportExcessBuildRequires'):
return
self.recipe.reportExcessBuildRequires(suggests)
# Remove build requires that were already added
suggests = suggests - set(buildRequires)
if suggests:
log.warning('Some missing buildRequires %s' %(sorted(suggests)))
self.recipe.reportMissingBuildRequires(sorted(suggests))
def doPrep(self):
pass
def do(self):
pass
def _getLineNum(self):
"""Gets the line number and file name of the place where the
Action is instantiated, which is important for returning
useful error messages"""
# Moves up the frame stack to outside of Action class --
# also passes by __call__ function, used by helper functions
# internally to instantiate Actions.
#
# Another alternative would be to look at filepath until we
# reach outside of conary source tree
f = sys._getframe(1) # get frame above this one
while f != None:
if f.f_code.co_argcount == 0: # break if non-class fn
break
firstargname = f.f_code.co_varnames[0]
firstarg = f.f_locals[firstargname]
if not isinstance(firstarg, Action):
if f.f_code.co_name != '__call__':
break
f = f.f_back # go up a frame
assert f is not None
self.file = f.f_code.co_filename
self.linenum = f.f_lineno
if not self.file:
self.file = '<None>'
def init_error(self, type, msg):
"""
use in action __init__ to add lineno to exceptions
raised. Usually this is handled automatically,
but it is (almost) impossible to wrap init calls.
Actually, this probably could be done by changing
recipe helper, but until that is done use this function
"""
raise type, "%s:%s: %s: %s" % (self.file, self.linenum,
type.__name__, msg)
def _getDb(self):
if not hasattr(self.recipe, '_db') or self.recipe._db is None:
self.recipe._db = database.Database(self.recipe.cfg.root,
self.recipe.cfg.dbPath)
return self.recipe._db
# XXX look at ShellCommand versus Action
class ShellCommand(RecipeAction):
"""Base class for shell-based commands. ShellCommand is an abstract class
and can not be made into a working instance. Only derived classes which
define the C{template} static class variable will work properly.
Note: when creating templates, be aware that they are evaluated
twice, in the context of two different dictionaries.
- keys from keywords should have a single %, as should "args".
- keys passed in through the macros argument will need %% to
escape them for delayed evaluation; for example,
%%(builddir)s and %%(destdir)s
@ivar self.command: Shell command to execute. This is built from the
C{template} static class variable in derived classes at
initialization time.
@type self.command: str
@cvar template: The string template used to build the shell command.
"""
def __init__(self, recipe, *args, **keywords):
"""Create a new ShellCommand instance that can be used to run
a simple shell statement
@param args: arguments to __init__ are stored for later substitution
in the shell command if it contains %(args)s
@param keywords: keywords are replaced in the shell command
through dictionary substitution
@raise TypeError: If a keyword is passed to __init__ which is not
accepted by the class.
@rtype: ShellCommand
"""
# enforce pure virtual status
assert(self.__class__ is not ShellCommand)
self.recipe = recipe
self.arglist = args
self.args = string.join(args)
# fill in anything in the template that might be specified
# as a keyword. Keywords only become a part of this class
# instance's dictionary if Action._applyDefaults is called.
# this is the case for build.BuildCommand instances, for example.
self.command = self.template % self.__dict__
# verify that there are not broken format strings
d = _AnyDict()
self.command % d
for arg in args:
if type(arg) is str and '%' in arg:
arg % d
def addArgs(self, *args, **keywords):
# append new arguments as well as include keywords
self.args = self.args + string.join(args)
RecipeAction.addArgs(self, *args, **keywords)
def _expandOnePath(path, macros, defaultDir=None, braceGlob=False, error=False):
if braceGlob:
return _expandPaths([path], macros, defaultDir, True, error)
if defaultDir is None:
defaultDir = macros.builddir
path = path % macros
if path and path[0] == '/':
if path.startswith(macros.destdir):
log.warning(
"remove destdir from path name %s;"
" absolute paths are automatically relative to destdir"
%path)
else:
path = macros.destdir + path
else:
path = os.path.join(defaultDir, path)
if error:
if not os.path.exists(path):
raise RuntimeError, "No such file '%s'" % path
return path
def matchRegexp(baseDir, pattern, regexpFlags):
if pattern[-1] != '$':
pattern = pattern + '$'
results = []
for root, dirs, fileNames in os.walk(baseDir):
for pathItem in itertools.chain(dirs + fileNames):
path = root + '/' + pathItem
if re.match(pattern, path):
results.append(path)
return results
class Regexp(object):
def __init__(self, pattern):
self.pattern = pattern
def __hash__(self):
return hash(self.pattern)
def __eq__(self, expr):
if isinstance(expr, Glob):
return expr() == self.pattern
elif isinstance(expr, Regexp):
return expr.pattern == self.pattern
return expr == self.pattern
def __repr__(self):
return 'Regexp(%r)' % self.pattern
def __str__(self):
return "Regexp(%r)" % self.pattern.replace('%', '%%')
class Glob(object):
def __init__(self, recipe, pattern):
self.macros = recipe.macros
self.pattern = pattern
def __repr__(self):
return "Glob(%r)" % self.pattern
def __str__(self):
return "Glob(%r)" % self.pattern.replace('%', '%%')
def __eq__(self, expr):
if isinstance(expr, Glob):
return expr() == self()
elif isinstance(expr, Regexp):
return expr.pattern == self()
return expr == self()
def __hash__(self):
return hash(self())
def __call__(self):
"""
Translate a shell PATTERN to a regular expression, substituting macros.
There is no way to quote meta-characters.
"""
# macros must be substituted first, so that they can be properly
# escaped
try:
pat = self.pattern % self.macros
except ValueError, msg:
log.error('invalid macro substitution in "%s", missing "s"?' % \
self.pattern)
raise
return '^' + fixedfnmatch.translate(pat)
def _expandPaths(paths, macros, defaultDir=None, braceGlob=True, error=False):
"""
Expand braces, globs, and macros in path names, and root all path names
to either the build dir or dest dir. Relative paths (not starting with
a /) are relative to builddir. All absolute paths to are relative to
destdir.
"""
destdir = macros.destdir
if defaultDir is None:
defaultDir = macros.builddir
expPaths = []
for item in paths:
if isinstance(item, Regexp):
isRegexp = True
path = item.pattern
elif isinstance(item, Glob):
isRegexp = False
braceGlob = True
path = item.pattern
else:
isRegexp = False
path = item
path = path % macros
if path[0] == '/':
if path.startswith(destdir):
log.warning(
"remove destdir from path name %s;"
" absolute paths are automatically relative to destdir"
%path)
else:
path = destdir + path
baseDir = destdir
else:
path = defaultDir + os.sep + path
baseDir = defaultDir
if isRegexp:
expPaths.extend(matchRegexp(baseDir, path, item))
elif braceGlob:
expPaths.extend(util.braceGlob(path))
else:
expPaths.append(path)
if error:
notfound = []
for path in expPaths:
if not os.path.exists(path):
notfound.append(path)
if notfound:
raise RuntimeError, "No such file(s) '%s'" % "', '".join(notfound)
return expPaths
class _pathLookupCache(object):
"""Simple cache object for path lookups (singleton-like)"""
__slots__ = ['_cache']
def __init__(self):
self._cache = {}
def getTrovesByPaths(self, db, paths):
ret = {}
for path in paths:
if path in self._cache:
ret[path] = self._cache[path]
else:
ret[path] = self._cache[path] = [ x.getName()
for x in db.iterTrovesByPath(path) ]
return ret
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
setup.py
|
#!/usr/bin/python3
import os
import sys
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
if os.environ.get("BROWNIE_LIB", "0") == "1":
if sys.platform == "windows":
requirements_filename = "requirements-windows.in"
else:
requirements_filename = "requirements.in"
else:
if sys.platform == "windows":
requirements_filename = "requirements-windows.txt"
else:
requirements_filename = "requirements.txt"
with open(requirements_filename, "r") as f:
requirements = list(map(str.strip, f.read().split("\n")))[:-1]
setup(
name="eth-brownie",
packages=find_packages(),
version="1.15.2", # don't change this manually, use bumpversion instead
license="MIT",
description="A Python framework for Ethereum smart contract deployment, testing and interaction.", # noqa: E501
long_description=long_description,
long_description_content_type="text/markdown",
author="Ben Hauser",
author_email="[email protected]",
url="https://github.com/eth-brownie/brownie",
keywords=["brownie"],
install_requires=requirements,
entry_points={
"console_scripts": ["brownie=brownie._cli.__main__:main"],
"pytest11": ["pytest-brownie=brownie.test.plugin"],
},
include_package_data=True,
python_requires=">=3.6,<4",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
|
[] |
[] |
[
"BROWNIE_LIB"
] |
[]
|
["BROWNIE_LIB"]
|
python
| 1 | 0 | |
pkg/service/metadata/drone/droneutil/droneutil.go
|
// Copyright 2018 The OpenPitrix Authors. All rights reserved.
// Use of this source code is governed by an Apache license
// that can be found in the LICENSE file.
package droneutil
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"google.golang.org/grpc"
"openpitrix.io/openpitrix/pkg/logger"
"openpitrix.io/openpitrix/pkg/pb/metadata/drone"
"openpitrix.io/openpitrix/pkg/pb/metadata/types"
)
func MustLoadConfdConfig(path string) *pbtypes.ConfdConfig {
p, err := LoadConfdConfig(path)
if err != nil {
logger.Critical("%+v", err)
os.Exit(1)
}
return p
}
func MustLoadDroneConfig(path string) *pbtypes.DroneConfig {
p, err := LoadDroneConfig(path)
if err != nil {
logger.Critical("%+v", err)
os.Exit(1)
}
return p
}
func LoadConfdConfig(path string) (*pbtypes.ConfdConfig, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
logger.Warn("%+v", err)
return nil, err
}
p := new(pbtypes.ConfdConfig)
if err := json.Unmarshal(data, p); err != nil {
logger.Warn("%+v", err)
return nil, err
}
if p.ProcessorConfig == nil {
p.ProcessorConfig = &pbtypes.ConfdProcessorConfig{}
}
if p.BackendConfig == nil {
p.BackendConfig = &pbtypes.ConfdBackendConfig{}
}
p.ProcessorConfig.Confdir = strExtractingEnvValue(
p.ProcessorConfig.Confdir,
)
return p, nil
}
func LoadDroneConfig(path string) (*pbtypes.DroneConfig, error) {
data, err := ioutil.ReadFile(path)
if err != nil {
logger.Warn("%+v", err)
return nil, err
}
p := new(pbtypes.DroneConfig)
if err := json.Unmarshal(data, p); err != nil {
logger.Warn("%+v", err)
return nil, err
}
p.CmdInfoLogPath = strExtractingEnvValue(p.CmdInfoLogPath)
return p, nil
}
func DialDroneService(ctx context.Context, host string, port int) (
client pbdrone.DroneServiceClient,
conn *grpc.ClientConn,
err error,
) {
conn, err = grpc.Dial(fmt.Sprintf("%s:%d", host, port), grpc.WithInsecure())
if err != nil {
logger.Warn("%+v", err)
return
}
client = pbdrone.NewDroneServiceClient(conn)
return
}
func strExtractingEnvValue(s string) string {
if !strings.ContainsAny(s, "${}") {
return s
}
env := os.Environ()
if runtime.GOOS == "windows" {
if os.Getenv("HOME") == "" {
home := os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
if home == "" {
home = os.Getenv("USERPROFILE")
}
env = append(env, "HOME="+home)
}
if os.Getenv("PWD") == "" {
pwd, _ := os.Getwd()
env = append(env, "PWD="+pwd)
}
}
for _, e := range env {
if i := strings.Index(e, "="); i >= 0 {
s = strings.Replace(s,
fmt.Sprintf("${%s}", strings.TrimSpace(e[:i])),
strings.TrimSpace(e[i+1:]),
-1,
)
}
}
return s
}
|
[
"\"HOME\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"PWD\""
] |
[] |
[
"HOME",
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE",
"PWD"
] |
[]
|
["HOME", "HOMEPATH", "HOMEDRIVE", "USERPROFILE", "PWD"]
|
go
| 5 | 0 | |
benchmarks/io.py
|
# To check the reading/writing performance of DL3 data
import logging
import numpy as np
import time
import yaml
import os
from gammapy.data import DataStore
from gammapy.maps import Map
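# Number of observations to load; override via the GAMMAPY_BENCH_N_OBS environment variable (defaults to 10).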
N_OBS = int(os.environ.get("GAMMAPY_BENCH_N_OBS", 10))
def run_benchmark():
info = {"n_obs": N_OBS}
t = time.time()
data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps/")
OBS_ID = 110380
obs_ids = OBS_ID * np.ones(N_OBS)
observations = data_store.get_observations(obs_ids)
info["data_loading"] = time.time() - t
t = time.time()
m = Map.create()
for obs in observations:
m.fill_events(obs.events)
info["filling"] = time.time() - t
t = time.time()
m.write("survey_map.fits.gz", overwrite=True)
info["writing"] = time.time() - t
with open("bench.yaml", "w") as fh:
yaml.dump(info, fh, sort_keys=False, indent=4)
if __name__ == "__main__":
format = "%(filename)s:%(lineno)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=format)
logging.info(f"Running io.py with N_OBS = {N_OBS}")
logging.info(f"cwd = {os.getcwd()}")
run_benchmark()
|
[] |
[] |
[
"GAMMAPY_BENCH_N_OBS"
] |
[]
|
["GAMMAPY_BENCH_N_OBS"]
|
python
| 1 | 0 | |
artic/deprecated/plot_amplicon_depth.py
|
#!/usr/bin/env python3
"""
Plot the mean read depth per amplicon.
This has been written for use in the ARTIC pipeline so there are no file checks - it assumes the following:
* the primer scheme is in ARTIC format
* the input depth files are in the format: `chrom\treadgroup\tposition\tdepth`
* readgroup equates to primer pool
* the primer pairs in the scheme are sorted by amplicon number (i.e. readgroups are interleaved)
* depth values are provided for all positions (see output of make_depth_mask.py for expected format)
"""
from .vcftagprimersites import read_bed_file
import sys
import pandas as pd
import numpy as np
import argparse
import os
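# Use Qt's offscreen platform plugin so the script can run on headless systems without a display server.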
os.environ['QT_QPA_PLATFORM'] = 'offscreen'
import seaborn as sns
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def go(args):
# get the primer scheme
primerScheme = read_bed_file(args.primerScheme)
# number the amplicons in the scheme and link them to primer start site
ampliconCounter = 1
# store the amplicon number and starts by read group dict
rgAmplicons = {}
rgStarts = {}
# process the primers by readgroup
for primer in primerScheme:
poolName = primer['PoolName']
if poolName not in rgAmplicons:
rgAmplicons[poolName] = []
rgStarts[poolName] = []
if primer['direction'] == '+':
rgAmplicons[poolName].append(ampliconCounter)
rgStarts[poolName].append(primer['start'])
ampliconCounter += 1
# for pandas cut func to create bins, we need to add an extra value to the starts (just use inf)
for startList in rgStarts.values():
startList.append(np.inf)
# process the depth files
dfs = {}
for depthFile in args.depthFiles:
# read in the depth file
df = pd.read_csv(depthFile, sep='\t', header=None,
names=['refName', 'readGroup',
'position', 'depth'],
dtype={'refName': str, 'readGroup': str,
'position': int, 'depth': int},
usecols=(0, 1, 2, 3),)
# check that there aren't too many positions in the depth data for plotting
# assert len(df.index) < 30000, "error: too many data points to plot"
# check all ref positions have a depth value
startPos = df["position"][0]
endPos = df["position"][df.index[-1]]
assert len(df.index) == ((endPos - startPos) +
1), "error: depth needs to be reported at all positions"
# check the primer scheme contains the readgroup
rgList = df.readGroup.unique()
assert len(rgList) == 1, "error: depth file has %d readgroups, need 1 (%s)" % (
len(rgList), depthFile)
rg = rgList[0]
assert rg in rgAmplicons, "error: readgroup not found in provided primer scheme (%s)" % (
rg)
# get the amplicon starts for this readgroup
amplicons = sorted(rgAmplicons[rg])
starts = sorted(rgStarts[rg])
# bin read depths by amplicon for this readgroup
df['amplicon'] = pd.cut(
x=df['position'], bins=starts, labels=amplicons)
# store the mean of each bin
bins = (df.groupby(['amplicon'])[
'depth'].mean()).rename(depthFile.name)
# add to the pile
assert rg not in dfs, "error: readgroup present in multiple files (%s)" % (
rg)
dfs[rg] = bins
# combine the series data from each input file
newDF = pd.concat(dfs, axis=1)
newDF.sort_index(axis=0, inplace=True)
newDF.reset_index(inplace=True)
# melt the DF for seaborn
newDF = newDF.melt("amplicon", var_name="read group",
value_name="mean amplicon read depth")
newDF = newDF.dropna()
# plot the bar
g = sns.catplot(data=newDF,
x="amplicon",
y="mean amplicon read depth",
hue="read group",
height=4,
aspect=3,
kind="bar",
dodge=False,
legend=False)
g.set(yscale="log")
g.fig.suptitle(args.sampleID)
plt.legend(loc='upper right')
plt.xticks(rotation=45, size=6)
plt.savefig(args.outFilePrefix + "-barplot.png")
plt.close()
# plot the box
g = sns.catplot(data=newDF,
x="read group",
y="mean amplicon read depth",
kind="box")
g.fig.suptitle(args.sampleID)
plt.savefig(args.outFilePrefix + "-boxplot.png")
plt.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--primerScheme', required=True,
help='the ARTIC primer scheme')
parser.add_argument('--sampleID', required=True,
help='the sample ID for the provided depth files')
parser.add_argument('--outFilePrefix', default="./amplicon-depth",
help='the prefix to give the output plot file')
parser.add_argument(
"depthFiles", type=argparse.FileType('r'), nargs='+', help='the depth files produced by make_depth_mask.py')
args = parser.parse_args()
go(args)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"QT_QPA_PLATFORM"
] |
[]
|
["QT_QPA_PLATFORM"]
|
python
| 1 | 0 | |
autobuild/tests/test_build.py
|
# $LicenseInfo:firstyear=2010&license=mit$
# Copyright (c) 2010, Linden Research, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# $/LicenseInfo$
import os
import sys
import logging
import pprint
import tempfile
import unittest
from .baseline_compare import AutobuildBaselineCompare
from autobuild import autobuild_tool_build as build
import autobuild.configfile as configfile
import autobuild.common as common
from autobuild.configfile import PACKAGE_METADATA_FILE, MetadataDescription
from autobuild.autobuild_tool_build import BuildError, AutobuildTool
from .basetest import BaseTest, clean_dir, exc
from .executables import envtest, noop, echo
# ****************************************************************************
# TODO
# - Test for specific --build-dir (new select_directories() mechanism)
# - Test building to configuration-specific build directory
# - Test building to build trees for --all configurations
# - Test building to build directory(ies) for specified --configuration(s)
# ****************************************************************************
logger = logging.getLogger("autobuild.test_build")
def build(*args):
"""
Some of our tests use BaseTest.autobuild() to run the build command as a
child process. Some call the build command in-process. This is the latter.
"""
AutobuildTool().main(list(args))
class LocalBase(BaseTest, AutobuildBaselineCompare):
def setUp(self):
BaseTest.setUp(self)
# We intend to ask our child autobuild command to run a script located
# in this directory. Make sure this directory is on child autobuild's
# PATH so we find it.
os.environ["PATH"] = os.pathsep.join([os.path.abspath(os.path.dirname(__file__)),
os.environ["PATH"]])
# Create and return a config file appropriate for this test class.
self.tmp_file = self.get_tmp_file()
self.tmp_build_dir=tempfile.mkdtemp(prefix=os.path.dirname(self.tmp_file)+"/build-")
self.config = self.get_config()
self.config.save()
def get_config(self):
config = configfile.ConfigurationDescription(self.tmp_file)
package = configfile.PackageDescription('test')
package.license = "LGPL"
package.license_file="LICENSES/file"
package.copyright="copy right"
platform = configfile.PlatformDescription()
platform.build_directory = self.tmp_build_dir
package.version_file = os.path.join(self.tmp_build_dir, "version.txt")
with open(package.version_file, "w") as vf:
vf.write("1.0\n")
build_configuration = configfile.BuildConfigurationDescription()
# Formally you might consider that noop.py is an "argument" rather
# than an "option" -- but the way Executable is structured, if we pass
# it as an "argument" then the "build" subcommand gets inserted before
# it, which thoroughly confuses the Python interpreter.
build_configuration.build = noop
build_configuration.default = True
build_configuration.name = 'Release'
platform.configurations['Release'] = build_configuration
package.platforms[common.get_current_platform()] = platform
config.package_description = package
return config
def tearDown(self):
self.cleanup_tmp_file()
if self.tmp_build_dir:
clean_dir(self.tmp_build_dir)
BaseTest.tearDown(self)
def read_metadata(self, platform=None):
# Metadata file is relative to the build directory. Find the build
# directory by drilling down to correct platform.
platforms = self.config.package_description.platforms
if platform:
platdata = platforms[platform]
else:
assert len(platforms) == 1, \
"read_metadata(no platform) ambiguous: " \
"pass one of %s" % ', '.join(list(platforms.keys()))
_, platdata = platforms.popitem()
return MetadataDescription(os.path.join(platdata.build_directory,
PACKAGE_METADATA_FILE))
class TestBuild(LocalBase):
def get_config(self):
config = super(TestBuild, self).get_config()
#config.package_description.version = "0"
logger.debug("config: %s" % pprint.pformat(config))
return config
def test_autobuild_build_default(self):
self.autobuild('build', '--no-configure', '--config-file=' + self.tmp_file, '--id=123456')
self.autobuild('build', '--config-file=' + self.tmp_file, '--id=123456', '--', '--foo', '-b')
metadata = self.read_metadata()
assert not metadata.package_description.version_file, \
"version_file erroneously propagated into metadata"
self.assertEqual(metadata.package_description.version, "1.0")
def test_autobuild_build_all(self):
self.autobuild('build', '--config-file=' + self.tmp_file, '--id=123456', '-a')
def test_autobuild_build_release(self):
self.autobuild('build', '--config-file=' + self.tmp_file, '-c', 'Release', '--id=123456')
class TestEnvironment(LocalBase):
def get_config(self):
config = super(TestEnvironment, self).get_config()
config.package_description.copyright="no copy"
# Formally you might consider that noop.py is an "argument" rather
# than an "option" -- but the way Executable is structured, if we pass
# it as an "argument" then the "build" subcommand gets inserted before
# it, which thoroughly confuses the Python interpreter.
config.package_description.platforms[common.get_current_platform()] \
.configurations["Release"].build = envtest
return config
def test_env(self):
# verify that the AUTOBUILD env var is set to point to something executable
self.autobuild('build', '--no-configure', '--config-file=' + self.tmp_file, '--id=123456')
class TestMissingPackageNameCurrent(LocalBase):
def get_config(self):
config = super(TestMissingPackageNameCurrent, self).get_config()
config.package_description.name = ""
return config
def test_autobuild_build(self):
# Make sure the verbose 'new requirement' message is only produced
# when the missing key is in fact version_file.
with exc(BuildError, "name", without="(?i)new requirement"):
build('build', '--config-file=' + self.tmp_file, '--id=123456')
class TestMissingPackageNameOld(LocalBase):
def get_config(self):
config = super(TestMissingPackageNameOld, self).get_config()
config.package_description.name = ""
config.version = "1.2"
return config
def test_autobuild_build(self):
# Make sure the verbose 'new requirement' message is only produced
# when the missing key is in fact version_file, especially with an
# older version config file.
with exc(BuildError, "name", without="(?i)new requirement"):
build('build', '--config-file=' + self.tmp_file, '--id=123456')
class TestMissingVersionFileCurrent(LocalBase):
def get_config(self):
config = super(TestMissingVersionFileCurrent, self).get_config()
config.package_description.version_file = ""
return config
def test_autobuild_build(self):
# Make sure the verbose 'new requirement' message isn't produced with
# a current format config file.
with exc(BuildError, "version_file", without="(?i)new requirement"):
build('build', '--config-file=' + self.tmp_file, '--id=123456')
class TestMissingVersionFileOld(LocalBase):
def get_config(self):
config = super(TestMissingVersionFileOld, self).get_config()
config.package_description.version_file = ""
config.version = "1.2"
return config
def test_autobuild_build(self):
# Make sure the verbose 'new requirement' message is produced when the
# missing key is version_file with an older version config file. The
# (?s) flag allows '.' to match newline, important because 'new
# requirement' may be on a different line of the exception message
# than the attribute name version_file.
with exc(BuildError, "(?is)version_file.*new requirement"):
build('build', '--config-file=' + self.tmp_file, '--id=123456')
class TestAbsentVersionFile(LocalBase):
def get_config(self):
config = super(TestAbsentVersionFile, self).get_config()
# nonexistent file
config.package_description.version_file = "venison.txt"
return config
def test_autobuild_build(self):
with exc(common.AutobuildError, "version_file"):
build('build', '--config-file=' + self.tmp_file, '--id=123456')
class TestEmptyVersionFile(LocalBase):
def get_config(self):
config = super(TestEmptyVersionFile, self).get_config()
# stomp the version_file with empty content
with open(config.package_description.version_file, "w"):
pass
return config
def test_autobuild_build(self):
with exc(common.AutobuildError, "version_file"):
build('build', '--config-file=' + self.tmp_file, '--id=123456')
class TestVersionFileOddWhitespace(LocalBase):
def get_config(self):
config = super(TestVersionFileOddWhitespace, self).get_config()
# overwrite the version_file
with open(config.package_description.version_file, "w") as vf:
vf.write(" 2.3 ")
return config
def test_autobuild_build(self):
build('build', '--config-file=' + self.tmp_file, '--id=123456')
self.assertEqual(self.read_metadata().package_description.version, "2.3")
class TestSubstitutions(LocalBase):
def get_config(self):
config = super(TestSubstitutions, self).get_config()
config.package_description.platforms[common.get_current_platform()] \
.configurations['Release'].build = echo("foo$AUTOBUILD_ADDRSIZE")
return config
def test_substitutions(self):
assert "foo32" in self.autobuild('build', '--config-file=' + self.tmp_file,
'-A', '32')
assert "foo64" in self.autobuild('build', '--config-file=' + self.tmp_file,
'-A', '64')
def test_id(self):
self.config.package_description.platforms[common.get_current_platform()] \
.configurations['Release'].build = echo("foo$AUTOBUILD_BUILD_ID")
self.config.save()
assert "foo666" in self.autobuild('build', '--config-file=' + self.tmp_file,
'-i', '666')
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
cmd/all.go
|
/*
Copyright © 2021 NAME HERE <EMAIL ADDRESS>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
ckafka "github.com/confluentinc/confluent-kafka-go/kafka"
"github.com/spf13/cobra"
"github.com/wwwillian/codepix-go/application/grpc"
"github.com/wwwillian/codepix-go/application/kafka"
"github.com/wwwillian/codepix-go/infrastructure/db"
"os"
)
var (
gRPCPortNumber int
)
// allCmd represents the all command
var allCmd = &cobra.Command{
Use: "all",
Short: "Run gRPC and a Kafka Consumer",
Run: func(cmd *cobra.Command, args []string) {
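// db.ConnectDB receives the value of the "env" environment variable
// (assumed to select which database configuration to use, e.g. "dev" or "test").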
database := db.ConnectDB(os.Getenv("env"))
go grpc.StartGrpcServer(database, gRPCPortNumber)
producer := kafka.NewKafkaProducer()
deliveryChan := make(chan ckafka.Event)
go kafka.DeliveryReport(deliveryChan)
kafkaProcessor := kafka.NewKafkaProcessor(database, producer, deliveryChan)
kafkaProcessor.Consume()
},
}
func init() {
rootCmd.AddCommand(allCmd)
allCmd.Flags().IntVarP(&gRPCPortNumber, "grpc-port", "p", 50051, "gRPC Port")
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// allCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// allCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
|
[
"\"env\""
] |
[] |
[
"env"
] |
[]
|
["env"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"encoding/xml"
"fmt"
"log"
"math"
"math/rand"
"net/http"
"os"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/PuerkitoBio/goquery"
"github.com/fatih/color"
"github.com/jzelinskie/geddit"
"github.com/skratchdot/open-golang/open"
"github.com/texttheater/golang-levenshtein/levenshtein"
cli "gopkg.in/urfave/cli.v2"
)
// App information and constants
const (
AppName = "hnreader"
AppVersion = "v1.1"
AppAuthor = "Bunchhieng Soth"
AppEmail = "[email protected]"
AppDescription = "Open multiple tech news feeds in your favorite browser through the command line."
HackerNewsURL = "https://news.ycombinator.com/news?p="
LobstersURL = "https://lobste.rs"
DZoneURL = "http://feeds.dzone.com/home"
DevToURL = "https://dev.to/feed"
)
// Supported operating systems (GOOS)
const (
OSDarwin = "darwin"
OSLinux = "linux"
OSWindows = "windows"
)
// Colors for console output
var blue = color.New(color.FgBlue, color.Bold).SprintFunc()
var yellow = color.New(color.FgYellow, color.Bold).SprintFunc()
var red = color.New(color.FgRed, color.Bold).SprintFunc()
// Rss represents a decoded RSS XML feed
type Rss struct {
Item []RssItem `xml:"channel>item"`
}
// RssItem is a single feed item containing a link to a news story
type RssItem struct {
Link string `xml:"link"`
}
type logWriter struct{}
// App contains author information
type App struct {
Name, Version, Email, Description, Author string
}
// Fetcher retrieves stories from a source.
type Fetcher interface {
Fetch(count int) (map[int]string, error)
}
// HackerNewsSource fetches new stories from news.ycombinator.com.
type HackerNewsSource struct{}
// Fetch gets news from HackerNews
func (hn *HackerNewsSource) Fetch(count int) (map[int]string, error) {
news := make(map[int]string)
// 30 news per page
pages := count / 30
newsIndex := 0
for i := 0; i <= pages; i++ {
// HackerNews pages are 1-indexed, so request page i+1 on each iteration
resp, err := http.Get(HackerNewsURL + strconv.Itoa(i+1))
if err != nil {
handleError(err)
continue
}
doc, err := goquery.NewDocumentFromReader(resp.Body)
if err != nil {
handleError(err)
continue
}
doc.Find("a.storylink").Each(func(_ int, s *goquery.Selection) {
href, exist := s.Attr("href")
if !exist {
fmt.Println(red("can't find any stories..."))
}
// keep a running index so stories from later pages do not overwrite earlier ones
news[newsIndex] = href
newsIndex++
})
resp.Body.Close()
}
return news, nil
}
// RedditSource fetches new stories from reddit.com/r/programming.
type RedditSource struct{}
// Fetch gets news from Reddit
func (rs *RedditSource) Fetch(count int) (map[int]string, error) {
news := make(map[int]string)
s := geddit.NewSession(fmt.Sprintf("desktop:com.github.Bunchhieng.%s:%s", AppName, AppVersion))
subs, err := s.SubredditSubmissions(
"programming",
geddit.HotSubmissions,
geddit.ListingOptions{
Count: count,
Limit: count,
},
)
if err != nil {
return news, err
}
for i, sub := range subs {
news[i] = sub.URL
}
return news, nil
}
// LobstersSource fetches new stories from https://lobste.rs
type LobstersSource struct{}
// Fetch gets news from Lobsters
func (l *LobstersSource) Fetch(count int) (map[int]string, error) {
offset := float64(count) / float64(25)
pages := int(math.Ceil(offset))
news := make(map[int]string)
newsIndex := 0
for p := 1; p <= pages; p++ {
url := fmt.Sprintf("%s/page/%d", LobstersURL, p)
resp, err := http.Get(url)
if err != nil {
handleError(err)
continue
}
doc, err := goquery.NewDocumentFromReader(resp.Body)
if err != nil {
handleError(err)
continue
}
doc.Find(".link a.u-url").Each(func(_ int, s *goquery.Selection) {
href, exist := s.Attr("href")
if !exist {
fmt.Println(red("can't find any stories..."))
}
if newsIndex >= count {
return
}
// if internal link
if strings.HasPrefix(href, "/") {
href = LobstersURL + href
}
news[newsIndex] = href
newsIndex++
})
resp.Body.Close()
}
return news, nil
}
// DZoneSource fetches latest stories from http://feeds.dzone.com/home
type DZoneSource struct{}
// Fetch gets news from DZone
func (l *DZoneSource) Fetch(count int) (map[int]string, error) {
news := make(map[int]string)
resp, err := http.Get(DZoneURL)
if err != nil {
return news, err
}
defer resp.Body.Close()
doc := Rss{}
d := xml.NewDecoder(resp.Body)
if err := d.Decode(&doc); err != nil {
return news, err
}
for i, item := range doc.Item {
if i >= count {
break
}
news[i] = item.Link
}
return news, nil
}
// DevToSource fetches latest stories from https://dev.to/
type DevToSource struct{}
// Fetch gets news from Dev.To
func (l *DevToSource) Fetch(count int) (map[int]string, error) {
news := make(map[int]string)
resp, err := http.Get(DevToURL)
if err != nil {
return news, err
}
defer resp.Body.Close()
doc := Rss{}
d := xml.NewDecoder(resp.Body)
if err := d.Decode(&doc); err != nil {
return news, err
}
for i, item := range doc.Item {
if i >= count {
break
}
news[i] = item.Link
}
return news, nil
}
// Init initializes the app
func Init() *App {
return &App{
Name: AppName,
Version: AppVersion,
Description: AppDescription,
Author: AppAuthor,
Email: AppEmail,
}
}
// Information prints out app information
func (app *App) Information() {
fmt.Println(blue(app.Name) + " - " + blue(app.Version))
fmt.Println(blue(app.Description))
}
func (writer logWriter) Write(bytes []byte) (int, error) {
return fmt.Print(yellow("[") + time.Now().UTC().Format("15:04:05") + yellow("]") + string(bytes))
}
// RunApp fetches the requested number of stories and opens each in the chosen browser
func RunApp(tabs int, browser string, src Fetcher) error {
news, err := src.Fetch(tabs)
handleError(err)
browser = findBrowser(browser)
// To store the keys in slice in sorted order
var keys []int
for k := range news {
keys = append(keys, k)
}
// Sort map keys
sort.Ints(keys)
for _, k := range keys {
if k == tabs {
break
}
var err error
if browser == "" {
err = open.Run(news[k])
} else {
err = open.RunWith(news[k], browser)
if err != nil {
fmt.Printf(red("%s is not found on this computer, trying default browser...\n"), browser)
err = open.Run(news[k])
}
}
if err != nil {
os.Exit(1)
}
}
return nil
}
// findBrowser matches the requested browser name to the closest supported browser
// (by Levenshtein distance) and normalizes it for the current OS
func findBrowser(target string) string {
if target == "" {
return ""
}
browsers := []string{"google", "chrome", "mozilla", "firefox", "brave"}
shortest := -1
word := ""
for _, browser := range browsers {
distance := levenshtein.DistanceForStrings([]rune(browser), []rune(target), levenshtein.DefaultOptions)
if distance == 0 {
word = browser
break
}
if distance <= shortest || shortest < 0 {
shortest = distance
word = browser
}
}
return getBrowserNameByOS(word, runtime.GOOS)
}
// getGoogleChromeNameForOS
func getGoogleChromeNameForOS(os string) string {
switch os {
case OSDarwin:
return "Google Chrome"
case OSLinux:
return "google-chrome"
case OSWindows:
return "chrome"
}
return ""
}
// getFirefoxNameForOS
func getFirefoxNameForOS(os string) string {
switch os {
case OSDarwin:
return "Firefox"
case OSLinux:
return "firefox"
case OSWindows:
return "firefox"
}
return ""
}
// getBraveNameForOS
func getBraveNameForOS(os string) string {
switch os {
case OSDarwin:
return "Brave"
case OSLinux:
return "brave"
case OSWindows:
return "brave"
}
return ""
}
// getBrowserNameByOS normalizes browser name
func getBrowserNameByOS(browserFromCLI, os string) string {
switch browserFromCLI {
case "google", "chrome":
return getGoogleChromeNameForOS(os)
case "mozilla", "firefox":
return getFirefoxNameForOS(os)
case "brave":
return getBraveNameForOS(os)
}
return ""
}
// checkGoPath checks for GOPATH
func checkGoPath() error {
gopath := os.Getenv("GOPATH")
if gopath == "" {
log.Fatal(red("$GOPATH isn't set up properly..."))
}
return nil
}
// handleError go convention
func handleError(err error) error {
if err != nil {
fmt.Println(red(err.Error()))
}
return nil
}
func init() {
log.SetFlags(0)
log.SetOutput(new(logWriter))
}
// removeIndex removes specific index from the slice
func removeIndex(slice []cli.Flag, s int) []cli.Flag {
return append(slice[:s], slice[s+1:]...)
}
// getAllFlags return all flags for the command line
func getAllFlags(includeSource bool) []cli.Flag {
flags := []cli.Flag{
&cli.UintFlag{
Name: "tabs",
Value: 10,
Aliases: []string{"t"},
Usage: "Specify number of tabs\t",
},
&cli.StringFlag{
Name: "browser",
Value: "",
Aliases: []string{"b"},
Usage: "Specify browser\t",
},
&cli.StringFlag{
Name: "source",
Value: "hn",
Aliases: []string{"s"},
Usage: "Specify news source (one of \"hn\", \"reddit\", \"lobsters\", \"dzone\", \"devto\")\t",
},
}
if !includeSource {
flags = removeIndex(flags, 2)
}
return flags
}
// getAllActions return all action for the command line
func getAllActions(c *cli.Context) error {
var src Fetcher
rand.Seed(time.Now().Unix())
srcName := ""
if c.Command.Name == "random" {
srcName = []string{"hn", "reddit", "lobsters", "dzone"}[rand.Intn(4)]
} else {
srcName = c.String("source")
}
switch srcName {
case "hn":
src = new(HackerNewsSource)
case "reddit":
src = new(RedditSource)
case "lobsters":
src = new(LobstersSource)
case "dzone":
src = new(DZoneSource)
}
return handleError(RunApp(c.Int("tabs"), c.String("browser"), src))
}
func main() {
app := Init()
cli := &cli.App{
Name: app.Name,
Version: app.Version,
Authors: []*cli.Author{
{
Name: app.Author,
Email: app.Email,
},
},
Usage: app.Description,
Commands: []*cli.Command{
{
Name: "run",
Aliases: []string{"r"},
Usage: "Start hnreader with default option (10 news and chrome browser)",
Flags: getAllFlags(true),
Action: getAllActions,
Before: func(c *cli.Context) error {
app.Information()
checkGoPath()
return nil
},
},
{
Name: "random",
Aliases: []string{"rr"},
Usage: "Start hnreader with a randomized source of news",
Flags: getAllFlags(false),
Action: getAllActions,
Before: func(c *cli.Context) error {
app.Information()
checkGoPath()
return nil
},
},
},
}
cli.Run(os.Args)
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
microservices/facade/src/main.py
|
import os
import bottle
import requests
import yaml
from bottle import request, response, get, put, post, delete
from wsgicors import CORS
from src.logger import get_module_logger
from src.common import validate_json_body
log = get_module_logger(__name__)
log.debug("Starting...")
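# Base URL of the backend API; can be overridden with the API_URL environment variable.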
api_url = os.environ.get("API_URL", "http://backend:9000")
with open("config.yaml") as config_file:
config = yaml.load(config_file, Loader=yaml.FullLoader)
@get("/api/properties/all")
def list_all_properties():
try:
response_json = requests.get("{}/properties".format(api_url)).json()
properties_list = {
"list": [],
"center": [60.2060265, 24.9118616]
}
if response_json.get("isSuccessful") == True:
properties_list["list"] = response_json.get("body", [])
"""
Calculate center coordinate of the list.
The intention is to provide an approximate center point
for displaying the whole list on map component.
Curvature of the Earth wasn't considered.
"""
list_length = len(properties_list["list"])
if list_length:
x = [item.get("position").get("longitude") for item in properties_list["list"]]
y = [item.get("position").get("latitude") for item in properties_list["list"]]
properties_list["center"] = [sum(y) / list_length, sum(x) / list_length]
return properties_list
except Exception as e:
log.error("ERROR: Could not get list - {}".format(e))
response.status = 400
return
@get("/api/properties/<id>")
def get_property_by_id(id):
try:
response_json = requests.get("{}/properties/{}".format(api_url, id)).json()
return response_json.get("body")
except Exception as e:
log.error("ERROR: Could not get property with ID {} - {}".format(id, e))
response.status = 400
return
@post("/api/properties")
@validate_json_body(schema_filename="schema/create_property_schema.json")
def create_property():
body = request.json
try:
# use a distinct name so bottle's module-level `response` object is not shadowed
api_response = requests.post("{}/properties".format(api_url), json=body)
return api_response
except Exception as e:
log.error("ERROR: Could not create property - {}".format(e))
response.status = 400
return
@put("/api/properties")
@validate_json_body(schema_filename="schema/update_property_schema.json")
def update_property():
body = request.json
try:
api_response = requests.put("{}/properties".format(api_url), json=body)
return api_response
except Exception as e:
log.error("ERROR: Could not update property - {}".format(e))
response.status = 400
return
@delete("/api/properties/<id>")
def delete_property(id):
try:
existing_property = get_property_by_id(id)
if not isinstance(existing_property, str):
api_response = requests.delete("{}/properties/{}".format(api_url, id))
return api_response
else:
log.debug("ERROR: Could not delete property - {}".format(existing_property))
except Exception as e:
log.error("ERROR: Could not delete property with ID {} - {}".format(id, e))
response.status = 400
return
corscfg = config.get("cors")
app = CORS(
bottle.app(),
headers=corscfg["headers"],
methods=corscfg["methods"],
origin=corscfg["origin"],
expose_headers=corscfg["expose_headers"],
)
if __name__ == "__main__":
bottle.run(
app=app,
debug=True,
reloader=True,
server="gunicorn",
workers=1
)
|
[] |
[] |
[
"API_URL"
] |
[]
|
["API_URL"]
|
python
| 1 | 0 | |
commands/onboard.go
|
package commands
import (
"bytes"
"encoding/json"
"io"
"net/http"
"net/url"
"os"
"github.com/RTradeLtd/ca-certificates/authority"
"github.com/RTradeLtd/ca-certificates/ca"
"github.com/RTradeLtd/ca-certificates/pki"
"github.com/RTradeLtd/ca-cli/command"
"github.com/RTradeLtd/ca-cli/crypto/randutil"
"github.com/RTradeLtd/ca-cli/errs"
"github.com/RTradeLtd/ca-cli/ui"
"github.com/RTradeLtd/ca-cli/utils"
"github.com/pkg/errors"
"github.com/urfave/cli"
)
// defaultOnboardingURL is the production onboarding url, to use a development
// url use:
// export STEP_CA_ONBOARDING_URL=http://localhost:3002/onboarding/
const defaultOnboardingURL = "https://api.smallstep.com/onboarding/"
type onboardingConfiguration struct {
Name string `json:"name"`
DNS string `json:"dns"`
Address string `json:"address"`
password []byte
}
type onboardingPayload struct {
Fingerprint string `json:"fingerprint"`
}
type onboardingError struct {
StatusCode int `json:"statusCode"`
Message string `json:"message"`
}
func (e onboardingError) Error() string {
return e.Message
}
func init() {
command.Register(cli.Command{
Name: "onboard",
Usage: "configure and run step-ca from the onboarding guide",
UsageText: "**step-ca onboard** <token>",
Action: onboardAction,
Description: `**step-ca onboard** configures step certificates using the onboarding guide.
Open https://smallstep.com/onboarding in your browser and start the CA with the
given token:
'''
$ step-ca onboard <token>
'''
## POSITIONAL ARGUMENTS
<token>
: The token string provided by the onboarding guide.`,
})
}
func onboardAction(ctx *cli.Context) error {
if ctx.NArg() == 0 {
return cli.ShowCommandHelp(ctx, "onboard")
}
if err := errs.NumberOfArguments(ctx, 1); err != nil {
return err
}
// Get onboarding url
onboarding := defaultOnboardingURL
if v := os.Getenv("STEP_CA_ONBOARDING_URL"); v != "" {
onboarding = v
}
u, err := url.Parse(onboarding)
if err != nil {
return errors.Wrapf(err, "error parsing %s", onboarding)
}
ui.Println("Connecting to onboarding guide...")
token := ctx.Args().Get(0)
onboardingURL := u.ResolveReference(&url.URL{Path: token}).String()
res, err := http.Get(onboardingURL)
if err != nil {
return errors.Wrap(err, "error connecting onboarding guide")
}
if res.StatusCode >= 400 {
var msg onboardingError
if err := readJSON(res.Body, &msg); err != nil {
return errors.Wrap(err, "error unmarshaling response")
}
return errors.Wrap(msg, "error receiving onboarding guide")
}
var config onboardingConfiguration
if err := readJSON(res.Body, &config); err != nil {
return errors.Wrap(err, "error unmarshaling response")
}
password, err := randutil.ASCII(32)
if err != nil {
return err
}
config.password = []byte(password)
ui.Println("Initializing step-ca with the following configuration:")
ui.PrintSelected("Name", config.Name)
ui.PrintSelected("DNS", config.DNS)
ui.PrintSelected("Address", config.Address)
ui.PrintSelected("Password", password)
ui.Println()
caConfig, fp, err := onboardPKI(config)
if err != nil {
return err
}
payload, err := json.Marshal(onboardingPayload{Fingerprint: fp})
if err != nil {
return errors.Wrap(err, "error marshaling payload")
}
resp, err := http.Post(onboardingURL, "application/json", bytes.NewBuffer(payload))
if err != nil {
return errors.Wrap(err, "error connecting onboarding guide")
}
if resp.StatusCode >= 400 {
var msg onboardingError
if err := readJSON(resp.Body, &msg); err != nil {
ui.Printf("%s {{ \"error unmarshalling response: %v\" | yellow }}\n", ui.IconWarn, err)
} else {
ui.Printf("%s {{ \"error posting fingerprint: %s\" | yellow }}\n", ui.IconWarn, msg.Message)
}
} else {
resp.Body.Close()
}
ui.Println("Initialized!")
ui.Println("Step CA is starting. Please return to the onboarding guide in your browser to continue.")
srv, err := ca.New(caConfig, ca.WithPassword(config.password))
if err != nil {
fatal(err)
}
go ca.StopReloaderHandler(srv)
if err = srv.Run(); err != nil && err != http.ErrServerClosed {
fatal(err)
}
return nil
}
func onboardPKI(config onboardingConfiguration) (*authority.Config, string, error) {
p, err := pki.New(pki.GetPublicPath(), pki.GetSecretsPath(), pki.GetConfigPath())
if err != nil {
return nil, "", err
}
p.SetAddress(config.Address)
p.SetDNSNames([]string{config.DNS})
ui.Println("Generating root certificate...")
rootCrt, rootKey, err := p.GenerateRootCertificate(config.Name+" Root CA", config.password)
if err != nil {
return nil, "", err
}
ui.Println("Generating intermediate certificate...")
err = p.GenerateIntermediateCertificate(config.Name+" Intermediate CA", rootCrt, rootKey, config.password)
if err != nil {
return nil, "", err
}
// Generate provisioner
p.SetProvisioner("admin")
ui.Println("Generating admin provisioner...")
if err = p.GenerateKeyPairs(config.password); err != nil {
return nil, "", err
}
// Generate and write configuration
caConfig, err := p.GenerateConfig()
if err != nil {
return nil, "", err
}
b, err := json.MarshalIndent(caConfig, "", " ")
if err != nil {
return nil, "", errors.Wrapf(err, "error marshaling %s", p.GetCAConfigPath())
}
if err = utils.WriteFile(p.GetCAConfigPath(), b, 0666); err != nil {
return nil, "", errs.FileError(err, p.GetCAConfigPath())
}
return caConfig, p.GetRootFingerprint(), nil
}
func readJSON(r io.ReadCloser, v interface{}) error {
defer r.Close()
return json.NewDecoder(r).Decode(v)
}
|
[
"\"STEP_CA_ONBOARDING_URL\""
] |
[] |
[
"STEP_CA_ONBOARDING_URL"
] |
[]
|
["STEP_CA_ONBOARDING_URL"]
|
go
| 1 | 0 | |
aiosmtpd/controller.py
|
# Copyright 2014-2021 The aiosmtpd Developers
# SPDX-License-Identifier: Apache-2.0
import asyncio
import errno
import os
import ssl
import sys
import threading
import time
from abc import ABCMeta, abstractmethod
from contextlib import ExitStack
from pathlib import Path
from socket import AF_INET6, SOCK_STREAM, create_connection, has_ipv6
from socket import socket as makesock
from socket import timeout as socket_timeout
try:
from socket import AF_UNIX
except ImportError: # pragma: on-not-win32
AF_UNIX = None
from typing import Any, Coroutine, Dict, Optional, Union
if sys.version_info >= (3, 8):
from typing import Literal # pragma: py-lt-38
else: # pragma: py-ge-38
from typing_extensions import Literal
from warnings import warn
from public import public
from aiosmtpd.smtp import SMTP
AsyncServer = asyncio.base_events.Server
DEFAULT_READY_TIMEOUT: float = 5.0
@public
class IP6_IS:
# Apparently errno.E* constants adapt to the OS, so on Windows they will
# automatically use the WSAE* constants
NO = {errno.EADDRNOTAVAIL, errno.EAFNOSUPPORT}
YES = {errno.EADDRINUSE}
def _has_ipv6() -> bool:
# Helper function to assist in mocking
return has_ipv6
@public
def get_localhost() -> Literal["::1", "127.0.0.1"]:
"""Returns numeric address to localhost depending on IPv6 availability"""
# Ref:
# - https://github.com/urllib3/urllib3/pull/611#issuecomment-100954017
# - https://github.com/python/cpython/blob/ :
# - v3.6.13/Lib/test/support/__init__.py#L745-L758
# - v3.9.1/Lib/test/support/socket_helper.py#L124-L137
if not _has_ipv6():
# socket.has_ipv6 only tells us of current Python's IPv6 support, not the
# system's. But if the current Python does not support IPv6, it's pointless to
# explore further.
return "127.0.0.1"
try:
with makesock(AF_INET6, SOCK_STREAM) as sock:
sock.bind(("::1", 0))
# If we reach this point, that means we can successfully bind ::1 (on random
# unused port), so IPv6 is definitely supported
return "::1"
except OSError as e:
if e.errno in IP6_IS.NO:
return "127.0.0.1"
if e.errno in IP6_IS.YES:
# We shouldn't ever get these errors, but if we do, that means IPv6 is
# supported
return "::1"
# Other kinds of errors MUST be raised so we can inspect
raise
class _FakeServer(asyncio.StreamReaderProtocol):
"""
Returned by _factory_invoker() in lieu of an SMTP instance in case
factory() failed to instantiate an SMTP instance.
"""
def __init__(self, loop: asyncio.AbstractEventLoop):
# Imitate what SMTP does
super().__init__(
asyncio.StreamReader(loop=loop),
client_connected_cb=self._client_connected_cb,
loop=loop,
)
def _client_connected_cb(
self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
) -> None:
pass
@public
class BaseController(metaclass=ABCMeta):
smtpd = None
server: Optional[AsyncServer] = None
server_coro: Optional[Coroutine] = None
_factory_invoked: threading.Event = None
def __init__(
self,
handler: Any,
loop: asyncio.AbstractEventLoop = None,
*,
ssl_context: Optional[ssl.SSLContext] = None,
# SMTP parameters
server_hostname: Optional[str] = None,
**SMTP_parameters,
):
self.handler = handler
if loop is None:
self.loop = asyncio.new_event_loop()
else:
self.loop = loop
self.ssl_context = ssl_context
self.SMTP_kwargs: Dict[str, Any] = {}
if "server_kwargs" in SMTP_parameters:
warn(
"server_kwargs will be removed in version 2.0. "
"Just specify the keyword arguments to forward to SMTP "
"as kwargs to this __init__ method.",
DeprecationWarning,
)
self.SMTP_kwargs = SMTP_parameters.pop("server_kwargs")
self.SMTP_kwargs.update(SMTP_parameters)
if server_hostname:
self.SMTP_kwargs["hostname"] = server_hostname
# Emulate previous behavior of defaulting enable_SMTPUTF8 to True
# It actually conflicts with SMTP class's default, but the reasoning is
# discussed in the docs.
self.SMTP_kwargs.setdefault("enable_SMTPUTF8", True)
#
self._factory_invoked = threading.Event()
def factory(self):
"""Subclasses can override this to customize the handler/server creation."""
return SMTP(self.handler, **self.SMTP_kwargs)
def _factory_invoker(self) -> Union[SMTP, _FakeServer]:
"""Wraps factory() to catch exceptions during instantiation"""
try:
self.smtpd = self.factory()
if self.smtpd is None:
raise RuntimeError("factory() returned None")
return self.smtpd
except Exception as err:
self._thread_exception = err
return _FakeServer(self.loop)
finally:
self._factory_invoked.set()
@abstractmethod
def _create_server(self) -> Coroutine:
"""
Overridden by subclasses to actually perform the async binding to the
listener endpoint. When overridden, MUST refer the _factory_invoker() method.
"""
raise NotImplementedError
def _cleanup(self):
"""Reset internal variables to prevent contamination"""
self._thread_exception = None
self._factory_invoked.clear()
self.server_coro = None
self.server = None
self.smtpd = None
def cancel_tasks(self, stop_loop: bool = True):
"""
Convenience method to stop the loop and cancel all tasks.
Use loop.call_soon_threadsafe() to invoke this.
"""
if stop_loop: # pragma: nobranch
self.loop.stop()
try:
_all_tasks = asyncio.all_tasks # pytype: disable=module-attr
except AttributeError: # pragma: py-gt-36
_all_tasks = asyncio.Task.all_tasks
for task in _all_tasks(self.loop):
# This needs to be invoked in a thread-safe way
task.cancel()
@public
class BaseThreadedController(BaseController, metaclass=ABCMeta):
_thread: Optional[threading.Thread] = None
_thread_exception: Optional[Exception] = None
def __init__(
self,
handler: Any,
loop: asyncio.AbstractEventLoop = None,
*,
ready_timeout: float = DEFAULT_READY_TIMEOUT,
ssl_context: Optional[ssl.SSLContext] = None,
# SMTP parameters
server_hostname: Optional[str] = None,
**SMTP_parameters,
):
super().__init__(
handler,
loop,
ssl_context=ssl_context,
server_hostname=server_hostname,
**SMTP_parameters,
)
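# The AIOSMTPD_CONTROLLER_TIMEOUT environment variable, if set, overrides the ready_timeout argument.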
self.ready_timeout = float(
os.getenv("AIOSMTPD_CONTROLLER_TIMEOUT", ready_timeout)
)
@abstractmethod
def _trigger_server(self):
"""
Overridden by subclasses to trigger asyncio to actually initialize the SMTP
class (initialization is lazy; it only happens on the first connection).
"""
raise NotImplementedError
def _run(self, ready_event: threading.Event) -> None:
asyncio.set_event_loop(self.loop)
try:
# Need to do two-step assignments here to ensure IDEs can properly
# detect the types of the vars. Cannot use `assert isinstance`, because
# Python 3.6 in asyncio debug mode has a bug wherein CoroWrapper is not
# an instance of Coroutine
self.server_coro = self._create_server()
srv: AsyncServer = self.loop.run_until_complete(self.server_coro)
self.server = srv
except Exception as error: # pragma: on-wsl
# Usually will enter this part only if create_server() cannot bind to the
# specified host:port.
#
# Somehow WSL 1.0 (Windows Subsystem for Linux) allows multiple
# listeners on one port?!
# That is why we add "pragma: on-wsl" there, so this block will not affect
# coverage on WSL 1.0.
self._thread_exception = error
return
self.loop.call_soon(ready_event.set)
self.loop.run_forever()
# We reach this point when loop is ended (by external code)
# Perform some stoppages to ensure endpoint no longer bound.
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.loop.close()
self.server = None
def start(self):
"""
Start a thread and run the asyncio event loop in that thread
"""
assert self._thread is None, "SMTP daemon already running"
self._factory_invoked.clear()
ready_event = threading.Event()
self._thread = threading.Thread(target=self._run, args=(ready_event,))
self._thread.daemon = True
self._thread.start()
# Wait a while until the server is responding.
start = time.monotonic()
if not ready_event.wait(self.ready_timeout):
# An exception within self._run will also result in ready_event not set
# So, we first test for that, before raising TimeoutError
if self._thread_exception is not None: # pragma: on-wsl
# See comment about WSL1.0 in the _run() method
raise self._thread_exception
else:
raise TimeoutError(
"SMTP server failed to start within allotted time. "
"This might happen if the system is too busy. "
"Try increasing the `ready_timeout` parameter."
)
respond_timeout = self.ready_timeout - (time.monotonic() - start)
# Apparently create_server invokes factory() "lazily", so exceptions in
# factory() go undetected. To trigger factory() invocation we need to open
# a connection to the server and 'exchange' some traffic.
try:
self._trigger_server()
except socket_timeout:
# We don't care about a timeout experienced by _trigger_server here,
pass
except Exception:
# Raise other exceptions though
raise
if not self._factory_invoked.wait(respond_timeout):
raise TimeoutError(
"SMTP server started, but not responding within allotted time. "
"This might happen if the system is too busy. "
"Try increasing the `ready_timeout` parameter."
)
if self._thread_exception is not None:
raise self._thread_exception
# Defensive
if self.smtpd is None:
raise RuntimeError("Unknown Error, failed to init SMTP server")
def stop(self, no_assert: bool = False):
"""
Stop the loop, the tasks in the loop, and terminate the thread as well.
"""
assert no_assert or self._thread is not None, "SMTP daemon not running"
self.loop.call_soon_threadsafe(self.cancel_tasks)
if self._thread is not None:
self._thread.join()
self._thread = None
self._cleanup()
@public
class BaseUnthreadedController(BaseController, metaclass=ABCMeta):
def __init__(
self,
handler: Any,
loop: asyncio.AbstractEventLoop = None,
*,
ssl_context: Optional[ssl.SSLContext] = None,
# SMTP parameters
server_hostname: Optional[str] = None,
**SMTP_parameters,
):
super().__init__(
handler,
loop,
ssl_context=ssl_context,
server_hostname=server_hostname,
**SMTP_parameters,
)
self.ended = threading.Event()
def begin(self):
"""
Sets up the asyncio server task and inject it into the asyncio event loop.
Does NOT actually start the event loop itself.
"""
asyncio.set_event_loop(self.loop)
# Need to do two-step assignments here to ensure IDEs can properly
# detect the types of the vars. Cannot use `assert isinstance`, because
# Python 3.6 in asyncio debug mode has a bug wherein CoroWrapper is not
# an instance of Coroutine
self.server_coro = self._create_server()
srv: AsyncServer = self.loop.run_until_complete(self.server_coro)
self.server = srv
async def finalize(self):
"""
Perform orderly closing of the server listener.
NOTE: This is an async method; await this from an async or use
loop.create_task() (if loop is still running), or
loop.run_until_complete() (if loop has stopped)
"""
self.ended.clear()
server = self.server
server.close()
await server.wait_closed()
self.server_coro.close()
self._cleanup()
self.ended.set()
def end(self):
"""
Convenience method to asynchronously invoke finalize().
Consider using loop.call_soon_threadsafe to invoke this method, especially
if your loop is running in a different thread. You can afterwards .wait() on
ended attribute (a threading.Event) to check for completion, if needed.
"""
self.ended.clear()
if self.loop.is_running():
self.loop.create_task(self.finalize())
else:
self.loop.run_until_complete(self.finalize())
@public
class InetMixin(BaseController, metaclass=ABCMeta):
def __init__(
self,
handler: Any,
hostname: Optional[str] = None,
port: int = 8025,
loop: asyncio.AbstractEventLoop = None,
**kwargs,
):
super().__init__(
handler,
loop,
**kwargs,
)
self._localhost = get_localhost()
self.hostname = self._localhost if hostname is None else hostname
self.port = port
def _create_server(self) -> Coroutine:
"""
Creates a 'server task' that listens on an INET host:port.
Does NOT actually start the protocol object itself;
_factory_invoker() is only called upon first connection attempt.
"""
return self.loop.create_server(
self._factory_invoker,
host=self.hostname,
port=self.port,
ssl=self.ssl_context,
)
def _trigger_server(self):
"""
Opens a socket connection to the newly launched server, wrapping in an SSL
Context if necessary, and read some data from it to ensure that factory()
gets invoked.
"""
# At this point, if self.hostname is Falsy, it most likely is "" (bind to all
# addresses). In such a case, it should be safe to connect to localhost.
hostname = self.hostname or self._localhost
with ExitStack() as stk:
s = stk.enter_context(create_connection((hostname, self.port), 1.0))
if self.ssl_context:
s = stk.enter_context(self.ssl_context.wrap_socket(s))
s.recv(1024)
@public
class UnixSocketMixin(BaseController, metaclass=ABCMeta): # pragma: no-unixsock
def __init__(
self,
handler: Any,
unix_socket: Union[str, Path],
loop: asyncio.AbstractEventLoop = None,
**kwargs,
):
super().__init__(
handler,
loop,
**kwargs,
)
self.unix_socket = str(unix_socket)
def _create_server(self) -> Coroutine:
"""
Creates a 'server task' that listens on a Unix Socket file.
Does NOT actually start the protocol object itself;
_factory_invoker() is only called upon first connection attempt.
"""
return self.loop.create_unix_server(
self._factory_invoker,
path=self.unix_socket,
ssl=self.ssl_context,
)
def _trigger_server(self):
"""
Opens a socket connection to the newly launched server, wrapping in an SSL
Context if necessary, and read some data from it to ensure that factory()
gets invoked.
"""
with ExitStack() as stk:
s: makesock = stk.enter_context(makesock(AF_UNIX, SOCK_STREAM))
s.connect(self.unix_socket)
if self.ssl_context:
s = stk.enter_context(self.ssl_context.wrap_socket(s))
s.recv(1024)
@public
class Controller(InetMixin, BaseThreadedController):
"""Provides a multithreaded controller that listens on an INET endpoint"""
def _trigger_server(self):
# Prevent confusion on which _trigger_server() to invoke.
# Or so LGTM.com claimed
InetMixin._trigger_server(self)
@public
class UnixSocketController( # pragma: no-unixsock
UnixSocketMixin, BaseThreadedController
):
"""Provides a multithreaded controller that listens on a Unix Socket file"""
def _trigger_server(self): # pragma: no-unixsock
# Prevent confusion on which _trigger_server() to invoke.
# Or so LGTM.com claimed
UnixSocketMixin._trigger_server(self)
@public
class UnthreadedController(InetMixin, BaseUnthreadedController):
"""Provides an unthreaded controller that listens on an INET endpoint"""
pass
@public
class UnixSocketUnthreadedController( # pragma: no-unixsock
UnixSocketMixin, BaseUnthreadedController
):
"""Provides an unthreaded controller that listens on a Unix Socket file"""
pass
|
[] |
[] |
[
"AIOSMTPD_CONTROLLER_TIMEOUT"
] |
[]
|
["AIOSMTPD_CONTROLLER_TIMEOUT"]
|
python
| 1 | 0 | |
databricks/koala/__init__.py
|
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .utils import *
from .namespace import *
from .typing import Col, pandas_wrap
__all__ = ['patch_spark', 'read_csv', 'Col', 'pandas_wrap']
def _auto_patch():
import os
import logging
# Autopatching is on by default.
x = os.getenv("SPARK_PANDAS_AUTOPATCH", "true")
if x.lower() in ("true", "1", "enabled"):
logger = logging.getLogger('spark')
logger.info("Patching spark automatically. You can disable it by setting "
"SPARK_PANDAS_AUTOPATCH=false in your environment")
patch_spark()
_auto_patch()
|
[] |
[] |
[
"SPARK_PANDAS_AUTOPATCH"
] |
[]
|
["SPARK_PANDAS_AUTOPATCH"]
|
python
| 1 | 0 | |
cmd/root.go
|
// Copyright © 2018 Ken'ichiro Oyama <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
sortpkg "sort"
"strconv"
"strings"
"github.com/k1LoW/tbls/cmdutil"
"github.com/k1LoW/tbls/config"
"github.com/k1LoW/tbls/datasource"
"github.com/k1LoW/tbls/output/json"
"github.com/k1LoW/tbls/version"
"github.com/spf13/cobra"
)
// adjust is a flag on whether to adjust the notation width of the table
var adjust bool
// force is a flag on whether to force generation
var force bool
// sort is a flag on whether to sort tables, columns, and more
var sort bool
// configPath is a config file path
var configPath string
// erFormat is an option specifying the ER diagram file format
var erFormat string
// when is an option specifying the condition under which the command is executed
var when string
var baseUrl string
const rootUsageTemplate = `Usage:{{if .Runnable}}{{if ne .UseLine "tbls [flags]" }}
{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
Global Flags:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasHelpSubCommands}}
Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`
var subCmds = []string{}
// rootCmd represents the base command when called without any subcommands
var rootCmd = &cobra.Command{
Use: "tbls",
Short: "tbls is a CI-Friendly tool for document a database, written in Go.",
Long: `tbls is a CI-Friendly tool for document a database, written in Go.`,
SilenceErrors: true,
SilenceUsage: true,
Args: cobra.ArbitraryArgs,
DisableFlagParsing: true,
ValidArgsFunction: genValidArgsFunc("tbls"),
RunE: func(cmd *cobra.Command, args []string) error {
configPath, args := cmdutil.PickOption(args, []string{"-c", "--config"})
when, args := cmdutil.PickOption(args, []string{"--when"})
if allow, err := cmdutil.IsAllowedToExecute(when); !allow || err != nil {
if err != nil {
return err
}
return nil
}
if len(args) == 0 {
cmd.Println(cmd.UsageString())
return nil
}
envs := os.Environ()
subCmd := args[0]
path, err := exec.LookPath(version.Name + "-" + subCmd)
if err != nil {
if strings.HasPrefix(subCmd, "-") {
cmd.PrintErrf("Error: unknown flag: '%s'\n", subCmd)
cmd.HelpFunc()(cmd, args)
return nil
}
cmd.PrintErrf("Error: unknown command \"%s\" for \"%s\"\n", subCmd, version.Name)
cmd.PrintErrf("Run '%s --help' for usage.\n", version.Name)
return nil
}
args = args[1:]
cfg, err := config.New()
if err != nil {
return err
}
err = cfg.Load(configPath)
if err != nil {
return err
}
if cfg.DSN.URL != "" {
s, err := datasource.Analyze(cfg.DSN)
if err != nil {
return err
}
if err := cfg.ModifySchema(s); err != nil {
return err
}
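// Expose the DSN, the config path, and a temporary JSON dump of the analyzed schema
// to the external subcommand via TBLS_* environment variables.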
envs = append(envs, fmt.Sprintf("TBLS_DSN=%s", cfg.DSN.URL))
envs = append(envs, fmt.Sprintf("TBLS_CONFIG_PATH=%s", cfg.Path))
o := json.New(true)
tmpfile, err := ioutil.TempFile("", "TBLS_SCHEMA")
if err != nil {
return err
}
defer os.Remove(tmpfile.Name())
if err := o.OutputSchema(tmpfile, s); err != nil {
return err
}
envs = append(envs, fmt.Sprintf("TBLS_SCHEMA=%s", tmpfile.Name()))
}
c := exec.Command(path, args...) // #nosec
c.Env = envs
c.Stdout = os.Stdout
c.Stdin = os.Stdin
c.Stderr = os.Stderr
if err := c.Run(); err != nil {
return err
}
return nil
},
}
func Execute() {
var err error
subCmds, err = getExtSubCmds("tbls")
if err != nil {
printError(err)
os.Exit(1)
}
if err := rootCmd.Execute(); err != nil {
printError(err)
os.Exit(1)
}
}
func init() {
rootCmd.SetUsageTemplate(rootUsageTemplate)
rootCmd.Flags().StringVarP(&when, "when", "", "", "command execute condition")
rootCmd.Flags().StringVarP(&configPath, "config", "c", "", "config file path")
}
// genValidArgsFunc returns a cobra ValidArgsFunction that completes external sub-commands
// found for the given prefix, delegating to a selected sub-command's own "__complete" command.
func genValidArgsFunc(prefix string) func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
toC := toComplete
if len(args) > 0 {
toC = args[0]
}
completions := []string{}
for _, subCmd := range subCmds {
trimmed := strings.TrimPrefix(subCmd, fmt.Sprintf("%s-", prefix))
switch {
case len(args) == 0 && toComplete == "":
completions = append(completions, fmt.Sprintf("%s\t%s", trimmed, subCmd))
case trimmed == toC && len(args) > 0:
// exec external sub-command "__complete"
subCmdArgs := []string{"__complete"}
subCmdArgs = append(subCmdArgs, args[1:]...)
subCmdArgs = append(subCmdArgs, toComplete)
out, err := exec.Command(subCmd, subCmdArgs...).Output() // #nosec
if err != nil {
return []string{}, cobra.ShellCompDirectiveError
}
lines := strings.Split(strings.TrimRight(string(out), "\n"), "\n")
completions = append(completions, lines[:len(lines)-1]...)
case trimmed != strings.TrimPrefix(trimmed, toC):
completions = append(completions, fmt.Sprintf("%s\t%s", trimmed, subCmd))
}
}
return completions, cobra.ShellCompDirectiveNoFileComp
}
}
// getExtSubCmds scans each directory on PATH for executables named "<prefix>-*"
// and returns them as candidate external sub-commands.
func getExtSubCmds(prefix string) ([]string, error) {
subCmds := []string{}
paths := unique(filepath.SplitList(os.Getenv("PATH")))
for _, p := range paths {
if strings.TrimSpace(p) == "" {
continue
}
files, err := ioutil.ReadDir(p)
if err != nil {
continue
}
for _, f := range files {
if f.IsDir() {
continue
}
if !strings.HasPrefix(f.Name(), fmt.Sprintf("%s-", prefix)) {
continue
}
mode := f.Mode()
if mode&0111 == 0 {
continue
}
subCmds = append(subCmds, f.Name())
}
}
sortpkg.Strings(subCmds)
return unique(subCmds), nil
}
func printError(err error) {
env := os.Getenv("DEBUG")
debug, _ := strconv.ParseBool(env)
if env != "" && debug {
fmt.Printf("%+v\n", err)
} else {
fmt.Println(err)
}
}
func unique(paths []string) []string {
exist := map[string]bool{}
np := []string{}
for _, p := range paths {
if exist[p] {
continue
}
exist[p] = true
np = append(np, p)
}
return np
}
|
[
"\"PATH\"",
"\"DEBUG\""
] |
[] |
[
"PATH",
"DEBUG"
] |
[]
|
["PATH", "DEBUG"]
|
go
| 2 | 0 | |
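
Note on the entry above: the tbls root command passes context to external sub-commands purely through the environment. It sets TBLS_DSN, TBLS_CONFIG_PATH, and TBLS_SCHEMA (a path to a temporary JSON dump of the analyzed schema) before exec-ing any tbls-<name> binary found on PATH. Below is a minimal, hypothetical sketch of such an external sub-command; the binary name tbls-hello and the assumption that the schema JSON exposes top-level name and tables fields are illustrative, not guaranteed by tbls.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Hypothetical external sub-command "tbls-hello": it relies only on the
// environment variables the root command sets before exec-ing the child process.
func main() {
	schemaPath := os.Getenv("TBLS_SCHEMA")
	if schemaPath == "" {
		fmt.Fprintln(os.Stderr, "TBLS_SCHEMA is not set; run this via `tbls hello`")
		os.Exit(1)
	}
	raw, err := os.ReadFile(schemaPath)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Decode generically; a real sub-command could use the tbls schema types instead.
	var schema struct {
		Name   string            `json:"name"`
		Tables []json.RawMessage `json:"tables"`
	}
	if err := json.Unmarshal(raw, &schema); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("config: %s\n", os.Getenv("TBLS_CONFIG_PATH"))
	fmt.Printf("schema %q has %d tables\n", schema.Name, len(schema.Tables))
}
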
main_test.go
|
package main
import (
"testing"
"flag"
"fmt"
"os"
"path/filepath"
"./config"
)
func TestDr(t *testing.T){
currentDir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
panic(err)
}
configDir := flag.String("config_dir", currentDir, "find your configuration file location")
if err := config.InitConfig(*configDir); err != nil {
panic(err)
}
host := os.Getenv("HOST")
tokenExp := os.Getenv("TOKEN_EXPIRED_MINUTES")
fmt.Println("app running on " + host)
fmt.Println("token expires after " + tokenExp + " minutes")
}
|
[
"\"HOST\"",
"\"TOKEN_EXPIRED_MINUTES\""
] |
[] |
[
"HOST",
"TOKEN_EXPIRED_MINUTES"
] |
[]
|
["HOST", "TOKEN_EXPIRED_MINUTES"]
|
go
| 2 | 0 | |
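
The test in the entry above only echoes HOST and TOKEN_EXPIRED_MINUTES. As a rough sketch (independent of the config package referenced above), a variant could pin both variables for the test process and check that the expiry value parses as a positive integer:

package main

import (
	"os"
	"strconv"
	"testing"
)

// TestEnvValues is an illustrative sketch only: it sets the two variables for
// this test's lifetime and validates the numeric one before use.
func TestEnvValues(t *testing.T) {
	t.Setenv("HOST", "127.0.0.1:8080")
	t.Setenv("TOKEN_EXPIRED_MINUTES", "30")

	if os.Getenv("HOST") == "" {
		t.Fatal("HOST should be set for this test")
	}
	mins, err := strconv.Atoi(os.Getenv("TOKEN_EXPIRED_MINUTES"))
	if err != nil || mins <= 0 {
		t.Fatalf("TOKEN_EXPIRED_MINUTES must be a positive integer, got %q", os.Getenv("TOKEN_EXPIRED_MINUTES"))
	}
}
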
xds/src/main/java/io/grpc/xds/ClusterImplLoadBalancer.java
|
/*
* Copyright 2020 The gRPC Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.grpc.xds;
import static com.google.common.base.Preconditions.checkNotNull;
import static io.grpc.xds.XdsSubchannelPickers.BUFFER_PICKER;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.base.Strings;
import io.grpc.Attributes;
import io.grpc.ClientStreamTracer;
import io.grpc.ClientStreamTracer.StreamInfo;
import io.grpc.ConnectivityState;
import io.grpc.EquivalentAddressGroup;
import io.grpc.InternalLogId;
import io.grpc.LoadBalancer;
import io.grpc.Metadata;
import io.grpc.Status;
import io.grpc.internal.ForwardingClientStreamTracer;
import io.grpc.internal.ObjectPool;
import io.grpc.util.ForwardingLoadBalancerHelper;
import io.grpc.util.ForwardingSubchannel;
import io.grpc.xds.ClusterImplLoadBalancerProvider.ClusterImplConfig;
import io.grpc.xds.Endpoints.DropOverload;
import io.grpc.xds.EnvoyServerProtoData.UpstreamTlsContext;
import io.grpc.xds.LoadStatsManager2.ClusterDropStats;
import io.grpc.xds.LoadStatsManager2.ClusterLocalityStats;
import io.grpc.xds.ThreadSafeRandom.ThreadSafeRandomImpl;
import io.grpc.xds.XdsLogger.XdsLogLevel;
import io.grpc.xds.XdsNameResolverProvider.CallCounterProvider;
import io.grpc.xds.XdsSubchannelPickers.ErrorPicker;
import io.grpc.xds.internal.sds.SslContextProviderSupplier;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicLong;
import javax.annotation.Nullable;
/**
* Load balancer for cluster_impl_experimental LB policy. This LB policy is the child LB policy of
* the priority_experimental LB policy and the parent LB policy of the weighted_target_experimental
* LB policy in the xDS load balancing hierarchy. This LB policy applies cluster-level
* configurations to requests sent to the corresponding cluster, such as drop policies, circuit
* breakers.
*/
final class ClusterImplLoadBalancer extends LoadBalancer {
@VisibleForTesting
static final long DEFAULT_PER_CLUSTER_MAX_CONCURRENT_REQUESTS = 1024L;
@VisibleForTesting
static boolean enableCircuitBreaking =
Strings.isNullOrEmpty(System.getenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING"))
|| Boolean.parseBoolean(System.getenv("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING"));
@VisibleForTesting
static boolean enableSecurity =
Strings.isNullOrEmpty(System.getenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"))
|| Boolean.parseBoolean(System.getenv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"));
private static final Attributes.Key<ClusterLocalityStats> ATTR_CLUSTER_LOCALITY_STATS =
Attributes.Key.create("io.grpc.xds.ClusterImplLoadBalancer.clusterLocalityStats");
private final XdsLogger logger;
private final Helper helper;
private final ThreadSafeRandom random;
// The following fields are effectively final.
private String cluster;
@Nullable
private String edsServiceName;
private ObjectPool<XdsClient> xdsClientPool;
private XdsClient xdsClient;
private CallCounterProvider callCounterProvider;
private ClusterDropStats dropStats;
private ClusterImplLbHelper childLbHelper;
private LoadBalancer childLb;
ClusterImplLoadBalancer(Helper helper) {
this(helper, ThreadSafeRandomImpl.instance);
}
ClusterImplLoadBalancer(Helper helper, ThreadSafeRandom random) {
this.helper = checkNotNull(helper, "helper");
this.random = checkNotNull(random, "random");
InternalLogId logId = InternalLogId.allocate("cluster-impl-lb", helper.getAuthority());
logger = XdsLogger.withLogId(logId);
logger.log(XdsLogLevel.INFO, "Created");
}
@Override
public void handleResolvedAddresses(ResolvedAddresses resolvedAddresses) {
logger.log(XdsLogLevel.DEBUG, "Received resolution result: {0}", resolvedAddresses);
Attributes attributes = resolvedAddresses.getAttributes();
if (xdsClientPool == null) {
xdsClientPool = attributes.get(InternalXdsAttributes.XDS_CLIENT_POOL);
xdsClient = xdsClientPool.getObject();
}
if (callCounterProvider == null) {
callCounterProvider = attributes.get(InternalXdsAttributes.CALL_COUNTER_PROVIDER);
}
ClusterImplConfig config =
(ClusterImplConfig) resolvedAddresses.getLoadBalancingPolicyConfig();
if (cluster == null) {
cluster = config.cluster;
edsServiceName = config.edsServiceName;
childLbHelper = new ClusterImplLbHelper(
callCounterProvider.getOrCreate(config.cluster, config.edsServiceName));
childLb = config.childPolicy.getProvider().newLoadBalancer(childLbHelper);
// Assume load report server does not change throughout cluster lifetime.
if (config.lrsServerName != null) {
if (config.lrsServerName.isEmpty()) {
dropStats = xdsClient.addClusterDropStats(cluster, edsServiceName);
} else {
logger.log(XdsLogLevel.WARNING, "Cluster {0} config error: can only report load "
+ "to the same management server. Config lrsServerName {1} should be empty. ",
cluster, config.lrsServerName);
}
}
}
childLbHelper.updateDropPolicies(config.dropCategories);
childLbHelper.updateMaxConcurrentRequests(config.maxConcurrentRequests);
childLbHelper.updateSslContextProviderSupplier(config.tlsContext);
childLb.handleResolvedAddresses(
resolvedAddresses.toBuilder()
.setAttributes(attributes)
.setLoadBalancingPolicyConfig(config.childPolicy.getConfig())
.build());
}
@Override
public void handleNameResolutionError(Status error) {
if (childLb != null) {
childLb.handleNameResolutionError(error);
} else {
helper.updateBalancingState(ConnectivityState.TRANSIENT_FAILURE, new ErrorPicker(error));
}
}
@Override
public void shutdown() {
if (dropStats != null) {
dropStats.release();
}
if (childLb != null) {
childLb.shutdown();
if (childLbHelper != null) {
childLbHelper.updateSslContextProviderSupplier(null);
childLbHelper = null;
}
}
if (xdsClient != null) {
xdsClient = xdsClientPool.returnObject(xdsClient);
}
}
@Override
public boolean canHandleEmptyAddressListFromNameResolution() {
return true;
}
/**
* A decorated {@link LoadBalancer.Helper} that applies configurations for connections
* or requests to endpoints in the cluster.
*/
private final class ClusterImplLbHelper extends ForwardingLoadBalancerHelper {
private final AtomicLong inFlights;
private ConnectivityState currentState = ConnectivityState.IDLE;
private SubchannelPicker currentPicker = BUFFER_PICKER;
private List<DropOverload> dropPolicies = Collections.emptyList();
private long maxConcurrentRequests = DEFAULT_PER_CLUSTER_MAX_CONCURRENT_REQUESTS;
@Nullable
private SslContextProviderSupplier sslContextProviderSupplier;
private ClusterImplLbHelper(AtomicLong inFlights) {
this.inFlights = checkNotNull(inFlights, "inFlights");
}
@Override
public void updateBalancingState(ConnectivityState newState, SubchannelPicker newPicker) {
currentState = newState;
currentPicker = newPicker;
SubchannelPicker picker =
new RequestLimitingSubchannelPicker(newPicker, dropPolicies, maxConcurrentRequests);
delegate().updateBalancingState(newState, picker);
}
@Override
public Subchannel createSubchannel(CreateSubchannelArgs args) {
List<EquivalentAddressGroup> addresses = new ArrayList<>();
for (EquivalentAddressGroup eag : args.getAddresses()) {
Attributes.Builder attrBuilder = eag.getAttributes().toBuilder().set(
InternalXdsAttributes.ATTR_CLUSTER_NAME, cluster);
if (enableSecurity && sslContextProviderSupplier != null) {
attrBuilder.set(
InternalXdsAttributes.ATTR_SSL_CONTEXT_PROVIDER_SUPPLIER,
sslContextProviderSupplier);
}
addresses.add(new EquivalentAddressGroup(eag.getAddresses(), attrBuilder.build()));
}
Locality locality = args.getAddresses().get(0).getAttributes().get(
InternalXdsAttributes.ATTR_LOCALITY); // all addresses should be in the same locality
// Endpoint addresses resolved by ClusterResolverLoadBalancer should always contain
// attributes with its locality, including endpoints in LOGICAL_DNS clusters.
// If it is not (which really shouldn't happen), loads are aggregated under an empty locality.
if (locality == null) {
locality = Locality.create("", "", "");
}
final ClusterLocalityStats localityStats = xdsClient.addClusterLocalityStats(
cluster, edsServiceName, locality);
Attributes attrs = args.getAttributes().toBuilder().set(
ATTR_CLUSTER_LOCALITY_STATS, localityStats).build();
args = args.toBuilder().setAddresses(addresses).setAttributes(attrs).build();
final Subchannel subchannel = delegate().createSubchannel(args);
return new ForwardingSubchannel() {
@Override
public void shutdown() {
localityStats.release();
delegate().shutdown();
}
@Override
protected Subchannel delegate() {
return subchannel;
}
};
}
@Override
protected Helper delegate() {
return helper;
}
private void updateDropPolicies(List<DropOverload> dropOverloads) {
if (!dropPolicies.equals(dropOverloads)) {
dropPolicies = dropOverloads;
updateBalancingState(currentState, currentPicker);
}
}
private void updateMaxConcurrentRequests(@Nullable Long maxConcurrentRequests) {
if (Objects.equals(this.maxConcurrentRequests, maxConcurrentRequests)) {
return;
}
this.maxConcurrentRequests =
maxConcurrentRequests != null
? maxConcurrentRequests
: DEFAULT_PER_CLUSTER_MAX_CONCURRENT_REQUESTS;
updateBalancingState(currentState, currentPicker);
}
private void updateSslContextProviderSupplier(@Nullable UpstreamTlsContext tlsContext) {
UpstreamTlsContext currentTlsContext =
sslContextProviderSupplier != null
? (UpstreamTlsContext)sslContextProviderSupplier.getTlsContext()
: null;
if (Objects.equals(currentTlsContext, tlsContext)) {
return;
}
if (sslContextProviderSupplier != null) {
sslContextProviderSupplier.close();
}
sslContextProviderSupplier =
tlsContext != null
? new SslContextProviderSupplier(tlsContext, xdsClient.getTlsContextManager())
: null;
}
private class RequestLimitingSubchannelPicker extends SubchannelPicker {
private final SubchannelPicker delegate;
private final List<DropOverload> dropPolicies;
private final long maxConcurrentRequests;
private RequestLimitingSubchannelPicker(SubchannelPicker delegate,
List<DropOverload> dropPolicies, long maxConcurrentRequests) {
this.delegate = delegate;
this.dropPolicies = dropPolicies;
this.maxConcurrentRequests = maxConcurrentRequests;
}
@Override
public PickResult pickSubchannel(PickSubchannelArgs args) {
for (DropOverload dropOverload : dropPolicies) {
int rand = random.nextInt(1_000_000);
if (rand < dropOverload.dropsPerMillion()) {
logger.log(XdsLogLevel.INFO, "Drop request with category: {0}",
dropOverload.category());
if (dropStats != null) {
dropStats.recordDroppedRequest(dropOverload.category());
}
return PickResult.withDrop(
Status.UNAVAILABLE.withDescription("Dropped: " + dropOverload.category()));
}
}
final PickResult result = delegate.pickSubchannel(args);
if (result.getStatus().isOk() && result.getSubchannel() != null) {
if (enableCircuitBreaking) {
if (inFlights.get() >= maxConcurrentRequests) {
if (dropStats != null) {
dropStats.recordDroppedRequest();
}
return PickResult.withDrop(Status.UNAVAILABLE.withDescription(
"Cluster max concurrent requests limit exceeded"));
}
}
final ClusterLocalityStats stats =
result.getSubchannel().getAttributes().get(ATTR_CLUSTER_LOCALITY_STATS);
ClientStreamTracer.Factory tracerFactory = new CountingStreamTracerFactory(
stats, inFlights, result.getStreamTracerFactory());
return PickResult.withSubchannel(result.getSubchannel(), tracerFactory);
}
return result;
}
@Override
public String toString() {
return MoreObjects.toStringHelper(this).add("delegate", delegate).toString();
}
}
}
private static final class CountingStreamTracerFactory extends
ClientStreamTracer.InternalLimitedInfoFactory {
private ClusterLocalityStats stats;
private final AtomicLong inFlights;
@Nullable
private final ClientStreamTracer.Factory delegate;
private CountingStreamTracerFactory(
ClusterLocalityStats stats, AtomicLong inFlights,
@Nullable ClientStreamTracer.Factory delegate) {
this.stats = checkNotNull(stats, "stats");
this.inFlights = checkNotNull(inFlights, "inFlights");
this.delegate = delegate;
}
@Override
public ClientStreamTracer newClientStreamTracer(StreamInfo info, Metadata headers) {
stats.recordCallStarted();
inFlights.incrementAndGet();
if (delegate == null) {
return new ClientStreamTracer() {
@Override
public void streamClosed(Status status) {
stats.recordCallFinished(status);
inFlights.decrementAndGet();
}
};
}
final ClientStreamTracer delegatedTracer = delegate.newClientStreamTracer(info, headers);
return new ForwardingClientStreamTracer() {
@Override
protected ClientStreamTracer delegate() {
return delegatedTracer;
}
@Override
public void streamClosed(Status status) {
stats.recordCallFinished(status);
inFlights.decrementAndGet();
delegate().streamClosed(status);
}
};
}
}
}
|
[
"\"GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING\"",
"\"GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING\"",
"\"GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT\"",
"\"GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT\""
] |
[] |
[
"GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT",
"GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING"
] |
[]
|
["GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", "GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING"]
|
java
| 2 | 0 | |
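
Both experimental switches in the Java entry above follow the same convention: the feature stays enabled when the variable is unset or empty, and is disabled only when it is set to a value that Boolean.parseBoolean does not treat as true. Below is a small sketch of that default-on pattern, written in Go for consistency with the other examples here; the helper name is illustrative.

package main

import (
	"fmt"
	"os"
	"strings"
)

// enabledByDefault mirrors the flag pattern above: unset or empty keeps the
// feature on; otherwise only the literal "true" (case-insensitive) keeps it
// enabled, roughly matching Java's Boolean.parseBoolean semantics.
func enabledByDefault(envVar string) bool {
	v := os.Getenv(envVar)
	return v == "" || strings.EqualFold(v, "true")
}

func main() {
	fmt.Println(enabledByDefault("GRPC_XDS_EXPERIMENTAL_CIRCUIT_BREAKING"))
	fmt.Println(enabledByDefault("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"))
}
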
service/routes.py
|
######################################################################
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
######################################################################
"""
Shopcart Service with UI
Paths:
------
GET / - Displays usage information for Selenium testing
GET /shopcarts - Returns a list of all the Shopcarts
POST /shopcarts - Creates a new Shopcart record in the database
GET /shopcarts/{id} - Returns the Shopcart with a given id number
DELETE /shopcarts/{id} - Deletes a Shopcart record in the database
PUT /shopcarts/{id}/place-order - Places an order
GET /shopcarts/{id}/items - Gets Shopcart Item list from a Shopcart
POST /shopcarts/{id}/items - Creates a new Shopcart Item record in the database
GET /shopcarts/{id}/items/{item_id} - Returns the Shopcart Item with given id and item_id number
PUT /shopcarts/{id}/items/{item_id} - Updates the Shopcart Item
DELETE /shopcarts/{id}/items/{item_id} - Deletes the Shopcart Item
"""
import os
import json
import requests
from flask import jsonify, request, make_response, abort
from flask.logging import create_logger
from flask_api import status # HTTP Status Codes
from flask_restplus import Api, Resource, fields, reqparse
from service.models import Shopcart, ShopcartItem, DataValidationError
from . import app, constants
# use create_logger function to avoid no-member errors for logger in pylint
logger = create_logger(app)
ORDER_ENDPOINT = os.getenv('ORDER_ENDPOINT',
'https://nyu-order-service-f20.us-south.cf.appdomain.cloud/orders')
######################################################################
# Configure Swagger before initializing it
######################################################################
api = Api(app,
version='1.0.0',
title='Shopcart REST API Service',
description='This is a Shopcart server.',
default='shopcarts',
default_label='Shopcart operations',
doc='/apidocs',
# authorizations=authorizations,
prefix='/api'
)
# Define the model so that the docs reflect what can be sent
shopcart_item_model = api.model('ShopcartItem', {
'id': fields.Integer(readOnly=True,
description='The unique id assigned internally by service'),
'sid': fields.Integer(readOnly=True,
description='The id of the Shopcart this item belongs to'),
'sku': fields.Integer(required=True,
description='The product id'),
'name': fields.String(required=True,
description='The product name'),
'price': fields.Float(required=True,
description='The price for one item'),
'amount': fields.Integer(required=True,
description='The number of product'),
'create_time': fields.DateTime(readOnly=True,
description='The time the record is created'),
'update_time': fields.DateTime(readOnly=True,
description='The time the record is updated')
})
shopcart_model = api.model('Shopcart', {
'id': fields.Integer(readOnly=True,
description='The unique id assigned internally by service'),
'user_id': fields.Integer(required=True,
description='The id of the User'),
'create_time': fields.DateTime(readOnly=True,
description='The time the record is created'),
'update_time': fields.DateTime(readOnly=True,
description='The time the record is updated'),
'items': fields.List(fields.Nested(shopcart_item_model))
})
create_shopcart_model = api.model('Shopcart', {
'user_id': fields.Integer(required=True,
description='The id of the User')
})
create_shopcart_item_model = api.model('ShopcartItem', {
'sku': fields.Integer(required=True,
description='The product id'),
'name': fields.String(required=True,
description='The product name'),
'price': fields.Float(required=True,
description='The price for one item'),
'amount': fields.Integer(required=True,
description='The number of product')
})
# query string arguments
shopcart_args = reqparse.RequestParser()
shopcart_args.add_argument('user_id', type=int, required=False, help='Find Shopcart by User Id')
shopcart_item_args = reqparse.RequestParser()
shopcart_item_args.add_argument('sku',
type=int,
required=False,
help='Find Shopcart Item by Product Id')
shopcart_item_args.add_argument('name',
type=str,
required=False,
help='Find Shopcart Item by Product Name')
shopcart_item_args.add_argument('price',
type=float,
required=False,
help='Find Shopcart Item by Product Price')
shopcart_item_args.add_argument('amount',
type=int,
required=False,
help='Find Shopcart Item by Product Amount')
######################################################################
# Error Handlers
######################################################################
@app.errorhandler(DataValidationError)
def request_validation_error(error):
""" Handles Value Errors from bad data """
return bad_request(error)
@app.errorhandler(status.HTTP_400_BAD_REQUEST)
def bad_request(error):
""" Handles bad requests with 400_BAD_REQUEST """
logger.warning(str(error))
return (
jsonify(
status=status.HTTP_400_BAD_REQUEST, error="Bad Request", message=str(error)
),
status.HTTP_400_BAD_REQUEST,
)
@app.errorhandler(status.HTTP_404_NOT_FOUND)
def not_found(error):
""" Handles resources not found with 404_NOT_FOUND """
logger.warning(str(error))
return (
jsonify(
status=status.HTTP_404_NOT_FOUND, error=constants.NOT_FOUND, message=str(error)
),
status.HTTP_404_NOT_FOUND,
)
@app.errorhandler(status.HTTP_405_METHOD_NOT_ALLOWED)
def method_not_supported(error):
""" Handles unsupported HTTP methods with 405_METHOD_NOT_ALLOWED """
logger.warning(str(error))
return (
jsonify(
status=status.HTTP_405_METHOD_NOT_ALLOWED,
error="Method not Allowed",
message=str(error),
),
status.HTTP_405_METHOD_NOT_ALLOWED,
)
@app.errorhandler(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def mediatype_not_supported(error):
""" Handles unsupported media requests with 415_UNSUPPORTED_MEDIA_TYPE """
logger.warning(str(error))
return (
jsonify(
status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
error="Unsupported media type",
message=str(error),
),
status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
)
######################################################################
# GET HEALTH CHECK
######################################################################
@app.route('/healthcheck')
def healthcheck():
""" Let them know our heart is still beating """
return make_response(jsonify(status=200, message='Healthy'), status.HTTP_200_OK)
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
""" Root URL response """
return app.send_static_file('index.html')
######################################################################
# PATH: /shopcarts
######################################################################
@api.route('/shopcarts', strict_slashes=False)
class ShopcartCollection(Resource):
"""LIST ALL Shopcarts"""
@api.doc('list_shopcarts')
@api.expect(shopcart_args, validate=True)
@api.marshal_list_with(shopcart_model)
def get(self):
""" Returns all of the Shopcarts """
logger.info('Request to list Shopcarts...')
args = shopcart_args.parse_args()
if args['user_id']:
logger.info('Find by user')
shopcarts = Shopcart.find_by_user(args['user_id'])
else:
logger.info('Find all')
shopcarts = Shopcart.all()
results = [shopcart.serialize() for shopcart in shopcarts]
for shopcart in results:
items = ShopcartItem.find_by_shopcartid(shopcart["id"])
shopcart["items"] = [item.serialize() for item in items]
logger.info('[%s] Shopcarts returned', len(results))
return results, status.HTTP_200_OK
# ------------------------------------------------------------------
# ADD A NEW Shopcart
# ------------------------------------------------------------------
@api.doc('create_shopcarts')
@api.expect(create_shopcart_model)
@api.response(400, 'The posted data was not valid')
@api.response(201, 'Shopcart created successfully')
@api.marshal_with(shopcart_model, code=201)
def post(self):
""" Create a Shopcart """
logger.info("Request to create a shopcart")
check_content_type("application/json")
logger.debug('Payload = %s', api.payload)
shopcart = None
if 'user_id' in api.payload:
shopcart = Shopcart.find_by_user(api.payload['user_id']).first()
if shopcart is None:
shopcart = Shopcart()
shopcart.deserialize(api.payload)
shopcart.create()
logger.info("Shopcart with ID [%s] created.", shopcart.id)
location_url = api.url_for(ShopcartResource, shopcart_id=shopcart.id, _external=True)
shopcart_result = shopcart.serialize()
items = ShopcartItem.find_by_shopcartid(shopcart_result["id"])
shopcart_result["items"] = [item.serialize() for item in items]
return shopcart_result, status.HTTP_201_CREATED, {"Location": location_url}
######################################################################
# G E T A N D D E L E T E S H O P C A R T
######################################################################
@api.route('/shopcarts/<int:shopcart_id>')
@api.param('shopcart_id', 'The Shopcart identifier')
class ShopcartResource(Resource):
"""
ShopcartResource class
Allows the manipulation of a single shopcart
GET /shopcarts/{id} - Returns a shopcart with the id
DELETE /shopcarts/{id} - Deletes a shopcart with the id
"""
@api.doc('get_shopcart')
@api.response(404, 'Shopcart not found')
@api.response(200, 'Shopcart returned successfully')
@api.marshal_with(shopcart_model)
def get(self, shopcart_id):
"""
Gets information about a Shopcart
This endpoint will get information about a shopcart
"""
logger.info("Request to get information of a shopcart")
shopcart = Shopcart.find(shopcart_id)
if shopcart is None:
logger.info("Shopcart with ID [%s] not found.", shopcart_id)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart with id '{}' was not found.".format(shopcart_id)
)
shopcart_items = ShopcartItem.find_by_shopcartid(shopcart_id)
response = shopcart.serialize()
response["items"] = [item.serialize() for item in shopcart_items]
logger.info("Shopcart with ID [%s] fetched.", shopcart.id)
return response, status.HTTP_200_OK
@api.doc('delete_shopcart')
@api.response(204, 'Shopcart has been deleted')
def delete(self, shopcart_id):
"""
Delete a Shopcart
This endpoint will delete a Shopcart based on the id specified in the path
"""
logger.info('Request to delete Shopcart with id: %s', shopcart_id)
item = Shopcart.find(shopcart_id)
if item:
item.delete()
logger.info('Shopcart with id: %s has been deleted', shopcart_id)
return make_response("", status.HTTP_204_NO_CONTENT)
######################################################################
# G E T S H O P C A R T I T E M
######################################################################
@api.route('/shopcarts/<int:shopcart_id>/items/<int:item_id>')
@api.param('shopcart_id', 'The Shopcart identifier')
@api.param('item_id', 'The Shopcart Item identifier')
class ShopcartItemResource(Resource):
"""
ShopcartItemResource class
Allows the manipulation of a single shopcart item
GET /shopcarts/{id}/items/{item_id} - Returns a Shopcart Item with the given ids
DELETE /shopcarts/{id}/items/{item_id} - Deletes a Shopcart Item with the given ids
PUT /shopcarts/{id}/items/{item_id} - Updates a Shopcart Item with the given ids
"""
@api.doc('get_shopcart_item')
@api.response(404, 'Shopcart Item not found')
@api.response(200, 'Shopcart Item found')
@api.marshal_with(shopcart_item_model, code=200)
def get(self, shopcart_id, item_id):
"""
Get a shopcart item
This endpoint will return an item in the shop cart
"""
logger.info("Request to get an item in a shopcart")
shopcart_item = ShopcartItem.find(item_id)
if shopcart_item is None or shopcart_item.sid != shopcart_id:
logger.info(
"Shopcart item with ID [%s] not found in shopcart [%s].", item_id, shopcart_id
)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart item with ID [%s] not found in shopcart [%s]." % (item_id, shopcart_id)
)
logger.info("Fetched shopcart item with ID [%s].", item_id)
return shopcart_item.serialize(), status.HTTP_200_OK
@api.doc('update_shopcart_item')
@api.response(404, 'Shopcart Item not found')
@api.response(400, 'The posted Item data was not valid')
@api.response(200, 'Shopcart Item updated')
@api.expect(shopcart_item_model)
@api.marshal_with(shopcart_item_model, code=200)
def put(self, shopcart_id, item_id):
"""
Update a Shopcart item
This endpoint will update a Shopcart item based on the body that is posted
"""
logger.info("Request to update Shopcart item with id: %s", item_id)
check_content_type("application/json")
shopcart_item = ShopcartItem.find(item_id)
if shopcart_item is None or shopcart_item.sid != shopcart_id:
logger.info(
"Shopcart item with ID [%s] not found in shopcart [%s].", item_id, shopcart_id
)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart item with id '{}' was not found.".format(item_id)
)
data = api.payload
data["sid"] = shopcart_id
data["id"] = item_id
shopcart_item.deserialize(data)
shopcart_item.update()
logger.info("Shopcart item with ID [%s] updated.", shopcart_item.id)
return shopcart_item.serialize(), status.HTTP_200_OK
@api.doc('delete_shopcart_item')
@api.response(204, 'Shopcart Item has been deleted')
def delete(self, shopcart_id, item_id):
"""
Delete a ShopcartItem
This endpoint will delete a ShopcartItem based on the id specified in the path
"""
logger.info(
'Request to delete ShopcartItem with id: %s from Shopcart %s', item_id, shopcart_id
)
shopcart_item = ShopcartItem.find(item_id)
if shopcart_item is not None and shopcart_item.sid == shopcart_id:
shopcart_item.delete()
logger.info('ShopcartItem with id: %s has been deleted', item_id)
return "", status.HTTP_204_NO_CONTENT
######################################################################
# PATH: /shopcarts/:id/items
######################################################################
@api.route('/shopcarts/<int:shopcart_id>/items', strict_slashes=False)
@api.param('shopcart_id', 'The Shopcart identifier')
class ShopcartItemCollection(Resource):
""" Handles all interactions with collections of Shopcart Items """
@api.doc('list_shopcart_items')
@api.response(200, 'Shopcart Items returned successfully')
@api.marshal_list_with(shopcart_item_model)
def get(self, shopcart_id):
"""
Get information of a shopcart
This endpoint will return items in the shop cart
"""
logger.info("Request to get items in a shopcart")
shopcart_items = ShopcartItem.find_by_shopcartid(shopcart_id)
result = [item.serialize() for item in shopcart_items]
logger.info("Fetched items for Shopcart with ID [%s].", shopcart_id)
return result, status.HTTP_200_OK
@api.doc('create_shopcart_item')
@api.response(201, 'Shopcart Items has been created')
@api.response(400, 'The posted data was not valid')
@api.expect(shopcart_item_model)
@api.marshal_with(shopcart_item_model, code=201)
def post(self, shopcart_id):
"""
Create a new Shopcart Item
"""
logger.info("Request to create a shopcart item")
check_content_type("application/json")
shopcart_item = ShopcartItem()
data = request.get_json()
if "id" in data:
data.pop("id")
data["sid"] = shopcart_id
shopcart_item.deserialize(data)
shopcart_item.add()
location_url = api.url_for(ShopcartItemResource,
shopcart_id=shopcart_item.sid, item_id=shopcart_item.id,
_external=True)
logger.info("ShopcartItem with ID [%s] created.", shopcart_item.id)
return shopcart_item, status.HTTP_201_CREATED, {"Location": location_url}
######################################################################
# PATH: /shopcarts/items
######################################################################
@api.route('/shopcarts/items', strict_slashes=False)
class ShopcartItemQueryCollection(Resource):
"""LIST ALL Shopcart Items or Query by sku, name, price, or amount"""
@api.doc('list_shopcart_items')
@api.expect(shopcart_item_args, validate=True)
@api.marshal_list_with(shopcart_item_model)
def get(self):
""" Returns all of the ShopcartItems """
logger.info('Request to list ShopcartItems...')
args = shopcart_item_args.parse_args()
if args['sku']:
logger.info('Find by sku')
shopcart_items = ShopcartItem.find_by_sku(args['sku'])
elif args['name']:
logger.info('Find by name')
shopcart_items = ShopcartItem.find_by_name(args['name'])
elif args['price']:
logger.info('Find by price')
shopcart_items = ShopcartItem.find_by_price(args['price'])
elif args['amount']:
logger.info('Find by amount')
shopcart_items = ShopcartItem.find_by_amount(args['amount'])
else:
logger.info('Find all')
shopcart_items = ShopcartItem.all()
results = [shopcart_item.serialize() for shopcart_item in shopcart_items]
logger.info('[%s] Shopcart Items returned', len(results))
return results, status.HTTP_200_OK
######################################################################
# PATH: /shopcarts/{id}/place-order
######################################################################
@api.route('/shopcarts/<int:shopcart_id>/place-order')
@api.param('shopcart_id', 'The Shopcart identifier')
class PlaceOrderResource(Resource):
""" Place Order action on a Shopcart"""
@api.doc('place_order')
@api.response(404, 'Shopcart not found or is empty')
@api.response(400, 'Unable to place order for shopcart')
@api.response(204, 'Shopcart has been deleted')
def put(self, shopcart_id):
"""
Place Order for a Shopcart
This endpoint will place an order for a Shopcart based on the id specified in the path
"""
logger.info('Request to place order for Shopcart with id: %s', shopcart_id)
shopcart = Shopcart.find(shopcart_id)
if not shopcart:
logger.info("Shopcart with ID [%s] does not exist.", shopcart_id)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart with ID [%s] does not exist." % shopcart_id
)
shopcart_items = ShopcartItem.find_by_shopcartid(shopcart_id)
if shopcart_items is None or len(shopcart_items) == 0:
logger.info("Shopcart with ID [%s] is empty.", shopcart_id)
api.abort(
status.HTTP_404_NOT_FOUND,
"Shopcart with ID [%s] is empty." % shopcart_id
)
shopcart_items_list = [item.serialize() for item in shopcart_items]
# once we have the list of shopcart items we can send it in JSON format to the orders team
# add the order status as PLACED for a new order
order_items = []
for item in shopcart_items_list:
order_item = {}
order_item["item_id"] = int(item["id"])
order_item["product_id"] = int(item["sku"])
order_item["quantity"] = int(item["amount"])
order_item["price"] = item["price"]
order_item["status"] = "PLACED"
order_items.append(order_item)
order = {
"customer_id": int(shopcart.serialize()["user_id"]),
"order_items": order_items,
}
payload = json.dumps(order)
headers = {'content-type': 'application/json'}
res = requests.post(
ORDER_ENDPOINT, data=payload, headers=headers
)
logger.info('Put Order response %d %s', res.status_code, res.text)
if res.status_code != 201:
api.abort(
status.HTTP_400_BAD_REQUEST,
"Unable to place order for shopcart [%s]." % shopcart_id
)
shopcart.delete()
logger.info('Shopcart with id: %s has been deleted', shopcart_id)
return make_response("", status.HTTP_204_NO_CONTENT)
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def check_content_type(content_type):
""" Checks that the media type is correct """
if 'Content-Type' not in request.headers:
logger.error('No Content-Type specified.')
abort(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
'Content-Type must be {}'.format(content_type))
if request.headers['Content-Type'] == content_type:
return
logger.error('Invalid Content-Type: %s', request.headers['Content-Type'])
abort(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, 'Content-Type must be {}'.format(content_type))
def init_db():
""" Initializes the SQLAlchemy app """
Shopcart.init_db(app)
ShopcartItem.init_db(app)
logger.info("Database has been initialized!")
|
[] |
[] |
[
"ORDER_ENDPOINT"
] |
[]
|
["ORDER_ENDPOINT"]
|
python
| 1 | 0 | |
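
In the entry above, the place-order action forwards the cart to an external order service at ORDER_ENDPOINT as JSON: a customer_id plus a list of order_items, each carrying item_id, product_id, quantity, price, and status "PLACED". The cart is deleted only when the order service answers 201. Below is a hedged sketch of a client posting that same payload shape, written in Go like the other examples; the endpoint fallback and the sample values are illustrative only.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
)

// OrderItem mirrors the fields routes.py builds for each shopcart item.
type OrderItem struct {
	ItemID    int     `json:"item_id"`
	ProductID int     `json:"product_id"`
	Quantity  int     `json:"quantity"`
	Price     float64 `json:"price"`
	Status    string  `json:"status"`
}

// Order mirrors the top-level payload posted to ORDER_ENDPOINT.
type Order struct {
	CustomerID int         `json:"customer_id"`
	OrderItems []OrderItem `json:"order_items"`
}

func main() {
	endpoint := os.Getenv("ORDER_ENDPOINT")
	if endpoint == "" {
		endpoint = "http://localhost:8081/orders" // illustrative default only
	}
	order := Order{
		CustomerID: 42,
		OrderItems: []OrderItem{{ItemID: 1, ProductID: 1001, Quantity: 2, Price: 9.99, Status: "PLACED"}},
	}
	body, err := json.Marshal(order)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	resp, err := http.Post(endpoint, "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer resp.Body.Close()
	// The shopcart service treats anything other than 201 as a failure to place the order.
	fmt.Println("order service responded with", resp.StatusCode)
}
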
perf/benchmark/runner/runner.py
|
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import collections
import os
import json
import socket
import argparse
import subprocess
import shlex
import uuid
import sys
import tempfile
import time
from subprocess import getoutput
from urllib.parse import urlparse
import yaml
from fortio import METRICS_START_SKIP_DURATION, METRICS_END_SKIP_DURATION
NAMESPACE = os.environ.get("NAMESPACE", "twopods")
NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD = 9999
POD = collections.namedtuple('Pod', ['name', 'namespace', 'ip', 'labels'])
NIGHTHAWK_DOCKER_IMAGE = "envoyproxy/nighthawk-dev:59683b759eb8f8bd8cce282795c08f9e2b3313d4"
def pod_info(filterstr="", namespace=NAMESPACE, multi_ok=True):
cmd = "kubectl -n {namespace} get pod {filterstr} -o json".format(
namespace=namespace, filterstr=filterstr)
op = getoutput(cmd)
o = json.loads(op)
items = o['items']
if not multi_ok and len(items) > 1:
raise Exception("more than one found " + op)
if not items:
raise Exception("no pods found with command [" + cmd + "]")
i = items[0]
return POD(i['metadata']['name'], i['metadata']['namespace'],
i['status']['podIP'], i['metadata']['labels'])
def run_command(command):
process = subprocess.Popen(shlex.split(command))
process.wait()
def run_command_sync(command):
op = getoutput(command)
return op.strip()
# kubeclt related helper funcs
def kubectl_cp(from_file, to_file, container):
cmd = "kubectl --namespace {namespace} cp {from_file} {to_file} -c {container}".format(
namespace=NAMESPACE,
from_file=from_file,
to_file=to_file,
container=container)
print(cmd, flush=True)
run_command_sync(cmd)
def kubectl_exec(pod, remote_cmd, runfn=run_command, container=None):
c = ""
if container is not None:
c = "-c " + container
cmd = "kubectl --namespace {namespace} exec {pod} {c} -- {remote_cmd}".format(
pod=pod,
remote_cmd=remote_cmd,
c=c,
namespace=NAMESPACE)
print(cmd, flush=True)
runfn(cmd)
class Fortio:
ports = {
"http": {"direct_port": 8077, "port": 8080},
"grpc": {"direct_port": 8076, "port": 8079},
"direct_envoy": {"direct_port": 8076, "port": 8079},
}
def __init__(
self,
headers=None,
conn=None,
qps=None,
duration=None,
size=None,
mode="http",
telemetry_mode="mixer",
perf_record=False,
server="fortioserver",
client="fortioclient",
additional_args=None,
filter_fn=None,
extra_labels=None,
baseline=False,
serversidecar=False,
clientsidecar=False,
bothsidecar=True,
ingress=None,
mesh="istio",
cacert=None,
load_gen_type="fortio"):
self.run_id = str(uuid.uuid4()).partition('-')[0]
self.headers = headers
self.conn = conn
self.qps = qps
self.size = size
self.duration = duration
self.mode = mode
self.ns = NAMESPACE
# bucket resolution in seconds
self.r = "0.00005"
self.telemetry_mode = telemetry_mode
self.perf_record = perf_record
self.server = pod_info("-lapp=" + server, namespace=self.ns)
self.client = pod_info("-lapp=" + client, namespace=self.ns)
self.additional_args = additional_args
self.filter_fn = filter_fn
self.extra_labels = extra_labels
self.run_baseline = baseline
self.run_serversidecar = serversidecar
self.run_clientsidecar = clientsidecar
self.run_bothsidecar = bothsidecar
self.run_ingress = ingress
self.cacert = cacert
self.load_gen_type = load_gen_type
if mesh == "linkerd":
self.mesh = "linkerd"
elif mesh == "istio":
self.mesh = "istio"
else:
sys.exit("invalid mesh %s, must be istio or linkerd" % mesh)
def get_protocol_uri_fragment(self):
return "https" if self.mode == "grpc" else "http"
def compute_uri(self, svc, port_type):
if self.load_gen_type == "fortio":
basestr = "http://{svc}:{port}/echo?size={size}"
if self.mode == "grpc":
basestr = "-payload-size {size} {svc}:{port}"
return basestr.format(svc=svc, port=self.ports[self.mode][port_type], size=self.size)
elif self.load_gen_type == "nighthawk":
return "{protocol}://{svc}:{port}/".format(
svc=svc, port=self.ports[self.mode][port_type], protocol=self.get_protocol_uri_fragment())
else:
sys.exit("invalid load generator %s, must be fortio or nighthawk" % self.load_gen_type)
def nosidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.ip, "direct_port")
def serversidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.ip, "port")
def clientsidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "direct_port")
def bothsidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "port")
def ingress(self, load_gen_cmd):
url = urlparse(self.run_ingress)
# If scheme is not defined fallback to http
if url.scheme == "":
url = urlparse("http://{svc}".format(svc=self.run_ingress))
return load_gen_cmd + "_ingress {url}/echo?size={size}".format(
url=url.geturl(), size=self.size)
def execute_sidecar_mode(self, sidecar_mode, load_gen_type, load_gen_cmd, sidecar_mode_func, labels, perf_label_suffix):
print('-------------- Running in {sidecar_mode} mode --------------'.format(sidecar_mode=sidecar_mode))
if load_gen_type == "fortio":
kubectl_exec(self.client.name, sidecar_mode_func(load_gen_cmd, sidecar_mode))
elif load_gen_type == "nighthawk":
run_nighthawk(self.client.name, sidecar_mode_func(load_gen_type, sidecar_mode), labels + "_" + sidecar_mode)
if self.perf_record and len(perf_label_suffix) > 0:
run_perf(
self.mesh,
self.server.name,
labels + perf_label_suffix,
duration=40)
def generate_test_labels(self, conn, qps, size):
size = size or self.size
labels = self.run_id
labels += "_qps_" + str(qps)
labels += "_c_" + str(conn)
labels += "_" + str(size)
if self.mesh == "istio":
labels += "_"
labels += self.telemetry_mode
elif self.mesh == "linkerd":
labels += "_"
labels += "linkerd"
if self.extra_labels is not None:
labels += "_" + self.extra_labels
return labels
def generate_headers_cmd(self, headers):
headers_cmd = ""
if headers is not None:
for header_val in headers.split(","):
headers_cmd += "-H=" + header_val + " "
return headers_cmd
def generate_fortio_cmd(self, headers_cmd, conn, qps, duration, grpc, cacert_arg, labels):
if duration is None:
duration = self.duration
fortio_cmd = (
"fortio load {headers} -c {conn} -qps {qps} -t {duration}s -a -r {r} {cacert_arg} {grpc} "
"-httpbufferkb=128 -labels {labels}").format(
headers=headers_cmd,
conn=conn,
qps=qps,
duration=duration,
r=self.r,
grpc=grpc,
cacert_arg=cacert_arg,
labels=labels)
return fortio_cmd
def generate_nighthawk_cmd(self, cpus, conn, qps, duration, labels):
nighthawk_args = [
"nighthawk_client",
"--concurrency {cpus}",
"--output-format json",
"--prefetch-connections",
"--open-loop",
"--jitter-uniform 0.0001s",
"--experimental-h1-connection-reuse-strategy lru",
"--experimental-h2-use-multiple-connections",
"--nighthawk-service 127.0.0.1:{port_forward}",
"--label Nighthawk",
"--connections {conn}",
"--rps {qps}",
"--duration {duration}",
"--request-header \"x-nighthawk-test-server-config: {{response_body_size:{size}}}\""
]
# Our "gRPC" mode actually means:
# - https (see get_protocol_uri_fragment())
# - h2
# - with long running connections
# - Also transfer request body sized according to "size".
if self.mode == "grpc":
nighthawk_args.append("--h2")
if self.size:
nighthawk_args.append(
"--request-header \"content-length: {size}\"")
# Note: Labels is the last arg, and there's stuff depending on that.
# watch out when moving it.
nighthawk_args.append("--label {labels}")
# As the worker count acts as a multiplier, we divide qps and connections by the number of cpus to spread the load across the workers, so that the sum over the workers targets the global qps/connection levels.
nighthawk_cmd = " ".join(nighthawk_args).format(
conn=round(conn / cpus),
qps=round(qps / cpus),
duration=duration,
labels=labels,
size=self.size,
cpus=cpus,
port_forward=NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD)
return nighthawk_cmd
def run(self, headers, conn, qps, size, duration):
labels = self.generate_test_labels(conn, qps, size)
grpc = ""
if self.mode == "grpc":
grpc = "-grpc -ping"
cacert_arg = ""
if self.cacert is not None:
cacert_arg = "-cacert {cacert_path}".format(cacert_path=self.cacert)
headers_cmd = self.generate_headers_cmd(headers)
load_gen_cmd = ""
if self.load_gen_type == "fortio":
load_gen_cmd = self.generate_fortio_cmd(headers_cmd, conn, qps, duration, grpc, cacert_arg, labels)
elif self.load_gen_type == "nighthawk":
# TODO(oschaaf): Figure out how to best determine the right concurrency for Nighthawk.
# Results seem to get very noisy as the number of workers increases; are the client
# and server running on separate sets of vCPU cores? nproc yields the same concurrency
# as the go procs used with the Fortio version.
# client_cpus = int(run_command_sync(
# "kubectl exec -n \"{ns}\" svc/fortioclient -c shell nproc".format(ns=NAMESPACE)))
# print("Client pod has {client_cpus} cpus".format(client_cpus=client_cpus))
# See the comment above, we restrict execution to a single nighthawk worker for
# now to avoid noise.
workers = 1
load_gen_cmd = self.generate_nighthawk_cmd(workers, conn, qps, duration, labels)
if self.run_baseline:
self.execute_sidecar_mode("baseline", self.load_gen_type, load_gen_cmd, self.nosidecar, labels, "")
if self.run_serversidecar:
self.execute_sidecar_mode("serveronly", self.load_gen_type, load_gen_cmd, self.serversidecar, labels, "_srv_serveronly")
if self.run_clientsidecar:
self.execute_sidecar_mode("clientonly", self.load_gen_type, load_gen_cmd, self.clientsidecar, labels, "_srv_clientonly")
if self.run_bothsidecar:
self.execute_sidecar_mode("both", self.load_gen_type, load_gen_cmd, self.bothsidecar, labels, "_srv_bothsidecars")
if self.run_ingress:
print('-------------- Running in ingress mode --------------')
kubectl_exec(self.client.name, self.ingress(load_gen_cmd))
if self.perf_record:
run_perf(
self.mesh,
self.server.name,
labels + "_srv_ingress",
duration=40)
PERFCMD = "/usr/lib/linux-tools/4.4.0-131-generic/perf"
FLAMESH = "flame.sh"
PERFSH = "get_perfdata.sh"
PERFWD = "/etc/istio/proxy/"
WD = os.getcwd()
LOCAL_FLAMEDIR = os.path.join(WD, "../flame/")
LOCAL_FLAMEPATH = LOCAL_FLAMEDIR + FLAMESH
LOCAL_PERFPATH = LOCAL_FLAMEDIR + PERFSH
LOCAL_FLAMEOUTPUT = LOCAL_FLAMEDIR + "flameoutput/"
def run_perf(mesh, pod, labels, duration=20):
filename = labels + "_perf.data"
filepath = PERFWD + filename
perfpath = PERFWD + PERFSH
# copy executable over
kubectl_cp(LOCAL_PERFPATH, pod + ":" + perfpath, mesh + "-proxy")
kubectl_exec(
pod,
"{perf_cmd} {filename} {duration}".format(
perf_cmd=perfpath,
filename=filename,
duration=duration),
container=mesh + "-proxy")
kubectl_cp(pod + ":" + filepath + ".perf", LOCAL_FLAMEOUTPUT + filename + ".perf", mesh + "-proxy")
run_command_sync(LOCAL_FLAMEPATH + " " + filename + ".perf")
def validate_job_config(job_config):
required_fields = {"conn": list, "qps": list, "duration": int}
for k in required_fields:
if k not in job_config:
print("missing required parameter {}".format(k))
return False
exp_type = required_fields[k]
if not isinstance(job_config[k], exp_type):
print("expecting type of parameter {} to be {}, got {}".format(k, exp_type, type(job_config[k])))
return False
return True
def fortio_from_config_file(args):
with open(args.config_file) as f:
job_config = yaml.safe_load(f)
if not validate_job_config(job_config):
exit(1)
# TODO: hard to parse yaml into object directly because of existing constructor from CLI
fortio = Fortio()
fortio.headers = job_config.get('headers', None)
fortio.conn = job_config.get('conn', 16)
fortio.qps = job_config.get('qps', 1000)
fortio.duration = job_config.get('duration', 240)
fortio.load_gen_type = job_config.get("load_gen_type", "fortio")
fortio.telemetry_mode = job_config.get('telemetry_mode', 'mixer')
fortio.metrics = job_config.get('metrics', 'p90')
fortio.size = job_config.get('size', 1024)
fortio.perf_record = False
fortio.run_serversidecar = job_config.get('run_serversidecar', False)
fortio.run_clientsidecar = job_config.get('run_clientsidecar', False)
fortio.run_bothsidecar = job_config.get('run_bothsidecar', True)
fortio.run_baseline = job_config.get('run_baseline', False)
fortio.run_ingress = job_config.get('run_ingress', False)
fortio.mesh = job_config.get('mesh', 'istio')
fortio.mode = job_config.get('mode', 'http')
fortio.extra_labels = job_config.get('extra_labels')
return fortio
def can_connect_to_nighthawk_service():
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
return sock.connect_ex(('127.0.0.1', NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD)) == 0
def run_perf_test(args):
min_duration = METRICS_START_SKIP_DURATION + METRICS_END_SKIP_DURATION
# run with config files
if args.config_file is not None:
fortio = fortio_from_config_file(args)
else:
fortio = Fortio(
headers=args.headers,
conn=args.conn,
qps=args.qps,
duration=args.duration,
size=args.size,
perf_record=args.perf,
extra_labels=args.extra_labels,
baseline=args.baseline,
serversidecar=args.serversidecar,
clientsidecar=args.clientsidecar,
bothsidecar=args.bothsidecar,
ingress=args.ingress,
mode=args.mode,
mesh=args.mesh,
telemetry_mode=args.telemetry_mode,
cacert=args.cacert,
load_gen_type=args.load_gen_type)
if fortio.duration <= min_duration:
print("Duration must be greater than {min_duration}".format(
min_duration=min_duration))
exit(1)
# Create a port_forward for accessing nighthawk_service.
process = None
if not can_connect_to_nighthawk_service():
popen_cmd = "kubectl -n \"{ns}\" port-forward svc/fortioclient {port}:9999".format(
ns=NAMESPACE,
port=NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD)
process = subprocess.Popen(shlex.split(
popen_cmd), stdout=subprocess.PIPE)
max_tries = 10
while max_tries > 0 and not can_connect_to_nighthawk_service():
time.sleep(0.5)
max_tries = max_tries - 1
if not can_connect_to_nighthawk_service():
print("Failure connecting to nighthawk_service")
sys.exit(-1)
else:
print("Able to connect to nighthawk_service, proceeding")
try:
for conn in fortio.conn:
for qps in fortio.qps:
fortio.run(headers=fortio.headers, conn=conn, qps=qps,
duration=fortio.duration, size=fortio.size)
finally:
if process is not None: process.kill()
def run_nighthawk(pod, remote_cmd, labels):
# Use a local docker instance of Nighthawk to control nighthawk_service running in the pod
# and run transforms on the output we get.
docker_cmd = "docker run --rm --network=host {docker_image} {remote_cmd}".format(
docker_image=NIGHTHAWK_DOCKER_IMAGE, remote_cmd=remote_cmd)
print(docker_cmd, flush=True)
process = subprocess.Popen(shlex.split(docker_cmd), stdout=subprocess.PIPE)
(output, err) = process.communicate()
exit_code = process.wait()
if exit_code == 0:
with tempfile.NamedTemporaryFile(dir='/tmp', delete=True) as tmpfile:
dest = tmpfile.name
with open("%s.json" % dest, 'wb') as f:
f.write(output)
print("Dumped Nighthawk's json to {dest}".format(dest=dest))
# Send human readable output to the command line.
os.system(
"cat {dest}.json | docker run -i --rm {docker_image} nighthawk_output_transform --output-format human".format(docker_image=NIGHTHAWK_DOCKER_IMAGE, dest=dest))
# Transform to Fortio's reporting server json format
os.system("cat {dest}.json | docker run -i --rm {docker_image} nighthawk_output_transform --output-format fortio > {dest}.fortio.json".format(
dest=dest, docker_image=NIGHTHAWK_DOCKER_IMAGE))
# Copy to the Fortio report server data directory.
# TODO(oschaaf): We output the global aggregated statistics here of request_to_response, which excludes connection set up time.
# It would be nice to dump a series instead, as we have more details available in the Nighthawk json:
# - queue/connect time
# - time spend blocking in closed loop mode
# - initiation time to completion (spanning the complete lifetime of a request/reply, including queue/connect time)
# - per worker output may sometimes help interpret plots that don't have a nice knee-shaped shape.
kubectl_cp("{dest}.fortio.json".format(
dest=dest), "{pod}:/var/lib/fortio/{datetime}_nighthawk_{labels}.json".format(pod=pod, labels=labels, datetime=time.strftime("%Y-%m-%d-%H%M%S")), "shell")
else:
print("nighthawk remote execution error: %s" % exit_code)
if output:
print("--> stdout: %s" % output.decode("utf-8"))
if err:
print("--> stderr: %s" % err.decode("utf-8"))
def csv_to_int(s):
return [int(i) for i in s.split(",")]
def get_parser():
parser = argparse.ArgumentParser("Run performance test")
parser.add_argument(
"--headers",
help="a list of `header:value` should be separated by comma",
default=None)
parser.add_argument(
"--conn",
help="number of connections, comma separated list",
type=csv_to_int,)
parser.add_argument(
"--qps",
help="qps, comma separated list",
type=csv_to_int,)
parser.add_argument(
"--duration",
help="duration in seconds of the extract",
type=int)
parser.add_argument(
"--size",
help="size of the payload",
type=int,
default=1024)
parser.add_argument(
"--mesh",
help="istio or linkerd",
default="istio")
parser.add_argument(
"--telemetry_mode",
help="run with different mixer configurations: mixer, none, telemetryv2",
default="mixer")
parser.add_argument(
"--client",
help="where to run the test from",
default=None)
parser.add_argument(
"--server",
help="pod ip of the server",
default=None)
parser.add_argument(
"--perf",
help="also run perf and produce flame graph",
default=False)
parser.add_argument(
"--ingress",
help="run traffic through ingress, should be a valid URL",
default=None)
parser.add_argument(
"--extra_labels",
help="extra labels",
default=None)
parser.add_argument(
"--mode",
help="http or grpc",
default="http")
parser.add_argument(
"--config_file",
help="config yaml file",
default=None)
parser.add_argument(
"--cacert",
help="path to the cacert for the fortio client inside the container",
default=None)
parser.add_argument(
"--load_gen_type",
help="fortio or nighthawk",
default="fortio",
)
define_bool(parser, "baseline", "run baseline for all", False)
define_bool(parser, "serversidecar",
"run serversidecar-only for all", False)
define_bool(parser, "clientsidecar",
"run clientsidecar-only for all", False)
define_bool(parser, "bothsidecar",
"run both clientsiecar and serversidecar", True)
return parser
def define_bool(parser, opt, help_arg, default_val):
parser.add_argument(
"--" + opt, help=help_arg, dest=opt, action='store_true')
parser.add_argument(
"--no_" + opt, help="do not " + help_arg, dest=opt, action='store_false')
val = {opt: default_val}
parser.set_defaults(**val)
def main(argv):
args = get_parser().parse_args(argv)
print(args)
return run_perf_test(args)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
[] |
[] |
[
"NAMESPACE"
] |
[]
|
["NAMESPACE"]
|
python
| 1 | 0 | |
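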
pkg/reconciler/knativeeventing/controller.go
|
/*
Copyright 2019 The Knative Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package knativeeventing
import (
"context"
"flag"
"os"
"path/filepath"
mfc "github.com/manifestival/client-go-client"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"knative.dev/eventing-operator/pkg/apis/eventing/v1alpha1"
knativeEventinginformer "knative.dev/eventing-operator/pkg/client/injection/informers/eventing/v1alpha1/knativeeventing"
rbase "knative.dev/eventing-operator/pkg/reconciler"
deploymentinformer "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection/sharedmain"
)
const (
controllerAgentName = "knativeeventing-controller"
reconcilerName = "KnativeEventing"
)
var (
recursive = flag.Bool("recursive", false, "If filename is a directory, process all manifests recursively")
MasterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
Kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
)
// NewController initializes the controller and is called by the generated code
// Registers event handlers to enqueue events.
func NewController(
ctx context.Context,
cmw configmap.Watcher,
) *controller.Impl {
knativeEventingInformer := knativeEventinginformer.Get(ctx)
deploymentInformer := deploymentinformer.Get(ctx)
c := &Reconciler{
Base: rbase.NewBase(ctx, controllerAgentName, cmw),
knativeEventingLister: knativeEventingInformer.Lister(),
eventings: sets.String{},
}
koDataDir := os.Getenv("KO_DATA_PATH")
cfg, err := sharedmain.GetConfig(*MasterURL, *Kubeconfig)
if err != nil {
c.Logger.Error(err, "Error building kubeconfig")
}
config, err := mfc.NewManifest(filepath.Join(koDataDir, "knative-eventing/"), cfg)
if err != nil {
c.Logger.Error(err, "Error creating the Manifest for knative-eventing")
os.Exit(1)
}
c.config = config
impl := controller.NewImpl(c, c.Logger, reconcilerName)
c.Logger.Infof("Setting up event handlers for %s", reconcilerName)
knativeEventingInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))
deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("KnativeEventing")),
Handler: controller.HandleAll(impl.EnqueueControllerOf),
})
return impl
}
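// Hedged usage sketch (assumption, not part of this file): NewController has the
// shape of an injection controller constructor, so a main package would normally
// start it through the shared main helper, e.g.:
//
//	func main() {
//		sharedmain.Main("knativeeventing-controller", NewController)
//	}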
|
[
"\"KO_DATA_PATH\""
] |
[] |
[
"KO_DATA_PATH"
] |
[]
|
["KO_DATA_PATH"]
|
go
| 1 | 0 | |
resultdb/pbutil/invocation.go
|
// Copyright 2019 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pbutil
import (
pb "go.chromium.org/luci/resultdb/proto/v1"
)
const invocationIDPattern = `[a-z][a-z0-9_\-:.]{0,99}`
var invocationIDRe = regexpf("^%s$", invocationIDPattern)
var invocationNameRe = regexpf("^invocations/(%s)$", invocationIDPattern)
// ValidateInvocationID returns a non-nil error if id is invalid.
func ValidateInvocationID(id string) error {
return validateWithRe(invocationIDRe, id)
}
// ValidateInvocationName returns a non-nil error if name is invalid.
func ValidateInvocationName(name string) error {
_, err := ParseInvocationName(name)
return err
}
// ParseInvocationName extracts the invocation id.
func ParseInvocationName(name string) (id string, err error) {
if name == "" {
return "", unspecified()
}
m := invocationNameRe.FindStringSubmatch(name)
if m == nil {
return "", doesNotMatch(invocationNameRe)
}
return m[1], nil
}
// InvocationName synthesizes an invocation name from an id.
// Does not validate id, use ValidateInvocationID.
func InvocationName(id string) string {
return "invocations/" + id
}
// NormalizeInvocation converts inv to the canonical form.
func NormalizeInvocation(inv *pb.Invocation) {
sortStringPairs(inv.Tags)
}
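// Hedged usage sketch (illustrative values, not part of this file):
//
//	id, err := ParseInvocationName("invocations/build-8745")
//	// id == "build-8745", err == nil
//	name := InvocationName(id) // "invocations/build-8745"
//
// IDs must match invocationIDPattern above: a lowercase letter followed by up to
// 99 characters from [a-z0-9_\-:.].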
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
utils/file.go
|
// MIT License
// Copyright (c) 2019 gonethopper
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
// * @Author: ankye
// * @Date: 2019-06-11 07:48:41
// * @Last Modified by: ankye
// * @Last Modified time: 2019-06-11 07:48:41
package utils
import (
"bytes"
"errors"
"io"
"os"
"path"
"path/filepath"
"runtime"
"strings"
)
// FileIsExist returns true if the file exists
func FileIsExist(path string) bool {
_, err := os.Stat(path)
if err != nil {
if os.IsExist(err) {
return true
}
if os.IsNotExist(err) {
return false
}
return false
}
return true
}
// GetWorkDirectory returns the directory of the current executable
func GetWorkDirectory() (string, error) {
// in test case or go run xxx, work directory is temp directory
// workDir := os.Getenv("WORK_DIR")
// if len(workDir) > 0 {
// return filepath.Abs(workDir)
// }
// workDir, err := os.Getwd()
// if err != nil {
// return "", err
// }
// file, err := exec.LookPath(os.Args[0])
// if err != nil {
// return "", err
// }
// path, err := filepath.Abs(file)
// if err != nil {
// return "", err
// }
path, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
return "", err
}
if runtime.GOOS == "windows" {
path = strings.Replace(path, "\\", "/", -1)
i := strings.LastIndex(path, "/")
if i < 0 {
return "", errors.New("work directory invalid")
}
return string(path[0 : i+1]), nil
}
return path + "/", nil
}
// GetAbsDirectory returns the absolute path of the directory containing filename
func GetAbsDirectory(filename string) string {
filename = filepath.FromSlash(path.Clean(filename))
if runtime.GOOS == "windows" {
filename = strings.Replace(filename, "\\", "/", -1)
}
i := strings.LastIndex(filename, "/")
if i < 0 {
return ""
}
filename = string(filename[0 : i+1])
if filepath.IsAbs(filename) {
return filename
}
workDir, err := GetWorkDirectory()
if err != nil {
return ""
}
return filepath.Join(workDir, filename)
}
// GetAbsFilePath returns the absolute path of filename
func GetAbsFilePath(filename string) string {
if filepath.IsAbs(filename) {
return filename
}
workDir, err := GetWorkDirectory()
if err != nil {
return ""
}
return filepath.Join(workDir, filename)
}
// FileLines returns the number of lines in the file
func FileLines(filename string) (int32, error) {
fd, err := os.Open(filename)
//fd, err := mmap.Open(filename)
if err != nil {
return 0, err
}
defer fd.Close()
maxbuf := 32768
buf := make([]byte, maxbuf) // 32k
var count int32
lineSep := []byte{'\n'}
offset := int64(0)
for {
c, err := fd.Read(buf)
//c, err := fd.ReadAt(buf, offset)
if err != nil && err != io.EOF {
return count, err
}
offset += int64(c)
count += int32(bytes.Count(buf[:c], lineSep))
if err == io.EOF {
break
}
}
return count, nil
}
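// Hedged usage sketch (illustrative path, not part of this file):
//
//	lines, err := FileLines("config/app.yaml")
//	if err == nil {
//		fmt.Printf("%s has %d lines\n", GetAbsFilePath("config/app.yaml"), lines)
//	}
//
// Note that GetAbsFilePath resolves relative paths against the executable's
// directory (via GetWorkDirectory), not the process working directory.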
|
[
"\"WORK_DIR\""
] |
[] |
[
"WORK_DIR"
] |
[]
|
["WORK_DIR"]
|
go
| 1 | 0 | |
serverless/apps/qctokyo/notificator.py
|
import json
import logging
import os
import requests
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def notify_horoscope_success_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "storing horoscope is success:smile:"
messages = [
f"job_id: {event['job_id']}",
f"backend_name: {event['backend_name']}",
f"creation_date: {event['creation_date']} UTC",
]
_post_slack(title, "good", "\n".join(messages))
return event
def notify_horoscope_failed_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "storing horoscope is failure:rage:"
messages = ["Check detail!"]
if "detail" in event and "status" in event["detail"]:
messages.append(f'status: {event["detail"]["status"]}')
_post_slack(title, "danger", "\n".join(messages))
return event
def notify_horoscope_update_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "updating horoscope is success:smile:"
filtered_result = {int(k[4:]): v for k, v in event.items() if k.startswith("rank")}
sorted_result = sorted(filtered_result.items(), key=lambda x: x[0])
result = [
str(x[0])
+ ": "
+ x[1]
.replace(" ", "")
.replace("</td><td>", ", ")
.replace("<td>", "")
.replace("</td>", "")
for x in sorted_result
]
messages = [f"received new oracle at {event['creation_date']} UTC"]
messages.extend(result)
messages.append("https://www.quantumcomputer.tokyo/horoscope.html")
_post_slack(title, "good", "\n".join(messages))
return event
def notify_horoscope_update_failed_to_slack(event: dict, context) -> dict:
logger.info(f"event={event}")
title = "updating horoscope is failure:rage:"
messages = ["Check detail!"]
if "detail" in event and "status" in event["detail"]:
messages.append(f'status: {event["detail"]["status"]}')
_post_slack(title, "danger", "\n".join(messages))
return event
def _post_slack(title: str, color: str, detail: str) -> None:
payload = {
"attachments": [
{
"color": color,
"pretext": f"[{os.environ['STAGE']}] {title}",
"text": detail,
}
]
}
try:
slack_webhook_url = "https://" + os.environ["SLACK_WEBHOOK_URL"]
response = requests.post(slack_webhook_url, data=json.dumps(payload))
except requests.exceptions.RequestException:
logger.exception(f"failed to call slack_webhook")
else:
logger.info(f"slack_webhook_response status_code={response.status_code}")
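# Hedged usage sketch (hypothetical event payload, not part of this module):
#
#   notify_horoscope_success_to_slack(
#       {"job_id": "abc123", "backend_name": "ibmq_qasm_simulator",
#        "creation_date": "2020-01-01 00:00:00"},
#       None,
#   )
#
# _post_slack() assumes STAGE and SLACK_WEBHOOK_URL (host without the scheme)
# are set in the environment.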
|
[] |
[] |
[
"SLACK_WEBHOOK_URL",
"STAGE"
] |
[]
|
["SLACK_WEBHOOK_URL", "STAGE"]
|
python
| 2 | 0 | |
altio/wsgi.py
|
"""
WSGI config for altio project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "altio.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
cmd/root.go
|
/*
Copyright © 2019 NAME HERE [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"bufio"
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
)
// variables used as flags later on
var name string
var greeting string
var preview bool
var prompt bool
var debug bool = false
// rootCmd represents the base command when called without any subcommands
// pointer to a struct
var rootCmd = &cobra.Command{
Use: "motd",
Short: "A brief description of your application",
Long: `A longer description that spans multiple lines and likely contains
examples and usage of using your application. For example:
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
// Uncomment the following line if your bare application
// has an action associated with it:
Run: func(cmd *cobra.Command, args []string) {
// Show usage if flag combination isn't valid (exit in this scenario)
// If no arguments passed, show usage
if prompt == false && (name == "" || greeting == "") {
cmd.Usage()
os.Exit(1)
}
// Optionally print flags and exit if DEBUG is set
if debug {
fmt.Println("Name:", name)
fmt.Println("Greeting:", greeting)
fmt.Println("Prompt:", prompt)
fmt.Println("Preview:", preview)
os.Exit(0)
}
// Conditionally read from stdin
if prompt { //if prompt is true
name, greeting = renderPrompt()
}
// Generate message
message := buildMessage(name, greeting)
// Either preview message or write to file
if preview {
fmt.Println(message)
} else {
// Open file to write and create if it does not exist
f, err := os.OpenFile("./file.txt", os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
fmt.Println("Error: Unable to open ./file.txt")
fmt.Println(err)
os.Exit(1)
}
defer f.Close()
// Empty file.txt
err = os.Truncate("./file.txt", 0)
if err != nil {
fmt.Println("Error: Failed to truncate ./file.txt")
fmt.Println(err)
os.Exit(1)
}
// Write message to file.txt
_, err = f.Write([]byte(message))
if err != nil {
fmt.Println("Error: Failed to write to ./file.txt")
fmt.Println(err)
os.Exit(1)
}
}
},
}
func buildMessage(name, greeting string) string {
return fmt.Sprintf("%s, %s", greeting, name)
}
func renderPrompt() (name, greeting string) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Your Greeting: ")
greeting, _ = reader.ReadString('\n')
greeting = strings.TrimSpace(greeting)
fmt.Print("Your Name: ")
name, _ = reader.ReadString('\n')
name = strings.TrimSpace(name)
return
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
//; multiple actions on same line
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
// Executed when file is loaded (run before Execute is called)
func init() {
rootCmd.Flags().StringVarP(&name, "name", "n", "", "name to use within the message")
rootCmd.Flags().StringVarP(&greeting, "greeting", "g", "", "phrase to use within the greeting")
rootCmd.Flags().BoolVarP(&preview, "preview", "v", false, "use preview to output message without writing to ./file.txt")
rootCmd.Flags().BoolVarP(&prompt, "prompt", "p", false, "use prompt to input name and message")
if os.Getenv("DEBUG") != "" {
debug = true
}
}
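// Hedged usage sketch (assumption: the binary is built and installed as "motd"):
//
//	motd --name Alice --greeting Hello --preview   // prints "Hello, Alice"
//	motd --prompt                                  // reads both values from stdin
//	DEBUG=1 motd --name Alice --greeting Hello     // dumps flag values and exits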
|
[
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
go/cmd/vtctl/vtctl.go
|
/*
Copyright 2019 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"log/syslog"
"os"
"os/signal"
"strings"
"syscall"
"time"
"vitess.io/vitess/go/cmd"
"context"
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/trace"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vtctl"
"vitess.io/vitess/go/vt/vttablet/tmclient"
"vitess.io/vitess/go/vt/workflow"
"vitess.io/vitess/go/vt/wrangler"
)
var (
waitTime = flag.Duration("wait-time", 24*time.Hour, "time to wait on an action")
detachedMode = flag.Bool("detach", false, "detached mode - run vtctl detached from the terminal")
)
func init() {
logger := logutil.NewConsoleLogger()
flag.CommandLine.SetOutput(logutil.NewLoggerWriter(logger))
flag.Usage = func() {
logger.Printf("Usage: %s [global parameters] command [command parameters]\n", os.Args[0])
logger.Printf("\nThe global optional parameters are:\n")
flag.PrintDefaults()
logger.Printf("\nThe commands are listed below, sorted by group. Use '%s <command> -h' for more help.\n\n", os.Args[0])
vtctl.PrintAllCommands(logger)
}
}
// signal handling, centralized here
func installSignalHandlers(cancel func()) {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-sigChan
// we got a signal, cancel the current ctx
cancel()
}()
}
func main() {
defer exit.RecoverAll()
defer logutil.Flush()
if *detachedMode {
// this method will call os.Exit and kill this process
cmd.DetachFromTerminalAndExit()
}
args := servenv.ParseFlagsWithArgs("vtctl")
action := args[0]
startMsg := fmt.Sprintf("USER=%v SUDO_USER=%v %v", os.Getenv("USER"), os.Getenv("SUDO_USER"), strings.Join(os.Args, " "))
if syslogger, err := syslog.New(syslog.LOG_INFO, "vtctl "); err == nil {
syslogger.Info(startMsg)
} else {
log.Warningf("cannot connect to syslog: %v", err)
}
closer := trace.StartTracing("vtctl")
defer trace.LogErrorsWhenClosing(closer)
servenv.FireRunHooks()
ts := topo.Open()
defer ts.Close()
vtctl.WorkflowManager = workflow.NewManager(ts)
ctx, cancel := context.WithTimeout(context.Background(), *waitTime)
wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient())
installSignalHandlers(cancel)
err := vtctl.RunCommand(ctx, wr, args)
cancel()
switch err {
case vtctl.ErrUnknownCommand:
flag.Usage()
exit.Return(1)
case nil:
// keep going
default:
log.Errorf("action failed: %v %v", action, err)
exit.Return(255)
}
}
|
[
"\"USER\"",
"\"SUDO_USER\""
] |
[] |
[
"USER",
"SUDO_USER"
] |
[]
|
["USER", "SUDO_USER"]
|
go
| 2 | 0 | |
gallery_prj/asgi.py
|
"""
ASGI config for gallery_prj project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gallery_prj.settings')
application = get_asgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
scripts/scramble/scripts/psycopg2-solaris.py
|
import os, sys, shutil
from distutils.sysconfig import get_config_var
def prep_postgres( prepped, args ):
pg_version = args['version']
pg_srcdir = os.path.join( os.getcwd(), "postgresql-%s" % pg_version )
# set up environment
os.environ['CC'] = get_config_var('CC')
os.environ['CFLAGS'] = get_config_var('CFLAGS')
os.environ['LDFLAGS'] = get_config_var('LDFLAGS')
cc = get_solaris_compiler()
if cc == 'cc':
os.environ['CFLAGS'] += ' -KPIC'
elif cc == 'gcc':
os.environ['CFLAGS'] += ' -fPIC -DPIC'
# run configure
run( "./configure --prefix=%s/postgres --disable-dependency-tracking --enable-static --disable-shared --without-readline --with-thread-safety" % os.getcwd(),
os.path.join( os.getcwd(), "postgresql-%s" % pg_version ),
"Configuring postgres (./configure)" )
# compile
run( "gmake ../../src/include/utils/fmgroids.h", os.path.join( pg_srcdir, 'src', 'backend' ), "Compiling fmgroids.h (cd src/backend; gmake ../../src/include/utils/fmgroids.h)" )
run( "gmake", os.path.join( pg_srcdir, 'src', 'interfaces', 'libpq' ), "Compiling libpq (cd src/interfaces/libpq; gmake)" )
run( "gmake", os.path.join( pg_srcdir, 'src', 'bin', 'pg_config' ), "Compiling pg_config (cd src/bin/pg_config; gmake)" )
# install
run( "gmake install", os.path.join( pg_srcdir, 'src', 'interfaces', 'libpq' ), "Compiling libpq (cd src/interfaces/libpq; gmake install)" )
run( "gmake install", os.path.join( pg_srcdir, 'src', 'bin', 'pg_config' ), "Compiling pg_config (cd src/bin/pg_config; gmake install)" )
run( "gmake install", os.path.join( pg_srcdir, 'src', 'include' ), "Compiling pg_config (cd src/include; gmake install)" )
# create prepped archive
print "%s(): Creating prepped archive for future builds at:" % sys._getframe().f_code.co_name
print " ", prepped
compress( prepped,
'postgres/bin',
'postgres/include',
'postgres/lib' )
if __name__ == '__main__':
# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
os.chdir( os.path.dirname( sys.argv[0] ) )
# find setuptools
sys.path.insert( 1, os.path.abspath( os.path.join( '..', '..', '..', 'lib' ) ) )
from scramble_lib import *
tag = get_tag()
pg_version = ( tag.split( "_" ) )[1]
pg_archive_base = os.path.join( archives, "postgresql-%s" % pg_version )
pg_archive = get_archive( pg_archive_base )
pg_archive_prepped = os.path.join( archives, "postgresql-%s-%s.tar.gz" % ( pg_version, platform_noucs ) )
# clean up any existing stuff (could happen if you run scramble.py by hand)
clean( [ 'postgresql-%s' % pg_version ] )
# unpack postgres
unpack_dep( pg_archive, pg_archive_prepped, prep_postgres, dict( version=pg_version ) )
# localize setup.cfg
if not os.path.exists( 'setup.cfg.orig' ):
shutil.copy( 'setup.cfg', 'setup.cfg.orig' )
f = open( 'setup.cfg', 'a' )
f.write( '\npg_config=postgres/bin/pg_config\n' )
f.close()
# tag
me = sys.argv[0]
sys.argv = [ me ]
if tag is not None:
sys.argv.append( "egg_info" )
sys.argv.append( "--tag-build=%s" %tag )
sys.argv.append( "bdist_egg" )
# go
execfile( "setup.py", globals(), locals() )
|
[] |
[] |
[
"CFLAGS",
"CC",
"LDFLAGS"
] |
[]
|
["CFLAGS", "CC", "LDFLAGS"]
|
python
| 3 | 0 | |
Static/CNN.py
|
import cv2
import os
import numpy as np
import tensorflow as tf
from Static import HEIGHT, WIDTH, PATH_IMG, PATH_IMG_BENIGN
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizer_v2 import rmsprop
from sklearn.model_selection import train_test_split
import seaborn as sns
from sklearn.metrics import confusion_matrix
def load_images_from_folder(folder):
print("> loading images...")
images = []
for filename in os.listdir(folder):
img = cv2.imread(os.path.join(folder, filename))
if img is not None:
images.append(img)
return images
#######################################################
def pre_process(images_from_disk, height, width):
print("> preprocessing...")
res_image = []
dims = (height, width)
for iterator in range(len(images_from_disk)):
res = cv2.resize(images_from_disk[iterator], dims, interpolation=cv2.INTER_LINEAR)
cv2.normalize(res, res, 0, 255, cv2.NORM_MINMAX)
res = tf.convert_to_tensor(res, dtype=tf.float32)
res_image.append(res)
return res_image
###################################################
def build_model():
print(">> Building CNN...")
model = Sequential()
model.add(Conv2D(32, (1, 1), padding='same', input_shape=(HEIGHT, WIDTH, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (1, 1), padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128))
model.add(Dense(1, activation='sigmoid'))
opt = rmsprop.RMSprop(learning_rate=0.1)
model.compile(loss='binary_crossentropy',
optimizer=opt,
metrics=['accuracy', tf.keras.metrics.Precision(), tf.keras.metrics.Recall(), tf.keras.metrics.AUC() ])
return model
def train_model(benign, malicious):
model = build_model()
print(">>>training model..")
batch_size = 50
epochs = 6
labels = [0 for _ in benign] + [1 for _ in malicious]
labels = np.array(labels)
data = benign + malicious
data = np.array(data)
X_train, X_test, y_train, y_test = train_test_split(data,
labels,
test_size=0.25,
random_state=80)
model.load_weights("cnn_new.h5")
#model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
# validation_split=0.25,
# shuffle=True)
print(">>> evaluating...")
# compile() tracks accuracy, precision, recall and AUC, so evaluate() returns them in that order
loss, acc, precision, recall, auc = model.evaluate(X_test, y_test)
print(precision)
print(recall)
print(auc)
#pred = model.predict(X_test)
#model.save_weights("cnn_new.h5")
print(">>> weight saved ")
#pred = pred.argmax(axis=-1)
#cf_matrix = confusion_matrix(y_test, pred)
#print(cf_matrix)
#sns.heatmap(cf_matrix / np.sum(cf_matrix), annot=True,
# fmt='.2%', cmap='Blues')
#plt.show()
def plotter(history):
print(history.history.keys())
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
if __name__ == '__main__':
benign = load_images_from_folder(PATH_IMG_BENIGN)
mal = load_images_from_folder(PATH_IMG)
mal = pre_process(mal, HEIGHT, WIDTH)
benign = pre_process(benign, HEIGHT, WIDTH)
train_model(benign, mal)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
services/datastores/datastores_test.go
|
// Copyright 2019 Cuttle.ai. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package datastores_test
import (
"os"
"testing"
"github.com/cuttle-ai/brain/appctx"
"github.com/cuttle-ai/brain/env"
"github.com/cuttle-ai/brain/log"
"github.com/cuttle-ai/go-sdk/services/datastores"
)
func TestListDatastores(t *testing.T) {
env.LoadEnv(log.NewLogger())
appToken := os.Getenv("APP_TOKEN")
discoveryURL := os.Getenv("DISCOVERY_URL")
discoveryToken := os.Getenv("DISCOVERY_TOKEN")
appCtx := appctx.NewAppCtx(appToken, discoveryToken, discoveryURL)
_, err := datastores.ListDatastores(appCtx)
if err != nil {
t.Error("error while getting the list of datastores", err)
}
}
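// Hedged usage sketch (placeholder values): the test reads its configuration from
// the environment, e.g.:
//
//	APP_TOKEN=<token> DISCOVERY_TOKEN=<token> DISCOVERY_URL=<host:port> \
//		go test ./services/datastores/...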
|
[
"\"APP_TOKEN\"",
"\"DISCOVERY_URL\"",
"\"DISCOVERY_TOKEN\""
] |
[] |
[
"DISCOVERY_TOKEN",
"DISCOVERY_URL",
"APP_TOKEN"
] |
[]
|
["DISCOVERY_TOKEN", "DISCOVERY_URL", "APP_TOKEN"]
|
go
| 3 | 0 | |
autobahn/wamp/test/test_user_handler_errors.py
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import os
if os.environ.get('USE_TWISTED', False):
from twisted.trial import unittest
from twisted.internet import defer
from twisted.python.failure import Failure
from autobahn.wamp import message, role
from autobahn.wamp.exception import ProtocolError
from autobahn.twisted.wamp import ApplicationSession
class MockTransport:
def __init__(self):
self.messages = []
def send(self, msg):
self.messages.append(msg)
def close(self, *args, **kw):
pass
class MockApplicationSession(ApplicationSession):
'''
This is used by tests, which typically attach their own handler to
on*() methods. This just collects any errors from onUserError
'''
def __init__(self, *args, **kw):
ApplicationSession.__init__(self, *args, **kw)
self.errors = []
self._realm = 'dummy'
self._transport = MockTransport()
def onUserError(self, e, msg):
self.errors.append((e.value, msg))
def exception_raiser(exc):
'''
Create a method that takes any args and always raises the given
Exception instance.
'''
assert isinstance(exc, Exception), "Must derive from Exception"
def method(*args, **kw):
raise exc
return method
def async_exception_raiser(exc):
'''
Create a method that takes any args, and always returns a Deferred
that has failed.
'''
assert isinstance(exc, Exception), "Must derive from Exception"
def method(*args, **kw):
try:
raise exc
except:
return defer.fail(Failure())
return method
def create_mock_welcome():
return message.Welcome(
1234,
{
'broker': role.RoleBrokerFeatures(),
},
)
class TestSessionCallbacks(unittest.TestCase):
'''
These test that callbacks on user-overridden ApplicationSession
methods that produce errors are handled correctly.
XXX should do state-diagram documenting where we are when each
of these cases arises :/
'''
# XXX sure would be nice to use py.test @fixture to do the
# async/sync exception-raising stuff (i.e. make each test run
# twice)...but that would mean switching all test-running over
# to py-test
skip = True
def test_on_join(self):
session = MockApplicationSession()
exception = RuntimeError("blammo")
session.onJoin = exception_raiser(exception)
msg = create_mock_welcome()
# give the session a WELCOME, from which it should call onJoin
session.onMessage(msg)
# make sure we got the right error out of onUserError
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_join_deferred(self):
session = MockApplicationSession()
exception = RuntimeError("blammo")
session.onJoin = async_exception_raiser(exception)
msg = create_mock_welcome()
# give the session a WELCOME, from which it should call onJoin
session.onMessage(msg)
# make sure we got the right error out of onUserError
# import traceback
# traceback.print_exception(*session.errors[0][:3])
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_leave(self):
session = MockApplicationSession()
exception = RuntimeError("boom")
session.onLeave = exception_raiser(exception)
msg = message.Abort("testing")
# we haven't done anything, so this is "abort before we've
# connected"
session.onMessage(msg)
# make sure we got the right error out of onUserError
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_leave_deferred(self):
session = MockApplicationSession()
exception = RuntimeError("boom")
session.onLeave = async_exception_raiser(exception)
msg = message.Abort("testing")
# we haven't done anything, so this is "abort before we've
# connected"
session.onMessage(msg)
# make sure we got the right error out of onUserError
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_leave_valid_session(self):
'''
cover when onLeave called after we have a valid session
'''
session = MockApplicationSession()
exception = RuntimeError("such challenge")
session.onLeave = exception_raiser(exception)
# we have to get to an established connection first...
session.onMessage(create_mock_welcome())
self.assertTrue(session._session_id is not None)
# okay we have a session ("because ._session_id is not None")
msg = message.Goodbye()
session.onMessage(msg)
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_leave_valid_session_deferred(self):
'''
cover when onLeave called after we have a valid session
'''
session = MockApplicationSession()
exception = RuntimeError("such challenge")
session.onLeave = async_exception_raiser(exception)
# we have to get to an established connection first...
session.onMessage(create_mock_welcome())
self.assertTrue(session._session_id is not None)
# okay we have a session ("because ._session_id is not None")
msg = message.Goodbye()
session.onMessage(msg)
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_leave_after_bad_challenge(self):
'''
onLeave raises error after onChallenge fails
'''
session = MockApplicationSession()
exception = RuntimeError("such challenge")
session.onLeave = exception_raiser(exception)
session.onChallenge = exception_raiser(exception)
# make a challenge (which will fail, and then the
# subsequent onLeave will also fail)
msg = message.Challenge("foo")
session.onMessage(msg)
self.assertEqual(2, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_disconnect_via_close(self):
session = MockApplicationSession()
exception = RuntimeError("sideways")
session.onDisconnect = exception_raiser(exception)
# we short-cut the whole state-machine traversal here by
# just calling onClose directly, which would normally be
# called via a Protocol, e.g.,
# autobahn.wamp.websocket.WampWebSocketProtocol
session.onClose(False)
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_disconnect_via_close_deferred(self):
session = MockApplicationSession()
exception = RuntimeError("sideways")
session.onDisconnect = async_exception_raiser(exception)
# we short-cut the whole state-machine traversal here by
# just calling onClose directly, which would normally be
# called via a Protocol, e.g.,
# autobahn.wamp.websocket.WampWebSocketProtocol
session.onClose(False)
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
# XXX FIXME Probably more ways to call onLeave!
def test_on_challenge(self):
session = MockApplicationSession()
exception = RuntimeError("such challenge")
session.onChallenge = exception_raiser(exception)
msg = message.Challenge("foo")
# execute
session.onMessage(msg)
# we already handle any onChallenge errors as "abort the
# connection". So make sure our error showed up in the
# fake-transport.
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
self.assertEqual(1, len(session._transport.messages))
reply = session._transport.messages[0]
self.assertIsInstance(reply, message.Abort)
self.assertEqual("such challenge", reply.message)
def test_on_challenge_deferred(self):
session = MockApplicationSession()
exception = RuntimeError("such challenge")
session.onChallenge = async_exception_raiser(exception)
msg = message.Challenge("foo")
# execute
session.onMessage(msg)
# we already handle any onChallenge errors as "abort the
# connection". So make sure our error showed up in the
# fake-transport.
self.assertEqual(1, len(session.errors))
self.assertEqual(session.errors[0][0], exception)
self.assertEqual(1, len(session._transport.messages))
reply = session._transport.messages[0]
self.assertIsInstance(reply, message.Abort)
self.assertEqual("such challenge", reply.message)
def test_no_session(self):
'''
test "all other cases" when we don't yet have a session
established, which should all raise ProtocolErrors and
*not* go through the onUserError handler. We cheat and
just test one.
'''
session = MockApplicationSession()
exception = RuntimeError("such challenge")
session.onConnect = exception_raiser(exception)
for msg in [message.Goodbye()]:
self.assertRaises(ProtocolError, session.onMessage, (msg,))
self.assertEqual(0, len(session.errors))
def test_on_disconnect(self):
session = MockApplicationSession()
exception = RuntimeError("oh sadness")
session.onDisconnect = exception_raiser(exception)
# we short-cut the whole state-machine traversal here by
# just calling onClose directly, which would normally be
# called via a Protocol, e.g.,
# autobahn.wamp.websocket.WampWebSocketProtocol
session.onClose(False)
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_disconnect_deferred(self):
session = MockApplicationSession()
exception = RuntimeError("oh sadness")
session.onDisconnect = async_exception_raiser(exception)
# we short-cut the whole state-machine traversal here by
# just calling onClose directly, which would normally be
# called via a Protocol, e.g.,
# autobahn.wamp.websocket.WampWebSocketProtocol
session.onClose(False)
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_disconnect_with_session(self):
session = MockApplicationSession()
exception = RuntimeError("the pain runs deep")
session.onDisconnect = exception_raiser(exception)
# create a valid session
session.onMessage(create_mock_welcome())
# we short-cut the whole state-machine traversal here by
# just calling onClose directly, which would normally be
# called via a Protocol, e.g.,
# autobahn.wamp.websocket.WampWebSocketProtocol
session.onClose(False)
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_disconnect_with_session_deferred(self):
session = MockApplicationSession()
exception = RuntimeError("the pain runs deep")
session.onDisconnect = async_exception_raiser(exception)
# create a valid session
session.onMessage(create_mock_welcome())
# we short-cut the whole state-machine traversal here by
# just calling onClose directly, which would normally be
# called via a Protocol, e.g.,
# autobahn.wamp.websocket.WampWebSocketProtocol
session.onClose(False)
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_connect(self):
session = MockApplicationSession()
exception = RuntimeError("the pain runs deep")
session.onConnect = exception_raiser(exception)
trans = MockTransport()
# normally would be called from a Protocol?
session.onOpen(trans)
# shouldn't have done the .join()
self.assertEqual(0, len(trans.messages))
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
def test_on_connect_deferred(self):
session = MockApplicationSession()
exception = RuntimeError("the pain runs deep")
session.onConnect = async_exception_raiser(exception)
trans = MockTransport()
# normally would be called from a Protocol?
session.onOpen(trans)
# shouldn't have done the .join()
self.assertEqual(0, len(trans.messages))
self.assertEqual(1, len(session.errors))
self.assertEqual(exception, session.errors[0][0])
# XXX likely missing other ways to invoke the above. need to
# cover, for sure:
#
# onChallenge
# onJoin
# onLeave
# onDisconnect
#
# what about other ISession ones?
# onConnect
# onDisconnect
# NOTE: for Event stuff, that is publish() handlers,
# test_publish_callback_exception in test_protocol.py already
# covers exceptions coming from user-code.
|
[] |
[] |
[
"USE_TWISTED"
] |
[]
|
["USE_TWISTED"]
|
python
| 1 | 0 | |
cmd/arena/commands/common.go
|
// Copyright 2018 The Kubeflow Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commands
import (
"os"
"github.com/kubeflow/arena/types"
log "github.com/sirupsen/logrus"
batchv1 "k8s.io/api/batch/v1"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// Global variables
var (
restConfig *rest.Config
clientConfig clientcmd.ClientConfig
clientset *kubernetes.Clientset
// To reduce client-go API call, for 'arena list' scenario
allPods []v1.Pod
allJobs []batchv1.Job
name string
namespace string
arenaNamespace string // the system namespace of arena
)
func initKubeClient() (*kubernetes.Clientset, error) {
if clientset != nil {
return clientset, nil
}
var err error
restConfig, err = clientConfig.ClientConfig()
if err != nil {
log.Fatal(err)
return nil, err
}
// create the clientset
clientset, err = kubernetes.NewForConfig(restConfig)
if err != nil {
log.Fatal(err)
return nil, err
}
return clientset, nil
}
func setupKubeconfig() {
// rules := clientcmd.NewDefaultClientConfigLoadingRules()
if len(loadingRules.ExplicitPath) == 0 {
if len(os.Getenv("KUBECONFIG")) > 0 {
loadingRules.ExplicitPath = os.Getenv("KUBECONFIG")
}
}
if len(loadingRules.ExplicitPath) > 0 {
if _, err := os.Stat(loadingRules.ExplicitPath); err != nil {
log.Warnf("Illegal kubeconfig file: %s", loadingRules.ExplicitPath)
} else {
log.Debugf("Use specified kubeconfig file %s", loadingRules.ExplicitPath)
types.KubeConfig = loadingRules.ExplicitPath
os.Setenv("KUBECONFIG", loadingRules.ExplicitPath)
}
}
}
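// Hedged usage sketch (assumption, not part of this file): a command would
// typically call setupKubeconfig() before creating the client, e.g.:
//
//	setupKubeconfig()
//	client, err := initKubeClient()
//	if err != nil {
//		log.Errorf("failed to create kubernetes client: %v", err)
//	}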
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
utils/build_swift/build_swift/driver_arguments.py
|
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import multiprocessing
import os
import android.adb.commands
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
if (args.cmark_build_variant is not None):
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None or \
args.lldb_build_with_xcode is not None:
args.build_lldb = True
# Set the default build variant.
if args.build_variant is None:
args.build_variant = 'Debug'
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.lldb_build_with_xcode is None:
args.lldb_build_with_xcode = '0'
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
if args.llbuild_assertions is None:
args.llbuild_assertions = args.assertions
if args.lldb_assertions is None:
args.lldb_assertions = args.assertions
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = 'Ninja'
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError('error: --ios-all is unavailable in open-source '
'Swift.\nUse --ios to skip iOS device tests.')
if args.tvos_all:
raise ValueError('error: --tvos-all is unavailable in open-source '
'Swift.\nUse --tvos to skip tvOS device tests.')
if args.watchos_all:
raise ValueError('error: --watchos-all is unavailable in open-source '
'Swift.\nUse --watchos to skip watchOS device tests.')
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# --test-paths implies --test and/or --validation-test
# depending on what directories/files have been specified.
if args.test_paths:
for path in args.test_paths:
if path.startswith('test'):
args.test = True
elif path.startswith('validation-test'):
args.test = True
args.validation_test = True
# --validation-test implies --test.
if args.validation_test:
args.test = True
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# --test-optimize-none-with-implicit-dynamic implies --test.
if args.test_optimize_none_with_implicit_dynamic:
args.test = True
# If none of tests specified skip swift stdlib test on all platforms
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
args.test_android = False
args.test_swiftpm = False
args.test_swift_driver = False
args.test_swiftsyntax = False
args.test_indexstoredb = False
args.test_sourcekitlsp = False
args.test_skstresstester = False
args.test_swiftformat = False
args.test_swiftevolve = False
args.test_toolchainbenchmarks = False
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
# --skip-test-watchos is merely a shorthand for host and simulator
# --tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_android:
# If building natively on an Android host, allow running the test suite
# without the NDK config.
if not StdlibDeploymentTarget.Android.contains(StdlibDeploymentTarget
.host_target().name):
args.test_android = False
args.test_android_host = False
if not args.test_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_android_host = False
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
help='also build for tvOS, but disallow tests that require a tvos '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
help='also build for watchOS, but disallow tests that require an '
'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option('--install-all', toggle_true,
help='Assume all built products should be installed')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
default=os.environ.get('USE_DISTCC') == '1',
help='use distcc in pump mode')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
option('--llvm-install-components', store,
default=defaults.llvm_install_components(),
help='A semicolon-separated list of llvm components to install')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Available targets: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option('--infer', store_true('infer_dependencies'),
help='Infer any downstream dependencies from enabled projects')
option(['-l', '--lldb'], store_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], store_true('build_llbuild'),
help='build llbuild')
option(['--libcxx'], store_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], store_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skstresstester'], store_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftformat'], store_true('build_swiftformat'),
help='build swift-format')
option(['--swiftevolve'], store_true('build_swiftevolve'),
help='build the swift-evolve tool')
option(['--swift-driver'], toggle_true('build_swift_driver'),
help='build swift-driver')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option('--test-indexstore-db-sanitize-all',
toggle_true('test_indexstoredb_sanitize_all'),
help='run indexstore-db tests under all sanitizers')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--test-sourcekit-lsp-sanitize-all',
toggle_true('test_sourcekitlsp_sanitize_all'),
help='run sourcekit-lsp tests under all sanitizers')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree '
'match the ones that would be generated from current master')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swift-driver'], toggle_true('install_swift_driver'),
help='install new Swift driver')
option(['--install-swiftevolve'], toggle_true('install_swiftevolve'),
help='install SwiftEvolve')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just built '
'toolchain')
option(['--swift-inspect'],
toggle_true('build_swift_inspect'),
help='build SwiftInspect using swiftpm against the just built '
'toolchain')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', toggle_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--install-playgroundsupport',
store_true('install_playgroundsupport'),
help='install playground support')
option('--tensorflow-swift-apis', store_true('build_tensorflow_swift_apis'),
help='build TensorFlow Swift APIs')
option('--install-tensorflow-swift-apis',
store_true('install_tensorflow_swift_apis'),
help='install TensorFlow Swift APIs')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-libparser-only'], store_true('build_libparser_only'),
help='build only libParser for SwiftSyntax')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
'SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
help='Only run executable tests. Does nothing if host-test is not '
'allowed')
option('--only-non-executable-test', toggle_true,
help='Only run non-executable tests.')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
help='build the external benchmarks')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
help='skip testing iOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
help='skip testing all watchOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swift-driver', toggle_false('test_swift_driver'),
help='skip testing Swift driver')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport',
toggle_false('test_playgroundsupport'),
help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftformat', toggle_false('test_swiftformat'),
help='skip testing swift-format')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'),
help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
option('--skip-test-swift-inspect',
toggle_false('test_swift_inspect'),
help='skip testing swift_inspect')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to libicui18n.so')
option('--android-icu-i18n-include', store_path,
help='Path to a directory containing headers for libicui18n')
option('--android-icu-data', store_path,
help='Path to libicudata.so')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64'],
default='armv7',
help='The Android target architecture when building for Android. '
'Currently only armv7 and aarch64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming language'
' features.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to represent these options so that we can skip installing them if
# the user is running in install-all mode.
option('--skip-build-cmark', toggle_false('build_cmark'),
help='skip building cmark')
option('--skip-build-llvm', toggle_false('build_llvm'),
help='skip building llvm')
option('--skip-build-swift', toggle_false('build_swift'),
help='skip building swift')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand arguments like --skip-test-swift to the only known
# argument --skip-test-swiftevolve.
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
# -------------------------------------------------------------------------
return builder.build()
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB) incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details. The listed
build-script-impl arguments are only for disambiguation in the argument parser.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm
/clang
/swift
/lldb (optional)
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/swift-syntax (optional, requires swiftpm)
/swift-stress-tester (optional,
requires swift-syntax)
/compiler-rt (optional)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects; cloning LLDB is
required if you intend to use the -l, -L, --lldb, or --debug-lldb options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install" \\
install_symroot="/tmp/symroot" \\
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \
--test
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`.
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process and providing you with controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
|
[] |
[] |
[
"C_COMPILER_LAUNCHER",
"CXX_COMPILER_LAUNCHER",
"USE_DISTCC"
] |
[]
|
["C_COMPILER_LAUNCHER", "CXX_COMPILER_LAUNCHER", "USE_DISTCC"]
|
python
| 3 | 0 | |
megfile/s3.py
|
import hashlib
import inspect
import io
import os
import re
from collections import defaultdict
from functools import partial, wraps
from itertools import chain
from logging import getLogger as get_logger
from typing import Any, BinaryIO, Callable, Dict, Iterator, List, Optional, Tuple, Union
from urllib.parse import urlsplit
import boto3
import botocore
import smart_open.s3
from megfile.errors import S3BucketNotFoundError, S3ConfigError, S3FileExistsError, S3FileNotFoundError, S3IsADirectoryError, S3NotADirectoryError, S3PermissionError, S3UnknownError, UnsupportedError, _create_missing_ok_generator
from megfile.errors import _logger as error_logger
from megfile.errors import patch_method, raise_s3_error, s3_should_retry, translate_fs_error, translate_s3_error
from megfile.interfaces import Access, FileCacher, FileEntry, MegfilePathLike, StatResult
from megfile.lib.compat import fspath
from megfile.lib.fnmatch import translate
from megfile.lib.glob import globlize, has_magic, ungloblize
from megfile.lib.joinpath import uri_join
from megfile.lib.s3_buffered_writer import DEFAULT_MAX_BUFFER_SIZE, S3BufferedWriter
from megfile.lib.s3_cached_handler import S3CachedHandler
from megfile.lib.s3_limited_seekable_writer import S3LimitedSeekableWriter
from megfile.lib.s3_pipe_handler import S3PipeHandler
from megfile.lib.s3_prefetch_reader import DEFAULT_BLOCK_SIZE, S3PrefetchReader
from megfile.lib.s3_share_cache_reader import S3ShareCacheReader
from megfile.utils import get_binary_mode, get_content_offset, is_readable, thread_local
# Monkey patch for smart_open
_smart_open_parameters = inspect.signature(smart_open.s3.open).parameters
if 'resource_kwargs' in _smart_open_parameters:
# smart_open >= 1.8.1
def _s3_open(bucket: str, key: str, mode: str):
return smart_open.s3.open(
bucket,
key,
mode,
session=get_s3_session(),
resource_kwargs={'endpoint_url': get_endpoint_url()})
elif 'client' in _smart_open_parameters:
# smart_open >= 5.0.0
def _s3_open(bucket: str, key: str, mode: str):
return smart_open.s3.open(bucket, key, mode, client=get_s3_client())
else:
# smart_open < 1.8.1, >= 1.6.0
def _s3_open(bucket: str, key: str, mode: str):
return smart_open.s3.open(
bucket,
key,
mode,
s3_session=get_s3_session(),
endpoint_url=get_endpoint_url())
__all__ = [
'is_s3',
's3_buffered_open',
's3_cached_open',
's3_copy',
's3_download',
's3_access',
's3_exists',
's3_getmd5',
's3_getmtime',
's3_getsize',
's3_glob',
's3_glob_stat',
's3_hasbucket',
's3_iglob',
's3_isdir',
's3_isfile',
's3_legacy_open',
's3_listdir',
's3_load_content',
's3_load_from',
's3_makedirs',
's3_memory_open',
's3_open',
's3_path_join',
's3_pipe_open',
's3_prefetch_open',
's3_remove',
's3_rename',
's3_move',
's3_sync',
's3_save_as',
's3_scan',
's3_scan_stat',
's3_scandir',
's3_stat',
's3_share_cache_open',
's3_unlink',
's3_upload',
's3_walk',
'S3Cacher',
'get_s3_client',
'parse_s3_url',
'get_endpoint_url',
'S3BufferedWriter',
'get_s3_session',
'S3LimitedSeekableWriter',
'S3PrefetchReader',
'S3ShareCacheReader',
]
_logger = get_logger(__name__)
content_md5_header = 'megfile-content-md5'
endpoint_url = 'https://s3.amazonaws.com'
def get_endpoint_url() -> str:
'''Get the endpoint url of S3
returns: S3 endpoint url
'''
oss_endpoint = os.environ.get('OSS_ENDPOINT')
if oss_endpoint is None:
oss_endpoint = endpoint_url
return oss_endpoint
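# Illustrative usage (a sketch; the endpoint below is hypothetical). When the
# OSS_ENDPOINT environment variable is set, it takes precedence over the
# module-level `endpoint_url` default:
#   os.environ['OSS_ENDPOINT'] = 'http://oss.example.com'
#   get_endpoint_url()  # -> 'http://oss.example.com'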
def get_s3_session():
'''Get S3 session
returns: S3 session
'''
return thread_local('s3_session', boto3.session.Session)
max_pool_connections = 32
max_retries = 10
def _patch_make_request(client: botocore.client.BaseClient):
def retry_callback(error, operation_model, request_dict, request_context):
if error is None: # retry for the first time
error_logger.debug(
'failed to process: %r, with parameters: %s',
operation_model.name, request_dict)
if is_readable(request_dict['body']):
request_dict['body'].seek(0)
def before_callback(operation_model, request_dict, request_context):
_logger.debug(
'send s3 request: %r, with parameters: %s', operation_model.name,
request_dict)
client._make_request = patch_method(
client._make_request,
max_retries=max_retries,
should_retry=s3_should_retry,
before_callback=before_callback,
retry_callback=retry_callback)
return client
def _patch_send_request():
# From: https://github.com/boto/botocore/pull/1328
try:
import botocore.awsrequest
original_send_request = botocore.awsrequest.AWSConnection._send_request
except (AttributeError, ImportError):
return
def _send_request(self, method, url, body, headers, *args, **kwargs):
if headers.get('Content-Length') == '0':
# From RFC: https://tools.ietf.org/html/rfc7231#section-5.1.1
# Requirement for clients:
# - A client MUST NOT generate a 100-continue expectation
# in a request that does not include a message body.
headers.pop('Expect', None)
original_send_request(self, method, url, body, headers, *args, **kwargs)
botocore.awsrequest.AWSConnection._send_request = _send_request
_patch_send_request()
def get_s3_client(
config: Optional[botocore.config.Config] = None,
cache_key: Optional[str] = None):
'''Get S3 client
returns: S3 client
'''
if cache_key is not None:
return thread_local(cache_key, get_s3_client, config)
client = get_s3_session().client(
's3', endpoint_url=get_endpoint_url(), config=config)
client = _patch_make_request(client)
return client
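# Illustrative usage (a sketch; the Config below is hypothetical). Passing a
# cache_key makes the patched client be created once per thread via thread_local():
#   config = botocore.config.Config(max_pool_connections=max_pool_connections)
#   client = get_s3_client(config=config, cache_key='s3_client')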
def is_s3(path: MegfilePathLike) -> bool:
'''
According to `aws-cli <https://docs.aws.amazon.com/cli/latest/reference/s3/index.html>`_, test if a path is an s3 path
:param path: Path to be tested
:returns: True if path is s3 path, else False
'''
path = fspath(path)
if not path.startswith('s3://'):
return False
parts = urlsplit(path)
return parts.scheme == 's3'
def parse_s3_url(s3_url: MegfilePathLike) -> Tuple[str, str]:
s3_url = fspath(s3_url)
s3_scheme, rightpart = s3_url[:5], s3_url[5:]
if s3_scheme != 's3://':
raise ValueError('Not a s3 url: %r' % s3_url)
bucketmatch = re.match('(.*?)/', rightpart)
if bucketmatch is None:
bucket = rightpart
path = ''
else:
bucket = bucketmatch.group(1)
path = rightpart[len(bucket) + 1:]
return bucket, path
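# Illustrative behaviour of parse_s3_url (bucket and key names are hypothetical):
#   parse_s3_url('s3://mybucket/dir/file.txt')  # -> ('mybucket', 'dir/file.txt')
#   parse_s3_url('s3://mybucket')               # -> ('mybucket', '')
#   parse_s3_url('/local/path')                 # raises ValueError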
def _become_prefix(prefix: str) -> str:
if prefix != '' and not prefix.endswith('/'):
prefix += '/'
return prefix
def _make_stat(content: Dict[str, Any]):
return StatResult(
size=content['Size'],
mtime=content['LastModified'].timestamp(),
extra=content,
)
def s3_copy(
src_url: MegfilePathLike,
dst_url: MegfilePathLike,
callback: Optional[Callable[[int], None]] = None) -> None:
''' File copy on S3
Copy content of file on `src_path` to `dst_path`.
It's the caller's responsibility to ensure that s3_isfile(src_url) == True
:param src_path: Source file path
:param dst_path: Target file path
:param callback: Called periodically during copy, and the input parameter is the data size (in bytes) of copy since the last call
'''
src_bucket, src_key = parse_s3_url(src_url)
dst_bucket, dst_key = parse_s3_url(dst_url)
if not src_bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % src_url)
if not src_key or src_key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % src_url)
if not dst_bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % dst_url)
if not dst_key or dst_key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % dst_url)
client = get_s3_client()
try:
client.copy(
{
'Bucket': src_bucket,
'Key': src_key,
},
Bucket=dst_bucket,
Key=dst_key,
Callback=callback)
except Exception as error:
error = translate_s3_error(error, dst_url)
# Error can't help tell which is problematic
if isinstance(error, S3BucketNotFoundError):
if not s3_hasbucket(src_url):
raise S3BucketNotFoundError('No such bucket: %r' % src_url)
elif isinstance(error, S3FileNotFoundError):
if not s3_isfile(src_url):
if s3_isdir(src_url):
raise S3IsADirectoryError('Is a directory: %r' % src_url)
raise S3FileNotFoundError('No such file: %r' % src_url)
raise error
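# Illustrative usage (hypothetical bucket and key names); the source must be an
# existing s3 file and the destination must be a file path, not a directory:
#   s3_copy('s3://src-bucket/data/a.txt', 's3://dst-bucket/backup/a.txt')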
def s3_isdir(s3_url: MegfilePathLike) -> bool:
'''
Test if an s3 url is directory
An s3 url is considered a directory in either of the following cases:
There exists a suffix such that ``os.path.join(s3_url, suffix)`` is a file
The url is an empty bucket or s3://
:param s3_url: Path to be tested
:returns: True if path is s3 directory, else False
'''
bucket, key = parse_s3_url(s3_url)
if not bucket: # s3:// => True, s3:///key => False
return not key
prefix = _become_prefix(key)
client = get_s3_client()
try:
resp = client.list_objects_v2(
Bucket=bucket, Prefix=prefix, Delimiter='/', MaxKeys=1)
except Exception as error:
error = translate_s3_error(error, s3_url)
if isinstance(error, (S3UnknownError, S3ConfigError)):
raise error
return False
if not key: # bucket is accessible
return True
if 'KeyCount' in resp:
return resp['KeyCount'] > 0
return len(resp.get('Contents', [])) > 0 or \
len(resp.get('CommonPrefixes', [])) > 0
def s3_isfile(s3_url: MegfilePathLike) -> bool:
'''
Test if an s3_url is a file
:param s3_url: Path to be tested
:returns: True if path is s3 file, else False
'''
bucket, key = parse_s3_url(s3_url)
if not bucket or not key or key.endswith('/'):
# s3://, s3:///key, s3://bucket, s3://bucket/prefix/
return False
client = get_s3_client()
try:
client.head_object(Bucket=bucket, Key=key)
except Exception as error:
error = translate_s3_error(error, s3_url)
if isinstance(error, (S3UnknownError, S3ConfigError)):
raise error
return False
return True
def s3_access(s3_url: MegfilePathLike, mode: Access = Access.READ) -> bool:
'''
Test if path has access permission described by mode
Implemented with head_bucket(); READ and WRITE checks currently behave the same.
:param s3_url: Path to be tested
:param mode: access mode
:returns: True if the bucket of s3_url has read/write access, else False
'''
bucket, _ = parse_s3_url(s3_url) # only check bucket accessibility
if not bucket:
raise Exception("No available bucket")
if not isinstance(mode, Access):
raise TypeError(
'Unsupported mode: {} -- Mode should use one of the enums belonging to: {}'
.format(mode, ', '.join([str(a) for a in Access])))
if mode not in (Access.READ, Access.WRITE):
raise TypeError('Unsupported mode: {}'.format(mode))
client = get_s3_client()
try:
client.head_bucket(Bucket=bucket)
except Exception as error:
error = translate_s3_error(error, s3_url)
if isinstance(
error,
(S3PermissionError, S3FileNotFoundError, S3BucketNotFoundError)):
return False
raise error
return True
def s3_hasbucket(s3_url: MegfilePathLike) -> bool:
'''
Test if the bucket of s3_url exists
:param path: Path to be tested
:returns: True if the bucket of s3_url exists, else False
'''
bucket, key = parse_s3_url(s3_url)
if not bucket:
return False
client = get_s3_client()
try:
client.head_bucket(Bucket=bucket)
except Exception as error:
error = translate_s3_error(error, s3_url)
if isinstance(error, (S3UnknownError, S3ConfigError)):
raise error
if isinstance(error, S3FileNotFoundError):
return False
return True
def s3_exists(s3_url: MegfilePathLike) -> bool:
'''
Test if s3_url exists
If the bucket of s3_url is not readable, return False
:param path: Path to be tested
:returns: True if s3_url exists, else False
'''
bucket, key = parse_s3_url(s3_url)
if not bucket: # s3:// => True, s3:///key => False
return not key
return s3_isfile(s3_url) or s3_isdir(s3_url)
max_keys = 1000
def _list_objects_recursive(
s3_client, bucket: str, prefix: str, delimiter: str = ''):
resp = s3_client.list_objects_v2(
Bucket=bucket, Prefix=prefix, Delimiter=delimiter, MaxKeys=max_keys)
while True:
yield resp
if not resp['IsTruncated']:
break
resp = s3_client.list_objects_v2(
Bucket=bucket,
Prefix=prefix,
Delimiter=delimiter,
ContinuationToken=resp['NextContinuationToken'],
MaxKeys=max_keys)
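# Illustrative usage (a sketch; bucket and prefix are hypothetical). Each yielded
# `resp` is one list_objects_v2 page; continuation tokens are handled internally:
#   client = get_s3_client()
#   for resp in _list_objects_recursive(client, 'mybucket', 'logs/', '/'):
#       for content in resp.get('Contents', []):
#           print(content['Key'], content['Size'])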
def s3_scandir(s3_url: MegfilePathLike) -> Iterator[FileEntry]:
'''
Get all contents of the given s3_url; the order of the results is not guaranteed.
:param s3_url: Given s3 path
:returns: All contents that have the prefix of s3_url
:raises: S3FileNotFoundError, S3NotADirectoryError
'''
bucket, key = parse_s3_url(s3_url)
if not bucket and key:
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if s3_isfile(s3_url):
raise S3NotADirectoryError('Not a directory: %r' % s3_url)
elif not s3_isdir(s3_url):
raise S3FileNotFoundError('No such directory: %r' % s3_url)
prefix = _become_prefix(key)
client = get_s3_client()
# In order to do check on creation,
# we need to wrap the iterator in another function
def create_generator() -> Iterator[FileEntry]:
with raise_s3_error(s3_url):
if not bucket and not key: # list buckets
response = client.list_buckets()
for content in response['Buckets']:
yield FileEntry(
content['Name'],
StatResult(
ctime=content['CreationDate'].timestamp(),
isdir=True,
extra=content,
))
return
for resp in _list_objects_recursive(client, bucket, prefix, '/'):
for common_prefix in resp.get('CommonPrefixes', []):
yield FileEntry(
common_prefix['Prefix'][len(prefix):-1],
StatResult(isdir=True, extra=common_prefix))
for content in resp.get('Contents', []):
yield FileEntry(
content['Key'][len(prefix):], _make_stat(content))
return create_generator()
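# Illustrative usage (hypothetical bucket). Each FileEntry carries the name
# relative to the directory and a StatResult (isdir=True for sub-prefixes):
#   for name, stat in s3_scandir('s3://mybucket/data'):
#       print(name, stat.isdir)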
def s3_listdir(s3_url: str) -> List[str]:
'''
Get all contents of the given s3_url. The result is in ascending alphabetical order.
:param s3_url: Given s3 path
:returns: All contents that have the prefix of s3_url, in ascending alphabetical order
:raises: S3FileNotFoundError, S3NotADirectoryError
'''
entries = list(s3_scandir(s3_url))
return sorted([entry.name for entry in entries])
def _s3_getdirstat(s3_dir_url: str) -> StatResult:
'''
Return StatResult of the given s3_url directory, including:
1. Directory size: the sum of the sizes of all files in it, including files in subdirectories (if any).
The result excludes the size of the directory itself. In other words, return 0 Bytes on an empty directory path
2. Last-modified time of the directory: the latest modified time of all files in it. The mtime of an empty directory is 1970-01-01 00:00:00
:param s3_url: Given s3 path
:returns: A StatResult with the aggregated size and mtime
'''
if not s3_isdir(s3_dir_url):
raise S3FileNotFoundError('No such file or directory: %r' % s3_dir_url)
bucket, key = parse_s3_url(s3_dir_url)
prefix = _become_prefix(key)
client = get_s3_client()
size = 0
mtime = 0.0
with raise_s3_error(s3_dir_url):
for resp in _list_objects_recursive(client, bucket, prefix):
for content in resp.get('Contents', []):
size += content['Size']
last_modified = content['LastModified'].timestamp()
if mtime < last_modified:
mtime = last_modified
return StatResult(size=size, mtime=mtime, isdir=True)
def s3_stat(s3_url: MegfilePathLike) -> StatResult:
'''
Get StatResult of the s3_url file, including file size and mtime, referring to s3_getsize and s3_getmtime
If s3_url is not an existent path, which means s3_exists(s3_url) returns False, then raise S3FileNotFoundError
If attempting to get the StatResult of the complete s3, such as s3_dir_url == 's3://', raise UnsupportedError
:param s3_url: Given s3 path
:returns: StatResult
:raises: S3FileNotFoundError, UnsupportedError
'''
bucket, key = parse_s3_url(s3_url)
if not bucket:
if not key:
raise UnsupportedError('Get stat of whole s3', s3_url)
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if not s3_isfile(s3_url):
return _s3_getdirstat(s3_url)
if not key or key.endswith('/'):
raise S3FileNotFoundError('No such directory: %r' % s3_url)
client = get_s3_client()
with raise_s3_error(s3_url):
content = client.head_object(Bucket=bucket, Key=key)
stat_record = StatResult(
size=content['ContentLength'],
mtime=content['LastModified'].timestamp(),
extra=content)
return stat_record
def s3_getsize(s3_url: MegfilePathLike) -> int:
'''
Get file size on the given s3_url path (in bytes).
If the path is a directory, return the sum of the sizes of all files in it, including files in subdirectories (if any).
The result excludes the size of the directory itself. In other words, return 0 Bytes on an empty directory path.
If s3_url is not an existent path, which means s3_exists(s3_url) returns False, then raise S3FileNotFoundError
:param s3_url: Given s3 path
:returns: File size
:raises: S3FileNotFoundError, UnsupportedError
'''
return s3_stat(s3_url).size
def s3_getmtime(s3_url: MegfilePathLike) -> float:
'''
Get last-modified time of the file on the given s3_url path (in Unix timestamp format).
If the path is an existent directory, return the latest modified time of all files in it. The mtime of an empty directory is 1970-01-01 00:00:00
If s3_url is not an existent path, which means s3_exists(s3_url) returns False, then raise S3FileNotFoundError
:param s3_url: Given s3 path
:returns: Last-modified time
:raises: S3FileNotFoundError, UnsupportedError
'''
return s3_stat(s3_url).mtime
def s3_upload(
src_url: MegfilePathLike,
dst_url: MegfilePathLike,
callback: Optional[Callable[[int], None]] = None) -> None:
'''
Uploads a file from local filesystem to s3.
:param src_url: source fs path
:param dst_url: target s3 path
:param callback: Called periodically during copy, and the input parameter is the data size (in bytes) of copy since the last call
'''
dst_bucket, dst_key = parse_s3_url(dst_url)
if not dst_bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % dst_url)
if not dst_key or dst_key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % dst_url)
client = get_s3_client()
with open(src_url, 'rb') as src:
# TODO: when have the 2nd md5 use case, extract this.
hash_md5 = hashlib.md5()
for chunk in iter(lambda: src.read(4096), b''):
hash_md5.update(chunk)
md5 = hash_md5.hexdigest()
src.seek(0)
with raise_s3_error(dst_url):
# TODO: better design for metadata scheme when we have another metadata field.
client.upload_fileobj(
src,
Bucket=dst_bucket,
Key=dst_key,
ExtraArgs={'Metadata': {
content_md5_header: md5,
}},
Callback=callback)
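# Illustrative usage (hypothetical local path and bucket). The file's md5 is
# recorded in the object metadata under the `megfile-content-md5` key:
#   s3_upload('/tmp/model.bin', 's3://mybucket/models/model.bin',
#             callback=lambda nbytes: None)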
def s3_download(
src_url: MegfilePathLike,
dst_url: MegfilePathLike,
callback: Optional[Callable[[int], None]] = None) -> None:
'''
Downloads a file from s3 to local filesystem.
:param src_url: source s3 path
:param dst_url: target fs path
:param callback: Called periodically during copy, and the input parameter is the data size (in bytes) of copy since the last call
'''
src_bucket, src_key = parse_s3_url(src_url)
if not src_bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % src_url)
if not src_key or src_key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % src_url)
dst_url = fspath(dst_url)
if not dst_url or dst_url.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % dst_url)
dst_directory = os.path.dirname(dst_url)
if dst_directory != '':
os.makedirs(dst_directory, exist_ok=True)
client = get_s3_client()
try:
client.download_file(src_bucket, src_key, dst_url, Callback=callback)
except Exception as error:
error = translate_fs_error(error, dst_url)
error = translate_s3_error(error, src_url)
if isinstance(error, S3FileNotFoundError) and s3_isdir(src_url):
raise S3IsADirectoryError('Is a directory: %r' % src_url)
raise error
def s3_remove(s3_url: MegfilePathLike, missing_ok: bool = False) -> None:
'''
Remove the file or directory on s3; removing `s3://` and `s3://bucket` is not permitted
:param s3_url: Given path
:param missing_ok: if False and target file/directory not exists, raise S3FileNotFoundError
:raises: S3PermissionError, S3FileNotFoundError, UnsupportedError
'''
bucket, key = parse_s3_url(s3_url)
if not bucket:
if not key:
raise UnsupportedError('Remove whole s3', s3_url)
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if not key:
raise UnsupportedError('Remove bucket', s3_url)
if not s3_exists(s3_url):
if missing_ok:
return
raise S3FileNotFoundError('No such file or directory: %r' % s3_url)
client = get_s3_client()
with raise_s3_error(s3_url):
if s3_isfile(s3_url):
client.delete_object(Bucket=bucket, Key=key)
return
prefix = _become_prefix(key)
for resp in _list_objects_recursive(client, bucket, prefix):
if 'Contents' in resp:
keys = [{'Key': content['Key']} for content in resp['Contents']]
client.delete_objects(Bucket=bucket, Delete={'Objects': keys})
def s3_unlink(s3_url: MegfilePathLike, missing_ok: bool = False) -> None:
'''
Remove the file on s3
:param s3_url: Given path
:param missing_ok: if False and target file not exists, raise S3FileNotFoundError
:raises: S3PermissionError, S3FileNotFoundError, S3IsADirectoryError
'''
bucket, key = parse_s3_url(s3_url)
if not bucket or not key or key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % s3_url)
if not s3_isfile(s3_url):
if missing_ok:
return
raise S3FileNotFoundError('No such file: %r' % s3_url)
client = get_s3_client()
with raise_s3_error(s3_url):
client.delete_object(Bucket=bucket, Key=key)
def s3_makedirs(s3_url: MegfilePathLike, exist_ok: bool = False):
'''
Create an s3 directory.
Purely creating a directory is meaningless because empty directories cannot exist on object storage.
This function only tests that the target bucket has WRITE access.
:param s3_url: Given path
:param exist_ok: If False and target directory exists, raise S3FileExistsError
:raises: S3BucketNotFoundError, S3FileExistsError
'''
bucket, _ = parse_s3_url(s3_url)
if not bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if not s3_hasbucket(s3_url):
raise S3BucketNotFoundError('No such bucket: %r' % s3_url)
if exist_ok:
if s3_isfile(s3_url):
raise S3FileExistsError('File exists: %r' % s3_url)
return
if s3_exists(s3_url):
raise S3FileExistsError('File exists: %r' % s3_url)
def s3_walk(s3_url: MegfilePathLike
) -> Iterator[Tuple[str, List[str], List[str]]]:
'''
Iteratively traverse the given s3 directory, in top-bottom order. In other words, firstly traverse parent directory, if subdirectories exist, traverse the subdirectories in alphabetical order.
Every iteration on generator yields a 3-tuple:(root, dirs, files)
- root: Current s3 path;
- dirs: Name list of subdirectories in current directory. The list is sorted by name in ascending alphabetical order;
- files: Name list of files in current directory. The list is sorted by name in ascending alphabetical order;
If s3_url is a file path, return an empty generator
If s3_url is a non-existent path, return an empty generator
If s3_url is a bucket path, bucket will be the top directory, and will be returned at first iteration of generator
If s3_url is an empty bucket, only yield one 3-tuple (note: s3 doesn't have empty directories)
If s3_url doesn't contain any bucket, which is s3_url == 's3://', raise UnsupportedError. walk() on complete s3 is not supported in megfile
:param path: An s3 path
:raises: UnsupportedError
:returns: A 3-tuple generator
'''
bucket, key = parse_s3_url(s3_url)
if not bucket:
raise UnsupportedError('Walk whole s3', s3_url)
if not s3_isdir(s3_url):
return
stack = [key]
client = get_s3_client()
while len(stack) > 0:
current = _become_prefix(stack.pop())
dirs, files = [], []
for resp in _list_objects_recursive(client, bucket, current, '/'):
for common_prefix in resp.get('CommonPrefixes', []):
dirs.append(common_prefix['Prefix'][:-1])
for content in resp.get('Contents', []):
files.append(content['Key'])
dirs = sorted(dirs)
stack.extend(reversed(dirs))
root = s3_path_join('s3://', bucket, current)[:-1]
dirs = [path[len(current):] for path in dirs]
files = sorted(path[len(current):] for path in files)
yield root, dirs, files
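# Illustrative usage (hypothetical bucket). Mirrors os.walk semantics on s3,
# yielding the current prefix as root plus sorted subdirectory and file names:
#   for root, dirs, files in s3_walk('s3://mybucket/data'):
#       print(root, dirs, files)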
def s3_scan(s3_url: MegfilePathLike, missing_ok: bool = True) -> Iterator[str]:
'''
Iteratively traverse only files in given s3 directory, in alphabetical order.
Every iteration on generator yields a path string.
If s3_url is a file path, yields the file only
If s3_url is a non-existent path, return an empty generator
If s3_url is a bucket path, return all file paths in the bucket
If s3_url is an empty bucket, return an empty generator
If s3_url doesn't contain any bucket, which is s3_url == 's3://', raise UnsupportedError. scan() on complete s3 is not supported in megfile
:param path: An s3 path
:param missing_ok: If False and there's no file in the directory, raise FileNotFoundError
:raises: UnsupportedError
:returns: A file path generator
'''
scan_stat_iter = s3_scan_stat(s3_url)
def create_generator() -> Iterator[str]:
for path, _ in scan_stat_iter:
yield path
return create_generator()
def s3_scan_stat(s3_url: MegfilePathLike,
missing_ok: bool = True) -> Iterator[FileEntry]:
'''
Iteratively traverse only files in given directory, in alphabetical order.
Every iteration on generator yields a tuple of path string and file stat
:param path: Given s3_url
:param missing_ok: If False and there's no file in the directory, raise FileNotFoundError
:raises: UnsupportedError
:returns: A file path generator
'''
bucket, key = parse_s3_url(s3_url)
if not bucket:
raise UnsupportedError('Scan whole s3', s3_url)
def create_generator() -> Iterator[FileEntry]:
if not s3_isdir(s3_url):
if s3_isfile(s3_url):
# On s3, file and directory may be of same name and level, so need to test the path is file or directory
yield FileEntry(fspath(s3_url), s3_stat(s3_url))
return
if not key.endswith('/') and s3_isfile(s3_url):
yield FileEntry(fspath(s3_url), s3_stat(s3_url))
prefix = _become_prefix(key)
client = get_s3_client()
with raise_s3_error(s3_url):
for resp in _list_objects_recursive(client, bucket, prefix):
for content in resp.get('Contents', []):
full_path = s3_path_join('s3://', bucket, content['Key'])
yield FileEntry(full_path, _make_stat(content))
return _create_missing_ok_generator(
create_generator(), missing_ok,
S3FileNotFoundError('No match file: %r' % s3_url))
def s3_path_join(path: MegfilePathLike, *other_paths: MegfilePathLike) -> str:
'''
Concat 2 or more path to a complete path
:param path: Given path
:param other_paths: Paths to be concatenated
:returns: Concatenated complete path
.. note ::
The difference between this function and ``os.path.join`` is that this function ignores left side slash (which indicates absolute path) in ``other_paths`` and will directly concat.
e.g. os.path.join('/path', 'to', '/file') => '/file', but s3_path_join('/path', 'to', '/file') => '/path/to/file'
'''
return uri_join(fspath(path), *map(fspath, other_paths))
def _s3_split_magic(s3_pathname: str) -> Tuple[str, str]:
if not has_magic(s3_pathname):
return s3_pathname, ''
delimiter = '/'
normal_parts = []
magic_parts = []
all_parts = s3_pathname.split(delimiter)
for i, part in enumerate(all_parts):
if not has_magic(part):
normal_parts.append(part)
else:
magic_parts = all_parts[i:]
break
return delimiter.join(normal_parts), delimiter.join(magic_parts)
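# Illustrative behaviour (hypothetical paths). The non-magic prefix is split off
# so that listing can start from a concrete directory:
#   _s3_split_magic('s3://mybucket/dir/*.txt')   # -> ('s3://mybucket/dir', '*.txt')
#   _s3_split_magic('s3://mybucket/dir/file')    # -> ('s3://mybucket/dir/file', '')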
def s3_glob(
s3_pathname: MegfilePathLike,
recursive: bool = True,
missing_ok: bool = True) -> List[str]:
'''Return s3 path list in ascending alphabetical order, in which path matches glob pattern
Notes: Only glob within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError
:param s3_pathname: May contain shell wildcard characters
:param recursive: If False, `**` will not search directories recursively
:param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError
:raises: UnsupportedError, when bucket part contains wildcard characters
:returns: A list contains paths match `s3_pathname`
'''
return list(
s3_iglob(s3_pathname, recursive=recursive, missing_ok=missing_ok))
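# Illustrative usage (hypothetical bucket and pattern). Wildcards are only
# allowed after the bucket name; results come back in ascending alphabetical order:
#   s3_glob('s3://mybucket/logs/2020-*/*.json')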
def s3_iglob(
s3_pathname: MegfilePathLike,
recursive: bool = True,
missing_ok: bool = True) -> Iterator[str]:
'''Return s3 path iterator in ascending alphabetical order, in which path matches glob pattern
Notes: Only glob within a bucket. If trying to match the bucket with wildcard characters, raise UnsupportedError
:param s3_pathname: May contain shell wildcard characters
:param recursive: If False, `**` will not search directories recursively
:param missing_ok: If False and target path doesn't match any file, raise FileNotFoundError
:raises: UnsupportedError, when bucket part contains wildcard characters
:returns: An iterator contains paths match `s3_pathname`
'''
s3_glob_stat_iter = s3_glob_stat(
s3_pathname, recursive=recursive, missing_ok=missing_ok)
def create_generator() -> Iterator[str]:
for path, _ in s3_glob_stat_iter:
yield path
return create_generator()
def s3_glob_stat(
s3_pathname: MegfilePathLike,
recursive: bool = True,
missing_ok: bool = True) -> Iterator[FileEntry]:
'''Return a generator of (path, file stat) tuples in ascending alphabetical order, in which each path matches the glob pattern
Note: Glob only within a bucket; matching the bucket name itself with wildcard characters raises UnsupportedError
:param s3_pathname: May contain shell wildcard characters
:param recursive: If False, `**` will not search directories recursively
:param missing_ok: If False and the target path doesn't match any file, raise FileNotFoundError
:raises: UnsupportedError, when the bucket part contains wildcard characters
:returns: A generator of (path, file stat) tuples for paths matching `s3_pathname`
'''
s3_pathname = fspath(s3_pathname)
iterables = []
for group_s3_pathname_1 in _group_s3path_by_bucket(s3_pathname):
for group_s3_pathname_2 in _group_s3path_by_ungloblize(
group_s3_pathname_1):
iterables.append(
_s3_glob_stat_single_path(
group_s3_pathname_2, recursive, missing_ok))
generator = chain(*iterables)
return _create_missing_ok_generator(
generator, missing_ok,
S3FileNotFoundError('No match file: %r' % s3_pathname))
def _s3_glob_stat_single_path(
s3_pathname: MegfilePathLike,
recursive: bool = True,
missing_ok: bool = True) -> Iterator[FileEntry]:
if not recursive:
# If not recursive, replace ** with *
s3_pathname = re.sub(r'\*{2,}', '*', s3_pathname)
top_dir, wildcard_part = _s3_split_magic(s3_pathname)
search_dir = wildcard_part.endswith('/')
def need_list_recursive(wildcard_part: str) -> bool:
if '**' in wildcard_part:
return True
for expanded_path in ungloblize(wildcard_part):
parts_length = len(expanded_path.split('/'))
if parts_length + search_dir >= 2:
return True
return False
def create_generator(_s3_pathname) -> Iterator[FileEntry]:
if not s3_exists(top_dir):
return
if not has_magic(_s3_pathname):
if s3_isfile(_s3_pathname):
yield FileEntry(_s3_pathname, s3_stat(_s3_pathname))
if s3_isdir(_s3_pathname):
yield FileEntry(_s3_pathname, StatResult(isdir=True))
return
else:
if need_list_recursive(wildcard_part):
delimiter = ''
else:
delimiter = '/'
dirnames = set()
pattern = re.compile(translate(_s3_pathname))
bucket, key = parse_s3_url(top_dir)
prefix = _become_prefix(key)
client = get_s3_client()
with raise_s3_error(_s3_pathname):
for resp in _list_objects_recursive(client, bucket, prefix,
delimiter):
for content in resp.get('Contents', []):
path = s3_path_join('s3://', bucket, content['Key'])
if not search_dir and pattern.match(path):
yield FileEntry(path, _make_stat(content))
dirname = os.path.dirname(path)
while dirname not in dirnames and dirname != top_dir:
dirnames.add(dirname)
path = dirname + '/' if search_dir else dirname
if pattern.match(path):
yield FileEntry(path, StatResult(isdir=True))
dirname = os.path.dirname(dirname)
for common_prefix in resp.get('CommonPrefixes', []):
path = s3_path_join(
's3://', bucket, common_prefix['Prefix'])
dirname = os.path.dirname(path)
if dirname not in dirnames and dirname != top_dir:
dirnames.add(dirname)
path = dirname + '/' if search_dir else dirname
if pattern.match(path):
yield FileEntry(path, StatResult(isdir=True))
return create_generator(s3_pathname)
def _s3path_change_bucket(path: str, oldname: str, newname: str) -> str:
return path.replace(oldname, newname, 1)
def _list_all_buckets() -> Iterator[str]:
client = get_s3_client()
response = client.list_buckets()
for content in response['Buckets']:
yield content['Name']
def _group_s3path_by_bucket(s3_pathname: str) -> List[str]:
bucket, key = parse_s3_url(s3_pathname)
if not bucket:
if not key:
raise UnsupportedError('Glob whole s3', s3_pathname)
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_pathname)
glob_dict = defaultdict(list)
expanded_s3_pathname = ungloblize(s3_pathname)
for single_glob in expanded_s3_pathname:
bucket, _ = parse_s3_url(single_glob)
glob_dict[bucket].append(single_glob)
group_glob_list = []
all_buckets = None
for bucketname, glob_list in glob_dict.items():
if has_magic(bucketname):
if all_buckets is None:
all_buckets = _list_all_buckets()
pattern = re.compile(translate(re.sub(r'\*{2,}', '*', bucketname)))
for bucket in _list_all_buckets():
if pattern.fullmatch(bucket) is not None:
globlized_path = globlize(
list(
map(
partial(
_s3path_change_bucket,
oldname=bucketname,
newname=bucket), glob_list)))
group_glob_list.append(globlized_path)
else:
group_glob_list.append(globlize(glob_list))
return group_glob_list
def _group_s3path_by_ungloblize(s3_pathname: str) -> List[str]:
glob_list = []
group_glob_list = []
expanded_s3_pathname = ungloblize(s3_pathname)
for pathname in expanded_s3_pathname:
if has_magic(pathname):
glob_list.append(pathname)
continue
group_glob_list.append(pathname)
if glob_list:
group_glob_list.append(globlize(glob_list))
return group_glob_list
def s3_save_as(file_object: BinaryIO, s3_url: MegfilePathLike) -> None:
'''Write the opened binary stream to the specified path; the stream will not be closed
:param file_object: Stream to be read
:param s3_url: Specified target path
'''
bucket, key = parse_s3_url(s3_url)
if not bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if not key or key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % s3_url)
client = get_s3_client()
with raise_s3_error(s3_url):
client.upload_fileobj(file_object, Bucket=bucket, Key=key)
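# Hedged usage sketch (paths are hypothetical): upload an already-open stream.
#
#   with open('/tmp/local.bin', 'rb') as f:
#       s3_save_as(f, 's3://my-bucket/remote.bin')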
def s3_load_from(s3_url: MegfilePathLike) -> BinaryIO:
'''Read all content of the specified path, in binary, into memory
The caller should close the returned BinaryIO manually
:param s3_url: Specified path
:returns: BinaryIO
'''
bucket, key = parse_s3_url(s3_url)
if not bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if not key or key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % s3_url)
buffer = io.BytesIO()
client = get_s3_client()
with raise_s3_error(s3_url):
client.download_fileobj(bucket, key, buffer)
buffer.seek(0)
return buffer
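# Hedged usage sketch (path is hypothetical): the whole object is loaded into a
# BytesIO, which the caller must close.
#
#   buffer = s3_load_from('s3://my-bucket/remote.bin')
#   data = buffer.read()
#   buffer.close()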
def _s3_binary_mode(s3_open_func):
@wraps(s3_open_func)
def wrapper(s3_url, mode: str = 'rb', **kwargs):
bucket, key = parse_s3_url(s3_url)
if not bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if not key or key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % s3_url)
if 'x' in mode:
if s3_isfile(s3_url):
raise S3FileExistsError('File exists: %r' % s3_url)
mode = mode.replace('x', 'w')
if 'w' in mode or 'a' in mode:
if not s3_hasbucket(s3_url):
raise S3BucketNotFoundError('No such bucket: %r' % s3_url)
fileobj = s3_open_func(s3_url, get_binary_mode(mode), **kwargs)
if 'b' not in mode:
fileobj = io.TextIOWrapper(fileobj) # pytype: disable=wrong-arg-types
fileobj.mode = mode
return fileobj
return wrapper
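# Illustrative sketch of what the decorator above does for text modes: the mode
# is translated to its binary counterpart and the result is wrapped in
# io.TextIOWrapper, e.g. (path hypothetical):
#
#   s3_open('s3://my-bucket/notes.txt', 'r')   # opened internally as 'rb',
#                                              # then wrapped for text decoding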
@_s3_binary_mode
def s3_prefetch_open(
s3_url: MegfilePathLike,
mode: str = 'rb',
*,
max_concurrency: Optional[int] = None,
max_block_size: int = DEFAULT_BLOCK_SIZE) -> S3PrefetchReader:
'''Open an asynchronous prefetch reader, to support fast sequential read and random read
.. note ::
The user should make sure that the reader / writer is closed correctly
Supports context manager
Settings that usually perform well: max_concurrency=10 or 20, max_block_size=8 or 16 MB; the default value None means the global thread pool is used
:param max_concurrency: Max download thread number, None by default
:param max_block_size: Max data size downloaded by each thread, in bytes, 8MB by default
:returns: An opened S3PrefetchReader object
:raises: S3FileNotFoundError
'''
if mode != 'rb':
raise ValueError('unacceptable mode: %r' % mode)
bucket, key = parse_s3_url(s3_url)
config = botocore.config.Config(max_pool_connections=max_pool_connections)
client = get_s3_client(config=config, cache_key='s3_filelike_client')
return S3PrefetchReader(
bucket,
key,
s3_client=client,
max_retries=max_retries,
max_workers=max_concurrency,
block_size=max_block_size)
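# Hedged usage sketch (path is hypothetical), using the parameter values the
# docstring above suggests:
#
#   with s3_prefetch_open('s3://my-bucket/big.bin',
#                         max_concurrency=10,
#                         max_block_size=8 * 2**20) as reader:
#       header = reader.read(1024)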
@_s3_binary_mode
def s3_share_cache_open(
s3_url: MegfilePathLike,
mode: str = 'rb',
*,
cache_key: str = 'lru',
max_concurrency: Optional[int] = None,
max_block_size: int = DEFAULT_BLOCK_SIZE) -> S3ShareCacheReader:
'''Open an asynchronous prefetch reader, to support fast sequential read and random read
.. note ::
The user should make sure that the reader / writer is closed correctly
Supports context manager
Settings that usually perform well: max_concurrency=10 or 20, max_block_size=8 or 16 MB; the default value None means the global thread pool is used
:param max_concurrency: Max download thread number, None by default
:param max_block_size: Max data size downloaded by each thread, in bytes, 8MB by default
:returns: An opened S3ShareCacheReader object
:raises: S3FileNotFoundError
'''
if mode != 'rb':
raise ValueError('unacceptable mode: %r' % mode)
bucket, key = parse_s3_url(s3_url)
config = botocore.config.Config(max_pool_connections=max_pool_connections)
client = get_s3_client(config=config, cache_key='s3_filelike_client')
return S3ShareCacheReader(
bucket,
key,
cache_key=cache_key,
s3_client=client,
max_retries=max_retries,
max_workers=max_concurrency,
block_size=max_block_size)
@_s3_binary_mode
def s3_pipe_open(
s3_url: MegfilePathLike, mode: str, *,
join_thread: bool = True) -> S3PipeHandler:
'''Open an asynchronous read-write reader / writer, to support fast sequential read / write
.. note ::
The user should make sure that the reader / writer is closed correctly
Supports context manager
When join_thread is False, closing the file handle will not wait until the asynchronous writing finishes;
this does not affect read handles, but it can speed up write handles because the file is written asynchronously.
However, the asynchronous behaviour cannot guarantee that the file is successfully written, and frequent use may exhaust threads and file handles
:param mode: Mode to open file, either "rb" or "wb"
:param join_thread: Whether to wait, when the handle is closed, until s3 finishes writing
:returns: An opened BufferedReader / BufferedWriter object
'''
if mode not in ('rb', 'wb'):
raise ValueError('unacceptable mode: %r' % mode)
if mode[0] == 'r' and not s3_isfile(s3_url):
raise S3FileNotFoundError('No such file: %r' % s3_url)
bucket, key = parse_s3_url(s3_url)
config = botocore.config.Config(max_pool_connections=max_pool_connections)
client = get_s3_client(config=config, cache_key='s3_filelike_client')
return S3PipeHandler(
bucket, key, mode, s3_client=client, join_thread=join_thread)
@_s3_binary_mode
def s3_cached_open(
s3_url: MegfilePathLike, mode: str, *,
cache_path: str) -> S3CachedHandler:
'''Open a local-cache file reader / writer, for frequent random read / write
.. note ::
The user should make sure that the reader / writer is closed correctly
Supports context manager
cache_path specifies the path of the cache file. Performance can be better if the cache file is on SSD or tmpfs
:param mode: Mode to open file, one of "rb", "wb", "ab" (optionally with "+")
:param cache_path: cache file path
:returns: An opened BufferedReader / BufferedWriter object
'''
if mode not in ('rb', 'wb', 'ab', 'rb+', 'wb+', 'ab+'):
raise ValueError('unacceptable mode: %r' % mode)
bucket, key = parse_s3_url(s3_url)
config = botocore.config.Config(max_pool_connections=max_pool_connections)
client = get_s3_client(config=config, cache_key='s3_filelike_client')
return S3CachedHandler(
bucket, key, mode, s3_client=client, cache_path=cache_path)
@_s3_binary_mode
def s3_buffered_open(
s3_url: MegfilePathLike,
mode: str,
*,
max_concurrency: Optional[int] = None,
max_buffer_size: int = DEFAULT_MAX_BUFFER_SIZE,
forward_ratio: Optional[float] = None,
block_size: int = DEFAULT_BLOCK_SIZE,
limited_seekable: bool = False,
buffered: bool = True,
share_cache_key: Optional[str] = None
) -> Union[S3PrefetchReader, S3BufferedWriter, io.BufferedReader, io.
BufferedWriter]:
'''Open an asynchronous prefetch reader or buffered writer, to support fast sequential read / write
.. note ::
The user should make sure that the reader / writer is closed correctly
Supports context manager
Settings that usually perform well: max_concurrency=10 or 20, block_size=8 or 16 MB; the default value None means the global thread pool is used
:param max_concurrency: Max download thread number, None by default
:param max_buffer_size: Max cached buffer size in memory, 128MB by default
:param block_size: Size of a single block, 8MB by default. Each block is uploaded or downloaded by a single thread.
:param limited_seekable: Whether the write handle supports limited seek (both the head and the tail of the file can seek within block_size). Note: this parameter is valid only for write handles; read handles support arbitrary seek
:returns: An opened S3PrefetchReader object
:raises: S3FileNotFoundError
'''
if mode not in ('rb', 'wb'):
raise ValueError('unacceptable mode: %r' % mode)
bucket, key = parse_s3_url(s3_url)
config = botocore.config.Config(max_pool_connections=max_pool_connections)
client = get_s3_client(config=config, cache_key='s3_filelike_client')
if mode == 'rb':
# A rough conversion algorithm to align the parameters of the two kinds of Reader / Writer
# TODO: Optimize the conversion algorithm
block_capacity = max_buffer_size // block_size
if forward_ratio is None:
block_forward = None
else:
block_forward = max(int(block_capacity * forward_ratio), 1)
if share_cache_key is not None:
reader = S3ShareCacheReader(
bucket,
key,
cache_key=share_cache_key,
s3_client=client,
max_retries=max_retries,
max_workers=max_concurrency,
block_size=block_size,
block_forward=block_forward)
else:
reader = S3PrefetchReader(
bucket,
key,
s3_client=client,
max_retries=max_retries,
max_workers=max_concurrency,
block_capacity=block_capacity,
block_forward=block_forward,
block_size=block_size)
if buffered:
reader = io.BufferedReader(reader) # pytype: disable=wrong-arg-types
return reader
if limited_seekable:
writer = S3LimitedSeekableWriter(
bucket,
key,
s3_client=client,
max_workers=max_concurrency,
max_buffer_size=max_buffer_size,
block_size=block_size)
else:
writer = S3BufferedWriter(
bucket,
key,
s3_client=client,
max_workers=max_concurrency,
max_buffer_size=max_buffer_size,
block_size=block_size)
if buffered:
writer = io.BufferedWriter(writer) # pytype: disable=wrong-arg-types
return writer
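# Hedged usage sketch (paths are hypothetical); s3_open below is an alias for
# this function:
#
#   with s3_open('s3://my-bucket/out.bin', 'wb') as writer:
#       writer.write(b'payload')
#   with s3_open('s3://my-bucket/out.bin', 'rb') as reader:
#       assert reader.read() == b'payload'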
@_s3_binary_mode
def s3_memory_open(s3_url: MegfilePathLike, mode: str) -> BinaryIO:
'''Open a BytesIO to read / write data at the specified path
:param s3_url: Specified path
:returns: BinaryIO
'''
if mode not in ('rb', 'wb'):
raise ValueError('unacceptable mode: %r' % mode)
if mode == 'rb':
return s3_load_from(s3_url)
buffer = io.BytesIO()
close_buffer = buffer.close
bucket, key = parse_s3_url(s3_url)
config = botocore.config.Config(max_pool_connections=max_pool_connections)
client = get_s3_client(config=config, cache_key='s3_filelike_client')
def close():
try:
buffer.seek(0)
# File-like objects are closed after uploading
# https://github.com/boto/s3transfer/issues/80
buffer.close = close_buffer
client.upload_fileobj(buffer, bucket, key)
except Exception as error:
raise translate_s3_error(error, s3_url)
finally:
close_buffer()
buffer.close = close
return buffer
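# Hedged usage sketch (path is hypothetical): in 'wb' mode the data stays in the
# in-memory buffer and is uploaded when the buffer is closed.
#
#   with s3_memory_open('s3://my-bucket/small.json', 'wb') as f:
#       f.write(b'{}')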
@_s3_binary_mode
def s3_legacy_open(s3_url: MegfilePathLike, mode: str):
'''Use smart_open.s3.open to open a reader / writer
.. note ::
The user should make sure that the reader / writer is closed correctly
Supports context manager
:param mode: Mode to open file, either "rb" or "wb"
:returns: File-Like Object
'''
if mode not in ('rb', 'wb'):
raise ValueError('unacceptable mode: %r' % mode)
bucket, key = parse_s3_url(s3_url)
try:
return _s3_open(bucket, key, mode)
except Exception as error:
if isinstance(error, IOError):
error_str = str(error)
if 'NoSuchKey' in error_str:
raise S3FileNotFoundError('No such file: %r' % s3_url)
if 'NoSuchBucket' in error_str:
raise S3BucketNotFoundError('No such bucket: %r' % s3_url)
for code in ('AccessDenied', 'InvalidAccessKeyId',
'SignatureDoesNotMatch'):
if code in error_str:
raise S3PermissionError(
'Permission denied: %r, code: %s' % (s3_url, code))
raise S3UnknownError(error, s3_url)
elif isinstance(error, ValueError):
error_str = str(error)
if 'does not exist' in error_str:
# if bucket is non-existent or has no WRITE access
raise S3BucketNotFoundError('No such bucket: %r' % s3_url)
raise S3UnknownError(error, s3_url)
raise translate_s3_error(error, s3_url)
s3_open = s3_buffered_open
def s3_getmd5(s3_url: MegfilePathLike) -> Optional[str]:
'''
Get the md5 meta info of files uploaded / copied via megfile
If the meta info is lost or non-existent, return None
:param s3_url: Specified path
:returns: md5 meta info
'''
bucket, key = parse_s3_url(s3_url)
if not bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if not key or key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % s3_url)
client = get_s3_client()
with raise_s3_error(s3_url):
resp = client.head_object(Bucket=bucket, Key=key)
# boto3 does not lower the key of metadata
# https://github.com/boto/botocore/issues/1963
metadata = dict(
(key.lower(), value) for key, value in resp['Metadata'].items())
if content_md5_header in metadata:
return metadata[content_md5_header]
return None
def s3_load_content(
s3_url, start: Optional[int] = None,
stop: Optional[int] = None) -> bytes:
'''
Get the bytes of the specified file in the range [start, stop)
:param s3_url: Specified path
:param start: start index
:param stop: stop index
:returns: bytes content in range [start, stop)
'''
def _get_object(client, bucket, key, range_str):
return client.get_object(
Bucket=bucket, Key=key, Range=range_str)['Body'].read()
bucket, key = parse_s3_url(s3_url)
if not bucket:
raise S3BucketNotFoundError('Empty bucket name: %r' % s3_url)
if not key or key.endswith('/'):
raise S3IsADirectoryError('Is a directory: %r' % s3_url)
start, stop = get_content_offset(start, stop, s3_getsize(s3_url))
range_str = 'bytes=%d-%d' % (start, stop - 1)
client = get_s3_client()
with raise_s3_error(s3_url):
return patch_method(
_get_object,
max_retries=max_retries,
should_retry=s3_should_retry,
)(client, bucket, key, range_str)
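# Hedged usage sketch (path is hypothetical): read only the first kilobyte.
#
#   first_kb = s3_load_content('s3://my-bucket/big.bin', 0, 1024)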
def s3_rename(src_url: MegfilePathLike, dst_url: MegfilePathLike) -> None:
'''
Move s3 file path from src_url to dst_url
:param src_url: Given source path
:param dst_url: Given destination path
'''
s3_copy(src_url, dst_url)
s3_remove(src_url)
def _s3_scan_pairs(src_url: MegfilePathLike, dst_url: MegfilePathLike
) -> Iterator[Tuple[MegfilePathLike, MegfilePathLike]]:
for src_file_path in s3_scan(src_url):
content_path = src_file_path[len(src_url):]
if len(content_path) > 0:
dst_file_path = s3_path_join(dst_url, content_path)
else:
dst_file_path = dst_url
yield src_file_path, dst_file_path
def s3_move(src_url: MegfilePathLike, dst_url: MegfilePathLike) -> None:
'''
Move file/directory path from src_url to dst_url
:param src_url: Given source path
:param dst_url: Given destination path
'''
for src_file_path, dst_file_path in _s3_scan_pairs(src_url, dst_url):
s3_rename(src_file_path, dst_file_path)
def s3_sync(src_url: MegfilePathLike, dst_url: MegfilePathLike) -> None:
'''
Copy a file / directory from src_url to dst_url
:param src_url: Given source path
:param dst_url: Given destination path
'''
for src_file_path, dst_file_path in _s3_scan_pairs(src_url, dst_url):
s3_copy(src_file_path, dst_file_path)
class S3Cacher(FileCacher):
cache_path = None
def __init__(self, path: str, cache_path: str, mode: str = 'r'):
if mode not in ('r', 'w', 'a'):
raise ValueError('unacceptable mode: %r' % mode)
if mode in ('r', 'a'):
s3_download(path, cache_path)
self.name = path
self.mode = mode
self.cache_path = cache_path
def _close(self):
if self.cache_path is not None and \
os.path.exists(self.cache_path):
if self.mode in ('w', 'a'):
s3_upload(self.cache_path, self.name)
os.unlink(self.cache_path)
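# Hedged usage sketch (paths are hypothetical); this assumes FileCacher provides
# the context-manager protocol that invokes _close() on exit:
#
#   with S3Cacher('s3://my-bucket/data.csv', '/tmp/data.csv', mode='r') as cache:
#       ...  # work on the local cache file; it is uploaded back only in 'w'/'a' modes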
|
[] |
[] |
[
"OSS_ENDPOINT"
] |
[]
|
["OSS_ENDPOINT"]
|
python
| 1 | 0 | |
peer/channel/create_test.go
|
/*
Copyright Digital Asset Holdings, LLC 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package channel
import (
"fmt"
"os"
"sync"
"testing"
mspmgmt "github.com/hyperledger/fabric/msp/mgmt"
"github.com/hyperledger/fabric/peer/common"
cb "github.com/hyperledger/fabric/protos/common"
)
var once sync.Once
// mock deliver client for UT
type mockDeliverClient struct {
err error
}
func (m *mockDeliverClient) readBlock() (*cb.Block, error) {
if m.err != nil {
return nil, m.err
}
return &cb.Block{}, nil
}
func (m *mockDeliverClient) getBlock() (*cb.Block, error) {
b, err := m.readBlock()
if err != nil {
return nil, err
}
return b, nil
}
// InitMSP init MSP
func InitMSP() {
once.Do(initMSP)
}
func initMSP() {
// TODO: determine the location of this config file
var alternativeCfgPath = os.Getenv("PEER_CFG_PATH")
var mspMgrConfigDir string
if alternativeCfgPath != "" {
mspMgrConfigDir = alternativeCfgPath + "/msp/sampleconfig/"
} else if _, err := os.Stat("./msp/sampleconfig/"); err == nil {
mspMgrConfigDir = "./msp/sampleconfig/"
} else {
mspMgrConfigDir = os.Getenv("GOPATH") + "/src/github.com/hyperledger/fabric/msp/sampleconfig/"
}
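	// Resolution order of the branches above (summary, values illustrative):
	// PEER_CFG_PATH + "/msp/sampleconfig/" if the variable is set, otherwise
	// "./msp/sampleconfig/" if it exists, otherwise
	// $GOPATH/src/github.com/hyperledger/fabric/msp/sampleconfig/.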
err := mspmgmt.LoadFakeSetupWithLocalMspAndTestChainMsp(mspMgrConfigDir)
if err != nil {
panic(fmt.Errorf("Fatal error when reading MSP config file %s: err %s\n", mspMgrConfigDir, err))
}
}
func TestCreateChain(t *testing.T) {
InitMSP()
mockchain := "mockchain"
defer os.Remove(mockchain + ".block")
signer, err := common.GetDefaultSigner()
if err != nil {
t.Fatalf("Get default signer error: %v", err)
}
mockBroadcastClient := common.GetMockBroadcastClient(nil)
mockCF := &ChannelCmdFactory{
BroadcastClient: mockBroadcastClient,
Signer: signer,
DeliverClient: &mockDeliverClient{},
AnchorPeerParser: common.GetAnchorPeersParser("../common/testdata/anchorPeersOrg1.txt"),
}
cmd := createCmd(mockCF)
AddFlags(cmd)
args := []string{"-c", mockchain, "-a", "../common/testdata/anchorPeersOrg1.txt"}
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Fail()
t.Errorf("expected join command to succeed")
}
}
func TestCreateChainWithDefaultAnchorPeers(t *testing.T) {
InitMSP()
mockchain := "mockchain"
defer os.Remove(mockchain + ".block")
signer, err := common.GetDefaultSigner()
if err != nil {
t.Fatalf("Get default signer error: %v", err)
}
mockBroadcastClient := common.GetMockBroadcastClient(nil)
mockCF := &ChannelCmdFactory{
BroadcastClient: mockBroadcastClient,
Signer: signer,
DeliverClient: &mockDeliverClient{},
}
cmd := createCmd(mockCF)
AddFlags(cmd)
args := []string{"-c", mockchain}
cmd.SetArgs(args)
if err := cmd.Execute(); err != nil {
t.Fail()
t.Errorf("expected join command to succeed")
}
}
func TestCreateChainInvalidAnchorPeers(t *testing.T) {
InitMSP()
mockchain := "mockchain"
defer os.Remove(mockchain + ".block")
signer, err := common.GetDefaultSigner()
if err != nil {
t.Fatalf("Get default signer error: %v", err)
}
mockBroadcastClient := common.GetMockBroadcastClient(nil)
mockCF := &ChannelCmdFactory{
BroadcastClient: mockBroadcastClient,
Signer: signer,
DeliverClient: &mockDeliverClient{},
AnchorPeerParser: common.GetAnchorPeersParser("../common/testdata/anchorPeersBadPEM.txt"),
}
cmd := createCmd(mockCF)
AddFlags(cmd)
args := []string{"-c", mockchain, "-a", "../common/testdata/anchorPeersBadPEM.txt"}
cmd.SetArgs(args)
if err := cmd.Execute(); err == nil {
t.Errorf("expected create chain to fail because of invalid anchor peer file")
}
}
func TestCreateChainBCFail(t *testing.T) {
InitMSP()
mockchain := "mockchain"
defer os.Remove(mockchain + ".block")
signer, err := common.GetDefaultSigner()
if err != nil {
t.Fatalf("Get default signer error: %v", err)
}
sendErr := fmt.Errorf("send create tx failed")
mockBroadcastClient := common.GetMockBroadcastClient(sendErr)
mockCF := &ChannelCmdFactory{
BroadcastClient: mockBroadcastClient,
Signer: signer,
DeliverClient: &mockDeliverClient{},
AnchorPeerParser: common.GetAnchorPeersParser("../common/testdata/anchorPeersOrg1.txt"),
}
cmd := createCmd(mockCF)
AddFlags(cmd)
args := []string{"-c", mockchain, "-a", "../common/testdata/anchorPeersOrg1.txt"}
cmd.SetArgs(args)
expectedErrMsg := sendErr.Error()
if err := cmd.Execute(); err == nil {
t.Errorf("expected create chain to fail with broadcast error")
} else {
if err.Error() != expectedErrMsg {
t.Errorf("Run create chain get unexpected error: %s(expected %s)", err.Error(), expectedErrMsg)
}
}
}
func TestCreateChainDeliverFail(t *testing.T) {
InitMSP()
mockchain := "mockchain"
defer os.Remove(mockchain + ".block")
signer, err := common.GetDefaultSigner()
if err != nil {
t.Fatalf("Get default signer error: %v", err)
}
mockBroadcastClient := common.GetMockBroadcastClient(nil)
recvErr := fmt.Errorf("deliver create tx failed")
mockCF := &ChannelCmdFactory{
BroadcastClient: mockBroadcastClient,
Signer: signer,
DeliverClient: &mockDeliverClient{recvErr},
AnchorPeerParser: common.GetAnchorPeersParser("../common/testdata/anchorPeersOrg1.txt"),
}
cmd := createCmd(mockCF)
AddFlags(cmd)
args := []string{"-c", mockchain, "-a", "../common/testdata/anchorPeersOrg1.txt"}
cmd.SetArgs(args)
expectedErrMsg := recvErr.Error()
if err := cmd.Execute(); err == nil {
t.Errorf("expected create chain to fail with deliver error")
} else {
if err.Error() != expectedErrMsg {
t.Errorf("Run create chain get unexpected error: %s(expected %s)", err.Error(), expectedErrMsg)
}
}
}
|
[
"\"PEER_CFG_PATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"PEER_CFG_PATH"
] |
[]
|
["GOPATH", "PEER_CFG_PATH"]
|
go
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zabbix_dashboard.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/k8s.io/kubernetes/cmd/kube-apiserver/app/server.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to create a Kubernetes
// APIServer by binding together the API, master and APIServer infrastructure.
// It can be configured and called directly or via the hyperkube framework.
package app
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"reflect"
"strconv"
"strings"
"time"
"github.com/go-openapi/spec"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/openapi"
"k8s.io/apimachinery/pkg/runtime/schema"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
utilwait "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authorization/authorizer"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/filters"
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/cmd/kube-apiserver/app/preflight"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
"k8s.io/kubernetes/pkg/cloudprovider"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
generatedopenapi "k8s.io/kubernetes/pkg/generated/openapi"
"k8s.io/kubernetes/pkg/kubeapiserver"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
kubeauthenticator "k8s.io/kubernetes/pkg/kubeapiserver/authenticator"
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/master/tunneler"
quotainstall "k8s.io/kubernetes/pkg/quota/install"
"k8s.io/kubernetes/pkg/registry/cachesize"
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/bootstrap"
)
const etcdRetryLimit = 60
const etcdRetryInterval = 1 * time.Second
// NewAPIServerCommand creates a *cobra.Command object with default parameters
func NewAPIServerCommand() *cobra.Command {
s := options.NewServerRunOptions()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: "kube-apiserver",
Long: `The Kubernetes API server validates and configures data
for the api objects which include pods, services, replicationcontrollers, and
others. The API Server services REST operations and provides the frontend to the
cluster's shared state through which all other components interact.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
// Run runs the specified APIServer. This should never exit.
func Run(runOptions *options.ServerRunOptions, stopCh <-chan struct{}) error {
kubeAPIServerConfig, sharedInformers, err := CreateKubeAPIServerConfig(runOptions)
if err != nil {
return err
}
kubeAPIServer, err := CreateKubeAPIServer(kubeAPIServerConfig, sharedInformers, stopCh)
if err != nil {
return err
}
// we've picked the aggregator a release early. We want to make sure that the kube-apiserver doesn't
// have it enabled when run as a separate binary.
if true {
return kubeAPIServer.GenericAPIServer.PrepareRun().Run(stopCh)
}
// if we're starting up a hacked up version of this API server for a weird test case,
// just start the API server as is because clients don't get built correctly when you do this
if len(os.Getenv("KUBE_API_VERSIONS")) > 0 {
return kubeAPIServer.GenericAPIServer.PrepareRun().Run(stopCh)
}
// otherwise go down the normal path of standing the aggregator up in front of the API server
// this wires up openapi
kubeAPIServer.GenericAPIServer.PrepareRun()
aggregatorConfig, err := createAggregatorConfig(*kubeAPIServerConfig.GenericConfig, runOptions)
if err != nil {
return err
}
aggregatorServer, err := createAggregatorServer(aggregatorConfig, kubeAPIServer.GenericAPIServer, sharedInformers, stopCh)
if err != nil {
// we don't need special handling for innerStopCh because the aggregator server doesn't create any go routines
return err
}
return aggregatorServer.GenericAPIServer.PrepareRun().Run(stopCh)
}
// CreateKubeAPIServer creates and wires a workable kube-apiserver
func CreateKubeAPIServer(kubeAPIServerConfig *master.Config, sharedInformers informers.SharedInformerFactory, stopCh <-chan struct{}) (*master.Master, error) {
kubeAPIServer, err := kubeAPIServerConfig.Complete().New()
if err != nil {
return nil, err
}
kubeAPIServer.GenericAPIServer.AddPostStartHook("start-kube-apiserver-informers", func(context genericapiserver.PostStartHookContext) error {
sharedInformers.Start(stopCh)
return nil
})
return kubeAPIServer, nil
}
// CreateKubeAPIServerConfig creates all the resources for running the API server, but runs none of them
func CreateKubeAPIServerConfig(s *options.ServerRunOptions) (*master.Config, informers.SharedInformerFactory, error) {
// set defaults in the options before trying to create the generic config
if err := defaultOptions(s); err != nil {
return nil, nil, err
}
// validate options
if errs := s.Validate(); len(errs) != 0 {
return nil, nil, utilerrors.NewAggregate(errs)
}
genericConfig, sharedInformers, err := BuildGenericConfig(s)
if err != nil {
return nil, nil, err
}
if err := utilwait.PollImmediate(etcdRetryInterval, etcdRetryLimit*etcdRetryInterval, preflight.EtcdConnection{ServerList: s.Etcd.StorageConfig.ServerList}.CheckEtcdServers); err != nil {
return nil, nil, fmt.Errorf("error waiting for etcd connection: %v", err)
}
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: s.AllowPrivileged,
// TODO(vmarmol): Implement support for HostNetworkSources.
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{},
HostPIDSources: []string{},
HostIPCSources: []string{},
},
PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
})
// Setup nodeTunneler if needed
var nodeTunneler tunneler.Tunneler
var proxyDialerFn utilnet.DialFunc
if len(s.SSHUser) > 0 {
// Get ssh key distribution func, if supported
var installSSHKey tunneler.InstallSSHKey
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider.CloudProvider, s.CloudProvider.CloudConfigFile)
if err != nil {
return nil, nil, fmt.Errorf("cloud provider could not be initialized: %v", err)
}
if cloud != nil {
if instances, supported := cloud.Instances(); supported {
installSSHKey = instances.AddSSHKeyToAllInstances
}
}
if s.KubeletConfig.Port == 0 {
return nil, nil, fmt.Errorf("must enable kubelet port if proxy ssh-tunneling is specified")
}
if s.KubeletConfig.ReadOnlyPort == 0 {
return nil, nil, fmt.Errorf("must enable kubelet readonly port if proxy ssh-tunneling is specified")
}
// Set up the nodeTunneler
// TODO(cjcullen): If we want this to handle per-kubelet ports or other
// kubelet listen-addresses, we need to plumb through options.
healthCheckPath := &url.URL{
Scheme: "http",
Host: net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.ReadOnlyPort), 10)),
Path: "healthz",
}
nodeTunneler = tunneler.New(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSHKey)
// Use the nodeTunneler's dialer to connect to the kubelet
s.KubeletConfig.Dial = nodeTunneler.Dial
// Use the nodeTunneler's dialer when proxying to pods, services, and nodes
proxyDialerFn = nodeTunneler.Dial
}
// Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}
proxyTransport := utilnet.SetTransportDefaults(&http.Transport{
Dial: proxyDialerFn,
TLSClientConfig: proxyTLSClientConfig,
})
serviceIPRange, apiServerServiceIP, err := master.DefaultServiceIPRange(s.ServiceClusterIPRange)
if err != nil {
return nil, nil, err
}
storageFactory, err := BuildStorageFactory(s)
if err != nil {
return nil, nil, err
}
clientCA, err := readCAorNil(s.Authentication.ClientCert.ClientCA)
if err != nil {
return nil, nil, err
}
requestHeaderProxyCA, err := readCAorNil(s.Authentication.RequestHeader.ClientCAFile)
if err != nil {
return nil, nil, err
}
config := &master.Config{
GenericConfig: genericConfig,
ClientCARegistrationHook: master.ClientCARegistrationHook{
ClientCA: clientCA,
RequestHeaderUsernameHeaders: s.Authentication.RequestHeader.UsernameHeaders,
RequestHeaderGroupHeaders: s.Authentication.RequestHeader.GroupHeaders,
RequestHeaderExtraHeaderPrefixes: s.Authentication.RequestHeader.ExtraHeaderPrefixes,
RequestHeaderCA: requestHeaderProxyCA,
RequestHeaderAllowedNames: s.Authentication.RequestHeader.AllowedNames,
},
APIResourceConfigSource: storageFactory.APIResourceConfigSource,
StorageFactory: storageFactory,
EnableCoreControllers: true,
EventTTL: s.EventTTL,
KubeletClientConfig: s.KubeletConfig,
EnableUISupport: true,
EnableLogsSupport: true,
ProxyTransport: proxyTransport,
Tunneler: nodeTunneler,
ServiceIPRange: serviceIPRange,
APIServerServiceIP: apiServerServiceIP,
APIServerServicePort: 443,
ServiceNodePortRange: s.ServiceNodePortRange,
KubernetesServiceNodePort: s.KubernetesServiceNodePort,
MasterCount: s.MasterCount,
}
return config, sharedInformers, nil
}
// BuildGenericConfig takes the master server options and produces the genericapiserver.Config associated with it
func BuildGenericConfig(s *options.ServerRunOptions) (*genericapiserver.Config, informers.SharedInformerFactory, error) {
genericConfig := genericapiserver.NewConfig().WithSerializer(api.Codecs)
if err := s.GenericServerRunOptions.ApplyTo(genericConfig); err != nil {
return nil, nil, err
}
if err := s.InsecureServing.ApplyTo(genericConfig); err != nil {
return nil, nil, err
}
if err := s.SecureServing.ApplyTo(genericConfig); err != nil {
return nil, nil, err
}
if err := s.Authentication.ApplyTo(genericConfig); err != nil {
return nil, nil, err
}
if err := s.Audit.ApplyTo(genericConfig); err != nil {
return nil, nil, err
}
if err := s.Features.ApplyTo(genericConfig); err != nil {
return nil, nil, err
}
genericConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(generatedopenapi.GetOpenAPIDefinitions, api.Scheme)
genericConfig.OpenAPIConfig.PostProcessSpec = postProcessOpenAPISpecForBackwardCompatibility
genericConfig.OpenAPIConfig.Info.Title = "Kubernetes"
genericConfig.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()
genericConfig.EnableMetrics = true
genericConfig.LongRunningFunc = filters.BasicLongRunningRequestCheck(
sets.NewString("watch", "proxy"),
sets.NewString("attach", "exec", "proxy", "log", "portforward"),
)
kubeVersion := version.Get()
genericConfig.Version = &kubeVersion
storageFactory, err := BuildStorageFactory(s)
if err != nil {
return nil, nil, err
}
if err := s.Etcd.ApplyWithStorageFactoryTo(storageFactory, genericConfig); err != nil {
return nil, nil, err
}
// Use protobufs for self-communication.
// Since not every generic apiserver has to support protobufs, we
// cannot default to it in generic apiserver and need to explicitly
// set it in kube-apiserver.
genericConfig.LoopbackClientConfig.ContentConfig.ContentType = "application/vnd.kubernetes.protobuf"
client, err := internalclientset.NewForConfig(genericConfig.LoopbackClientConfig)
if err != nil {
kubeAPIVersions := os.Getenv("KUBE_API_VERSIONS")
if len(kubeAPIVersions) == 0 {
return nil, nil, fmt.Errorf("failed to create clientset: %v", err)
}
// KUBE_API_VERSIONS is used in test-update-storage-objects.sh, disabling a number of API
// groups. This leads to a nil client above and undefined behaviour further down.
//
// TODO: get rid of KUBE_API_VERSIONS or define sane behaviour if set
glog.Errorf("Failed to create clientset with KUBE_API_VERSIONS=%q. KUBE_API_VERSIONS is only for testing. Things will break.", kubeAPIVersions)
}
sharedInformers := informers.NewSharedInformerFactory(client, 10*time.Minute)
genericConfig.Authenticator, genericConfig.OpenAPIConfig.SecurityDefinitions, err = BuildAuthenticator(s, storageFactory, client, sharedInformers)
if err != nil {
return nil, nil, fmt.Errorf("invalid authentication config: %v", err)
}
genericConfig.Authorizer, err = BuildAuthorizer(s, sharedInformers)
if err != nil {
return nil, nil, fmt.Errorf("invalid authorization config: %v", err)
}
if !sets.NewString(s.Authorization.Modes()...).Has(modes.ModeRBAC) {
genericConfig.DisabledPostStartHooks.Insert(rbacrest.PostStartHookName)
}
genericConfig.AdmissionControl, err = BuildAdmission(s, client, sharedInformers, genericConfig.Authorizer)
if err != nil {
return nil, nil, fmt.Errorf("failed to initialize admission: %v", err)
}
return genericConfig, sharedInformers, nil
}
// BuildAdmission constructs the admission chain
func BuildAdmission(s *options.ServerRunOptions, client internalclientset.Interface, sharedInformers informers.SharedInformerFactory, apiAuthorizer authorizer.Authorizer) (admission.Interface, error) {
admissionControlPluginNames := strings.Split(s.GenericServerRunOptions.AdmissionControl, ",")
var cloudConfig []byte
var err error
if s.CloudProvider.CloudConfigFile != "" {
cloudConfig, err = ioutil.ReadFile(s.CloudProvider.CloudConfigFile)
if err != nil {
glog.Fatalf("Error reading from cloud configuration file %s: %#v", s.CloudProvider.CloudConfigFile, err)
}
}
// NOTE: we do not provide informers to the registry because admission level decisions
// do not require us to open watches for all items tracked by quota.
quotaRegistry := quotainstall.NewRegistry(nil, nil)
pluginInitializer := kubeadmission.NewPluginInitializer(client, sharedInformers, apiAuthorizer, cloudConfig, quotaRegistry)
admissionConfigProvider, err := admission.ReadAdmissionConfiguration(admissionControlPluginNames, s.GenericServerRunOptions.AdmissionControlConfigFile)
if err != nil {
return nil, fmt.Errorf("failed to read plugin config: %v", err)
}
return admission.NewFromPlugins(admissionControlPluginNames, admissionConfigProvider, pluginInitializer)
}
// BuildAuthenticator constructs the authenticator
func BuildAuthenticator(s *options.ServerRunOptions, storageFactory serverstorage.StorageFactory, client internalclientset.Interface, sharedInformers informers.SharedInformerFactory) (authenticator.Request, *spec.SecurityDefinitions, error) {
authenticatorConfig := s.Authentication.ToAuthenticationConfig()
if s.Authentication.ServiceAccounts.Lookup {
// we have to go direct to storage because the clientsets fail when they're initialized with some API versions excluded
// we should stop trying to control them like that.
storageConfig, err := storageFactory.NewConfig(api.Resource("serviceaccounts"))
if err != nil {
return nil, nil, fmt.Errorf("unable to get serviceaccounts storage: %v", err)
}
authenticatorConfig.ServiceAccountTokenGetter = serviceaccountcontroller.NewGetterFromStorageInterface(storageConfig, storageFactory.ResourcePrefix(api.Resource("serviceaccounts")), storageFactory.ResourcePrefix(api.Resource("secrets")))
}
if client == nil || reflect.ValueOf(client).IsNil() {
// TODO: Remove check once client can never be nil.
glog.Errorf("Failed to setup bootstrap token authenticator because the loopback clientset was not setup properly.")
} else {
authenticatorConfig.BootstrapTokenAuthenticator = bootstrap.NewTokenAuthenticator(
sharedInformers.Core().InternalVersion().Secrets().Lister().Secrets(v1.NamespaceSystem),
)
}
return authenticatorConfig.New()
}
// BuildAuthorizer constructs the authorizer
func BuildAuthorizer(s *options.ServerRunOptions, sharedInformers informers.SharedInformerFactory) (authorizer.Authorizer, error) {
authorizationConfig := s.Authorization.ToAuthorizationConfig(sharedInformers)
return authorizationConfig.New()
}
// BuildStorageFactory constructs the storage factory
func BuildStorageFactory(s *options.ServerRunOptions) (*serverstorage.DefaultStorageFactory, error) {
storageGroupsToEncodingVersion, err := s.StorageSerialization.StorageGroupsToEncodingVersion()
if err != nil {
return nil, fmt.Errorf("error generating storage version map: %s", err)
}
storageFactory, err := kubeapiserver.NewStorageFactory(
s.Etcd.StorageConfig, s.Etcd.DefaultStorageMediaType, api.Codecs,
serverstorage.NewDefaultResourceEncodingConfig(api.Registry), storageGroupsToEncodingVersion,
// FIXME: this GroupVersionResource override should be configurable
[]schema.GroupVersionResource{batch.Resource("cronjobs").WithVersion("v2alpha1")},
master.DefaultAPIResourceConfigSource(), s.APIEnablement.RuntimeConfig)
if err != nil {
return nil, fmt.Errorf("error in initializing storage factory: %s", err)
}
// keep Deployments in extensions for backwards compatibility, we'll have to migrate at some point, eventually
storageFactory.AddCohabitatingResources(extensions.Resource("deployments"), apps.Resource("deployments"))
storageFactory.AddCohabitatingResources(extensions.Resource("horizontalpodautoscalers"), autoscaling.Resource("horizontalpodautoscalers"))
for _, override := range s.Etcd.EtcdServersOverrides {
tokens := strings.Split(override, "#")
if len(tokens) != 2 {
glog.Errorf("invalid value of etcd server overrides: %s", override)
continue
}
apiresource := strings.Split(tokens[0], "/")
if len(apiresource) != 2 {
glog.Errorf("invalid resource definition: %s", tokens[0])
continue
}
group := apiresource[0]
resource := apiresource[1]
groupResource := schema.GroupResource{Group: group, Resource: resource}
servers := strings.Split(tokens[1], ";")
storageFactory.SetEtcdLocation(groupResource, servers)
}
return storageFactory, nil
}
func defaultOptions(s *options.ServerRunOptions) error {
if err := s.GenericServerRunOptions.DefaultAdvertiseAddress(s.SecureServing, s.InsecureServing); err != nil {
return err
}
_, apiServerServiceIP, err := master.DefaultServiceIPRange(s.ServiceClusterIPRange)
if err != nil {
return fmt.Errorf("error determining service IP ranges: %v", err)
}
if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts(s.GenericServerRunOptions.AdvertiseAddress.String(), apiServerServiceIP); err != nil {
return fmt.Errorf("error creating self-signed certificates: %v", err)
}
if err := s.CloudProvider.DefaultExternalHost(s.GenericServerRunOptions); err != nil {
return fmt.Errorf("error setting the external host value: %v", err)
}
s.Authentication.ApplyAuthorization(s.Authorization)
// Default to the private server key for service account token signing
if len(s.Authentication.ServiceAccounts.KeyFiles) == 0 && s.SecureServing.ServerCert.CertKey.KeyFile != "" {
if kubeauthenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) {
s.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile}
} else {
glog.Warning("No TLS key provided, service account token authentication disabled")
}
}
if s.Etcd.StorageConfig.DeserializationCacheSize == 0 {
// When size of cache is not explicitly set, estimate its size based on
// target memory usage.
glog.V(2).Infof("Initializing deserialization cache size based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB)
// This heuristic tries to infer the maximum number of nodes in the cluster
// from the memory capacity, and sets cache sizes based on that value.
// From our documentation, we officially recommend 120GB machines for
// 2000 nodes, and we scale from that point. Thus we assume ~60MB of
// capacity per node.
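// Worked example (illustrative, not from the original source): with
// TargetRAMMB = 120000 (~120GB), clusterSize = 120000/60 = 2000 and
// DeserializationCacheSize = 25*2000 = 50000.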
// TODO: We may consider deciding that some percentage of memory will
// be used for the deserialization cache and divide it by the max object
// size to compute its size. We may even go further and measure
// collective sizes of the objects in the cache.
clusterSize := s.GenericServerRunOptions.TargetRAMMB / 60
s.Etcd.StorageConfig.DeserializationCacheSize = 25 * clusterSize
if s.Etcd.StorageConfig.DeserializationCacheSize < 1000 {
s.Etcd.StorageConfig.DeserializationCacheSize = 1000
}
}
if s.Etcd.EnableWatchCache {
glog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB)
cachesize.InitializeWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB)
cachesize.SetWatchCacheSizes(s.GenericServerRunOptions.WatchCacheSizes)
}
return nil
}
func readCAorNil(file string) ([]byte, error) {
if len(file) == 0 {
return nil, nil
}
return ioutil.ReadFile(file)
}
// PostProcessSpec adds removed definitions for backward compatibility
func postProcessOpenAPISpecForBackwardCompatibility(s *spec.Swagger) (*spec.Swagger, error) {
compatibilityMap := map[string]string{
"v1beta1.DeploymentStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentStatus",
"v1beta1.ReplicaSetList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetList",
"v1beta1.Eviction": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.Eviction",
"v1beta1.StatefulSetList": "k8s.io/kubernetes/pkg/apis/apps/v1beta1.StatefulSetList",
"v1beta1.RoleBinding": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.RoleBinding",
"v1beta1.PodSecurityPolicyList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.PodSecurityPolicyList",
"v1.NodeSpec": "k8s.io/kubernetes/pkg/api/v1.NodeSpec",
"v1.FlockerVolumeSource": "k8s.io/kubernetes/pkg/api/v1.FlockerVolumeSource",
"v1.ContainerState": "k8s.io/kubernetes/pkg/api/v1.ContainerState",
"v1beta1.ClusterRole": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.ClusterRole",
"v1beta1.StorageClass": "k8s.io/kubernetes/pkg/apis/storage/v1beta1.StorageClass",
"v1.FlexVolumeSource": "k8s.io/kubernetes/pkg/api/v1.FlexVolumeSource",
"v1.SecretKeySelector": "k8s.io/kubernetes/pkg/api/v1.SecretKeySelector",
"v1.DeleteOptions": "k8s.io/kubernetes/pkg/api/v1.DeleteOptions",
"v1.PodStatus": "k8s.io/kubernetes/pkg/api/v1.PodStatus",
"v1.NodeStatus": "k8s.io/kubernetes/pkg/api/v1.NodeStatus",
"v1.ServiceSpec": "k8s.io/kubernetes/pkg/api/v1.ServiceSpec",
"v1.AttachedVolume": "k8s.io/kubernetes/pkg/api/v1.AttachedVolume",
"v1.PersistentVolume": "k8s.io/kubernetes/pkg/api/v1.PersistentVolume",
"v1.LimitRangeList": "k8s.io/kubernetes/pkg/api/v1.LimitRangeList",
"v1alpha1.Role": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.Role",
"v1.Affinity": "k8s.io/kubernetes/pkg/api/v1.Affinity",
"v1beta1.PodDisruptionBudget": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.PodDisruptionBudget",
"v1alpha1.RoleBindingList": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.RoleBindingList",
"v1.PodAffinity": "k8s.io/kubernetes/pkg/api/v1.PodAffinity",
"v1beta1.SELinuxStrategyOptions": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.SELinuxStrategyOptions",
"v1.ResourceQuotaList": "k8s.io/kubernetes/pkg/api/v1.ResourceQuotaList",
"v1.PodList": "k8s.io/kubernetes/pkg/api/v1.PodList",
"v1.EnvVarSource": "k8s.io/kubernetes/pkg/api/v1.EnvVarSource",
"v1beta1.TokenReviewStatus": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1.TokenReviewStatus",
"v1.PersistentVolumeClaimList": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaimList",
"v1beta1.RoleList": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.RoleList",
"v1.ListMeta": "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta",
"v1.ObjectMeta": "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta",
"v1.APIGroupList": "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList",
"v2alpha1.Job": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.Job",
"v1.EnvFromSource": "k8s.io/kubernetes/pkg/api/v1.EnvFromSource",
"v1beta1.IngressStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressStatus",
"v1.Service": "k8s.io/kubernetes/pkg/api/v1.Service",
"v1beta1.DaemonSetStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DaemonSetStatus",
"v1alpha1.Subject": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.Subject",
"v1.HorizontalPodAutoscaler": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.HorizontalPodAutoscaler",
"v1.StatusCause": "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause",
"v1.NodeSelectorRequirement": "k8s.io/kubernetes/pkg/api/v1.NodeSelectorRequirement",
"v1beta1.NetworkPolicyIngressRule": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicyIngressRule",
"v1beta1.ThirdPartyResource": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ThirdPartyResource",
"v1beta1.PodSecurityPolicy": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.PodSecurityPolicy",
"v1beta1.StatefulSet": "k8s.io/kubernetes/pkg/apis/apps/v1beta1.StatefulSet",
"v1.LabelSelector": "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector",
"v1.ScaleSpec": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.ScaleSpec",
"v1.DownwardAPIVolumeFile": "k8s.io/kubernetes/pkg/api/v1.DownwardAPIVolumeFile",
"v1beta1.HorizontalPodAutoscaler": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HorizontalPodAutoscaler",
"v1.AWSElasticBlockStoreVolumeSource": "k8s.io/kubernetes/pkg/api/v1.AWSElasticBlockStoreVolumeSource",
"v1.ComponentStatus": "k8s.io/kubernetes/pkg/api/v1.ComponentStatus",
"v2alpha1.JobSpec": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobSpec",
"v1.ContainerImage": "k8s.io/kubernetes/pkg/api/v1.ContainerImage",
"v1.ReplicationControllerStatus": "k8s.io/kubernetes/pkg/api/v1.ReplicationControllerStatus",
"v1.ResourceQuota": "k8s.io/kubernetes/pkg/api/v1.ResourceQuota",
"v1beta1.NetworkPolicyList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicyList",
"v1beta1.NonResourceAttributes": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.NonResourceAttributes",
"v1.JobCondition": "k8s.io/kubernetes/pkg/apis/batch/v1.JobCondition",
"v1.LabelSelectorRequirement": "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement",
"v1beta1.Deployment": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.Deployment",
"v1.LoadBalancerIngress": "k8s.io/kubernetes/pkg/api/v1.LoadBalancerIngress",
"v1.SecretList": "k8s.io/kubernetes/pkg/api/v1.SecretList",
"v1beta1.ReplicaSetSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetSpec",
"v1beta1.RoleBindingList": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.RoleBindingList",
"v1.ServicePort": "k8s.io/kubernetes/pkg/api/v1.ServicePort",
"v1.Namespace": "k8s.io/kubernetes/pkg/api/v1.Namespace",
"v1beta1.NetworkPolicyPeer": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicyPeer",
"v1.ReplicationControllerList": "k8s.io/kubernetes/pkg/api/v1.ReplicationControllerList",
"v1beta1.ReplicaSetCondition": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetCondition",
"v1.ReplicationControllerCondition": "k8s.io/kubernetes/pkg/api/v1.ReplicationControllerCondition",
"v1.DaemonEndpoint": "k8s.io/kubernetes/pkg/api/v1.DaemonEndpoint",
"v1beta1.NetworkPolicyPort": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicyPort",
"v1.NodeSystemInfo": "k8s.io/kubernetes/pkg/api/v1.NodeSystemInfo",
"v1.LimitRangeItem": "k8s.io/kubernetes/pkg/api/v1.LimitRangeItem",
"v1.ConfigMapVolumeSource": "k8s.io/kubernetes/pkg/api/v1.ConfigMapVolumeSource",
"v1beta1.ClusterRoleList": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.ClusterRoleList",
"v1beta1.ResourceAttributes": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.ResourceAttributes",
"v1.Pod": "k8s.io/kubernetes/pkg/api/v1.Pod",
"v1.FCVolumeSource": "k8s.io/kubernetes/pkg/api/v1.FCVolumeSource",
"v1beta1.SubresourceReference": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.SubresourceReference",
"v1.ResourceQuotaStatus": "k8s.io/kubernetes/pkg/api/v1.ResourceQuotaStatus",
"v1alpha1.RoleBinding": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.RoleBinding",
"v1.PodCondition": "k8s.io/kubernetes/pkg/api/v1.PodCondition",
"v1.GroupVersionForDiscovery": "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery",
"v1.NamespaceStatus": "k8s.io/kubernetes/pkg/api/v1.NamespaceStatus",
"v1.Job": "k8s.io/kubernetes/pkg/apis/batch/v1.Job",
"v1.PersistentVolumeClaimVolumeSource": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaimVolumeSource",
"v1.Handler": "k8s.io/kubernetes/pkg/api/v1.Handler",
"v1.ComponentStatusList": "k8s.io/kubernetes/pkg/api/v1.ComponentStatusList",
"v1.ServerAddressByClientCIDR": "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR",
"v1.PodAntiAffinity": "k8s.io/kubernetes/pkg/api/v1.PodAntiAffinity",
"v1.ISCSIVolumeSource": "k8s.io/kubernetes/pkg/api/v1.ISCSIVolumeSource",
"v1.ContainerStateRunning": "k8s.io/kubernetes/pkg/api/v1.ContainerStateRunning",
"v1.WeightedPodAffinityTerm": "k8s.io/kubernetes/pkg/api/v1.WeightedPodAffinityTerm",
"v1beta1.HostPortRange": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HostPortRange",
"v1.HorizontalPodAutoscalerSpec": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.HorizontalPodAutoscalerSpec",
"v1.HorizontalPodAutoscalerList": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.HorizontalPodAutoscalerList",
"v1beta1.RoleRef": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.RoleRef",
"v1.Probe": "k8s.io/kubernetes/pkg/api/v1.Probe",
"v1beta1.IngressTLS": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressTLS",
"v1beta1.ThirdPartyResourceList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ThirdPartyResourceList",
"v1beta1.DaemonSet": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DaemonSet",
"v1.APIGroup": "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup",
"v1beta1.Subject": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.Subject",
"v1beta1.DeploymentList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentList",
"v1.NodeAffinity": "k8s.io/kubernetes/pkg/api/v1.NodeAffinity",
"v1beta1.RollingUpdateDeployment": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.RollingUpdateDeployment",
"v1beta1.APIVersion": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.APIVersion",
"v1alpha1.CertificateSigningRequest": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequest",
"v1.CinderVolumeSource": "k8s.io/kubernetes/pkg/api/v1.CinderVolumeSource",
"v1.NamespaceSpec": "k8s.io/kubernetes/pkg/api/v1.NamespaceSpec",
"v1beta1.PodDisruptionBudgetSpec": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.PodDisruptionBudgetSpec",
"v1.Patch": "k8s.io/apimachinery/pkg/apis/meta/v1.Patch",
"v1beta1.ClusterRoleBinding": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.ClusterRoleBinding",
"v1beta1.HorizontalPodAutoscalerSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HorizontalPodAutoscalerSpec",
"v1.PersistentVolumeClaimSpec": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaimSpec",
"v1.Secret": "k8s.io/kubernetes/pkg/api/v1.Secret",
"v1.NodeCondition": "k8s.io/kubernetes/pkg/api/v1.NodeCondition",
"v1.LocalObjectReference": "k8s.io/kubernetes/pkg/api/v1.LocalObjectReference",
"runtime.RawExtension": "k8s.io/apimachinery/pkg/runtime.RawExtension",
"v1.PreferredSchedulingTerm": "k8s.io/kubernetes/pkg/api/v1.PreferredSchedulingTerm",
"v1.RBDVolumeSource": "k8s.io/kubernetes/pkg/api/v1.RBDVolumeSource",
"v1.KeyToPath": "k8s.io/kubernetes/pkg/api/v1.KeyToPath",
"v1.ScaleStatus": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.ScaleStatus",
"v1alpha1.PolicyRule": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.PolicyRule",
"v1.EndpointPort": "k8s.io/kubernetes/pkg/api/v1.EndpointPort",
"v1beta1.IngressList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressList",
"v1.EndpointAddress": "k8s.io/kubernetes/pkg/api/v1.EndpointAddress",
"v1.NodeSelector": "k8s.io/kubernetes/pkg/api/v1.NodeSelector",
"v1beta1.StorageClassList": "k8s.io/kubernetes/pkg/apis/storage/v1beta1.StorageClassList",
"v1.ServiceList": "k8s.io/kubernetes/pkg/api/v1.ServiceList",
"v2alpha1.CronJobSpec": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.CronJobSpec",
"v1.ContainerStateTerminated": "k8s.io/kubernetes/pkg/api/v1.ContainerStateTerminated",
"v1beta1.TokenReview": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1.TokenReview",
"v1beta1.IngressBackend": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressBackend",
"v1.Time": "k8s.io/apimachinery/pkg/apis/meta/v1.Time",
"v1beta1.IngressSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressSpec",
"v2alpha1.JobTemplateSpec": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobTemplateSpec",
"v1.LimitRange": "k8s.io/kubernetes/pkg/api/v1.LimitRange",
"v1beta1.UserInfo": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1.UserInfo",
"v1.ResourceQuotaSpec": "k8s.io/kubernetes/pkg/api/v1.ResourceQuotaSpec",
"v1.ContainerPort": "k8s.io/kubernetes/pkg/api/v1.ContainerPort",
"v1beta1.HTTPIngressRuleValue": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HTTPIngressRuleValue",
"v1.AzureFileVolumeSource": "k8s.io/kubernetes/pkg/api/v1.AzureFileVolumeSource",
"v1beta1.NetworkPolicySpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicySpec",
"v1.PodTemplateSpec": "k8s.io/kubernetes/pkg/api/v1.PodTemplateSpec",
"v1.SecretVolumeSource": "k8s.io/kubernetes/pkg/api/v1.SecretVolumeSource",
"v1.PodSpec": "k8s.io/kubernetes/pkg/api/v1.PodSpec",
"v1.CephFSVolumeSource": "k8s.io/kubernetes/pkg/api/v1.CephFSVolumeSource",
"v1beta1.CPUTargetUtilization": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.CPUTargetUtilization",
"v1.Volume": "k8s.io/kubernetes/pkg/api/v1.Volume",
"v1beta1.Ingress": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.Ingress",
"v1beta1.HorizontalPodAutoscalerList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HorizontalPodAutoscalerList",
"v1.PersistentVolumeStatus": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeStatus",
"v1beta1.IDRange": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IDRange",
"v2alpha1.JobCondition": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobCondition",
"v1beta1.IngressRule": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.IngressRule",
"v1alpha1.RoleRef": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.RoleRef",
"v1.PodAffinityTerm": "k8s.io/kubernetes/pkg/api/v1.PodAffinityTerm",
"v1.ObjectReference": "k8s.io/kubernetes/pkg/api/v1.ObjectReference",
"v1.ServiceStatus": "k8s.io/kubernetes/pkg/api/v1.ServiceStatus",
"v1.APIResource": "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource",
"v1beta1.Scale": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.Scale",
"v1.AzureDiskVolumeSource": "k8s.io/kubernetes/pkg/api/v1.AzureDiskVolumeSource",
"v1beta1.SubjectAccessReviewStatus": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SubjectAccessReviewStatus",
"v1.ConfigMap": "k8s.io/kubernetes/pkg/api/v1.ConfigMap",
"v1.CrossVersionObjectReference": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.CrossVersionObjectReference",
"v1.APIVersions": "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions",
"v1alpha1.ClusterRoleList": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.ClusterRoleList",
"v1.Node": "k8s.io/kubernetes/pkg/api/v1.Node",
"resource.Quantity": "k8s.io/kubernetes/pkg/api/resource.Quantity",
"v1.Event": "k8s.io/kubernetes/pkg/api/v1.Event",
"v1.JobStatus": "k8s.io/kubernetes/pkg/apis/batch/v1.JobStatus",
"v1.PersistentVolumeSpec": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeSpec",
"v1beta1.SubjectAccessReviewSpec": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SubjectAccessReviewSpec",
"v1.ResourceFieldSelector": "k8s.io/kubernetes/pkg/api/v1.ResourceFieldSelector",
"v1.EndpointSubset": "k8s.io/kubernetes/pkg/api/v1.EndpointSubset",
"v1alpha1.CertificateSigningRequestSpec": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequestSpec",
"v1.HostPathVolumeSource": "k8s.io/kubernetes/pkg/api/v1.HostPathVolumeSource",
"v1.LoadBalancerStatus": "k8s.io/kubernetes/pkg/api/v1.LoadBalancerStatus",
"v1beta1.HTTPIngressPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HTTPIngressPath",
"v1beta1.Role": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.Role",
"v1beta1.DeploymentStrategy": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentStrategy",
"v1beta1.RunAsUserStrategyOptions": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.RunAsUserStrategyOptions",
"v1beta1.DeploymentSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentSpec",
"v1.ExecAction": "k8s.io/kubernetes/pkg/api/v1.ExecAction",
"v1beta1.PodSecurityPolicySpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.PodSecurityPolicySpec",
"v1.HorizontalPodAutoscalerStatus": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.HorizontalPodAutoscalerStatus",
"v1.PersistentVolumeList": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeList",
"v1alpha1.ClusterRole": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.ClusterRole",
"v1.JobSpec": "k8s.io/kubernetes/pkg/apis/batch/v1.JobSpec",
"v1beta1.DaemonSetSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DaemonSetSpec",
"v2alpha1.CronJobList": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.CronJobList",
"v1.Endpoints": "k8s.io/kubernetes/pkg/api/v1.Endpoints",
"v1.SELinuxOptions": "k8s.io/kubernetes/pkg/api/v1.SELinuxOptions",
"v1beta1.SelfSubjectAccessReviewSpec": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SelfSubjectAccessReviewSpec",
"v1beta1.ScaleStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ScaleStatus",
"v1.NodeSelectorTerm": "k8s.io/kubernetes/pkg/api/v1.NodeSelectorTerm",
"v1alpha1.CertificateSigningRequestStatus": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequestStatus",
"v1.StatusDetails": "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails",
"v2alpha1.JobStatus": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobStatus",
"v1beta1.DeploymentRollback": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentRollback",
"v1.GlusterfsVolumeSource": "k8s.io/kubernetes/pkg/api/v1.GlusterfsVolumeSource",
"v1.ServiceAccountList": "k8s.io/kubernetes/pkg/api/v1.ServiceAccountList",
"v1.JobList": "k8s.io/kubernetes/pkg/apis/batch/v1.JobList",
"v1.EventList": "k8s.io/kubernetes/pkg/api/v1.EventList",
"v1.ContainerStateWaiting": "k8s.io/kubernetes/pkg/api/v1.ContainerStateWaiting",
"v1.APIResourceList": "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList",
"v1.ContainerStatus": "k8s.io/kubernetes/pkg/api/v1.ContainerStatus",
"v2alpha1.JobList": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.JobList",
"v1.ConfigMapKeySelector": "k8s.io/kubernetes/pkg/api/v1.ConfigMapKeySelector",
"v1.PhotonPersistentDiskVolumeSource": "k8s.io/kubernetes/pkg/api/v1.PhotonPersistentDiskVolumeSource",
"v1.PodTemplateList": "k8s.io/kubernetes/pkg/api/v1.PodTemplateList",
"v1.PersistentVolumeClaimStatus": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaimStatus",
"v1.ServiceAccount": "k8s.io/kubernetes/pkg/api/v1.ServiceAccount",
"v1alpha1.CertificateSigningRequestList": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequestList",
"v1beta1.SupplementalGroupsStrategyOptions": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.SupplementalGroupsStrategyOptions",
"v1.HTTPHeader": "k8s.io/kubernetes/pkg/api/v1.HTTPHeader",
"version.Info": "k8s.io/apimachinery/pkg/version.Info",
"v1.EventSource": "k8s.io/kubernetes/pkg/api/v1.EventSource",
"v1alpha1.ClusterRoleBindingList": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.ClusterRoleBindingList",
"v1.OwnerReference": "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference",
"v1beta1.ClusterRoleBindingList": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.ClusterRoleBindingList",
"v1beta1.ScaleSpec": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ScaleSpec",
"v1.GitRepoVolumeSource": "k8s.io/kubernetes/pkg/api/v1.GitRepoVolumeSource",
"v1beta1.NetworkPolicy": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.NetworkPolicy",
"v1.ConfigMapEnvSource": "k8s.io/kubernetes/pkg/api/v1.ConfigMapEnvSource",
"v1.PodTemplate": "k8s.io/kubernetes/pkg/api/v1.PodTemplate",
"v1beta1.DeploymentCondition": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DeploymentCondition",
"v1beta1.PodDisruptionBudgetStatus": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.PodDisruptionBudgetStatus",
"v1.EnvVar": "k8s.io/kubernetes/pkg/api/v1.EnvVar",
"v1.LimitRangeSpec": "k8s.io/kubernetes/pkg/api/v1.LimitRangeSpec",
"v1.DownwardAPIVolumeSource": "k8s.io/kubernetes/pkg/api/v1.DownwardAPIVolumeSource",
"v1.NodeDaemonEndpoints": "k8s.io/kubernetes/pkg/api/v1.NodeDaemonEndpoints",
"v1.ComponentCondition": "k8s.io/kubernetes/pkg/api/v1.ComponentCondition",
"v1alpha1.CertificateSigningRequestCondition": "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1.CertificateSigningRequestCondition",
"v1.SecurityContext": "k8s.io/kubernetes/pkg/api/v1.SecurityContext",
"v1beta1.LocalSubjectAccessReview": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.LocalSubjectAccessReview",
"v1beta1.StatefulSetSpec": "k8s.io/kubernetes/pkg/apis/apps/v1beta1.StatefulSetSpec",
"v1.NodeAddress": "k8s.io/kubernetes/pkg/api/v1.NodeAddress",
"v1.QuobyteVolumeSource": "k8s.io/kubernetes/pkg/api/v1.QuobyteVolumeSource",
"v1.Capabilities": "k8s.io/kubernetes/pkg/api/v1.Capabilities",
"v1.GCEPersistentDiskVolumeSource": "k8s.io/kubernetes/pkg/api/v1.GCEPersistentDiskVolumeSource",
"v1beta1.ReplicaSet": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSet",
"v1beta1.HorizontalPodAutoscalerStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.HorizontalPodAutoscalerStatus",
"v1beta1.PolicyRule": "k8s.io/kubernetes/pkg/apis/rbac/v1beta1.PolicyRule",
"v1.ConfigMapList": "k8s.io/kubernetes/pkg/api/v1.ConfigMapList",
"v1.Lifecycle": "k8s.io/kubernetes/pkg/api/v1.Lifecycle",
"v1beta1.SelfSubjectAccessReview": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SelfSubjectAccessReview",
"v2alpha1.CronJob": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.CronJob",
"v2alpha1.CronJobStatus": "k8s.io/kubernetes/pkg/apis/batch/v2alpha1.CronJobStatus",
"v1beta1.SubjectAccessReview": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1.SubjectAccessReview",
"v1.Preconditions": "k8s.io/kubernetes/pkg/api/v1.Preconditions",
"v1beta1.DaemonSetList": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.DaemonSetList",
"v1.PersistentVolumeClaim": "k8s.io/kubernetes/pkg/api/v1.PersistentVolumeClaim",
"v1.Scale": "k8s.io/kubernetes/pkg/apis/autoscaling/v1.Scale",
"v1beta1.StatefulSetStatus": "k8s.io/kubernetes/pkg/apis/apps/v1beta1.StatefulSetStatus",
"v1.NFSVolumeSource": "k8s.io/kubernetes/pkg/api/v1.NFSVolumeSource",
"v1.ObjectFieldSelector": "k8s.io/kubernetes/pkg/api/v1.ObjectFieldSelector",
"v1.ResourceRequirements": "k8s.io/kubernetes/pkg/api/v1.ResourceRequirements",
"v1.WatchEvent": "k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent",
"v1.ReplicationControllerSpec": "k8s.io/kubernetes/pkg/api/v1.ReplicationControllerSpec",
"v1.HTTPGetAction": "k8s.io/kubernetes/pkg/api/v1.HTTPGetAction",
"v1beta1.RollbackConfig": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.RollbackConfig",
"v1beta1.TokenReviewSpec": "k8s.io/kubernetes/pkg/apis/authentication/v1beta1.TokenReviewSpec",
"v1.PodSecurityContext": "k8s.io/kubernetes/pkg/api/v1.PodSecurityContext",
"v1beta1.PodDisruptionBudgetList": "k8s.io/kubernetes/pkg/apis/policy/v1beta1.PodDisruptionBudgetList",
"v1.VolumeMount": "k8s.io/kubernetes/pkg/api/v1.VolumeMount",
"v1.ReplicationController": "k8s.io/kubernetes/pkg/api/v1.ReplicationController",
"v1.NamespaceList": "k8s.io/kubernetes/pkg/api/v1.NamespaceList",
"v1alpha1.ClusterRoleBinding": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.ClusterRoleBinding",
"v1.TCPSocketAction": "k8s.io/kubernetes/pkg/api/v1.TCPSocketAction",
"v1.Binding": "k8s.io/kubernetes/pkg/api/v1.Binding",
"v1beta1.ReplicaSetStatus": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetStatus",
"intstr.IntOrString": "k8s.io/kubernetes/pkg/util/intstr.IntOrString",
"v1.EndpointsList": "k8s.io/kubernetes/pkg/api/v1.EndpointsList",
"v1.Container": "k8s.io/kubernetes/pkg/api/v1.Container",
"v1alpha1.RoleList": "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1.RoleList",
"v1.VsphereVirtualDiskVolumeSource": "k8s.io/kubernetes/pkg/api/v1.VsphereVirtualDiskVolumeSource",
"v1.NodeList": "k8s.io/kubernetes/pkg/api/v1.NodeList",
"v1.EmptyDirVolumeSource": "k8s.io/kubernetes/pkg/api/v1.EmptyDirVolumeSource",
"v1beta1.FSGroupStrategyOptions": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1.FSGroupStrategyOptions",
"v1.Status": "k8s.io/apimachinery/pkg/apis/meta/v1.Status",
}
for k, v := range compatibilityMap {
if _, found := s.Definitions[v]; !found {
continue
}
s.Definitions[k] = spec.Schema{
SchemaProps: spec.SchemaProps{
Ref: spec.MustCreateRef("#/definitions/" + openapi.EscapeJsonPointer(v)),
Description: fmt.Sprintf("Deprecated. Please use %s instead.", v),
},
}
}
return s, nil
}
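// Illustrative note (not part of the original file): after the loop above, every
// deprecated short name in compatibilityMap that has a canonical definition becomes
// a thin alias schema whose Ref points at that definition. A minimal sketch of how
// such an alias might be inspected, assuming the go-openapi spec types used above:
//
//	if schema, ok := s.Definitions["v1.Pod"]; ok {
//		// Description reads "Deprecated. Please use k8s.io/kubernetes/pkg/api/v1.Pod instead."
//		fmt.Println(schema.Description)
//		// The $ref points at the escaped canonical definition, e.g.
//		// "#/definitions/k8s.io~1kubernetes~1pkg~1api~1v1.Pod".
//		fmt.Println(schema.Ref.String())
//	}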
|
[
"\"KUBE_API_VERSIONS\"",
"\"KUBE_API_VERSIONS\""
] |
[] |
[
"KUBE_API_VERSIONS"
] |
[]
|
["KUBE_API_VERSIONS"]
|
go
| 1 | 0 | |
cmd/integration/integration.go
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// A basic integration test for the service.
// Assumes that there is a pre-existing etcd server running on localhost.
package main
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"os"
"reflect"
gruntime "runtime"
"strconv"
"strings"
"sync"
"time"
kubeletapp "k8s.io/kubernetes/cmd/kubelet/app"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/controller/framework/informers"
nodecontroller "k8s.io/kubernetes/pkg/controller/node"
replicationcontroller "k8s.io/kubernetes/pkg/controller/replication"
cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
"k8s.io/kubernetes/pkg/kubelet/cm"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/flag"
"k8s.io/kubernetes/pkg/util/flowcontrol"
utilnet "k8s.io/kubernetes/pkg/util/net"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume/empty_dir"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration"
"k8s.io/kubernetes/test/integration/framework"
etcd "github.com/coreos/etcd/client"
"github.com/golang/glog"
"github.com/spf13/pflag"
"golang.org/x/net/context"
)
var (
fakeDocker1 = dockertools.NewFakeDockerClient()
fakeDocker2 = dockertools.NewFakeDockerClient()
// Limit the number of concurrent tests.
maxConcurrency int
watchCache bool
longTestTimeout = time.Second * 500
maxTestTimeout = time.Minute * 15
)
type delegateHandler struct {
delegate http.Handler
}
func (h *delegateHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if h.delegate != nil {
h.delegate.ServeHTTP(w, req)
return
}
w.WriteHeader(http.StatusNotFound)
}
func startComponents(firstManifestURL, secondManifestURL string) (string, string) {
// Setup
handler := delegateHandler{}
apiServer := httptest.NewServer(&handler)
cfg := etcd.Config{
Endpoints: []string{"http://127.0.0.1:4001"},
}
etcdClient, err := etcd.New(cfg)
if err != nil {
glog.Fatalf("Error creating etcd client: %v", err)
}
glog.Infof("Creating etcd client pointing to %v", cfg.Endpoints)
keysAPI := etcd.NewKeysAPI(etcdClient)
sleep := 4 * time.Second
ok := false
for i := 0; i < 3; i++ {
keys, err := keysAPI.Get(context.TODO(), "/", nil)
if err != nil {
glog.Warningf("Unable to list root etcd keys: %v", err)
if i < 2 {
time.Sleep(sleep)
sleep = sleep * sleep
}
continue
}
for _, node := range keys.Node.Nodes {
if _, err := keysAPI.Delete(context.TODO(), node.Key, &etcd.DeleteOptions{Recursive: true}); err != nil {
glog.Fatalf("Unable delete key: %v", err)
}
}
ok = true
break
}
if !ok {
glog.Fatalf("Failed to connect to etcd")
}
cl := client.NewOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
// TODO: caesarxuchao: hacky way to specify version of Experimental client.
// We will fix this by supporting multiple group versions in Config
cl.ExtensionsClient = client.NewExtensionsOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()}})
// Master
host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
if err != nil {
glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
}
portNumber, err := strconv.Atoi(port)
if err != nil {
glog.Fatalf("Nonnumeric port? %v", err)
}
publicAddress := net.ParseIP(host)
if publicAddress == nil {
glog.Fatalf("No public address for %s", host)
}
// The caller of master.New should guarantee publicAddress is properly set
hostIP, err := utilnet.ChooseBindAddress(publicAddress)
if err != nil {
glog.Fatalf("Unable to find suitable network address.error='%v' . "+
"Fail to get a valid public address for master.", err)
}
masterConfig := framework.NewMasterConfig()
masterConfig.EnableCoreControllers = true
masterConfig.EnableProfiling = true
masterConfig.ReadWritePort = portNumber
masterConfig.PublicAddress = hostIP
masterConfig.CacheTimeout = 2 * time.Second
masterConfig.EnableWatchCache = watchCache
// Create a master and install handlers into mux.
m, err := master.New(masterConfig)
if err != nil {
glog.Fatalf("Error in bringing up the master: %v", err)
}
handler.delegate = m.Handler
// Scheduler
schedulerConfigFactory := factory.NewConfigFactory(cl, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
glog.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(cl.Events(""))
scheduler.New(schedulerConfig).Run()
podInformer := informers.CreateSharedPodIndexInformer(clientset, controller.NoResyncPeriodFunc())
// Ensure the service endpoints are synced several times within the window that the integration tests wait for.
go endpointcontroller.NewEndpointController(podInformer, clientset).
Run(3, wait.NeverStop)
// TODO: Write an integration test for the replication controllers watch.
go replicationcontroller.NewReplicationManager(podInformer, clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas, 4096).
Run(3, wait.NeverStop)
go podInformer.Run(wait.NeverStop)
nodeController := nodecontroller.NewNodeController(nil, clientset, 5*time.Minute, flowcontrol.NewFakeAlwaysRateLimiter(), flowcontrol.NewFakeAlwaysRateLimiter(),
40*time.Second, 60*time.Second, 5*time.Second, nil, nil, 0, false)
nodeController.Run(5 * time.Second)
cadvisorInterface := new(cadvisortest.Fake)
// Kubelet (localhost)
testRootDir := integration.MakeTempDirOrDie("kubelet_integ_1.", "")
configFilePath := integration.MakeTempDirOrDie("config", testRootDir)
glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
cm := cm.NewStubContainerManager()
kcfg := kubeletapp.SimpleKubelet(
clientset,
fakeDocker1,
"localhost",
testRootDir,
firstManifestURL,
"127.0.0.1",
10250, /* KubeletPort */
0, /* ReadOnlyPort */
api.NamespaceDefault,
empty_dir.ProbeVolumePlugins(),
nil,
cadvisorInterface,
configFilePath,
nil,
&containertest.FakeOS{},
1*time.Second, /* FileCheckFrequency */
1*time.Second, /* HTTPCheckFrequency */
10*time.Second, /* MinimumGCAge */
3*time.Second, /* NodeStatusUpdateFrequency */
10*time.Second, /* SyncFrequency */
10*time.Second, /* OutOfDiskTransitionFrequency */
10*time.Second, /* EvictionPressureTransitionPeriod */
40, /* MaxPods */
cm, net.ParseIP("127.0.0.1"))
kubeletapp.RunKubelet(kcfg)
// Kubelet (machine)
// Create a second kubelet so that the guestbook example's two redis slaves both
// have a place where they can be scheduled.
testRootDir = integration.MakeTempDirOrDie("kubelet_integ_2.", "")
glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
kcfg = kubeletapp.SimpleKubelet(
clientset,
fakeDocker2,
"127.0.0.1",
testRootDir,
secondManifestURL,
"127.0.0.1",
10251, /* KubeletPort */
0, /* ReadOnlyPort */
api.NamespaceDefault,
empty_dir.ProbeVolumePlugins(),
nil,
cadvisorInterface,
"",
nil,
&containertest.FakeOS{},
1*time.Second, /* FileCheckFrequency */
1*time.Second, /* HTTPCheckFrequency */
10*time.Second, /* MinimumGCAge */
3*time.Second, /* NodeStatusUpdateFrequency */
10*time.Second, /* SyncFrequency */
10*time.Second, /* OutOfDiskTransitionFrequency */
10*time.Second, /* EvictionPressureTransitionPeriod */
40, /* MaxPods */
cm,
net.ParseIP("127.0.0.1"))
kubeletapp.RunKubelet(kcfg)
return apiServer.URL, configFilePath
}
func makeTempDirOrDie(prefix string, baseDir string) string {
if baseDir == "" {
baseDir = "/tmp"
}
tempDir, err := ioutil.TempDir(baseDir, prefix)
if err != nil {
glog.Fatalf("Can't make a temp rootdir: %v", err)
}
if err = os.MkdirAll(tempDir, 0750); err != nil {
glog.Fatalf("Can't mkdir(%q): %v", tempDir, err)
}
return tempDir
}
// podsOnNodes returns true when all of the selected pods exist on a node.
func podsOnNodes(c *client.Client, podNamespace string, labelSelector labels.Selector) wait.ConditionFunc {
// Wait until all pods are running on the node.
return func() (bool, error) {
options := api.ListOptions{LabelSelector: labelSelector}
pods, err := c.Pods(podNamespace).List(options)
if err != nil {
glog.Infof("Unable to get pods to list: %v", err)
return false, nil
}
for i := range pods.Items {
pod := pods.Items[i]
podString := fmt.Sprintf("%s/%s", pod.Namespace, pod.Name)
glog.Infof("Check whether pod %q exists on node %q", podString, pod.Spec.NodeName)
if len(pod.Spec.NodeName) == 0 {
glog.Infof("Pod %q is not bound to a host yet", podString)
return false, nil
}
if pod.Status.Phase != api.PodRunning {
glog.Infof("Pod %q is not running, status: %v", podString, pod.Status.Phase)
return false, nil
}
}
return true, nil
}
}
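// Usage sketch (not from the original file): podsOnNodes and the other ConditionFunc
// helpers below are intended to be polled, for example (assuming a *client.Client
// named cl):
//
//	selector := labels.SelectorFromSet(labels.Set{"name": "selflinktest"})
//	if err := wait.Poll(time.Second, longTestTimeout,
//		podsOnNodes(cl, api.NamespaceDefault, selector)); err != nil {
//		glog.Fatalf("pods never reached Running: %v", err)
//	}
//
// The exact call sites in the rest of the suite may differ; this only shows the
// wait.Poll pattern these helpers are designed for.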
func endpointsSet(c *client.Client, serviceNamespace, serviceID string, endpointCount int) wait.ConditionFunc {
return func() (bool, error) {
endpoints, err := c.Endpoints(serviceNamespace).Get(serviceID)
if err != nil {
glog.Infof("Error getting endpoints: %v", err)
return false, nil
}
count := 0
for _, ss := range endpoints.Subsets {
for _, addr := range ss.Addresses {
for _, port := range ss.Ports {
count++
glog.Infof("%s/%s endpoint: %s:%d %#v", serviceNamespace, serviceID, addr.IP, port.Port, addr.TargetRef)
}
}
}
return count == endpointCount, nil
}
}
func countEndpoints(eps *api.Endpoints) int {
count := 0
for i := range eps.Subsets {
count += len(eps.Subsets[i].Addresses) * len(eps.Subsets[i].Ports)
}
return count
}
func podExists(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
return func() (bool, error) {
_, err := c.Pods(podNamespace).Get(podName)
return err == nil, nil
}
}
func podNotFound(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
return func() (bool, error) {
_, err := c.Pods(podNamespace).Get(podName)
return apierrors.IsNotFound(err), nil
}
}
func podRunning(c *client.Client, podNamespace string, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Pods(podNamespace).Get(podName)
if apierrors.IsNotFound(err) {
glog.V(2).Infof("Pod %s/%s was not found", podNamespace, podName)
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry, but log the error.
glog.Errorf("Error when reading pod %q: %v", podName, err)
return false, nil
}
if pod.Status.Phase != api.PodRunning {
glog.V(2).Infof("Pod %s/%s is not running. In phase %q", podNamespace, podName, pod.Status.Phase)
return false, nil
}
return true, nil
}
}
func runAPIVersionsTest(c *client.Client) {
g, err := c.ServerGroups()
clientVersion := c.APIVersion().String()
if err != nil {
glog.Fatalf("Failed to get api versions: %v", err)
}
versions := unversioned.ExtractGroupVersions(g)
// Verify that the server supports the API version used by the client.
for _, version := range versions {
if version == clientVersion {
glog.Infof("Version test passed")
return
}
}
glog.Fatalf("Server does not support APIVersion used by client. Server supported APIVersions: '%v', client APIVersion: '%v'", versions, clientVersion)
}
func runSelfLinkTestOnNamespace(c *client.Client, namespace string) {
svcBody := api.Service{
ObjectMeta: api.ObjectMeta{
Name: "selflinktest",
Namespace: namespace,
Labels: map[string]string{
"name": "selflinktest",
},
},
Spec: api.ServiceSpec{
// This is here because validation requires it.
Selector: map[string]string{
"foo": "bar",
},
Ports: []api.ServicePort{{
Port: 12345,
Protocol: "TCP",
}},
SessionAffinity: "None",
},
}
services := c.Services(namespace)
svc, err := services.Create(&svcBody)
if err != nil {
glog.Fatalf("Failed creating selflinktest service: %v", err)
}
err = c.Get().RequestURI(svc.SelfLink).Do().Into(svc)
if err != nil {
glog.Fatalf("Failed listing service with supplied self link '%v': %v", svc.SelfLink, err)
}
svcList, err := services.List(api.ListOptions{})
if err != nil {
glog.Fatalf("Failed listing services: %v", err)
}
err = c.Get().RequestURI(svcList.SelfLink).Do().Into(svcList)
if err != nil {
glog.Fatalf("Failed listing services with supplied self link '%v': %v", svcList.SelfLink, err)
}
found := false
for i := range svcList.Items {
item := &svcList.Items[i]
if item.Name != "selflinktest" {
continue
}
found = true
err = c.Get().RequestURI(item.SelfLink).Do().Into(svc)
if err != nil {
glog.Fatalf("Failed listing service with supplied self link '%v': %v", item.SelfLink, err)
}
break
}
if !found {
glog.Fatalf("never found selflinktest service in namespace %s", namespace)
}
glog.Infof("Self link test passed in namespace %s", namespace)
// TODO: Should test PUT at some point, too.
}
func runAtomicPutTest(c *client.Client) {
svcBody := api.Service{
TypeMeta: unversioned.TypeMeta{
APIVersion: c.APIVersion().String(),
},
ObjectMeta: api.ObjectMeta{
Name: "atomicservice",
Labels: map[string]string{
"name": "atomicService",
},
},
Spec: api.ServiceSpec{
// This is here because validation requires it.
Selector: map[string]string{
"foo": "bar",
},
Ports: []api.ServicePort{{
Port: 12345,
Protocol: "TCP",
}},
SessionAffinity: "None",
},
}
services := c.Services(api.NamespaceDefault)
svc, err := services.Create(&svcBody)
if err != nil {
glog.Fatalf("Failed creating atomicService: %v", err)
}
glog.Info("Created atomicService")
testLabels := labels.Set{
"foo": "bar",
}
for i := 0; i < 5; i++ {
// a: z, b: y, etc...
testLabels[string([]byte{byte('a' + i)})] = string([]byte{byte('z' - i)})
}
var wg sync.WaitGroup
wg.Add(len(testLabels))
for label, value := range testLabels {
go func(l, v string) {
for {
glog.Infof("Starting to update (%s, %s)", l, v)
tmpSvc, err := services.Get(svc.Name)
if err != nil {
glog.Errorf("Error getting atomicService: %v", err)
continue
}
if tmpSvc.Spec.Selector == nil {
tmpSvc.Spec.Selector = map[string]string{l: v}
} else {
tmpSvc.Spec.Selector[l] = v
}
glog.Infof("Posting update (%s, %s)", l, v)
tmpSvc, err = services.Update(tmpSvc)
if err != nil {
if apierrors.IsConflict(err) {
glog.Infof("Conflict: (%s, %s)", l, v)
// This is what we expect.
continue
}
glog.Errorf("Unexpected error putting atomicService: %v", err)
continue
}
break
}
glog.Infof("Done update (%s, %s)", l, v)
wg.Done()
}(label, value)
}
wg.Wait()
svc, err = services.Get(svc.Name)
if err != nil {
glog.Fatalf("Failed getting atomicService after writers are complete: %v", err)
}
if !reflect.DeepEqual(testLabels, labels.Set(svc.Spec.Selector)) {
glog.Fatalf("Selector PUTs were not atomic: wanted %v, got %v", testLabels, svc.Spec.Selector)
}
glog.Info("Atomic PUTs work.")
}
func runPatchTest(c *client.Client) {
name := "patchservice"
resource := "services"
svcBody := api.Service{
TypeMeta: unversioned.TypeMeta{
APIVersion: c.APIVersion().String(),
},
ObjectMeta: api.ObjectMeta{
Name: name,
Labels: map[string]string{},
},
Spec: api.ServiceSpec{
// This is here because validation requires it.
Selector: map[string]string{
"foo": "bar",
},
Ports: []api.ServicePort{{
Port: 12345,
Protocol: "TCP",
}},
SessionAffinity: "None",
},
}
services := c.Services(api.NamespaceDefault)
svc, err := services.Create(&svcBody)
if err != nil {
glog.Fatalf("Failed creating patchservice: %v", err)
}
patchBodies := map[unversioned.GroupVersion]map[api.PatchType]struct {
AddLabelBody []byte
RemoveLabelBody []byte
RemoveAllLabelsBody []byte
}{
v1.SchemeGroupVersion: {
api.JSONPatchType: {
[]byte(`[{"op":"add","path":"/metadata/labels","value":{"foo":"bar","baz":"qux"}}]`),
[]byte(`[{"op":"remove","path":"/metadata/labels/foo"}]`),
[]byte(`[{"op":"remove","path":"/metadata/labels"}]`),
},
api.MergePatchType: {
[]byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
[]byte(`{"metadata":{"labels":{"foo":null}}}`),
[]byte(`{"metadata":{"labels":null}}`),
},
api.StrategicMergePatchType: {
[]byte(`{"metadata":{"labels":{"foo":"bar","baz":"qux"}}}`),
[]byte(`{"metadata":{"labels":{"foo":null}}}`),
[]byte(`{"metadata":{"labels":{"$patch":"replace"}}}`),
},
},
}
pb := patchBodies[c.APIVersion()]
execPatch := func(pt api.PatchType, body []byte) error {
return c.Patch(pt).
Resource(resource).
Namespace(api.NamespaceDefault).
Name(name).
Body(body).
Do().
Error()
}
for k, v := range pb {
// add label
err := execPatch(k, v.AddLabelBody)
if err != nil {
glog.Fatalf("Failed updating patchservice with patch type %s: %v", k, err)
}
svc, err = services.Get(name)
if err != nil {
glog.Fatalf("Failed getting patchservice: %v", err)
}
if len(svc.Labels) != 2 || svc.Labels["foo"] != "bar" || svc.Labels["baz"] != "qux" {
glog.Fatalf("Failed updating patchservice with patch type %s: labels are: %v", k, svc.Labels)
}
// remove one label
err = execPatch(k, v.RemoveLabelBody)
if err != nil {
glog.Fatalf("Failed updating patchservice with patch type %s: %v", k, err)
}
svc, err = services.Get(name)
if err != nil {
glog.Fatalf("Failed getting patchservice: %v", err)
}
if len(svc.Labels) != 1 || svc.Labels["baz"] != "qux" {
glog.Fatalf("Failed updating patchservice with patch type %s: labels are: %v", k, svc.Labels)
}
// remove all labels
err = execPatch(k, v.RemoveAllLabelsBody)
if err != nil {
glog.Fatalf("Failed updating patchservice with patch type %s: %v", k, err)
}
svc, err = services.Get(name)
if err != nil {
glog.Fatalf("Failed getting patchservice: %v", err)
}
if svc.Labels != nil {
glog.Fatalf("Failed remove all labels from patchservice with patch type %s: %v", k, svc.Labels)
}
}
glog.Info("PATCHs work.")
}
func runMasterServiceTest(client *client.Client) {
time.Sleep(12 * time.Second)
svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{})
if err != nil {
glog.Fatalf("Unexpected error listing services: %v", err)
}
var foundRW bool
found := sets.String{}
for i := range svcList.Items {
found.Insert(svcList.Items[i].Name)
if svcList.Items[i].Name == "kubernetes" {
foundRW = true
}
}
if foundRW {
ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
if err != nil {
glog.Fatalf("Unexpected error listing endpoints for kubernetes service: %v", err)
}
if countEndpoints(ep) == 0 {
glog.Fatalf("No endpoints for kubernetes service: %v", ep)
}
} else {
glog.Errorf("No RW service found: %v", found)
glog.Fatal("Kubernetes service test failed")
}
glog.Infof("Master service test passed.")
}
func runSchedulerNoPhantomPodsTest(client *client.Client) {
pod := &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "c1",
Image: "gcr.io/google_containers/pause-amd64:3.0",
Ports: []api.ContainerPort{
{ContainerPort: 1234, HostPort: 9999},
},
ImagePullPolicy: api.PullIfNotPresent,
},
},
},
}
// Assuming we only have two kubelets, the third pod here won't schedule
// if the scheduler doesn't correctly handle the delete for the second
// pod.
pod.ObjectMeta.Name = "phantom.foo"
foo, err := client.Pods(api.NamespaceDefault).Create(pod)
if err != nil {
glog.Fatalf("Failed to create pod: %v, %v", pod, err)
}
if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, foo.Namespace, foo.Name)); err != nil {
glog.Fatalf("FAILED: pod never started running %v", err)
}
pod.ObjectMeta.Name = "phantom.bar"
bar, err := client.Pods(api.NamespaceDefault).Create(pod)
if err != nil {
glog.Fatalf("Failed to create pod: %v, %v", pod, err)
}
if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, bar.Namespace, bar.Name)); err != nil {
glog.Fatalf("FAILED: pod never started running %v", err)
}
// Delete a pod to free up room.
glog.Infof("Deleting pod %v", bar.Name)
err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(0))
if err != nil {
glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
}
pod.ObjectMeta.Name = "phantom.baz"
baz, err := client.Pods(api.NamespaceDefault).Create(pod)
if err != nil {
glog.Fatalf("Failed to create pod: %v, %v", pod, err)
}
if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, baz.Namespace, baz.Name)); err != nil {
if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v, err: %v", pod, err)
} else {
glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: err: %v, perr: %v", err, perr)
}
}
glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
type testFunc func(*client.Client)
func addFlags(fs *pflag.FlagSet) {
fs.IntVar(
&maxConcurrency, "max-concurrency", -1, "Maximum number of tests to be run simultaneously. Unlimited if set to negative.")
fs.BoolVar(
&watchCache, "watch-cache", false, "Turn on watch cache on API server.")
}
func main() {
gruntime.GOMAXPROCS(gruntime.NumCPU())
addFlags(pflag.CommandLine)
flag.InitFlags()
utilruntime.ReallyCrash = true
util.InitLogs()
defer util.FlushLogs()
go func() {
defer util.FlushLogs()
time.Sleep(maxTestTimeout)
glog.Fatalf("This test has timed out.")
}()
glog.Infof("Running tests for APIVersion: %s", os.Getenv("KUBE_TEST_API"))
firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL)
// Ok. we're good to go.
glog.Infof("API Server started on %s", apiServerURL)
// Wait for the synchronization threads to come up.
time.Sleep(time.Second * 10)
kubeClient := client.NewOrDie(
&restclient.Config{
Host: apiServerURL,
ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
QPS: 20,
Burst: 50,
})
// TODO: caesarxuchao: hacky way to specify version of Experimental client.
// We will fix this by supporting multiple group versions in Config
kubeClient.ExtensionsClient = client.NewExtensionsOrDie(
&restclient.Config{
Host: apiServerURL,
ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
QPS: 20,
Burst: 50,
})
// Run tests in parallel
testFuncs := []testFunc{
runAtomicPutTest,
runPatchTest,
runAPIVersionsTest,
runMasterServiceTest,
func(c *client.Client) {
runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
runSelfLinkTestOnNamespace(c, "other")
},
}
// Only run at most maxConcurrency tests in parallel.
if maxConcurrency <= 0 {
maxConcurrency = len(testFuncs)
}
glog.Infof("Running %d tests in parallel.", maxConcurrency)
ch := make(chan struct{}, maxConcurrency)
var wg sync.WaitGroup
wg.Add(len(testFuncs))
for i := range testFuncs {
f := testFuncs[i]
go func() {
ch <- struct{}{}
f(kubeClient)
<-ch
wg.Done()
}()
}
wg.Wait()
close(ch)
// Check that kubelet tried to make the containers.
// Using a set to list unique creation attempts. Our fake is
// really stupid, so kubelet tries to create these multiple times.
createdConts := sets.String{}
for _, p := range fakeDocker1.Created {
// The last 8 characters are random, so slice them off.
if n := len(p); n > 8 {
createdConts.Insert(p[:n-8])
}
}
for _, p := range fakeDocker2.Created {
// The last 8 characters are random, so slice them off.
if n := len(p); n > 8 {
createdConts.Insert(p[:n-8])
}
}
// We expect 6 containers:
// 1 pod infra container + 2 containers from the URL on the first Kubelet +
// 1 pod infra container + 2 containers from the URL on the second Kubelet.
// The total number of containers created is 6.
if len(createdConts) != 6 {
glog.Fatalf("Expected 6 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
}
glog.Infof("OK - found created containers: %#v", createdConts.List())
// This test doesn't run with the others because it can't run in
// parallel and also it schedules extra pods which would change the
// above pod counting logic.
runSchedulerNoPhantomPodsTest(kubeClient)
glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
glog.Infof("\n\nLogging high latency metrics from the 10251 kubelet")
e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10251")
}
// ServeCachedManifestFile serves a file for kubelet to read.
func ServeCachedManifestFile(contents string) (servingAddress string) {
server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/manifest" {
w.Write([]byte(contents))
return
}
glog.Fatalf("Got request: %#v\n", r)
http.NotFound(w, r)
}))
return server.URL + "/manifest"
}
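// Note (added for clarity, not in the original file): the URL returned above is what
// startComponents receives as firstManifestURL and secondManifestURL and forwards to
// kubeletapp.SimpleKubelet, so each fake kubelet polls this tiny HTTP server for its
// pod manifest (testPodSpecFile below).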
const (
testPodSpecFile = `{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "container-vm-guestbook-pod-spec"
},
"spec": {
"containers": [
{
"name": "redis",
"image": "gcr.io/google_containers/redis:e2e",
"volumeMounts": [{
"name": "redis-data",
"mountPath": "/data"
}]
},
{
"name": "guestbook",
"image": "gcr.io/google_samples/gb-frontend:v3",
"ports": [{
"name": "www",
"hostPort": 80,
"containerPort": 80
}]
}],
"volumes": [{ "name": "redis-data" }]
}
}`
)
|
[
"\"KUBE_TEST_API\""
] |
[] |
[
"KUBE_TEST_API"
] |
[]
|
["KUBE_TEST_API"]
|
go
| 1 | 0 | |
python/ray/tests/test_basic.py
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from concurrent.futures import ThreadPoolExecutor
import glob
import json
import logging
from multiprocessing import Process
import os
import random
import re
import setproctitle
import shutil
import six
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
import numpy as np
import pickle
import pytest
import ray
import ray.ray_constants as ray_constants
import ray.tests.cluster_utils
import ray.tests.utils
logger = logging.getLogger(__name__)
def test_simple_serialization(ray_start_regular):
primitive_objects = [
# Various primitive types.
0,
0.0,
0.9,
1 << 62,
1 << 999,
"a",
string.printable,
"\u262F",
u"hello world",
u"\xff\xfe\x9c\x001\x000\x00",
None,
True,
False,
[],
(),
{},
type,
int,
set(),
# Collections types.
collections.Counter([np.random.randint(0, 10) for _ in range(100)]),
collections.OrderedDict([("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: 0, [("hello", 1), ("world", 2)]),
collections.defaultdict(lambda: [], [("hello", 1), ("world", 2)]),
collections.deque([1, 2, 3, "a", "b", "c", 3.5]),
# Numpy dtypes.
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
]
if sys.version_info < (3, 0):
primitive_objects.append(long(0)) # noqa: E501,F821
composite_objects = (
[[obj]
for obj in primitive_objects] + [(obj, )
for obj in primitive_objects] + [{
(): obj
} for obj in primitive_objects])
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in primitive_objects + composite_objects:
new_obj_1 = ray.get(f.remote(obj))
new_obj_2 = ray.get(ray.put(obj))
assert obj == new_obj_1
assert obj == new_obj_2
# TODO(rkn): The numpy dtypes currently come back as regular integers
# or floats.
if type(obj).__module__ != "numpy":
assert type(obj) == type(new_obj_1)
assert type(obj) == type(new_obj_2)
def test_complex_serialization(ray_start_regular):
def assert_equal(obj1, obj2):
module_numpy = (type(obj1).__module__ == np.__name__
or type(obj2).__module__ == np.__name__)
if module_numpy:
empty_shape = ((hasattr(obj1, "shape") and obj1.shape == ())
or (hasattr(obj2, "shape") and obj2.shape == ()))
if empty_shape:
# This is a special case: np.testing.assert_equal currently
# fails here because we do not properly handle different
# numerical types.
assert obj1 == obj2, ("Objects {} and {} are "
"different.".format(obj1, obj2))
else:
np.testing.assert_equal(obj1, obj2)
elif hasattr(obj1, "__dict__") and hasattr(obj2, "__dict__"):
special_keys = ["_pytype_"]
assert (set(list(obj1.__dict__.keys()) + special_keys) == set(
list(obj2.__dict__.keys()) + special_keys)), (
"Objects {} and {} are different.".format(obj1, obj2))
for key in obj1.__dict__.keys():
if key not in special_keys:
assert_equal(obj1.__dict__[key], obj2.__dict__[key])
elif type(obj1) is dict or type(obj2) is dict:
assert_equal(obj1.keys(), obj2.keys())
for key in obj1.keys():
assert_equal(obj1[key], obj2[key])
elif type(obj1) is list or type(obj2) is list:
assert len(obj1) == len(obj2), ("Objects {} and {} are lists with "
"different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif type(obj1) is tuple or type(obj2) is tuple:
assert len(obj1) == len(obj2), ("Objects {} and {} are tuples "
"with different lengths.".format(
obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
elif (ray.serialization.is_named_tuple(type(obj1))
or ray.serialization.is_named_tuple(type(obj2))):
assert len(obj1) == len(obj2), (
"Objects {} and {} are named "
"tuples with different lengths.".format(obj1, obj2))
for i in range(len(obj1)):
assert_equal(obj1[i], obj2[i])
else:
assert obj1 == obj2, "Objects {} and {} are different.".format(
obj1, obj2)
if sys.version_info >= (3, 0):
long_extras = [0, np.array([["hi", u"hi"], [1.3, 1]])]
else:
long_extras = [
long(0), # noqa: E501,F821
np.array([
["hi", u"hi"],
[1.3, long(1)] # noqa: E501,F821
])
]
PRIMITIVE_OBJECTS = [
0, 0.0, 0.9, 1 << 62, 1 << 100, 1 << 999, [1 << 100, [1 << 100]], "a",
string.printable, "\u262F", u"hello world",
u"\xff\xfe\x9c\x001\x000\x00", None, True, False, [], (), {},
np.int8(3),
np.int32(4),
np.int64(5),
np.uint8(3),
np.uint32(4),
np.uint64(5),
np.float32(1.9),
np.float64(1.9),
np.zeros([100, 100]),
np.random.normal(size=[100, 100]),
np.array(["hi", 3]),
np.array(["hi", 3], dtype=object)
] + long_extras
COMPLEX_OBJECTS = [
[[[[[[[[[[[[]]]]]]]]]]]],
{
"obj{}".format(i): np.random.normal(size=[100, 100])
for i in range(10)
},
# {(): {(): {(): {(): {(): {(): {(): {(): {(): {(): {
# (): {(): {}}}}}}}}}}}}},
(
(((((((((), ), ), ), ), ), ), ), ), ),
{
"a": {
"b": {
"c": {
"d": {}
}
}
}
},
]
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
class Bar(object):
def __init__(self):
for i, val in enumerate(PRIMITIVE_OBJECTS + COMPLEX_OBJECTS):
setattr(self, "field{}".format(i), val)
class Baz(object):
def __init__(self):
self.foo = Foo()
self.bar = Bar()
def method(self, arg):
pass
class Qux(object):
def __init__(self):
self.objs = [Foo(), Bar(), Baz()]
class SubQux(Qux):
def __init__(self):
Qux.__init__(self)
class CustomError(Exception):
pass
Point = collections.namedtuple("Point", ["x", "y"])
NamedTupleExample = collections.namedtuple(
"Example", "field1, field2, field3, field4, field5")
CUSTOM_OBJECTS = [
Exception("Test object."),
CustomError(),
Point(11, y=22),
Foo(),
Bar(),
Baz(), # Qux(), SubQux(),
NamedTupleExample(1, 1.0, "hi", np.zeros([3, 5]), [1, 2, 3]),
]
# Test dataclasses in Python 3.7.
if sys.version_info >= (3, 7):
from dataclasses import make_dataclass
DataClass0 = make_dataclass("DataClass0", [("number", int)])
CUSTOM_OBJECTS.append(DataClass0(number=3))
class CustomClass(object):
def __init__(self, value):
self.value = value
DataClass1 = make_dataclass("DataClass1", [("custom", CustomClass)])
class DataClass2(DataClass1):
@classmethod
def from_custom(cls, data):
custom = CustomClass(data)
return cls(custom)
def __reduce__(self):
return (self.from_custom, (self.custom.value, ))
CUSTOM_OBJECTS.append(DataClass2(custom=CustomClass(43)))
BASE_OBJECTS = PRIMITIVE_OBJECTS + COMPLEX_OBJECTS + CUSTOM_OBJECTS
LIST_OBJECTS = [[obj] for obj in BASE_OBJECTS]
TUPLE_OBJECTS = [(obj, ) for obj in BASE_OBJECTS]
# The check that type(obj).__module__ != "numpy" should be unnecessary, but
# otherwise this seems to fail on Mac OS X on Travis.
DICT_OBJECTS = ([{
obj: obj
} for obj in PRIMITIVE_OBJECTS if (
obj.__hash__ is not None and type(obj).__module__ != "numpy")] + [{
0: obj
} for obj in BASE_OBJECTS] + [{
Foo(123): Foo(456)
}])
RAY_TEST_OBJECTS = (
BASE_OBJECTS + LIST_OBJECTS + TUPLE_OBJECTS + DICT_OBJECTS)
@ray.remote
def f(x):
return x
# Check that we can pass arguments by value to remote functions and
# that they are uncorrupted.
for obj in RAY_TEST_OBJECTS:
assert_equal(obj, ray.get(f.remote(obj)))
assert_equal(obj, ray.get(ray.put(obj)))
def test_nested_functions(ray_start_regular):
# Make sure that remote functions can use other values that are defined
# after the remote function but before the first function invocation.
@ray.remote
def f():
return g(), ray.get(h.remote())
def g():
return 1
@ray.remote
def h():
return 2
assert ray.get(f.remote()) == (1, 2)
# Test a remote function that recursively calls itself.
@ray.remote
def factorial(n):
if n == 0:
return 1
return n * ray.get(factorial.remote(n - 1))
assert ray.get(factorial.remote(0)) == 1
assert ray.get(factorial.remote(1)) == 1
assert ray.get(factorial.remote(2)) == 2
assert ray.get(factorial.remote(3)) == 6
assert ray.get(factorial.remote(4)) == 24
assert ray.get(factorial.remote(5)) == 120
# Test remote functions that recursively call each other.
@ray.remote
def factorial_even(n):
assert n % 2 == 0
if n == 0:
return 1
return n * ray.get(factorial_odd.remote(n - 1))
@ray.remote
def factorial_odd(n):
assert n % 2 == 1
return n * ray.get(factorial_even.remote(n - 1))
assert ray.get(factorial_even.remote(4)) == 24
assert ray.get(factorial_odd.remote(5)) == 120
def test_ray_recursive_objects(ray_start_regular):
class ClassA(object):
pass
# Make a list that contains itself.
lst = []
lst.append(lst)
# Make an object that contains itself as a field.
a1 = ClassA()
a1.field = a1
# Make two objects that contain each other as fields.
a2 = ClassA()
a3 = ClassA()
a2.field = a3
a3.field = a2
# Make a dictionary that contains itself.
d1 = {}
d1["key"] = d1
# Create a list of recursive objects.
recursive_objects = [lst, a1, a2, a3, d1]
if ray.worker.USE_NEW_SERIALIZER:
# Serialize the recursive objects.
for obj in recursive_objects:
ray.put(obj)
else:
# Check that exceptions are thrown when we serialize the recursive
# objects.
for obj in recursive_objects:
with pytest.raises(Exception):
ray.put(obj)
def test_passing_arguments_by_value_out_of_the_box(ray_start_regular):
@ray.remote
def f(x):
return x
# Test passing lambdas.
def temp():
return 1
assert ray.get(f.remote(temp))() == 1
assert ray.get(f.remote(lambda x: x + 1))(3) == 4
# Test sets.
assert ray.get(f.remote(set())) == set()
s = {1, (1, 2, "hi")}
assert ray.get(f.remote(s)) == s
# Test types.
assert ray.get(f.remote(int)) == int
assert ray.get(f.remote(float)) == float
assert ray.get(f.remote(str)) == str
class Foo(object):
def __init__(self):
pass
# Make sure that we can put and get a custom type. Note that the result
# won't be "equal" to Foo.
ray.get(ray.put(Foo))
def test_putting_object_that_closes_over_object_id(ray_start_regular):
# This test is here to prevent a regression of
# https://github.com/ray-project/ray/issues/1317.
class Foo(object):
def __init__(self):
self.val = ray.put(0)
def method(self):
f
f = Foo()
ray.put(f)
def test_put_get(shutdown_only):
ray.init(num_cpus=0)
for i in range(100):
value_before = i * 10**6
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = i * 10**6 * 1.0
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = "h" * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
for i in range(100):
value_before = [1] * i
objectid = ray.put(value_before)
value_after = ray.get(objectid)
assert value_before == value_after
def test_custom_serializers(ray_start_regular):
class Foo(object):
def __init__(self):
self.x = 3
def custom_serializer(obj):
return 3, "string1", type(obj).__name__
def custom_deserializer(serialized_obj):
return serialized_obj, "string2"
ray.register_custom_serializer(
Foo, serializer=custom_serializer, deserializer=custom_deserializer)
assert ray.get(ray.put(Foo())) == ((3, "string1", Foo.__name__), "string2")
class Bar(object):
def __init__(self):
self.x = 3
ray.register_custom_serializer(
Bar, serializer=custom_serializer, deserializer=custom_deserializer)
@ray.remote
def f():
return Bar()
assert ray.get(f.remote()) == ((3, "string1", Bar.__name__), "string2")
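# Clarifying sketch (not part of the original test): register_custom_serializer
# replaces the default serialization for the class, and the deserializer receives
# exactly what the serializer returned. That is why the asserts above expect a
# nested value:
#
#   serialized = custom_serializer(Foo())        # (3, "string1", "Foo")
#   restored = custom_deserializer(serialized)   # ((3, "string1", "Foo"), "string2")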
def test_serialization_final_fallback(ray_start_regular):
pytest.importorskip("catboost")
# This test will only run when "catboost" is installed.
from catboost import CatBoostClassifier
model = CatBoostClassifier(
iterations=2,
depth=2,
learning_rate=1,
loss_function="Logloss",
logging_level="Verbose")
reconstructed_model = ray.get(ray.put(model))
assert set(model.get_params().items()) == set(
reconstructed_model.get_params().items())
def test_register_class(ray_start_2_cpus):
# Check that putting an object of a class that has not been explicitly
# registered succeeds (no exception should be raised).
class TempClass(object):
pass
ray.get(ray.put(TempClass()))
# Test passing custom classes into remote functions from the driver.
@ray.remote
def f(x):
return x
class Foo(object):
def __init__(self, value=0):
self.value = value
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
return other.value == self.value
foo = ray.get(f.remote(Foo(7)))
assert foo == Foo(7)
regex = re.compile(r"\d+\.\d*")
new_regex = ray.get(f.remote(regex))
# This seems to fail on the system Python 3 that comes with
# Ubuntu, so it is commented out for now:
# assert regex == new_regex
# Instead, we do this:
assert regex.pattern == new_regex.pattern
class TempClass1(object):
def __init__(self):
self.value = 1
# Test returning custom classes created on workers.
@ray.remote
def g():
class TempClass2(object):
def __init__(self):
self.value = 2
return TempClass1(), TempClass2()
object_1, object_2 = ray.get(g.remote())
assert object_1.value == 1
assert object_2.value == 2
# Test exporting custom class definitions from one worker to another
# when the worker is blocked in a get.
class NewTempClass(object):
def __init__(self, value):
self.value = value
@ray.remote
def h1(x):
return NewTempClass(x)
@ray.remote
def h2(x):
return ray.get(h1.remote(x))
assert ray.get(h2.remote(10)).value == 10
# Test registering multiple classes with the same name.
@ray.remote(num_return_vals=3)
def j():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = []
for _ in range(5):
results += j.remote()
for i in range(len(results) // 3):
c0, c1, c2 = ray.get(results[(3 * i):(3 * (i + 1))])
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
@ray.remote
def k():
class Class0(object):
def method0(self):
pass
c0 = Class0()
class Class0(object):
def method1(self):
pass
c1 = Class0()
class Class0(object):
def method2(self):
pass
c2 = Class0()
return c0, c1, c2
results = ray.get([k.remote() for _ in range(5)])
for c0, c1, c2 in results:
c0.method0()
c1.method1()
c2.method2()
assert not hasattr(c0, "method1")
assert not hasattr(c0, "method2")
assert not hasattr(c1, "method0")
assert not hasattr(c1, "method2")
assert not hasattr(c2, "method0")
assert not hasattr(c2, "method1")
def test_keyword_args(ray_start_regular):
@ray.remote
def keyword_fct1(a, b="hello"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct2(a="hello", b="world"):
return "{} {}".format(a, b)
@ray.remote
def keyword_fct3(a, b, c="hello", d="world"):
return "{} {} {} {}".format(a, b, c, d)
x = keyword_fct1.remote(1)
assert ray.get(x) == "1 hello"
x = keyword_fct1.remote(1, "hi")
assert ray.get(x) == "1 hi"
x = keyword_fct1.remote(1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct1.remote(a=1, b="world")
assert ray.get(x) == "1 world"
x = keyword_fct2.remote(a="w", b="hi")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(b="hi", a="w")
assert ray.get(x) == "w hi"
x = keyword_fct2.remote(a="w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote(b="hi")
assert ray.get(x) == "hello hi"
x = keyword_fct2.remote("w")
assert ray.get(x) == "w world"
x = keyword_fct2.remote("w", "hi")
assert ray.get(x) == "w hi"
x = keyword_fct3.remote(0, 1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(a=0, b=1, c="w", d="hi")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, d="hi", c="w")
assert ray.get(x) == "0 1 w hi"
x = keyword_fct3.remote(0, 1, c="w")
assert ray.get(x) == "0 1 w world"
x = keyword_fct3.remote(0, 1, d="hi")
assert ray.get(x) == "0 1 hello hi"
x = keyword_fct3.remote(0, 1)
assert ray.get(x) == "0 1 hello world"
x = keyword_fct3.remote(a=0, b=1)
assert ray.get(x) == "0 1 hello world"
# Check that we cannot pass invalid keyword arguments to functions.
@ray.remote
def f1():
return
@ray.remote
def f2(x, y=0, z=0):
return
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f1.remote(3)
with pytest.raises(Exception):
f1.remote(x=3)
with pytest.raises(Exception):
f2.remote(0, w=0)
with pytest.raises(Exception):
f2.remote(3, x=3)
# Make sure we get an exception if too many arguments are passed in.
with pytest.raises(Exception):
f2.remote(1, 2, 3, 4)
@ray.remote
def f3(x):
return x
assert ray.get(f3.remote(4)) == 4
def test_variable_number_of_args(shutdown_only):
@ray.remote
def varargs_fct1(*a):
return " ".join(map(str, a))
@ray.remote
def varargs_fct2(a, *b):
return " ".join(map(str, b))
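    # Descriptive note (inferred from the assertion on kwargs_exception_thrown
    # below): defining a remote function that takes **kwargs is expected to
    # raise at decoration time in this version of Ray, which is what the
    # try/except checks.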
try:
@ray.remote
def kwargs_throw_exception(**c):
return ()
kwargs_exception_thrown = False
except Exception:
kwargs_exception_thrown = True
ray.init(num_cpus=1)
x = varargs_fct1.remote(0, 1, 2)
assert ray.get(x) == "0 1 2"
x = varargs_fct2.remote(0, 1, 2)
assert ray.get(x) == "1 2"
assert kwargs_exception_thrown
@ray.remote
def f1(*args):
return args
@ray.remote
def f2(x, y, *args):
return x, y, args
assert ray.get(f1.remote()) == ()
assert ray.get(f1.remote(1)) == (1, )
assert ray.get(f1.remote(1, 2, 3)) == (1, 2, 3)
with pytest.raises(Exception):
f2.remote()
with pytest.raises(Exception):
f2.remote(1)
assert ray.get(f2.remote(1, 2)) == (1, 2, ())
assert ray.get(f2.remote(1, 2, 3)) == (1, 2, (3, ))
assert ray.get(f2.remote(1, 2, 3, 4)) == (1, 2, (3, 4))
def test_no_args(ray_start_regular):
    @ray.remote
    def no_op():
        pass

    ray.get(no_op.remote())
def test_defining_remote_functions(shutdown_only):
ray.init(num_cpus=3)
# Test that we can define a remote function in the shell.
@ray.remote
def f(x):
return x + 1
assert ray.get(f.remote(0)) == 1
# Test that we can redefine the remote function.
@ray.remote
def f(x):
return x + 10
while True:
val = ray.get(f.remote(0))
assert val in [1, 10]
if val == 10:
break
else:
logger.info("Still using old definition of f, trying again.")
# Test that we can close over plain old data.
data = [
np.zeros([3, 5]), (1, 2, "a"), [0.0, 1.0, 1 << 62], 1 << 60, {
"a": np.zeros(3)
}
]
@ray.remote
def g():
return data
ray.get(g.remote())
# Test that we can close over modules.
@ray.remote
def h():
return np.zeros([3, 5])
assert np.alltrue(ray.get(h.remote()) == np.zeros([3, 5]))
@ray.remote
def j():
return time.time()
ray.get(j.remote())
# Test that we can define remote functions that call other remote
# functions.
@ray.remote
def k(x):
return x + 1
@ray.remote
def k2(x):
return ray.get(k.remote(x))
@ray.remote
def m(x):
return ray.get(k2.remote(x))
assert ray.get(k.remote(1)) == 2
assert ray.get(k2.remote(1)) == 2
assert ray.get(m.remote(1)) == 2
def test_submit_api(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"Custom": 1})
@ray.remote
def f(n):
return list(range(n))
@ray.remote
def g():
return ray.get_gpu_ids()
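    # The calls below exercise the lower-level ``_remote`` API, which allows
    # per-invocation overrides of num_return_vals and resource requirements.
    # Note that num_return_vals=0 yields no ObjectID at all (None).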
assert f._remote([0], num_return_vals=0) is None
id1 = f._remote(args=[1], num_return_vals=1)
assert ray.get(id1) == [0]
id1, id2 = f._remote(args=[2], num_return_vals=2)
assert ray.get([id1, id2]) == [0, 1]
id1, id2, id3 = f._remote(args=[3], num_return_vals=3)
assert ray.get([id1, id2, id3]) == [0, 1, 2]
assert ray.get(
g._remote(args=[], num_cpus=1, num_gpus=1,
resources={"Custom": 1})) == [0]
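    # A task requesting a resource that does not exist in the cluster should
    # never become ready; the ray.wait call below confirms it stays pending.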
infeasible_id = g._remote(args=[], resources={"NonexistentCustom": 1})
assert ray.get(g._remote()) == []
ready_ids, remaining_ids = ray.wait([infeasible_id], timeout=0.05)
assert len(ready_ids) == 0
assert len(remaining_ids) == 1
@ray.remote
class Actor(object):
def __init__(self, x, y=0):
self.x = x
self.y = y
def method(self, a, b=0):
return self.x, self.y, a, b
def gpu_ids(self):
return ray.get_gpu_ids()
@ray.remote
class Actor2(object):
def __init__(self):
pass
def method(self):
pass
a = Actor._remote(
args=[0], kwargs={"y": 1}, num_gpus=1, resources={"Custom": 1})
a2 = Actor2._remote()
ray.get(a2.method._remote())
id1, id2, id3, id4 = a.method._remote(
args=["test"], kwargs={"b": 2}, num_return_vals=4)
assert ray.get([id1, id2, id3, id4]) == [0, 1, "test", 2]
def test_many_fractional_resources(shutdown_only):
ray.init(num_cpus=2, num_gpus=2, resources={"Custom": 2})
@ray.remote
def g():
return 1
@ray.remote
def f(block, accepted_resources):
true_resources = {
resource: value[0][1]
for resource, value in ray.get_resource_ids().items()
}
if block:
ray.get(g.remote())
return true_resources == accepted_resources
    # Check that the resources are assigned correctly.
result_ids = []
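    # Quantities are truncated to four decimal places below; this appears to
    # match the granularity Ray uses when accounting for fractional resources.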
for rand1, rand2, rand3 in np.random.uniform(size=(100, 3)):
resource_set = {"CPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_cpus=rand1))
resource_set = {"CPU": 1, "GPU": int(rand1 * 10000) / 10000}
result_ids.append(f._remote([False, resource_set], num_gpus=rand1))
resource_set = {"CPU": 1, "Custom": int(rand1 * 10000) / 10000}
result_ids.append(
f._remote([False, resource_set], resources={"Custom": rand1}))
resource_set = {
"CPU": int(rand1 * 10000) / 10000,
"GPU": int(rand2 * 10000) / 10000,
"Custom": int(rand3 * 10000) / 10000
}
result_ids.append(
f._remote(
[False, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
result_ids.append(
f._remote(
[True, resource_set],
num_cpus=rand1,
num_gpus=rand2,
resources={"Custom": rand3}))
assert all(ray.get(result_ids))
# Check that the available resources at the end are the same as the
# beginning.
stop_time = time.time() + 10
correct_available_resources = False
while time.time() < stop_time:
if (ray.available_resources()["CPU"] == 2.0
and ray.available_resources()["GPU"] == 2.0
and ray.available_resources()["Custom"] == 2.0):
correct_available_resources = True
break
if not correct_available_resources:
assert False, "Did not get correct available resources."
def test_get_multiple(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
assert ray.get(object_ids) == list(range(10))
# Get a random choice of object IDs with duplicates.
indices = list(np.random.choice(range(10), 5))
indices += indices
results = ray.get([object_ids[i] for i in indices])
assert results == indices
def test_get_multiple_experimental(ray_start_regular):
object_ids = [ray.put(i) for i in range(10)]
object_ids_tuple = tuple(object_ids)
assert ray.experimental.get(object_ids_tuple) == list(range(10))
object_ids_nparray = np.array(object_ids)
assert ray.experimental.get(object_ids_nparray) == list(range(10))
def test_get_dict(ray_start_regular):
d = {str(i): ray.put(i) for i in range(5)}
for i in range(5, 10):
d[str(i)] = i
result = ray.experimental.get(d)
expected = {str(i): i for i in range(10)}
assert result == expected
def test_wait(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
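    # By default ray.wait returns as soon as one object is ready
    # (num_returns=1), leaving the rest in the remaining list.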
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
ready_ids, remaining_ids = ray.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
ready_ids, remaining_ids = ray.wait(objectids, num_returns=4)
assert set(ready_ids) == set(objectids)
assert remaining_ids == []
objectids = [f.remote(0.5), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=1.75, num_returns=4)
assert time.time() - start_time < 2
assert len(ready_ids) == 3
assert len(remaining_ids) == 1
ray.wait(objectids)
objectids = [f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5)]
start_time = time.time()
ready_ids, remaining_ids = ray.wait(objectids, timeout=5.0)
assert time.time() - start_time < 5
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
# Verify that calling wait with duplicate object IDs throws an
# exception.
x = ray.put(1)
with pytest.raises(Exception):
ray.wait([x, x])
# Make sure it is possible to call wait with an empty list.
ready_ids, remaining_ids = ray.wait([])
assert ready_ids == []
assert remaining_ids == []
# Test semantics of num_returns with no timeout.
oids = [ray.put(i) for i in range(10)]
(found, rest) = ray.wait(oids, num_returns=2)
assert len(found) == 2
assert len(rest) == 8
# Verify that incorrect usage raises a TypeError.
x = ray.put(1)
with pytest.raises(TypeError):
ray.wait(x)
with pytest.raises(TypeError):
ray.wait(1)
with pytest.raises(TypeError):
ray.wait([1])
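
# Illustrative sketch (not part of the test suite): a common pattern built on
# the ray.wait semantics verified in test_wait above, processing results as
# they become ready. The helper name is ours; it only uses ray.wait/ray.get.
def _example_wait_as_completed(object_ids):
    remaining = list(object_ids)
    results = []
    while remaining:
        # Block until at least one object is ready, then fetch it.
        ready, remaining = ray.wait(remaining, num_returns=1)
        results.extend(ray.get(ready))
    return results
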
def test_wait_iterables(ray_start_regular):
@ray.remote
def f(delay):
time.sleep(delay)
return 1
objectids = (f.remote(1.0), f.remote(0.5), f.remote(0.5), f.remote(0.5))
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
objectids = np.array(
[f.remote(1.0),
f.remote(0.5),
f.remote(0.5),
f.remote(0.5)])
ready_ids, remaining_ids = ray.experimental.wait(objectids)
assert len(ready_ids) == 1
assert len(remaining_ids) == 3
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(l):
# The argument l should be a list containing one object ID.
ray.wait([l[0]])
@ray.remote
def h(l):
# The argument l should be a list containing one object ID.
ray.get(l[0])
# Make sure that multiple wait requests involving the same object ID
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ID all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with ray.profile(
"custom_event",
extra_data={"name": "custom name"}) as ray_prof:
ray_prof.set_attribute("key", "value")
ray.put(1)
object_id = f.remote()
ray.wait([object_id])
ray.get(object_id)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
if time.time() - start_time > timeout_seconds:
raise Exception("Timed out while waiting for information in "
"profile table.")
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"worker_idle",
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
"register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
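
# Illustrative sketch (not part of the test suite): the custom-event pattern
# checked above, wrapping a block of work in ray.profile so that it shows up
# as a "custom_event" entry in ray.timeline(). The helper name is ours.
@ray.remote
def _example_profiled_work():
    with ray.profile("custom_event", extra_data={"name": "custom name"}):
        # Some stand-in work to be profiled.
        return sum(range(1000))
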
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(address=cluster.address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_ids = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_id in object_ids:
ray.get([
f._remote(args=[object_id], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g():
return 2
@ray.remote # noqa: F811
def g():
return 3
@ray.remote # noqa: F811
def g():
return 4
@ray.remote # noqa: F811
def g():
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectID.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
# TODO(hchen): This test currently doesn't work in Python 2. This is likely
# because plasma client isn't thread-safe. This needs to be fixed from the
# Arrow side. See #4107 for relevant discussions.
@pytest.mark.skipif(six.PY2, reason="Doesn't work in Python 2.")
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
@ray.remote
class Echo(object):
def echo(self, value):
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor(object):
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
def test_free_objects_multi_node(ray_start_cluster):
# This test will do following:
# 1. Create 3 raylets that each hold an actor.
# 2. Each actor creates an object which is the deletion target.
# 3. Wait 0.1 second for the objects to be deleted.
# 4. Check that the deletion targets have been deleted.
# Caution: if remote functions are used instead of actor methods,
# one raylet may create more than one worker to execute the
# tasks, so the flushing operations may be executed in different
# workers and the plasma client holding the deletion target
# may not be flushed.
cluster = ray_start_cluster
config = json.dumps({"object_manager_repeated_push_delay_ms": 1000})
for i in range(3):
cluster.add_node(
num_cpus=1,
resources={"Custom{}".format(i): 1},
_internal_config=config)
ray.init(address=cluster.address)
class RawActor(object):
def get(self):
return ray.worker.global_worker.node.unique_id
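    # ray.remote can also be applied to a plain class after the fact; here the
    # custom resource requirements pin each actor to a specific node.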
ActorOnNode0 = ray.remote(resources={"Custom0": 1})(RawActor)
ActorOnNode1 = ray.remote(resources={"Custom1": 1})(RawActor)
ActorOnNode2 = ray.remote(resources={"Custom2": 1})(RawActor)
def create(actors):
a = actors[0].get.remote()
b = actors[1].get.remote()
c = actors[2].get.remote()
(l1, l2) = ray.wait([a, b, c], num_returns=3)
assert len(l1) == 3
assert len(l2) == 0
return (a, b, c)
def run_one_test(actors, local_only, delete_creating_tasks):
(a, b, c) = create(actors)
# The three objects should be generated on different object stores.
assert ray.get(a) != ray.get(b)
assert ray.get(a) != ray.get(c)
assert ray.get(c) != ray.get(b)
ray.internal.free(
[a, b, c],
local_only=local_only,
delete_creating_tasks=delete_creating_tasks)
# Wait for the objects to be deleted.
time.sleep(0.1)
return (a, b, c)
actors = [
ActorOnNode0.remote(),
ActorOnNode1.remote(),
ActorOnNode2.remote()
]
    # Case 1: run this with local_only=False. All 3 objects will be deleted.
(a, b, c) = run_one_test(actors, False, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=1)
# All the objects are deleted.
assert len(l1) == 0
assert len(l2) == 3
    # Case 2: run this with local_only=True. Only 1 object will be deleted.
(a, b, c) = run_one_test(actors, True, False)
(l1, l2) = ray.wait([a, b, c], timeout=0.01, num_returns=3)
# One object is deleted and 2 objects are not.
assert len(l1) == 2
assert len(l2) == 1
    # The deleted object is the one stored on the same node as the driver.
local_return = ray.worker.global_worker.node.unique_id
for object_id in l1:
assert ray.get(object_id) != local_return
    # Case 3: Test deleting the tasks that created the objects
    # (delete_creating_tasks).
(a, b, c) = run_one_test(actors, False, False)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() in task_table
(a, b, c) = run_one_test(actors, False, True)
task_table = ray.tasks()
for obj in [a, b, c]:
assert ray._raylet.compute_task_id(obj).hex() not in task_table
def test_local_mode(shutdown_only):
@ray.remote
def local_mode_f():
return np.array([0, 0])
@ray.remote
def local_mode_g(x):
x[0] = 1
return x
ray.init(local_mode=True)
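    # In local mode, tasks and actor methods run synchronously in the driver
    # process instead of being shipped to workers, which is why everything
    # below can be checked without a real cluster.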
@ray.remote
def f():
return np.ones([3, 4, 5])
xref = f.remote()
# Remote functions should return ObjectIDs.
assert isinstance(xref, ray.ObjectID)
assert np.alltrue(ray.get(xref) == np.ones([3, 4, 5]))
y = np.random.normal(size=[11, 12])
# Check that ray.get(ray.put) is the identity.
assert np.alltrue(y == ray.get(ray.put(y)))
    # Make sure objects are immutable. This example is why we need to copy
    # arguments before passing them into remote functions in local mode.
aref = local_mode_f.remote()
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
bref = local_mode_g.remote(ray.get(aref))
# Make sure local_mode_g does not mutate aref.
assert np.alltrue(ray.get(aref) == np.array([0, 0]))
assert np.alltrue(ray.get(bref) == np.array([1, 0]))
# wait should return the first num_returns values passed in as the
# first list and the remaining values as the second list
num_returns = 5
object_ids = [ray.put(i) for i in range(20)]
ready, remaining = ray.wait(
object_ids, num_returns=num_returns, timeout=None)
assert ready == object_ids[:num_returns]
assert remaining == object_ids[num_returns:]
# Check that ray.put() and ray.internal.free() work in local mode.
v1 = np.ones(10)
v2 = np.zeros(10)
k1 = ray.put(v1)
assert np.alltrue(v1 == ray.get(k1))
k2 = ray.put(v2)
assert np.alltrue(v2 == ray.get(k2))
ray.internal.free([k1, k2])
with pytest.raises(Exception):
ray.get(k1)
with pytest.raises(Exception):
ray.get(k2)
# Should fail silently.
ray.internal.free([k1, k2])
# Test actors in LOCAL_MODE.
@ray.remote
class LocalModeTestClass(object):
def __init__(self, array):
self.array = array
def set_array(self, array):
self.array = array
def get_array(self):
return self.array
def modify_and_set_array(self, array):
array[0] = -1
self.array = array
@ray.method(num_return_vals=3)
def returns_multiple(self):
return 1, 2, 3
test_actor = LocalModeTestClass.remote(np.arange(10))
obj = test_actor.get_array.remote()
assert isinstance(obj, ray.ObjectID)
assert np.alltrue(ray.get(obj) == np.arange(10))
test_array = np.arange(10)
# Remote actor functions should not mutate arguments
test_actor.modify_and_set_array.remote(test_array)
assert np.alltrue(test_array == np.arange(10))
# Remote actor functions should keep state
test_array[0] = -1
assert np.alltrue(test_array == ray.get(test_actor.get_array.remote()))
# Check that actor handles work in local mode.
@ray.remote
def use_actor_handle(handle):
array = np.ones(10)
handle.set_array.remote(array)
assert np.alltrue(array == ray.get(handle.get_array.remote()))
ray.get(use_actor_handle.remote(test_actor))
# Check that exceptions are deferred until ray.get().
exception_str = "test_basic remote task exception"
@ray.remote
def throws():
raise Exception(exception_str)
obj = throws.remote()
with pytest.raises(Exception, match=exception_str):
ray.get(obj)
# Check that multiple return values are handled properly.
@ray.remote(num_return_vals=3)
def returns_multiple():
return 1, 2, 3
obj1, obj2, obj3 = returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
obj1, obj2, obj3 = test_actor.returns_multiple.remote()
assert ray.get(obj1) == 1
assert ray.get(obj2) == 2
assert ray.get(obj3) == 3
assert ray.get([obj1, obj2, obj3]) == [1, 2, 3]
@ray.remote(num_return_vals=2)
def returns_multiple_throws():
raise Exception(exception_str)
obj1, obj2 = returns_multiple_throws.remote()
    with pytest.raises(Exception, match=exception_str):
        ray.get(obj1)
with pytest.raises(Exception, match=exception_str):
ray.get(obj2)
def test_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=2)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
time_buffer = 2
# At most 10 copies of this can run at once.
@ray.remote(num_cpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(10)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(11)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_cpus=3)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
@ray.remote(num_gpus=1)
def f(n):
time.sleep(n)
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(2)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(3)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5) for _ in range(4)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_multi_resource_constraints(shutdown_only):
num_workers = 20
ray.init(num_cpus=10, num_gpus=10)
@ray.remote(num_cpus=0)
def get_worker_id():
time.sleep(0.1)
return os.getpid()
# Attempt to wait for all of the workers to start up.
while True:
if len(
set(
ray.get([
get_worker_id.remote() for _ in range(num_workers)
]))) == num_workers:
break
@ray.remote(num_cpus=1, num_gpus=9)
def f(n):
time.sleep(n)
@ray.remote(num_cpus=9, num_gpus=1)
def g(n):
time.sleep(n)
time_buffer = 2
start_time = time.time()
ray.get([f.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 0.5 + time_buffer
assert duration > 0.5
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
start_time = time.time()
ray.get([f.remote(0.5), f.remote(0.5), g.remote(0.5), g.remote(0.5)])
duration = time.time() - start_time
assert duration < 1 + time_buffer
assert duration > 1
def test_gpu_ids(shutdown_only):
num_gpus = 10
ray.init(num_cpus=10, num_gpus=num_gpus)
def get_gpu_ids(num_gpus_per_worker):
time.sleep(0.1)
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == num_gpus_per_worker
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
for gpu_id in gpu_ids:
assert gpu_id in range(num_gpus)
return gpu_ids
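    # Create remote functions that request 0, 1, 2, 4, and 5 GPUs respectively.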
f0 = ray.remote(num_gpus=0)(lambda: get_gpu_ids(0))
f1 = ray.remote(num_gpus=1)(lambda: get_gpu_ids(1))
f2 = ray.remote(num_gpus=2)(lambda: get_gpu_ids(2))
f4 = ray.remote(num_gpus=4)(lambda: get_gpu_ids(4))
f5 = ray.remote(num_gpus=5)(lambda: get_gpu_ids(5))
# Wait for all workers to start up.
@ray.remote
def f():
time.sleep(0.1)
return os.getpid()
start_time = time.time()
while True:
if len(set(ray.get([f.remote() for _ in range(10)]))) == 10:
break
if time.time() > start_time + 10:
raise Exception("Timed out while waiting for workers to start "
"up.")
list_of_ids = ray.get([f0.remote() for _ in range(10)])
assert list_of_ids == 10 * [[]]
list_of_ids = ray.get([f1.remote() for _ in range(10)])
set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
assert set_of_ids == {(i, ) for i in range(10)}
list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
assert set(all_ids) == set(range(10))
# There are only 10 GPUs, and each task uses 5 GPUs, so there should only
# be 2 tasks scheduled at a given time.
t1 = time.time()
ray.get([f5.remote() for _ in range(20)])
assert time.time() - t1 >= 10 * 0.1
# Test that actors have CUDA_VISIBLE_DEVICES set properly.
@ray.remote
class Actor0(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 0
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
@ray.remote(num_gpus=1)
class Actor1(object):
def __init__(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
# Set self.x to make sure that we got here.
self.x = 1
def test(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert (os.environ["CUDA_VISIBLE_DEVICES"] == ",".join(
[str(i) for i in gpu_ids]))
return self.x
a0 = Actor0.remote()
ray.get(a0.test.remote())
a1 = Actor1.remote()
ray.get(a1.test.remote())
def test_zero_cpus(shutdown_only):
ray.init(num_cpus=0)
# We should be able to execute a task that requires 0 CPU resources.
@ray.remote(num_cpus=0)
def f():
return 1
ray.get(f.remote())
# We should be able to create an actor that requires 0 CPU resources.
@ray.remote(num_cpus=0)
class Actor(object):
def method(self):
pass
a = Actor.remote()
x = a.method.remote()
ray.get(x)
def test_zero_cpus_actor(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
node_id = ray.worker.global_worker.node.unique_id
@ray.remote
class Foo(object):
def method(self):
return ray.worker.global_worker.node.unique_id
# Make sure tasks and actors run on the remote raylet.
a = Foo.remote()
assert ray.get(a.method.remote()) != node_id
def test_fractional_resources(shutdown_only):
ray.init(num_cpus=6, num_gpus=3, resources={"Custom": 1})
@ray.remote(num_gpus=0.5)
class Foo1(object):
def method(self):
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
return gpu_ids[0]
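    # Six actors requesting half a GPU each should pack two per device across
    # the three available GPUs.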
foos = [Foo1.remote() for _ in range(6)]
gpu_ids = ray.get([f.method.remote() for f in foos])
for i in range(3):
assert gpu_ids.count(i) == 2
del foos
@ray.remote
class Foo2(object):
def method(self):
pass
# Create an actor that requires 0.7 of the custom resource.
f1 = Foo2._remote([], {}, resources={"Custom": 0.7})
ray.get(f1.method.remote())
# Make sure that we cannot create an actor that requires 0.7 of the
# custom resource. TODO(rkn): Re-enable this once ray.wait is
# implemented.
f2 = Foo2._remote([], {}, resources={"Custom": 0.7})
ready, _ = ray.wait([f2.method.remote()], timeout=0.5)
assert len(ready) == 0
    # Make sure we can start an actor that requires only 0.3 of the custom
# resource.
f3 = Foo2._remote([], {}, resources={"Custom": 0.3})
ray.get(f3.method.remote())
del f1, f3
# Make sure that we get exceptions if we submit tasks that require a
# fractional number of resources greater than 1.
@ray.remote(num_cpus=1.5)
def test():
pass
with pytest.raises(ValueError):
test.remote()
with pytest.raises(ValueError):
Foo2._remote([], {}, resources={"Custom": 1.5})
def test_multiple_raylets(ray_start_cluster):
# This test will define a bunch of tasks that can only be assigned to
# specific raylets, and we will check that they are assigned
# to the correct raylets.
cluster = ray_start_cluster
cluster.add_node(num_cpus=11, num_gpus=0)
cluster.add_node(num_cpus=5, num_gpus=5)
cluster.add_node(num_cpus=10, num_gpus=1)
ray.init(address=cluster.address)
cluster.wait_for_nodes()
# Define a bunch of remote functions that all return the socket name of
# the plasma store. Since there is a one-to-one correspondence between
# plasma stores and raylets (at least right now), this can be
# used to identify which raylet the task was assigned to.
# This must be run on the zeroth raylet.
@ray.remote(num_cpus=11)
def run_on_0():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first raylet.
@ray.remote(num_gpus=2)
def run_on_1():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the second raylet.
@ray.remote(num_cpus=6, num_gpus=1)
def run_on_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This can be run anywhere.
@ray.remote(num_cpus=0, num_gpus=0)
def run_on_0_1_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the first or second raylet.
@ray.remote(num_gpus=1)
def run_on_1_2():
return ray.worker.global_worker.node.plasma_store_socket_name
# This must be run on the zeroth or second raylet.
@ray.remote(num_cpus=8)
def run_on_0_2():
return ray.worker.global_worker.node.plasma_store_socket_name
def run_lots_of_tasks():
names = []
results = []
for i in range(100):
index = np.random.randint(6)
if index == 0:
names.append("run_on_0")
results.append(run_on_0.remote())
elif index == 1:
names.append("run_on_1")
results.append(run_on_1.remote())
elif index == 2:
names.append("run_on_2")
results.append(run_on_2.remote())
elif index == 3:
names.append("run_on_0_1_2")
results.append(run_on_0_1_2.remote())
elif index == 4:
names.append("run_on_1_2")
results.append(run_on_1_2.remote())
elif index == 5:
names.append("run_on_0_2")
results.append(run_on_0_2.remote())
return names, results
client_table = ray.nodes()
store_names = []
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 0
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 5
]
store_names += [
client["ObjectStoreSocketName"] for client in client_table
if client["Resources"].get("GPU", 0) == 1
]
assert len(store_names) == 3
def validate_names_and_results(names, results):
for name, result in zip(names, ray.get(results)):
if name == "run_on_0":
assert result in [store_names[0]]
elif name == "run_on_1":
assert result in [store_names[1]]
elif name == "run_on_2":
assert result in [store_names[2]]
elif name == "run_on_0_1_2":
assert (result in [
store_names[0], store_names[1], store_names[2]
])
elif name == "run_on_1_2":
assert result in [store_names[1], store_names[2]]
elif name == "run_on_0_2":
assert result in [store_names[0], store_names[2]]
else:
raise Exception("This should be unreachable.")
assert set(ray.get(results)) == set(store_names)
names, results = run_lots_of_tasks()
validate_names_and_results(names, results)
# Make sure the same thing works when this is nested inside of a task.
@ray.remote
def run_nested1():
names, results = run_lots_of_tasks()
return names, results
@ray.remote
def run_nested2():
names, results = ray.get(run_nested1.remote())
return names, results
names, results = ray.get(run_nested2.remote())
validate_names_and_results(names, results)
def test_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=3, resources={"CustomResource": 0})
cluster.add_node(num_cpus=3, resources={"CustomResource": 1})
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource": 1})
def h():
ray.get([f.remote() for _ in range(5)])
return ray.worker.global_worker.node.unique_id
# The f tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
node_id = ray.worker.global_worker.node.unique_id
# The g tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([g.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != node_id
    # Make sure that resource bookkeeping works when a task that uses a
    # custom resource gets blocked.
ray.get([h.remote() for _ in range(5)])
def test_two_custom_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 1,
"CustomResource2": 2
})
cluster.add_node(
num_cpus=3, resources={
"CustomResource1": 3,
"CustomResource2": 4
})
ray.init(address=cluster.address)
@ray.remote(resources={"CustomResource1": 1})
def f():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource2": 1})
def g():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 1, "CustomResource2": 3})
def h():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource1": 4})
def j():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
@ray.remote(resources={"CustomResource3": 1})
def k():
time.sleep(0.001)
return ray.worker.global_worker.node.unique_id
# The f and g tasks should be scheduled on both raylets.
assert len(set(ray.get([f.remote() for _ in range(50)]))) == 2
assert len(set(ray.get([g.remote() for _ in range(50)]))) == 2
node_id = ray.worker.global_worker.node.unique_id
# The h tasks should be scheduled only on the second raylet.
raylet_ids = set(ray.get([h.remote() for _ in range(50)]))
assert len(raylet_ids) == 1
assert list(raylet_ids)[0] != node_id
# Make sure that tasks with unsatisfied custom resource requirements do
# not get scheduled.
ready_ids, remaining_ids = ray.wait([j.remote(), k.remote()], timeout=0.5)
assert ready_ids == []
def test_many_custom_resources(shutdown_only):
num_custom_resources = 10000
total_resources = {
str(i): np.random.randint(1, 7)
for i in range(num_custom_resources)
}
ray.init(num_cpus=5, resources=total_resources)
def f():
return 1
remote_functions = []
for _ in range(20):
num_resources = np.random.randint(0, num_custom_resources + 1)
permuted_resources = np.random.permutation(
num_custom_resources)[:num_resources]
random_resources = {
str(i): total_resources[str(i)]
for i in permuted_resources
}
remote_function = ray.remote(resources=random_resources)(f)
remote_functions.append(remote_function)
remote_functions.append(ray.remote(f))
remote_functions.append(ray.remote(resources=total_resources)(f))
results = []
for remote_function in remote_functions:
results.append(remote_function.remote())
results.append(remote_function.remote())
results.append(remote_function.remote())
ray.get(results)
# TODO: 5 retry attempts may be too few for Travis and we may need to
# increase it if this test begins to be flaky on Travis.
def test_zero_capacity_deletion_semantics(shutdown_only):
ray.init(num_cpus=2, num_gpus=1, resources={"test_resource": 1})
def test():
resources = ray.available_resources()
MAX_RETRY_ATTEMPTS = 5
retry_count = 0
del resources["memory"]
del resources["object_store_memory"]
while resources and retry_count < MAX_RETRY_ATTEMPTS:
time.sleep(0.1)
resources = ray.available_resources()
retry_count += 1
if retry_count >= MAX_RETRY_ATTEMPTS:
raise RuntimeError(
"Resources were available even after five retries.")
return resources
function = ray.remote(
num_cpus=2, num_gpus=1, resources={"test_resource": 1})(test)
cluster_resources = ray.get(function.remote())
    # All cluster resources should be utilized, so cluster_resources
    # must be empty.
assert cluster_resources == {}
@pytest.fixture
def save_gpu_ids_shutdown_only():
    # Record the current value of this environment variable so that we can
# reset it after the test.
original_gpu_ids = os.environ.get("CUDA_VISIBLE_DEVICES", None)
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
# Reset the environment variable.
if original_gpu_ids is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = original_gpu_ids
else:
del os.environ["CUDA_VISIBLE_DEVICES"]
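
# With CUDA_VISIBLE_DEVICES restricted to a subset of devices, the GPU ids
# that Ray hands out should come only from that subset.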
def test_specific_gpus(save_gpu_ids_shutdown_only):
allowed_gpu_ids = [4, 5, 6]
os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
[str(i) for i in allowed_gpu_ids])
ray.init(num_gpus=3)
@ray.remote(num_gpus=1)
def f():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 1
assert gpu_ids[0] in allowed_gpu_ids
@ray.remote(num_gpus=2)
def g():
gpu_ids = ray.get_gpu_ids()
assert len(gpu_ids) == 2
assert gpu_ids[0] in allowed_gpu_ids
assert gpu_ids[1] in allowed_gpu_ids
ray.get([f.remote() for _ in range(100)])
ray.get([g.remote() for _ in range(100)])
def test_blocking_tasks(ray_start_regular):
@ray.remote
def f(i, j):
return (i, j)
@ray.remote
def g(i):
# Each instance of g submits and blocks on the result of another
# remote task.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.get(object_ids)
@ray.remote
def h(i):
        # Each instance of h submits and blocks on the result of another
        # remote task using ray.wait.
object_ids = [f.remote(i, j) for j in range(2)]
return ray.wait(object_ids, num_returns=len(object_ids))
ray.get([h.remote(i) for i in range(4)])
@ray.remote
def _sleep(i):
time.sleep(0.01)
return (i)
@ray.remote
def sleep():
# Each instance of sleep submits and blocks on the result of
# another remote task, which takes some time to execute.
ray.get([_sleep.remote(i) for i in range(10)])
ray.get(sleep.remote())
def test_max_call_tasks(ray_start_regular):
@ray.remote(max_calls=1)
def f():
return os.getpid()
pid = ray.get(f.remote())
ray.tests.utils.wait_for_pid_to_exit(pid)
@ray.remote(max_calls=2)
def f():
return os.getpid()
pid1 = ray.get(f.remote())
pid2 = ray.get(f.remote())
assert pid1 == pid2
ray.tests.utils.wait_for_pid_to_exit(pid1)
def attempt_to_load_balance(remote_function,
args,
total_tasks,
num_nodes,
minimum_count,
num_attempts=100):
attempts = 0
while attempts < num_attempts:
locations = ray.get(
[remote_function.remote(*args) for _ in range(total_tasks)])
names = set(locations)
counts = [locations.count(name) for name in names]
logger.info("Counts are {}.".format(counts))
if (len(names) == num_nodes
and all(count >= minimum_count for count in counts)):
break
attempts += 1
assert attempts < num_attempts
def test_load_balancing(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets
# in a roughly equal manner.
cluster = ray_start_cluster
num_nodes = 3
num_cpus = 7
for _ in range(num_nodes):
cluster.add_node(num_cpus=num_cpus)
ray.init(address=cluster.address)
@ray.remote
def f():
time.sleep(0.01)
return ray.worker.global_worker.node.unique_id
attempt_to_load_balance(f, [], 100, num_nodes, 10)
attempt_to_load_balance(f, [], 1000, num_nodes, 100)
def test_load_balancing_with_dependencies(ray_start_cluster):
# This test ensures that tasks are being assigned to all raylets in a
# roughly equal manner even when the tasks have dependencies.
cluster = ray_start_cluster
num_nodes = 3
for _ in range(num_nodes):
cluster.add_node(num_cpus=1)
ray.init(address=cluster.address)
@ray.remote
def f(x):
time.sleep(0.010)
return ray.worker.global_worker.node.unique_id
# This object will be local to one of the raylets. Make sure
# this doesn't prevent tasks from being scheduled on other raylets.
x = ray.put(np.zeros(1000000))
attempt_to_load_balance(f, [x], 100, num_nodes, 25)
def wait_for_num_tasks(num_tasks, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.tasks()) >= num_tasks:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
def wait_for_num_objects(num_objects, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(ray.objects()) >= num_objects:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for global state.")
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_global_state_api(shutdown_only):
error_message = ("The ray global state API cannot be used "
"before ray.init has been called.")
with pytest.raises(Exception, match=error_message):
ray.objects()
with pytest.raises(Exception, match=error_message):
ray.tasks()
with pytest.raises(Exception, match=error_message):
ray.nodes()
with pytest.raises(Exception, match=error_message):
ray.jobs()
ray.init(num_cpus=5, num_gpus=3, resources={"CustomResource": 1})
assert ray.cluster_resources()["CPU"] == 5
assert ray.cluster_resources()["GPU"] == 3
assert ray.cluster_resources()["CustomResource"] == 1
assert ray.objects() == {}
job_id = ray.utils.compute_job_id_from_driver(
ray.WorkerID(ray.worker.global_worker.worker_id))
driver_task_id = ray.worker.global_worker.current_task_id.hex()
# One task is put in the task table which corresponds to this driver.
wait_for_num_tasks(1)
task_table = ray.tasks()
assert len(task_table) == 1
assert driver_task_id == list(task_table.keys())[0]
task_spec = task_table[driver_task_id]["TaskSpec"]
nil_unique_id_hex = ray.UniqueID.nil().hex()
nil_actor_id_hex = ray.ActorID.nil().hex()
assert task_spec["TaskID"] == driver_task_id
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == []
assert task_spec["JobID"] == job_id.hex()
assert task_spec["FunctionID"] == nil_unique_id_hex
assert task_spec["ReturnObjectIDs"] == []
client_table = ray.nodes()
node_ip_address = ray.worker.global_worker.node_ip_address
assert len(client_table) == 1
assert client_table[0]["NodeManagerAddress"] == node_ip_address
@ray.remote
def f(*xs):
return 1
x_id = ray.put(1)
result_id = f.remote(1, "hi", x_id)
# Wait for one additional task to complete.
wait_for_num_tasks(1 + 1)
task_table = ray.tasks()
assert len(task_table) == 1 + 1
task_id_set = set(task_table.keys())
task_id_set.remove(driver_task_id)
task_id = list(task_id_set)[0]
task_spec = task_table[task_id]["TaskSpec"]
assert task_spec["ActorID"] == nil_actor_id_hex
assert task_spec["Args"] == [1, "hi", x_id]
assert task_spec["JobID"] == job_id.hex()
assert task_spec["ReturnObjectIDs"] == [result_id]
assert task_table[task_id] == ray.tasks(task_id)
# Wait for two objects, one for the x_id and one for result_id.
wait_for_num_objects(2)
def wait_for_object_table():
timeout = 10
start_time = time.time()
while time.time() - start_time < timeout:
object_table = ray.objects()
tables_ready = (object_table[x_id]["ManagerIDs"] is not None and
object_table[result_id]["ManagerIDs"] is not None)
if tables_ready:
return
time.sleep(0.1)
raise Exception("Timed out while waiting for object table to "
"update.")
object_table = ray.objects()
assert len(object_table) == 2
assert object_table[x_id] == ray.objects(x_id)
object_table_entry = ray.objects(result_id)
assert object_table[result_id] == object_table_entry
job_table = ray.jobs()
assert len(job_table) == 1
assert job_table[0]["JobID"] == job_id.hex()
assert job_table[0]["NodeManagerAddress"] == node_ip_address
# TODO(rkn): Pytest actually has tools for capturing stdout and stderr, so we
# should use those, but they seem to conflict with Ray's use of faulthandler.
class CaptureOutputAndError(object):
"""Capture stdout and stderr of some span.
This can be used as follows.
captured = {}
with CaptureOutputAndError(captured):
# Do stuff.
# Access captured["out"] and captured["err"].
"""
def __init__(self, captured_output_and_error):
if sys.version_info >= (3, 0):
import io
self.output_buffer = io.StringIO()
self.error_buffer = io.StringIO()
else:
import cStringIO
self.output_buffer = cStringIO.StringIO()
self.error_buffer = cStringIO.StringIO()
self.captured_output_and_error = captured_output_and_error
def __enter__(self):
sys.stdout.flush()
sys.stderr.flush()
self.old_stdout = sys.stdout
self.old_stderr = sys.stderr
sys.stdout = self.output_buffer
sys.stderr = self.error_buffer
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self.old_stdout
sys.stderr = self.old_stderr
self.captured_output_and_error["out"] = self.output_buffer.getvalue()
self.captured_output_and_error["err"] = self.error_buffer.getvalue()
def test_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=True)
@ray.remote
def f():
# It's important to make sure that these print statements occur even
# without calling sys.stdout.flush() and sys.stderr.flush().
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
for i in range(200):
assert str(i) in output_lines
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
def test_not_logging_to_driver(shutdown_only):
ray.init(num_cpus=1, log_to_driver=False)
@ray.remote
def f():
for i in range(100):
print(i)
print(100 + i, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
captured = {}
with CaptureOutputAndError(captured):
ray.get(f.remote())
time.sleep(1)
output_lines = captured["out"]
assert len(output_lines) == 0
# TODO(rkn): Check that no additional logs appear beyond what we expect
# and that there are no duplicate logs. Once we address the issue
# described in https://github.com/ray-project/ray/pull/5462, we should
# also check that nothing is logged to stderr.
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't have a Python API yet.")
def test_workers(shutdown_only):
num_workers = 3
ray.init(num_cpus=num_workers)
@ray.remote
def f():
return id(ray.worker.global_worker), os.getpid()
# Wait until all of the workers have started.
worker_ids = set()
while len(worker_ids) != num_workers:
worker_ids = set(ray.get([f.remote() for _ in range(10)]))
def test_specific_job_id():
dummy_driver_id = ray.JobID.from_int(1)
ray.init(num_cpus=1, job_id=dummy_driver_id)
# in driver
assert dummy_driver_id == ray._get_runtime_context().current_driver_id
# in worker
@ray.remote
def f():
return ray._get_runtime_context().current_driver_id
assert dummy_driver_id == ray.get(f.remote())
ray.shutdown()
def test_object_id_properties():
id_bytes = b"00112233445566778899"
object_id = ray.ObjectID(id_bytes)
assert object_id.binary() == id_bytes
object_id = ray.ObjectID.nil()
assert object_id.is_nil()
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(id_bytes + b"1234")
with pytest.raises(ValueError, match=r".*needs to have length 20.*"):
ray.ObjectID(b"0123456789")
object_id = ray.ObjectID.from_random()
assert not object_id.is_nil()
assert object_id.binary() != id_bytes
id_dumps = pickle.dumps(object_id)
id_from_dumps = pickle.loads(id_dumps)
assert id_from_dumps == object_id
file_prefix = "test_object_id_properties"
# Make sure the ids are fork safe.
def write(index):
        hex_id = ray.ObjectID.from_random().hex()
        with open("{}{}".format(file_prefix, index), "w") as fo:
            fo.write(hex_id)
def read(index):
with open("{}{}".format(file_prefix, index), "r") as fi:
for line in fi:
return line
processes = [Process(target=write, args=(_, )) for _ in range(4)]
for process in processes:
process.start()
for process in processes:
process.join()
hexes = {read(i) for i in range(4)}
[os.remove("{}{}".format(file_prefix, i)) for i in range(4)]
assert len(hexes) == 4
@pytest.fixture
def shutdown_only_with_initialization_check():
yield None
# The code after the yield will run as teardown code.
ray.shutdown()
assert not ray.is_initialized()
def test_initialized(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0)
assert ray.is_initialized()
def test_initialized_local_mode(shutdown_only_with_initialization_check):
assert not ray.is_initialized()
ray.init(num_cpus=0, local_mode=True)
assert ray.is_initialized()
def test_wait_reconstruction(shutdown_only):
ray.init(num_cpus=1, object_store_memory=int(10**8))
@ray.remote
def f():
return np.zeros(6 * 10**7, dtype=np.uint8)
x_id = f.remote()
ray.wait([x_id])
ray.wait([f.remote()])
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ready_ids, _ = ray.wait([x_id])
assert len(ready_ids) == 1
def test_ray_setproctitle(ray_start_2_cpus):
@ray.remote
class UniqueName(object):
def __init__(self):
assert setproctitle.getproctitle() == "ray_UniqueName:__init__()"
def f(self):
assert setproctitle.getproctitle() == "ray_UniqueName:f()"
@ray.remote
def unique_1():
assert setproctitle.getproctitle(
) == "ray_worker:ray.tests.test_basic.unique_1()"
actor = UniqueName.remote()
ray.get(actor.f.remote())
ray.get(unique_1.remote())
def test_duplicate_error_messages(shutdown_only):
ray.init(num_cpus=0)
driver_id = ray.WorkerID.nil()
error_data = ray.gcs_utils.construct_error_message(driver_id, "test",
"message", 0)
# Push the same message to the GCS twice (they are the same because we
# do not include a timestamp).
r = ray.worker.global_worker.redis_client
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
# Before https://github.com/ray-project/ray/pull/3316 this would
# give an error
r.execute_command("RAY.TABLE_APPEND",
ray.gcs_utils.TablePrefix.Value("ERROR_INFO"),
ray.gcs_utils.TablePubsub.Value("ERROR_INFO_PUBSUB"),
driver_id.binary(), error_data)
@pytest.mark.skipif(
os.getenv("TRAVIS") is None,
reason="This test should only be run on Travis.")
def test_ray_stack(ray_start_2_cpus):
def unique_name_1():
time.sleep(1000)
@ray.remote
def unique_name_2():
time.sleep(1000)
@ray.remote
def unique_name_3():
unique_name_1()
unique_name_2.remote()
unique_name_3.remote()
success = False
start_time = time.time()
while time.time() - start_time < 30:
# Attempt to parse the "ray stack" call.
output = ray.utils.decode(subprocess.check_output(["ray", "stack"]))
if ("unique_name_1" in output and "unique_name_2" in output
and "unique_name_3" in output):
success = True
break
if not success:
raise Exception("Failed to find necessary information with "
"'ray stack'")
def test_pandas_parquet_serialization():
# Only test this if pandas is installed
pytest.importorskip("pandas")
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, "parquet-test")
pd.DataFrame({"col1": [0, 1], "col2": [0, 1]}).to_parquet(filename)
with open(os.path.join(tempdir, "parquet-compression"), "wb") as f:
table = pa.Table.from_arrays([pa.array([1, 2, 3])], ["hello"])
pq.write_table(table, f, compression="lz4")
# Clean up
shutil.rmtree(tempdir)
def test_socket_dir_not_existing(shutdown_only):
random_name = ray.ObjectID.from_random().hex()
temp_raylet_socket_dir = "/tmp/ray/tests/{}".format(random_name)
temp_raylet_socket_name = os.path.join(temp_raylet_socket_dir,
"raylet_socket")
ray.init(num_cpus=1, raylet_socket_name=temp_raylet_socket_name)
def test_raylet_is_robust_to_random_messages(ray_start_regular):
node_manager_address = None
node_manager_port = None
for client in ray.nodes():
if "NodeManagerAddress" in client:
node_manager_address = client["NodeManagerAddress"]
node_manager_port = client["NodeManagerPort"]
assert node_manager_address
assert node_manager_port
# Try to bring down the node manager:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((node_manager_address, node_manager_port))
s.send(1000 * b"asdf")
@ray.remote
def f():
return 1
assert ray.get(f.remote()) == 1
def test_non_ascii_comment(ray_start_regular):
@ray.remote
def f():
# 日本語 Japanese comment
return 1
assert ray.get(f.remote()) == 1
@ray.remote
def echo(x):
return x
@ray.remote
class WithConstructor(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class WithoutConstructor(object):
def set_data(self, data):
self.data = data
def get_data(self):
return self.data
class BaseClass(object):
def __init__(self, data):
self.data = data
def get_data(self):
return self.data
@ray.remote
class DerivedClass(BaseClass):
def __init__(self, data):
# Due to different behaviors of super in Python 2 and Python 3,
# we use BaseClass directly here.
BaseClass.__init__(self, data)
def test_load_code_from_local(shutdown_only):
ray.init(load_code_from_local=True, num_cpus=4)
message = "foo"
# Test normal function.
assert ray.get(echo.remote(message)) == message
# Test actor class with constructor.
actor = WithConstructor.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test actor class without constructor.
actor = WithoutConstructor.remote()
actor.set_data.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test derived actor class.
actor = DerivedClass.remote(1)
assert ray.get(actor.get_data.remote()) == 1
# Test using ray.remote decorator on raw classes.
base_actor_class = ray.remote(num_cpus=1)(BaseClass)
base_actor = base_actor_class.remote(message)
assert ray.get(base_actor.get_data.remote()) == message
def test_shutdown_disconnect_global_state():
ray.init(num_cpus=0)
ray.shutdown()
with pytest.raises(Exception) as e:
ray.objects()
assert str(e.value).endswith("ray.init has been called.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_put_pins_object(ray_start_object_store_memory):
x_id = ray.put("HI")
x_copy = ray.ObjectID(x_id.binary())
assert ray.get(x_copy) == "HI"
# x cannot be evicted since x_id pins it
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
assert ray.get(x_id) == "HI"
assert ray.get(x_copy) == "HI"
# once x_id is deleted nothing pins the object (x_copy does not pin it), so it can be evicted
del x_id
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(x_copy)
# weakref put
y_id = ray.put("HI", weakref=True)
for _ in range(10):
ray.put(np.zeros(10 * 1024 * 1024))
with pytest.raises(ray.exceptions.UnreconstructableError):
ray.get(y_id)
@ray.remote
def check_no_buffer_ref(x):
assert x[0].get_buffer_ref() is None
z_id = ray.put("HI")
assert z_id.get_buffer_ref() is not None
ray.get(check_no_buffer_ref.remote([z_id]))
@pytest.mark.parametrize(
"ray_start_object_store_memory", [150 * 1024 * 1024], indirect=True)
def test_redis_lru_with_set(ray_start_object_store_memory):
x = np.zeros(8 * 10**7, dtype=np.uint8)
x_id = ray.put(x, weakref=True)
# Remove the object from the object table to simulate Redis LRU eviction.
removed = False
start_time = time.time()
while time.time() < start_time + 10:
if ray.state.state.redis_clients[0].delete(b"OBJECT" +
x_id.binary()) == 1:
removed = True
break
assert removed
# Now evict the object from the object store.
ray.put(x) # This should not crash.
def test_decorated_function(ray_start_regular):
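# __ray_invocation_decorator__ wraps task submission: the wrapper receives the
# positional-argument tuple and the kwargs dict of the call, and whatever it
# returns becomes the return value of .remote() (here an (object_id, kwargs) pair).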
def function_invocation_decorator(f):
def new_f(args, kwargs):
# Reverse the arguments.
return f(args[::-1], {"d": 5}), kwargs
return new_f
def f(a, b, c, d=None):
return a, b, c, d
f.__ray_invocation_decorator__ = function_invocation_decorator
f = ray.remote(f)
result_id, kwargs = f.remote(1, 2, 3, d=4)
assert kwargs == {"d": 4}
assert ray.get(result_id) == (3, 2, 1, 5)
def test_get_postprocess(ray_start_regular):
def get_postprocessor(object_ids, values):
return [value for value in values if value > 0]
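# Post-get hooks receive the requested object IDs and the fetched values and may
# transform what ray.get ultimately returns; this one drops values that are not > 0.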
ray.worker.global_worker._post_get_hooks.append(get_postprocessor)
assert ray.get(
[ray.put(i) for i in [0, 1, 3, 5, -1, -3, 4]]) == [1, 3, 5, 4]
def test_export_after_shutdown(ray_start_regular):
# This test checks that we can use actor and remote function definitions
# across multiple Ray sessions.
@ray.remote
def f():
pass
@ray.remote
class Actor(object):
def method(self):
pass
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray and use the remote function and actor again.
ray.init(num_cpus=1)
ray.get(f.remote())
a = Actor.remote()
ray.get(a.method.remote())
ray.shutdown()
# Start Ray again and make sure that these definitions can be exported from
# workers.
ray.init(num_cpus=2)
@ray.remote
def export_definitions_from_worker(remote_function, actor_class):
ray.get(remote_function.remote())
actor_handle = actor_class.remote()
ray.get(actor_handle.method.remote())
ray.get(export_definitions_from_worker.remote(f, Actor))
def test_invalid_unicode_in_worker_log(shutdown_only):
info = ray.init(num_cpus=1)
logs_dir = os.path.join(info["session_dir"], "logs")
# Wait till first worker log file is created.
while True:
log_file_paths = glob.glob("{}/worker*.out".format(logs_dir))
if len(log_file_paths) == 0:
time.sleep(0.2)
else:
break
with open(log_file_paths[0], "wb") as f:
f.write(b"\xe5abc\nline2\nline3\n")
f.write(b"\xe5abc\nline2\nline3\n")
f.write(b"\xe5abc\nline2\nline3\n")
f.flush()
# Wait till the log monitor reads the file.
time.sleep(1.0)
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
@pytest.mark.skip(reason="This test is too expensive to run.")
def test_move_log_files_to_old(shutdown_only):
info = ray.init(num_cpus=1)
logs_dir = os.path.join(info["session_dir"], "logs")
@ray.remote
class Actor(object):
def f(self):
print("function f finished")
# First create a temporary actor.
actors = [
Actor.remote() for i in range(ray_constants.LOG_MONITOR_MAX_OPEN_FILES)
]
ray.get([a.f.remote() for a in actors])
# Make sure no log files are in the "old" directory before the actors
# are killed.
assert len(glob.glob("{}/old/worker*.out".format(logs_dir))) == 0
# Now kill the actors so the files get moved to logs/old/.
[a.__ray_terminate__.remote() for a in actors]
while True:
log_file_paths = glob.glob("{}/old/worker*.out".format(logs_dir))
if len(log_file_paths) > 0:
with open(log_file_paths[0], "r") as f:
assert "function f finished\n" in f.readlines()
break
# Make sure that nothing has died.
assert ray.services.remaining_processes_alive()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES",
"RAY_USE_NEW_GCS",
"TRAVIS"
] |
[]
|
["CUDA_VISIBLE_DEVICES", "RAY_USE_NEW_GCS", "TRAVIS"]
|
python
| 3 | 0 | |
tests/integration_test.go
|
// +build integration
package tests
import (
"encoding/json"
"io/ioutil"
"os"
"os/user"
"path"
"testing"
"github.com/deis/deis/tests/utils"
)
var (
gitCloneCmd = "if [ ! -d {{.ExampleApp}} ] ; then git clone https://github.com/deis/{{.ExampleApp}}.git ; fi"
gitRemoveCmd = "git remote remove deis"
gitPushCmd = "git push deis master"
)
// Client represents the client data structure in ~/.deis/client.json
type Client struct {
Controller string `json:"controller"`
Username string `json:"username"`
Token string `json:"token"`
}
func TestGlobal(t *testing.T) {
params := utils.GetGlobalConfig()
utils.Execute(t, authRegisterCmd, params, false, "")
clientTest(t, params)
utils.Execute(t, keysAddCmd, params, false, "")
}
func clientTest(t *testing.T, params *utils.DeisTestConfig) {
user, err := user.Current()
if err != nil {
t.Fatal(err)
}
profile := os.Getenv("DEIS_PROFILE")
if profile == "" {
profile = "client"
}
clientJsonFilePath := ".deis/" + profile + ".json"
data, err := ioutil.ReadFile(path.Join(user.HomeDir, clientJsonFilePath))
if err != nil {
t.Fatal(err)
}
client := &Client{}
json.Unmarshal(data, &client)
if client.Token == "" {
t.Error("token not present in client.json")
}
if client.Controller == "" {
t.Error("controller endpoint not present in client.json")
}
if client.Username == "" {
t.Error("username not present in client.json")
}
}
|
[
"\"DEIS_PROFILE\""
] |
[] |
[
"DEIS_PROFILE"
] |
[]
|
["DEIS_PROFILE"]
|
go
| 1 | 0 | |
code/helper.go
|
package main
import (
"bytes"
"io"
"log"
"os"
)
func ExtractEnvironmentVariables() {
awsRegion = os.Getenv("AWS_REGION")
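// LM_ACCESS_KEY_ARN and LM_ACCESS_ID_ARN hold secret ARNs; getSecretValue
// (defined elsewhere in this package) resolves them to the actual credential values.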
accessKey = getSecretValue(os.Getenv("LM_ACCESS_KEY_ARN"))
if accessKey == "" {
log.Fatalf("Missing LM_ACCESS_KEY_ARN env var")
}
accessID = getSecretValue(os.Getenv("LM_ACCESS_ID_ARN"))
if accessID == "" {
log.Fatalf("Missing LM_ACCESS_ID_ARN env var")
}
lmHost = os.Getenv("LM_HOST")
if lmHost == "" {
log.Fatalf("Missing LM_HOST env var")
}
if os.Getenv("DEBUG") == "true" {
debug = true
} else {
debug = false
}
scrubRegex = os.Getenv("LM_SCRUB_REGEX")
}
func readCloserToString(body io.ReadCloser) string {
buf := new(bytes.Buffer)
_, _ = buf.ReadFrom(body)
return buf.String()
}
func handleFatalError(errStr string, err error) {
if err != nil {
log.Fatalf("%s: %s", errStr, err)
}
}
|
[
"\"AWS_REGION\"",
"\"LM_ACCESS_KEY_ARN\"",
"\"LM_ACCESS_ID_ARN\"",
"\"LM_HOST\"",
"\"DEBUG\"",
"\"LM_SCRUB_REGEX\""
] |
[] |
[
"LM_HOST",
"AWS_REGION",
"LM_ACCESS_ID_ARN",
"LM_ACCESS_KEY_ARN",
"LM_SCRUB_REGEX",
"DEBUG"
] |
[]
|
["LM_HOST", "AWS_REGION", "LM_ACCESS_ID_ARN", "LM_ACCESS_KEY_ARN", "LM_SCRUB_REGEX", "DEBUG"]
|
go
| 6 | 0 | |
bangpy-ops/utils/generate_all_ops_header.py
|
# Copyright (C) [2021] by Cambricon, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
def main():
if len(sys.argv) == 1:
raise ValueError("Please input at least one operator header file.")
header_lists = sys.argv[1].split(",")
header_lists = [i for i in header_lists if i != ""]
build_path = os.environ.get("BANGPY_BUILD_PATH", "")
if build_path == "":
raise ValueError("Could not find BANGPY_BUILD_PATH environment variable.")
build_path += "/" if build_path[-1] != "/" else ""
with open(build_path + "mlu_ops.h", "w") as mlu_ops:
for i, h in enumerate(header_lists):
with open(h, "r") as one_operator_header:
lines = one_operator_header.readlines()
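# Blank out the first line of every header after the first one, so only a single
# copy of that leading line ends up in the combined mlu_ops.h.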
if i != 0:
lines[0] = "\n"
mlu_ops.writelines(lines)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"BANGPY_BUILD_PATH"
] |
[]
|
["BANGPY_BUILD_PATH"]
|
python
| 1 | 0 | |
libpod/runtime.go
|
package libpod
import (
"context"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
is "github.com/containers/image/v5/storage"
"github.com/containers/image/v5/types"
"github.com/containers/libpod/libpod/config"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/events"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/libpod/lock"
sysreg "github.com/containers/libpod/pkg/registries"
"github.com/containers/libpod/pkg/rootless"
"github.com/containers/libpod/pkg/util"
"github.com/containers/storage"
"github.com/cri-o/ocicni/pkg/ocicni"
"github.com/docker/docker/pkg/namesgenerator"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// A RuntimeOption is a functional option which alters the Runtime created by
// NewRuntime
type RuntimeOption func(*Runtime) error
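// Option constructors elsewhere in this package return such closures;
// newRuntimeFromConfig applies them in order to mutate the Runtime before makeRuntime runs.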
// Runtime is the core libpod runtime
type Runtime struct {
config *config.Config
state State
store storage.Store
storageService *storageService
imageContext *types.SystemContext
defaultOCIRuntime OCIRuntime
ociRuntimes map[string]OCIRuntime
netPlugin ocicni.CNIPlugin
conmonPath string
imageRuntime *image.Runtime
lockManager lock.Manager
// doRenumber indicates that the runtime should perform a lock renumber
// during initialization.
// Once the runtime has been initialized and returned, this variable is
// unused.
doRenumber bool
doMigrate bool
// System migrate can move containers to a new runtime.
// We make no promises that these migrated containers work on the new
// runtime, though.
migrateRuntime string
// valid indicates whether the runtime is ready to use.
// valid is set to true when a runtime is returned from GetRuntime(),
// and remains true until the runtime is shut down (rendering its
// storage unusable). When valid is false, the runtime cannot be used.
valid bool
lock sync.RWMutex
// mechanism to read and write event logs
eventer events.Eventer
// noStore indicates whether we need to interact with a store or not
noStore bool
}
// SetXdgDirs ensures the XDG_RUNTIME_DIR and XDG_CONFIG_HOME environment variables are set.
// containers/image uses XDG_RUNTIME_DIR to locate the auth file; XDG_CONFIG_HOME is
// used for the libpod.conf configuration file.
func SetXdgDirs() error {
if !rootless.IsRootless() {
return nil
}
// Setup XDG_RUNTIME_DIR
runtimeDir := os.Getenv("XDG_RUNTIME_DIR")
if runtimeDir == "" {
var err error
runtimeDir, err = util.GetRuntimeDir()
if err != nil {
return err
}
}
if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil {
return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR")
}
if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" {
sessionAddr := filepath.Join(runtimeDir, "bus")
if _, err := os.Stat(sessionAddr); err == nil {
os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr))
}
}
// Setup XDG_CONFIG_HOME
if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" {
cfgHomeDir, err := util.GetRootlessConfigHomeDir()
if err != nil {
return err
}
if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil {
return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME")
}
}
return nil
}
// NewRuntime creates a new container runtime
// Options can be passed to override the default configuration for the runtime
func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) {
return newRuntimeFromConfig(ctx, "", options...)
}
// NewRuntimeFromConfig creates a new container runtime using the given
// configuration file for its default configuration. Passed RuntimeOption
// functions can be used to mutate this configuration further.
// An error will be returned if the configuration file at the given path does
// not exist or cannot be loaded
func NewRuntimeFromConfig(ctx context.Context, userConfigPath string, options ...RuntimeOption) (runtime *Runtime, err error) {
if userConfigPath == "" {
return nil, errors.New("invalid configuration file specified")
}
return newRuntimeFromConfig(ctx, userConfigPath, options...)
}
func newRuntimeFromConfig(ctx context.Context, userConfigPath string, options ...RuntimeOption) (runtime *Runtime, err error) {
runtime = new(Runtime)
conf, err := config.NewConfig(userConfigPath)
if err != nil {
return nil, err
}
runtime.config = conf
// Overwrite config with user-given configuration options
for _, opt := range options {
if err := opt(runtime); err != nil {
return nil, errors.Wrapf(err, "error configuring runtime")
}
}
if err := makeRuntime(ctx, runtime); err != nil {
return nil, err
}
return runtime, nil
}
func getLockManager(runtime *Runtime) (lock.Manager, error) {
var err error
var manager lock.Manager
switch runtime.config.LockType {
case "file":
lockPath := filepath.Join(runtime.config.TmpDir, "locks")
manager, err = lock.OpenFileLockManager(lockPath)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
manager, err = lock.NewFileLockManager(lockPath)
if err != nil {
return nil, errors.Wrapf(err, "failed to get new file lock manager")
}
} else {
return nil, err
}
}
case "", "shm":
lockPath := define.DefaultSHMLockPath
if rootless.IsRootless() {
lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID())
}
// Set up the lock manager
manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.NumLocks)
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
if err != nil {
return nil, errors.Wrapf(err, "failed to get new shm lock manager")
}
} else if errors.Cause(err) == syscall.ERANGE && runtime.doRenumber {
logrus.Debugf("Number of locks does not match - removing old locks")
// ERANGE indicates a lock numbering mismatch.
// Since we're renumbering, this is not fatal.
// Remove the earlier set of locks and recreate.
if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil {
return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath)
}
manager, err = lock.NewSHMLockManager(lockPath, runtime.config.NumLocks)
if err != nil {
return nil, err
}
} else {
return nil, err
}
}
default:
return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.LockType)
}
return manager, nil
}
// Make a new runtime based on the given configuration
// Sets up containers/storage, state store, OCI runtime
func makeRuntime(ctx context.Context, runtime *Runtime) (err error) {
// Find a working conmon binary
if cPath, err := runtime.config.FindConmon(); err != nil {
return err
} else {
runtime.conmonPath = cPath
}
// Make the static files directory if it does not exist
if err := os.MkdirAll(runtime.config.StaticDir, 0700); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating runtime static files directory %s",
runtime.config.StaticDir)
}
}
// Set up the state.
//
// TODO - if we further break out the state implementation into
// libpod/state, the config could take care of the code below. It
// would further allow to move the types and consts into a coherent
// package.
switch runtime.config.StateType {
case define.InMemoryStateStore:
state, err := NewInMemoryState()
if err != nil {
return err
}
runtime.state = state
case define.SQLiteStateStore:
return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled")
case define.BoltDBStateStore:
dbPath := filepath.Join(runtime.config.StaticDir, "bolt_state.db")
state, err := NewBoltState(dbPath, runtime)
if err != nil {
return err
}
runtime.state = state
default:
return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.StateType)
}
// Grab config from the database so we can reset some defaults
dbConfig, err := runtime.state.GetDBConfig()
if err != nil {
return errors.Wrapf(err, "error retrieving runtime configuration from database")
}
if err := runtime.config.MergeDBConfig(dbConfig); err != nil {
return errors.Wrapf(err, "error merging database config into runtime config")
}
logrus.Debugf("Using graph driver %s", runtime.config.StorageConfig.GraphDriverName)
logrus.Debugf("Using graph root %s", runtime.config.StorageConfig.GraphRoot)
logrus.Debugf("Using run root %s", runtime.config.StorageConfig.RunRoot)
logrus.Debugf("Using static dir %s", runtime.config.StaticDir)
logrus.Debugf("Using tmp dir %s", runtime.config.TmpDir)
logrus.Debugf("Using volume path %s", runtime.config.VolumePath)
// Validate our config against the database, now that we've set our
// final storage configuration
if err := runtime.state.ValidateDBConfig(runtime); err != nil {
return err
}
if err := runtime.state.SetNamespace(runtime.config.Namespace); err != nil {
return errors.Wrapf(err, "error setting libpod namespace in state")
}
logrus.Debugf("Set libpod namespace to %q", runtime.config.Namespace)
// Set up containers/storage
var store storage.Store
if os.Geteuid() != 0 {
logrus.Debug("Not configuring container store")
} else if runtime.noStore {
logrus.Debug("No store required. Not opening container store.")
} else {
if err := runtime.configureStore(); err != nil {
return err
}
}
defer func() {
if err != nil && store != nil {
// Don't forcibly shut down
// We could be opening a store in use by another libpod
_, err2 := store.Shutdown(false)
if err2 != nil {
logrus.Errorf("Error removing store for partially-created runtime: %s", err2)
}
}
}()
// Setup the eventer
eventer, err := runtime.newEventer()
if err != nil {
return err
}
runtime.eventer = eventer
if runtime.imageRuntime != nil {
runtime.imageRuntime.Eventer = eventer
}
// Set up containers/image
runtime.imageContext = &types.SystemContext{
SignaturePolicyPath: runtime.config.SignaturePolicyPath,
}
// Create the tmpDir
if err := os.MkdirAll(runtime.config.TmpDir, 0751); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating tmpdir %s", runtime.config.TmpDir)
}
}
// Create events log dir
if err := os.MkdirAll(filepath.Dir(runtime.config.EventsLogFilePath), 0700); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating events dirs %s", filepath.Dir(runtime.config.EventsLogFilePath))
}
}
// Make lookup tables for runtime support
supportsJSON := make(map[string]bool)
supportsNoCgroups := make(map[string]bool)
for _, r := range runtime.config.RuntimeSupportsJSON {
supportsJSON[r] = true
}
for _, r := range runtime.config.RuntimeSupportsNoCgroups {
supportsNoCgroups[r] = true
}
// Get us at least one working OCI runtime.
runtime.ociRuntimes = make(map[string]OCIRuntime)
// Is the old runtime_path defined?
if runtime.config.RuntimePath != nil {
// Don't print twice in rootless mode.
if os.Geteuid() == 0 {
logrus.Warningf("The configuration is using `runtime_path`, which is deprecated and will be removed in future. Please use `runtimes` and `runtime`")
logrus.Warningf("If you are using both `runtime_path` and `runtime`, the configuration from `runtime_path` is used")
}
if len(runtime.config.RuntimePath) == 0 {
return errors.Wrapf(define.ErrInvalidArg, "empty runtime path array passed")
}
name := filepath.Base(runtime.config.RuntimePath[0])
json := supportsJSON[name]
nocgroups := supportsNoCgroups[name]
ociRuntime, err := newConmonOCIRuntime(name, runtime.config.RuntimePath, runtime.conmonPath, runtime.config, json, nocgroups)
if err != nil {
return err
}
runtime.ociRuntimes[name] = ociRuntime
runtime.defaultOCIRuntime = ociRuntime
}
// Initialize remaining OCI runtimes
for name, paths := range runtime.config.OCIRuntimes {
json := supportsJSON[name]
nocgroups := supportsNoCgroups[name]
ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.config, json, nocgroups)
if err != nil {
// Don't fatally error.
// This will allow us to ship configs including optional
// runtimes that might not be installed (crun, kata).
// Only a warnf so default configs don't spec errors.
logrus.Warnf("Error initializing configured OCI runtime %s: %v", name, err)
continue
}
runtime.ociRuntimes[name] = ociRuntime
}
// Do we have a default OCI runtime?
if runtime.config.OCIRuntime != "" {
// If the string starts with / it's a path to a runtime
// executable.
if strings.HasPrefix(runtime.config.OCIRuntime, "/") {
name := filepath.Base(runtime.config.OCIRuntime)
json := supportsJSON[name]
nocgroups := supportsNoCgroups[name]
ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.OCIRuntime}, runtime.conmonPath, runtime.config, json, nocgroups)
if err != nil {
return err
}
runtime.ociRuntimes[name] = ociRuntime
runtime.defaultOCIRuntime = ociRuntime
} else {
ociRuntime, ok := runtime.ociRuntimes[runtime.config.OCIRuntime]
if !ok {
return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.OCIRuntime)
}
runtime.defaultOCIRuntime = ociRuntime
}
}
// Do we have at least one valid OCI runtime?
if len(runtime.ociRuntimes) == 0 {
return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured")
}
// Do we have a default runtime?
if runtime.defaultOCIRuntime == nil {
return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured")
}
// Make the per-boot files directory if it does not exist
if err := os.MkdirAll(runtime.config.TmpDir, 0755); err != nil {
// The directory is allowed to exist
if !os.IsExist(err) {
return errors.Wrapf(err, "error creating runtime temporary files directory %s",
runtime.config.TmpDir)
}
}
// Set up the CNI net plugin
if !rootless.IsRootless() {
netPlugin, err := ocicni.InitCNI(runtime.config.CNIDefaultNetwork, runtime.config.CNIConfigDir, runtime.config.CNIPluginDir...)
if err != nil {
return errors.Wrapf(err, "error configuring CNI network plugin")
}
runtime.netPlugin = netPlugin
}
// We now need to see if the system has restarted
// We check for the presence of a file in our tmp directory to verify this
// This check must be locked to prevent races
runtimeAliveLock := filepath.Join(runtime.config.TmpDir, "alive.lck")
runtimeAliveFile := filepath.Join(runtime.config.TmpDir, "alive")
aliveLock, err := storage.GetLockfile(runtimeAliveLock)
if err != nil {
return errors.Wrapf(err, "error acquiring runtime init lock")
}
// Acquire the lock and hold it until we return
// This ensures that no two processes will be in runtime.refresh at once
// TODO: we can't close the FD in this lock, so we should keep it around
// and use it to lock important operations
aliveLock.Lock()
doRefresh := false
defer func() {
if aliveLock.Locked() {
aliveLock.Unlock()
}
}()
_, err = os.Stat(runtimeAliveFile)
if err != nil {
// If we need to refresh, then it is safe to assume there are
// no containers running. Create immediately a namespace, as
// we will need to access the storage.
if os.Geteuid() != 0 {
aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec.
pausePid, err := util.GetRootlessPauseProcessPidPath()
if err != nil {
return errors.Wrapf(err, "could not get pause process pid file path")
}
became, ret, err := rootless.BecomeRootInUserNS(pausePid)
if err != nil {
return err
}
if became {
os.Exit(ret)
}
}
// If the file doesn't exist, we need to refresh the state
// This will trigger on first use as well, but refreshing an
// empty state only creates a single file
// As such, it's not really a performance concern
if os.IsNotExist(err) {
doRefresh = true
} else {
return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile)
}
}
runtime.lockManager, err = getLockManager(runtime)
if err != nil {
return err
}
// If we're renumbering locks, do it now.
// It breaks out of normal runtime init, and will not return a valid
// runtime.
if runtime.doRenumber {
if err := runtime.renumberLocks(); err != nil {
return err
}
}
// If we need to refresh the state, do it now - things are guaranteed to
// be set up by now.
if doRefresh {
// Ensure we have a store before refresh occurs
if runtime.store == nil {
if err := runtime.configureStore(); err != nil {
return err
}
}
if err2 := runtime.refresh(runtimeAliveFile); err2 != nil {
return err2
}
}
// Mark the runtime as valid - ready to be used, cannot be modified
// further
runtime.valid = true
if runtime.doMigrate {
if err := runtime.migrate(ctx); err != nil {
return err
}
}
return nil
}
// GetConfig returns a copy of the configuration used by the runtime
func (r *Runtime) GetConfig() (*config.Config, error) {
r.lock.RLock()
defer r.lock.RUnlock()
if !r.valid {
return nil, define.ErrRuntimeStopped
}
config := new(config.Config)
// Copy so the caller won't be able to modify the actual config
if err := JSONDeepCopy(r.config, config); err != nil {
return nil, errors.Wrapf(err, "error copying config")
}
return config, nil
}
// DeferredShutdown shuts down the runtime without exposing any
// errors. This is only meant to be used when the runtime is being
// shutdown within a defer statement; else use Shutdown
func (r *Runtime) DeferredShutdown(force bool) {
_ = r.Shutdown(force)
}
// Shutdown shuts down the runtime and associated containers and storage
// If force is true, containers and mounted storage will be shut down before
// cleaning up; if force is false, an error will be returned if there are
// still containers running or mounted
func (r *Runtime) Shutdown(force bool) error {
r.lock.Lock()
defer r.lock.Unlock()
if !r.valid {
return define.ErrRuntimeStopped
}
r.valid = false
// Shutdown all containers if --force is given
if force {
ctrs, err := r.state.AllContainers()
if err != nil {
logrus.Errorf("Error retrieving containers from database: %v", err)
} else {
for _, ctr := range ctrs {
if err := ctr.StopWithTimeout(define.CtrRemoveTimeout); err != nil {
logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err)
}
}
}
}
var lastError error
// If no store was requested, it can be nil and there is no need to
// attempt to shut it down
if r.store != nil {
if _, err := r.store.Shutdown(force); err != nil {
lastError = errors.Wrapf(err, "Error shutting down container storage")
}
}
if err := r.state.Close(); err != nil {
if lastError != nil {
logrus.Errorf("%v", lastError)
}
lastError = err
}
return lastError
}
// Reconfigures the runtime after a reboot
// Refreshes the state, recreating temporary files
// Does not check validity as the runtime is not valid until after this has run
func (r *Runtime) refresh(alivePath string) error {
logrus.Debugf("Podman detected system restart - performing state refresh")
// First clear the state in the database
if err := r.state.Refresh(); err != nil {
return err
}
// Next refresh the state of all containers to recreate dirs and
// namespaces, and all the pods to recreate cgroups.
// Containers, pods, and volumes must also reacquire their locks.
ctrs, err := r.state.AllContainers()
if err != nil {
return errors.Wrapf(err, "error retrieving all containers from state")
}
pods, err := r.state.AllPods()
if err != nil {
return errors.Wrapf(err, "error retrieving all pods from state")
}
vols, err := r.state.AllVolumes()
if err != nil {
return errors.Wrapf(err, "error retrieving all volumes from state")
}
// No locks are taken during pod, volume, and container refresh.
// Furthermore, the pod/volume/container refresh() functions are not
// allowed to take locks themselves.
// We cannot assume that any pod/volume/container has a valid lock until
// after this function has returned.
// The runtime alive lock should suffice to provide mutual exclusion
// until this has run.
for _, ctr := range ctrs {
if err := ctr.refresh(); err != nil {
logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err)
}
}
for _, pod := range pods {
if err := pod.refresh(); err != nil {
logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err)
}
}
for _, vol := range vols {
if err := vol.refresh(); err != nil {
logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err)
}
}
// Create a file indicating the runtime is alive and ready
file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644)
if err != nil {
return errors.Wrapf(err, "error creating runtime status file %s", alivePath)
}
defer file.Close()
r.newSystemEvent(events.Refresh)
return nil
}
// Info returns the store and host information
func (r *Runtime) Info() ([]define.InfoData, error) {
info := []define.InfoData{}
// get host information
hostInfo, err := r.hostInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting host info")
}
info = append(info, define.InfoData{Type: "host", Data: hostInfo})
// get store information
storeInfo, err := r.storeInfo()
if err != nil {
return nil, errors.Wrapf(err, "error getting store info")
}
info = append(info, define.InfoData{Type: "store", Data: storeInfo})
reg, err := sysreg.GetRegistries()
if err != nil {
return nil, errors.Wrapf(err, "error getting registries")
}
registries := make(map[string]interface{})
registries["search"] = reg
ireg, err := sysreg.GetInsecureRegistries()
if err != nil {
return nil, errors.Wrapf(err, "error getting registries")
}
registries["insecure"] = ireg
breg, err := sysreg.GetBlockedRegistries()
if err != nil {
return nil, errors.Wrapf(err, "error getting registries")
}
registries["blocked"] = breg
info = append(info, define.InfoData{Type: "registries", Data: registries})
return info, nil
}
// generateName generates a unique name for a container or pod.
func (r *Runtime) generateName() (string, error) {
for {
name := namesgenerator.GetRandomName(0)
// Make sure container with this name does not exist
if _, err := r.state.LookupContainer(name); err == nil {
continue
} else {
if errors.Cause(err) != define.ErrNoSuchCtr {
return "", err
}
}
// Make sure pod with this name does not exist
if _, err := r.state.LookupPod(name); err == nil {
continue
} else {
if errors.Cause(err) != define.ErrNoSuchPod {
return "", err
}
}
return name, nil
}
// The code should never reach here.
}
// Configure store and image runtime
func (r *Runtime) configureStore() error {
store, err := storage.GetStore(r.config.StorageConfig)
if err != nil {
return err
}
r.store = store
is.Transport.SetStore(store)
// Set up a storage service for creating container root filesystems from
// images
storageService, err := getStorageService(r.store)
if err != nil {
return err
}
r.storageService = storageService
ir := image.NewImageRuntimeFromStore(r.store)
ir.SignaturePolicyPath = r.config.SignaturePolicyPath
ir.EventsLogFilePath = r.config.EventsLogFilePath
ir.EventsLogger = r.config.EventsLogger
r.imageRuntime = ir
return nil
}
// ImageRuntime returns the image runtime for image operations.
// If WithNoStore() was used, no image runtime will be available, and this
// function will return nil.
func (r *Runtime) ImageRuntime() *image.Runtime {
return r.imageRuntime
}
// SystemContext returns the image context
func (r *Runtime) SystemContext() *types.SystemContext {
return r.imageContext
}
// GetOCIRuntimePath retrieves the path of the default OCI runtime.
func (r *Runtime) GetOCIRuntimePath() string {
return r.defaultOCIRuntime.Path()
}
|
[
"\"XDG_RUNTIME_DIR\"",
"\"DBUS_SESSION_BUS_ADDRESS\"",
"\"XDG_CONFIG_HOME\""
] |
[] |
[
"XDG_RUNTIME_DIR",
"DBUS_SESSION_BUS_ADDRESS",
"XDG_CONFIG_HOME"
] |
[]
|
["XDG_RUNTIME_DIR", "DBUS_SESSION_BUS_ADDRESS", "XDG_CONFIG_HOME"]
|
go
| 3 | 0 | |
cli/commands/env/env.go
|
package env
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"text/template"
"github.com/sensu/sensu-go/cli"
"github.com/spf13/cobra"
)
const (
envTmpl = `{{ .Prefix }}SENSU_API_URL{{ .Delimiter }}{{ .APIURL }}{{ .LineEnding }}` +
`{{ .Prefix }}SENSU_NAMESPACE{{ .Delimiter }}{{ .Namespace }}{{ .LineEnding }}` +
`{{ .Prefix }}SENSU_FORMAT{{ .Delimiter }}{{ .Format }}{{ .LineEnding }}` +
`{{ .Prefix }}SENSU_ACCESS_TOKEN{{ .Delimiter }}{{ .AccessToken }}{{ .LineEnding }}` +
`{{ .Prefix }}SENSU_ACCESS_TOKEN_EXPIRES_AT{{ .Delimiter }}{{ .AccessTokenExpiresAt }}{{ .LineEnding }}` +
`{{ .Prefix }}SENSU_REFRESH_TOKEN{{ .Delimiter }}{{ .RefreshToken }}{{ .LineEnding }}` +
`{{ .Prefix }}SENSU_TRUSTED_CA_FILE{{ .Delimiter }}{{ .TrustedCAFile }}{{ .LineEnding }}` +
`{{ .Prefix }}SENSU_INSECURE_SKIP_TLS_VERIFY{{ .Delimiter }}{{ .InsecureSkipTLSVerify }}{{ .LineEnding }}` +
`{{ .UsageHint }}`
shellFlag = "shell"
)
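// envTmpl renders one "prefix NAME delimiter value line-ending" entry per variable;
// Prefix, Delimiter and LineEnding are filled per shell (export/SET/$Env:) in execute below.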
// Command displays the commands to set up the environment used by sensuctl
func Command(cli *cli.SensuCli) *cobra.Command {
cmd := &cobra.Command{
Use: "env",
Short: "display the commands to set up the environment used by sensuctl",
PreRun: refreshAccessToken(cli),
RunE: execute(cli),
}
_ = cmd.Flags().StringP(shellFlag, "", "",
fmt.Sprintf(
`force environment to be configured for a specified shell ("%s"|"%s"|"%s")`,
"bash", "cmd", "powershell",
))
return cmd
}
type shellConfig struct {
args []string
userShell string
Prefix string
Delimiter string
LineEnding string
APIURL string
Namespace string
Format string
AccessToken string
AccessTokenExpiresAt int64
RefreshToken string
TrustedCAFile string
InsecureSkipTLSVerify string
}
func (s shellConfig) UsageHint() string {
cmd := ""
comment := "#"
commandLine := strings.Join(s.args, " ")
switch s.userShell {
case "cmd":
cmd = fmt.Sprintf("\t@FOR /f \"tokens=*\" %%i IN ('%s') DO @%%i", commandLine)
comment = "REM"
case "powershell":
cmd = fmt.Sprintf("& %s | Invoke-Expression", commandLine)
default:
cmd = fmt.Sprintf("eval $(%s)", commandLine)
}
return fmt.Sprintf("%s Run this command to configure your shell: \n%s %s\n", comment, comment, cmd)
}
// execute contains the actual logic for displaying the environment
func execute(cli *cli.SensuCli) func(*cobra.Command, []string) error {
return func(cmd *cobra.Command, args []string) error {
shellCfg := shellConfig{
args: os.Args,
APIURL: cli.Config.APIUrl(),
Namespace: cli.Config.Namespace(),
Format: cli.Config.Format(),
AccessToken: cli.Config.Tokens().Access,
AccessTokenExpiresAt: cli.Config.Tokens().ExpiresAt,
RefreshToken: cli.Config.Tokens().Refresh,
TrustedCAFile: cli.Config.TrustedCAFile(),
InsecureSkipTLSVerify: strconv.FormatBool(cli.Config.InsecureSkipTLSVerify()),
}
// Get the user shell
shellCfg.userShell = shell()
// Determine if the shell flag was passed to override the shell to use
shellFlag, err := cmd.Flags().GetString(shellFlag)
if err != nil {
return err
}
if shellFlag != "" {
shellCfg.userShell = shellFlag
}
switch shellCfg.userShell {
case "cmd":
shellCfg.Prefix = "SET "
shellCfg.Delimiter = "="
shellCfg.LineEnding = "\n"
case "powershell":
shellCfg.Prefix = "$Env:"
shellCfg.Delimiter = " = \""
shellCfg.LineEnding = "\"\n"
default: // bash
shellCfg.Prefix = "export "
shellCfg.Delimiter = "=\""
shellCfg.LineEnding = "\"\n"
}
t := template.New("envConfig")
tmpl, err := t.Parse(envTmpl)
if err != nil {
return err
}
return tmpl.Execute(cmd.OutOrStdout(), shellCfg)
}
}
// refreshAccessToken attempts to silently refresh the access token
func refreshAccessToken(cli *cli.SensuCli) func(*cobra.Command, []string) {
return func(cmd *cobra.Command, args []string) {
tokens, err := cli.Client.RefreshAccessToken(cli.Config.Tokens())
if err != nil {
return
}
// Write new tokens to disk
_ = cli.Config.SaveTokens(tokens)
}
}
// shell attempts to discover the shell currently used
func shell() string {
shell := os.Getenv("SHELL")
if shell == "" {
// Default to powershell for now when running on Windows
if runtime.GOOS == "windows" {
return "powershell"
}
return ""
}
return filepath.Base(shell)
}
|
[
"\"SHELL\""
] |
[] |
[
"SHELL"
] |
[]
|
["SHELL"]
|
go
| 1 | 0 | |
internal/platform/mailer/smtp.go
|
package mailer
import (
"errors"
"fmt"
"net/mail"
"os"
"strconv"
"github.com/matcornic/hermes/v2"
"golang.org/x/crypto/ssh/terminal"
"gopkg.in/gomail.v2"
generator "Gogin/internal/helpers/emailTemplate"
)
type SmtpAuthentication struct {
Server string
Port int
SenderEmail string
SenderIdentity string
SMTPUser string
SMTPPassword string
}
// SendOptions are options for sending an email
type SendOptions struct {
To string
Subject string
}
// send sends the email
func send(smtpConfig SmtpAuthentication, options SendOptions, htmlBody string, txtBody string) error {
if smtpConfig.Server == "" {
return errors.New("SMTP server config is empty")
}
if smtpConfig.Port == 0 {
return errors.New("SMTP port config is empty")
}
if smtpConfig.SMTPUser == "" {
return errors.New("SMTP user is empty")
}
if smtpConfig.SenderIdentity == "" {
return errors.New("SMTP sender identity is empty")
}
if smtpConfig.SenderEmail == "" {
return errors.New("SMTP sender email is empty")
}
if options.To == "" {
return errors.New("no receiver emails configured")
}
from := mail.Address{
Name: smtpConfig.SenderIdentity,
Address: smtpConfig.SenderEmail,
}
m := gomail.NewMessage()
m.SetHeader("From", from.String())
m.SetHeader("To", options.To)
m.SetHeader("Subject", options.Subject)
m.SetBody("text/plain", txtBody)
m.AddAlternative("text/html", htmlBody)
d := gomail.NewDialer(smtpConfig.Server, smtpConfig.Port, smtpConfig.SMTPUser, smtpConfig.SMTPPassword)
return d.DialAndSend(m)
}
func SendEmail(email, subject string, template hermes.Email) {
port, _ := strconv.Atoi(os.Getenv("SMTP_PORT"))
password := os.Getenv("SMTP_PASSWORD")
SMTPUser := os.Getenv("SMTP_USERNAME")
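// If SMTP_PASSWORD is not set, fall back to prompting for it on the terminal.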
if password == "" {
fmt.Printf("Enter SMTP password of '%s' account: ", SMTPUser)
bytePassword, _ := terminal.ReadPassword(0)
password = string(bytePassword)
}
smtpConfig := SmtpAuthentication{
Server: os.Getenv("SMTP_HOST"),
Port: port,
SenderEmail: SMTPUser,
SenderIdentity: os.Getenv("PRODUCT_NAME"),
SMTPPassword: password,
SMTPUser: SMTPUser,
}
options := SendOptions{
To: email,
}
options.Subject = subject
htmlBytes, txtBytes := generator.Export(template)
err := send(smtpConfig, options, string(htmlBytes), string(txtBytes))
if err != nil {
fmt.Println(err)
}
}
|
[
"\"SMTP_PORT\"",
"\"SMTP_PASSWORD\"",
"\"SMTP_USERNAME\"",
"\"SMTP_HOST\"",
"\"PRODUCT_NAME\""
] |
[] |
[
"SMTP_PORT",
"SMTP_USERNAME",
"PRODUCT_NAME",
"SMTP_PASSWORD",
"SMTP_HOST"
] |
[]
|
["SMTP_PORT", "SMTP_USERNAME", "PRODUCT_NAME", "SMTP_PASSWORD", "SMTP_HOST"]
|
go
| 5 | 0 | |
test/fastq_importer_test.py
|
# -*- coding: utf-8 -*-
import ftplib
import hashlib
import os
import time
import unittest
from configparser import ConfigParser
from os import environ
import requests
from biokbase.workspace.client import Workspace as workspaceService
from installed_clients.DataFileUtilClient import DataFileUtil
from kb_uploadmethods.authclient import KBaseAuth as _KBaseAuth
from kb_uploadmethods.kb_uploadmethodsImpl import kb_uploadmethods
from kb_uploadmethods.kb_uploadmethodsServer import MethodContext
class kb_uploadmethodsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('kb_uploadmethods'):
cls.cfg[nameval[0]] = nameval[1]
authServiceUrl = cls.cfg.get('auth-service-url',
"https://kbase.us/services/authorization/Sessions/Login")
auth_client = _KBaseAuth(authServiceUrl)
cls.user_id = auth_client.get_user(cls.token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': cls.token,
'user_id': cls.user_id,
'provenance': [
{'service': 'kb_uploadmethods',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=cls.token)
cls.serviceImpl = kb_uploadmethods(cls.cfg)
cls.dfu = DataFileUtil(os.environ['SDK_CALLBACK_URL'], token=cls.token)
cls.scratch = cls.cfg['scratch']
cls.shockURL = cls.cfg['shock-url']
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
@classmethod
def make_ref(self, objinfo):
return str(objinfo[6]) + '/' + str(objinfo[0]) + '/' + str(objinfo[4])
@classmethod
def delete_shock_node(cls, node_id):
header = {'Authorization': 'Oauth {0}'.format(cls.token)}
requests.delete(cls.shockURL + '/node/' + node_id, headers=header,
allow_redirects=True)
print(('Deleted shock node ' + node_id))
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_kb_uploadmethods_" + str(suffix)
ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
def getDefaultParams(self, file_path=True):
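# file_path=True builds params for the staging-area upload path; False builds
# params for the URL download path exercised by the Direct Download tests.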
if file_path:
default_input_params = {
'fwd_staging_file_name': 'SP1.fq',
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName()
}
else:
default_input_params = {
'download_type': 'Direct Download',
'fwd_file_url': 'http://molb7621.github.io/workshop/_downloads/SP1.fq',
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName()
}
return default_input_params
def check_lib(self, lib, size, filename, md5):
shock_id = lib["file"]["id"]
print("LIB: {}".format(str(lib)))
print("Shock ID: {}".format(str(shock_id)))
fileinput = [{
'shock_id': shock_id,
'file_path': self.scratch + '/temp',
'unpack': 'uncompress'}]
print("File Input: {}".format(str(fileinput)))
files = self.dfu.shock_to_file_mass(fileinput)
path = files[0]["file_path"]
file_md5 = hashlib.md5(open(path, 'rb').read()).hexdigest()
libfile = lib['file']
self.assertEqual(file_md5, md5)
self.assertEqual(lib['size'], size)
self.assertEqual(lib['type'], 'fq')
self.assertEqual(lib['encoding'], 'ascii')
self.assertEqual(libfile['file_name'], filename)
self.assertEqual(libfile['hid'].startswith('KBH_'), True)
self.assertEqual(libfile['type'], 'shock')
self.assertEqual(libfile['url'], self.shockURL)
def test_validate_upload_fastq_file_parameters(self):
# Testing required params
invalidate_input_params = self.getDefaultParams()
del invalidate_input_params['name']
with self.assertRaisesRegex(
ValueError,
'"name" parameter is required, but missing'):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams()
del invalidate_input_params['workspace_name']
with self.assertRaisesRegex(
ValueError,
'"workspace_name" parameter is required, but missing'):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams()
invalidate_input_params['fwd_file_url'] = 'https://fake_url'
with self.assertRaisesRegex(
ValueError,
'Cannot upload Reads for both file path and file URL'):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
# Testing _validate_upload_file_availability
# invalidate_input_params = self.getDefaultParams()
# nonexistent_file_name = 'fake_file_0123456.fastq'
# invalidate_input_params['fwd_staging_file_name'] = nonexistent_file_name
# with self.assertRaisesRegexp(ValueError,
# 'Target file: {} is NOT available.'.format(nonexistent_file_name)):
# self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
# Testing duplicate forward/reverse
invalidate_input_params = self.getDefaultParams()
rev_staging_file_name = invalidate_input_params['fwd_staging_file_name']
invalidate_input_params['rev_staging_file_name'] = rev_staging_file_name
error_msg = 'Same file \[{}\] is used for forward and reverse. '.format(
invalidate_input_params['rev_staging_file_name'])
error_msg += 'Please select different files and try again.'
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams(file_path=False)
invalidate_input_params['rev_file_url'] = invalidate_input_params['fwd_file_url']
error_msg = 'Same URL\n {}\nis used for forward and reverse. '.format(
invalidate_input_params['rev_file_url'])
error_msg += 'Please select different files and try again.'
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
# Testing URL prefix
invalidate_input_params = self.getDefaultParams(file_path=False)
invalidate_input_params['fwd_file_url'] = 'ftp://ftp.dlptest.com/24_Hour/SP1.fq'
with self.assertRaisesRegex(
ValueError,
'Download type and URL prefix do NOT match'):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams(file_path=False)
invalidate_input_params['download_type'] = 'DropBox'
with self.assertRaisesRegex(
ValueError,
'Download type and URL prefix do NOT match'):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams(file_path=False)
invalidate_input_params['download_type'] = 'FTP'
with self.assertRaisesRegex(
ValueError,
'Download type and URL prefix do NOT match'):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams(file_path=False)
del invalidate_input_params['download_type']
with self.assertRaisesRegex(
ValueError,
'Download type parameter is required, but missing'):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
error_msg = 'Advanced params "Mean Insert Size", "St. Dev. of Insert Size" or '
error_msg += '"Reads Orientation Outward" is Paried End Reads specific'
invalidate_input_params = self.getDefaultParams()
invalidate_input_params['insert_size_mean'] = 10
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams()
invalidate_input_params['insert_size_std_dev'] = 0.4
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams()
invalidate_input_params['read_orientation_outward'] = 1
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
error_msg = 'Sequencing Technology: "PacBio CCS" or "PacBio CLR" '
error_msg += 'is Single End Reads specific'
invalidate_input_params = self.getDefaultParams()
invalidate_input_params['sequencing_tech'] = 'PacBio CCS'
invalidate_input_params['rev_staging_file_name'] = 'rev_staging_file_name'
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams()
invalidate_input_params['sequencing_tech'] = 'PacBio CLR'
invalidate_input_params['rev_staging_file_name'] = 'rev_staging_file_name'
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams()
invalidate_input_params['sequencing_tech'] = 'PacBio CCS'
invalidate_input_params['interleaved'] = 1
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams()
invalidate_input_params['sequencing_tech'] = 'PacBio CLR'
invalidate_input_params['interleaved'] = 1
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams(file_path=False)
invalidate_input_params['sequencing_tech'] = 'PacBio CCS'
invalidate_input_params['rev_file_url'] = 'rev_file_url'
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams(file_path=False)
invalidate_input_params['sequencing_tech'] = 'PacBio CLR'
invalidate_input_params['rev_file_url'] = 'rev_file_url'
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams(file_path=False)
invalidate_input_params['sequencing_tech'] = 'PacBio CCS'
invalidate_input_params['interleaved'] = 1
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
invalidate_input_params = self.getDefaultParams(file_path=False)
invalidate_input_params['sequencing_tech'] = 'PacBio CLR'
invalidate_input_params['interleaved'] = 1
with self.assertRaisesRegex(ValueError, error_msg):
self.getImpl().upload_fastq_file(self.getContext(), invalidate_input_params)
def test_upload_fastq_file_url_direct_download(self):
fwd_file_url = 'https://anl.box.com/shared/static/'
fwd_file_url += 'qwadp20dxtwnhc8r3sjphen6h0k1hdyo.fastq'
params = {
'download_type': 'Direct Download',
'fwd_file_url': fwd_file_url,
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName()
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_direct_download_interleaved(self):
fwd_file_url = 'https://anl.box.com/shared/static/'
fwd_file_url += 'pf0d0d7torv07qh2nogaay073udmiacr.fastq'
params = {
'download_type': 'Direct Download',
'fwd_file_url': fwd_file_url,
'sequencing_tech': 'seqtech-pr2',
'name': 'pairedreads2',
'workspace_name': self.getWsName(),
'insert_size_mean': 72.1,
'insert_size_std_dev': 84.0,
'read_orientation_outward': 1,
'interleaved': 1
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/pairedreads2']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.PairedEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'seqtech-pr2')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.assertEqual(d['interleaved'], 1)
self.assertEqual(d['read_orientation_outward'], 1)
self.assertEqual(d['insert_size_mean'], 72.1)
self.assertEqual(d['insert_size_std_dev'], 84.0)
self.assertNotIn('lib2', d)
self.assertEqual(d['read_count'], 4)
self.assertEqual(d['total_bases'], 1004)
self.assertEqual(d['number_of_duplicates'], 0)
self.assertEqual(d['base_percentages']['A'], 20)
self.assertEqual(d['base_percentages']['T'], 20)
self.assertEqual(d['base_percentages']['N'], 0)
self.assertEqual(d['base_percentages']['C'], 26.4286)
self.assertEqual(d['base_percentages']['G'], 33.5714)
self.assertEqual(d["phred_type"], "33")
self.assertEqual(d["qual_mean"], 25.1143)
self.assertEqual(d["qual_min"], 10)
self.assertEqual(d["qual_max"], 40)
self.assertEqual(d["qual_stdev"], 10.081)
self.assertEqual(d["gc_content"], 0.6)
self.assertEqual(d["read_length_mean"], 251)
self.assertEqual(d["read_length_stdev"], 0)
self.check_lib(d['lib1'], 1063, 'Sample5_interleaved.fastq.gz',
'971a5f445055c85fd45b17459e15e3ed')
node = d['lib1']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_direct_download_paired_end(self):
fwd_file_url = 'https://anl.box.com/shared/static/'
fwd_file_url += 'lph9l0ye6yqetnbk04cx33mqgrj4b85j.fq'
rev_file_url = 'https://anl.box.com/shared/static/'
rev_file_url += '1u9fi158vquyrh9qt7l04t71eqbpvyrr.fq'
params = {
'download_type': 'Direct Download',
'fwd_file_url': fwd_file_url,
'rev_file_url': rev_file_url,
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName(),
'single_genome': 0,
'insert_size_mean': 99.9,
'insert_size_std_dev': 10.1,
'read_orientation_outward': 1,
'interleaved': 0
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.PairedEndLibrary'), True)
d = obj['data']
file_name = d["lib1"]["file"]["file_name"]
self.assertTrue(file_name.endswith(".inter.fastq.gz"))
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 0)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.assertEqual(d['interleaved'], 1)
self.assertEqual(d['read_orientation_outward'], 1)
self.assertEqual(d['insert_size_mean'], 99.9)
self.assertEqual(d['insert_size_std_dev'], 10.1)
self.check_lib(d['lib1'], 2696029, file_name,
'1c58d7d59c656db39cedcb431376514b')
node = d['lib1']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_dropbox(self):
fwd_file_url = 'https://www.dropbox.com/s/'
fwd_file_url += 'lv7jx1vh6yky3o0/Sample1.fastq?dl=0'
params = {
'download_type': 'DropBox',
'fwd_file_url': fwd_file_url,
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName()
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_dropbox_paired_end(self):
fwd_file_url = 'https://www.dropbox.com/s/'
fwd_file_url += 'pgtja4btj62ctkx/small.forward.fq?dl=0'
rev_file_url = 'https://www.dropbox.com/s/'
rev_file_url += 'hh55x00qluhfhr8/small.reverse.fq?dl=0'
params = {
'download_type': 'DropBox',
'fwd_file_url': fwd_file_url,
'rev_file_url': rev_file_url,
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName(),
'single_genome': 0,
'insert_size_mean': 99.9,
'insert_size_std_dev': 10.1,
'read_orientation_outward': 1,
'interleaved': 0
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.PairedEndLibrary'), True)
d = obj['data']
file_name = d["lib1"]["file"]["file_name"]
self.assertTrue(file_name.endswith(".inter.fastq.gz"))
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 0)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.assertEqual(d['interleaved'], 1)
self.assertEqual(d['read_orientation_outward'], 1)
self.assertEqual(d['insert_size_mean'], 99.9)
self.assertEqual(d['insert_size_std_dev'], 10.1)
self.check_lib(d['lib1'], 2696029, file_name,
'1c58d7d59c656db39cedcb431376514b')
node = d['lib1']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_google_drive(self):
fwd_file_url = 'https://drive.google.com/file/d/'
fwd_file_url += '0B0exSa7ebQ0qcHdNS2NEYjJOTTg/view?usp=sharing'
params = {
'download_type': 'Google Drive',
'fwd_file_url': fwd_file_url,
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName()
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_google_drive_paired_end(self):
fwd_file_url = 'https://drive.google.com/open?'
fwd_file_url += 'id=0B0exSa7ebQ0qSGlmVzIwNXV5OWc'
rev_file_url = 'https://drive.google.com/file/d/'
rev_file_url += '0B0exSa7ebQ0qYml1c1BXTEhtR00/view?usp=sharing'
params = {
'download_type': 'Google Drive',
'fwd_file_url': fwd_file_url,
'rev_file_url': rev_file_url,
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName(),
'single_genome': 0,
'insert_size_mean': 99.9,
'insert_size_std_dev': 10.1,
'read_orientation_outward': 1,
'interleaved': 0
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.PairedEndLibrary'), True)
d = obj['data']
file_name = d["lib1"]["file"]["file_name"]
self.assertTrue(file_name.endswith(".inter.fastq.gz"))
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 0)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.assertEqual(d['interleaved'], 1)
self.assertEqual(d['read_orientation_outward'], 1)
self.assertEqual(d['insert_size_mean'], 99.9)
self.assertEqual(d['insert_size_std_dev'], 10.1)
self.check_lib(d['lib1'], 2696029, file_name,
'1c58d7d59c656db39cedcb431376514b')
node = d['lib1']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_ftp(self):
# copy test file to FTP
fq_filename = "Sample1.fastq"
ftp_connection = ftplib.FTP('ftp.uconn.edu')
ftp_connection.login('anonymous', '[email protected]')
ftp_connection.cwd("/48_hour/")
if fq_filename not in ftp_connection.nlst():
fh = open(os.path.join("data", fq_filename), 'rb')
ftp_connection.storbinary('STOR Sample1.fastq', fh)
fh.close()
params = {
'download_type': 'FTP',
'fwd_file_url': 'ftp://ftp.uconn.edu/48_hour/Sample1.fastq',
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName()
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_ftp_paired(self):
# copy test file to FTP
fq_filename = "small.forward.fq"
ftp_connection = ftplib.FTP('ftp.uconn.edu')
ftp_connection.login('anonymous', '[email protected]')
ftp_connection.cwd("/48_hour/")
if fq_filename not in ftp_connection.nlst():
fh = open(os.path.join("data", fq_filename), 'rb')
ftp_connection.storbinary('STOR small.forward.fq', fh)
fh.close()
fq_filename = "small.reverse.fq"
if fq_filename not in ftp_connection.nlst():
fh = open(os.path.join("data", fq_filename), 'rb')
ftp_connection.storbinary('STOR small.reverse.fq', fh)
fh.close()
params = {
'download_type': 'FTP',
'fwd_file_url': 'ftp://ftp.uconn.edu/48_hour/small.forward.fq',
'rev_file_url': 'ftp://ftp.uconn.edu/48_hour/small.reverse.fq',
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName(),
'single_genome': 0,
'insert_size_mean': 99.9,
'insert_size_std_dev': 10.1,
'interleaved': 0
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.PairedEndLibrary'), True)
d = obj['data']
file_name = d["lib1"]["file"]["file_name"]
self.assertTrue(file_name.endswith(".inter.fastq.gz"))
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 0)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.assertEqual(d['interleaved'], 1)
self.assertEqual(d['read_orientation_outward'], 0)
self.assertEqual(d['insert_size_mean'], 99.9)
self.assertEqual(d['insert_size_std_dev'], 10.1)
self.check_lib(d['lib1'], 2696029, file_name,
'1c58d7d59c656db39cedcb431376514b')
node = d['lib1']['file']['id']
self.delete_shock_node(node)
def test_urls_to_add_direct_download(self):
fwd_file_url = 'https://anl.box.com/shared/static/'
fwd_file_url += 'qwadp20dxtwnhc8r3sjphen6h0k1hdyo.fastq'
params = {
'download_type': 'Direct Download',
'workspace_name': self.getWsName(),
'sequencing_tech': 'Unknown',
'urls_to_add': [
{
'fwd_file_url': fwd_file_url,
'name': 'test_reads_file_name_1.reads',
'single_genome': 1
},
{
'fwd_file_url': fwd_file_url,
'name': 'test_reads_file_name_2.reads',
'single_genome': 1
}
]
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
self.assertEqual(2, len(ref[0].get('obj_ref').split(',')))
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name_1.reads']})['data'][0]
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name_2.reads']})['data'][0]
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
def test_urls_to_add_dropbox_paired_end(self):
fwd_file_url = 'https://www.dropbox.com/s/pgtja4btj62ctkx/small.forward.fq?dl=0'
rev_file_url = 'https://www.dropbox.com/s/hh55x00qluhfhr8/small.reverse.fq?dl=0'
params = {
'download_type': 'DropBox',
'sequencing_tech': 'Unknown',
'workspace_name': self.getWsName(),
'urls_to_add': [
{
'fwd_file_url': fwd_file_url,
'rev_file_url': rev_file_url,
'name': 'test_reads_file_name_1.reads',
'single_genome': 0,
'insert_size_mean': 99.9,
'insert_size_std_dev': 10.1,
'read_orientation_outward': 1,
'interleaved': 0
},
{
'fwd_file_url': fwd_file_url,
'rev_file_url': rev_file_url,
'name': 'test_reads_file_name_2.reads',
'single_genome': 0,
'insert_size_mean': 99.9,
'insert_size_std_dev': 10.1,
'read_orientation_outward': 1,
'interleaved': 0
}
]
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
self.assertEqual(2, len(ref[0].get('obj_ref').split(',')))
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name_1.reads']})['data'][0]
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.PairedEndLibrary'), True)
d = obj['data']
file_name = d["lib1"]["file"]["file_name"]
self.assertTrue(file_name.endswith(".inter.fastq.gz"))
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 0)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.assertEqual(d['interleaved'], 1)
self.assertEqual(d['read_orientation_outward'], 1)
self.assertEqual(d['insert_size_mean'], 99.9)
self.assertEqual(d['insert_size_std_dev'], 10.1)
self.check_lib(d['lib1'], 2696029, file_name,
'1c58d7d59c656db39cedcb431376514b')
node = d['lib1']['file']['id']
self.delete_shock_node(node)
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name_2.reads']})['data'][0]
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.PairedEndLibrary'), True)
d = obj['data']
file_name = d["lib1"]["file"]["file_name"]
self.assertTrue(file_name.endswith(".inter.fastq.gz"))
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 0)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.assertEqual(d['interleaved'], 1)
self.assertEqual(d['read_orientation_outward'], 1)
self.assertEqual(d['insert_size_mean'], 99.9)
self.assertEqual(d['insert_size_std_dev'], 10.1)
self.check_lib(d['lib1'], 2696029, file_name,
'1c58d7d59c656db39cedcb431376514b')
node = d['lib1']['file']['id']
self.delete_shock_node(node)
def test_urls_to_add_direct_download_leading_space(self):
fwd_file_url = ' https://anl.box.com/shared/static/'
fwd_file_url += 'qwadp20dxtwnhc8r3sjphen6h0k1hdyo.fastq'
params = {
'download_type': 'Direct Download',
'workspace_name': self.getWsName(),
'sequencing_tech': 'Unknown',
'urls_to_add': [
{
'fwd_file_url': fwd_file_url,
'name': 'test_reads_file_name_1.reads',
'single_genome': 1
},
{
'fwd_file_url': fwd_file_url,
'name': 'test_reads_file_name_2.reads',
'single_genome': 1
}
]
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
self.assertEqual(2, len(ref[0].get('obj_ref').split(',')))
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name_1.reads']})['data'][0]
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name_2.reads']})['data'][0]
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
def test_upload_fastq_file_url_ftp_trailing_space(self):
# copy test file to FTP
fq_filename = "Sample1.fastq"
ftp_connection = ftplib.FTP('ftp.uconn.edu')
ftp_connection.login('anonymous', '[email protected]')
ftp_connection.cwd("/48_hour/")
if fq_filename not in ftp_connection.nlst():
fh = open(os.path.join("data", fq_filename), 'rb')
ftp_connection.storbinary('STOR Sample1.fastq', fh)
fh.close()
params = {
'download_type': 'FTP',
'fwd_file_url': 'ftp://ftp.uconn.edu/48_hour/Sample1.fastq ',
'sequencing_tech': 'Unknown',
'name': 'test_reads_file_name.reads',
'workspace_name': self.getWsName()
}
ref = self.getImpl().upload_fastq_file(self.getContext(), params)
self.assertTrue('obj_ref' in ref[0])
obj = self.dfu.get_objects(
{'object_refs': [self.getWsName() + '/test_reads_file_name.reads']})['data'][0]
self.assertEqual(ref[0]['obj_ref'], self.make_ref(obj['info']))
self.assertEqual(obj['info'][2].startswith(
'KBaseFile.SingleEndLibrary'), True)
d = obj['data']
self.assertEqual(d['sequencing_tech'], 'Unknown')
self.assertEqual(d['single_genome'], 1)
self.assertEqual('source' not in d, True)
self.assertEqual('strain' not in d, True)
self.check_lib(d['lib'], 2966, 'Sample1.fastq.gz',
'f118ee769a5e1b40ec44629994dfc3cd')
node = d['lib']['file']['id']
self.delete_shock_node(node)
|
[] |
[] |
[
"SDK_CALLBACK_URL"
] |
[]
|
["SDK_CALLBACK_URL"]
|
python
| 1 | 0 | |
pkg/patterns/declarative/metrics_test.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package declarative
import (
"bytes"
"errors"
"os"
"os/exec"
"path/filepath"
"strings"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus/testutil"
apps "k8s.io/api/apps/v1"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/kubebuilder-declarative-pattern/pkg/patterns/declarative/pkg/manifest"
"sigs.k8s.io/yaml"
)
// This test checks gvkString function
func TestGVKString(t *testing.T) {
testCases := []struct {
subtest string
gvk schema.GroupVersionKind
want string
}{
{
subtest: "v1/Pod",
gvk: core.SchemeGroupVersion.WithKind("Pod"),
want: "v1/Pod",
},
{
subtest: "apps/v1/Deployment",
gvk: apps.SchemeGroupVersion.WithKind("Deployment"),
want: "apps/v1/Deployment",
},
}
for _, st := range testCases {
t.Run(st.subtest, func(t *testing.T) {
if got := gvkString(st.gvk); st.want != got {
t.Errorf("want:\n%v\ngot:\n%v\n", st.want, got)
}
})
}
}
// This test checks reconcileMetricsFor function & reconcileMetrics.reconcileWith method
func TestReconcileWith(t *testing.T) {
testCases := []struct {
subtest string
gvks []schema.GroupVersionKind
namespaces []string
names []string
want []string
}{
{
subtest: "core",
gvks: []schema.GroupVersionKind{core.SchemeGroupVersion.WithKind("Pod")},
namespaces: []string{"ns1"},
names: []string{"n1"},
want: []string{`
# HELP declarative_reconciler_reconcile_count How many times reconciliation of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_count counter
declarative_reconciler_reconcile_count {group_version_kind = "v1/Pod", name = "n1", namespace = "ns1"} 2
`,
},
},
{
subtest: "core&app",
gvks: []schema.GroupVersionKind{core.SchemeGroupVersion.WithKind("Pod"), apps.SchemeGroupVersion.WithKind("Deployment")},
namespaces: []string{"ns1", ""},
names: []string{"n1", "n2"},
want: []string{`
# HELP declarative_reconciler_reconcile_count How many times reconciliation of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_count counter
declarative_reconciler_reconcile_count {group_version_kind = "v1/Pod", name = "n1", namespace = "ns1"} 2
`,
`
# HELP declarative_reconciler_reconcile_count How many times reconciliation of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_count counter
declarative_reconciler_reconcile_count {group_version_kind = "apps/v1/Deployment", name = "n2", namespace = ""} 2
`,
},
},
{
subtest: "node - cluster scoped only",
gvks: []schema.GroupVersionKind{core.SchemeGroupVersion.WithKind("Node")},
namespaces: []string{""},
names: []string{"n1"},
want: []string{`
# HELP declarative_reconciler_reconcile_count How many times reconciliation of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_count counter
declarative_reconciler_reconcile_count {group_version_kind = "v1/Node", name = "n1", namespace = ""} 2
`,
},
},
}
for _, st := range testCases {
t.Run(st.subtest, func(t *testing.T) {
for i, gvk := range st.gvks {
rm := reconcileMetricsFor(gvk)
rm.reconcileWith(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: st.namespaces[i], Name: st.names[i]}})
rm.reconcileWith(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: st.namespaces[i], Name: st.names[i]}})
if err := testutil.CollectAndCompare(rm.reconcileCounterVec.WithLabelValues(gvkString(gvk),
st.namespaces[i], st.names[i]), strings.NewReader(st.want[i])); err != nil {
t.Error(err)
}
}
})
reconcileCount.Reset()
}
}
// This test checks reconcileMetricsFor function & reconcileMetrics.reconcileFailedWith method
func TestReconcileFailedWith(t *testing.T) {
testCases := []struct {
subtest string
gvks []schema.GroupVersionKind
errs []error
namespaces []string
names []string
want []string
}{
{
subtest: "core",
gvks: []schema.GroupVersionKind{core.SchemeGroupVersion.WithKind("Pod")},
errs: []error{errors.New("test")},
namespaces: []string{"ns1"},
names: []string{"n1"},
want: []string{`
# HELP declarative_reconciler_reconcile_failure_count How many times reconciliation failure of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_failure_count counter
declarative_reconciler_reconcile_failure_count {group_version_kind = "v1/Pod", name = "n1", namespace = "ns1"} 2
`,
},
},
{
subtest: "core&app",
gvks: []schema.GroupVersionKind{core.SchemeGroupVersion.WithKind("Pod"), apps.SchemeGroupVersion.WithKind("Deployment")},
errs: []error{errors.New("test"), errors.New("test")},
namespaces: []string{"ns1", ""},
names: []string{"n1", "n2"},
want: []string{`
# HELP declarative_reconciler_reconcile_failure_count How many times reconciliation failure of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_failure_count counter
declarative_reconciler_reconcile_failure_count {group_version_kind = "v1/Pod", name = "n1", namespace = "ns1"} 2
`,
`
# HELP declarative_reconciler_reconcile_failure_count How many times reconciliation failure of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_failure_count counter
declarative_reconciler_reconcile_failure_count {group_version_kind = "apps/v1/Deployment", name = "n2", namespace = ""} 2
`,
},
},
{
subtest: "node - cluster scoped only",
gvks: []schema.GroupVersionKind{core.SchemeGroupVersion.WithKind("Node")},
errs: []error{errors.New("test")},
namespaces: []string{""},
names: []string{"n1"},
want: []string{`
# HELP declarative_reconciler_reconcile_failure_count How many times reconciliation failure of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_failure_count counter
declarative_reconciler_reconcile_failure_count {group_version_kind = "v1/Node", name = "n1", namespace = ""} 2
`,
},
},
{
subtest: "no error",
gvks: []schema.GroupVersionKind{core.SchemeGroupVersion.WithKind("Node")},
errs: []error{nil},
namespaces: []string{""},
names: []string{"n1"},
want: []string{`
# HELP declarative_reconciler_reconcile_failure_count How many times reconciliation failure of K8s objects managed by declarative reconciler occurs
# TYPE declarative_reconciler_reconcile_failure_count counter
declarative_reconciler_reconcile_failure_count {group_version_kind = "v1/Node", name = "n1", namespace = ""} 0
`,
},
},
}
for _, st := range testCases {
t.Run(st.subtest, func(t *testing.T) {
for i, gvk := range st.gvks {
rm := reconcileMetricsFor(gvk)
rm.reconcileFailedWith(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: st.namespaces[i], Name: st.names[i]}},
reconcile.Result{}, st.errs[i])
rm.reconcileFailedWith(reconcile.Request{NamespacedName: types.NamespacedName{Namespace: st.namespaces[i], Name: st.names[i]}},
reconcile.Result{}, st.errs[i])
if err := testutil.CollectAndCompare(rm.reconcileFailureCounterVec.WithLabelValues(gvkString(gvk),
st.namespaces[i], st.names[i]), strings.NewReader(st.want[i])); err != nil {
t.Error(err)
}
}
})
reconcileFailure.Reset()
}
}
// This test checks *ObjectTracker.addIfNotPresent method
//
// The envtest package used in this test requires the control
// plane binaries (etcd, kube-apiserver & kubectl).
// By default these binaries are expected to reside in
// /usr/local/kubebuilder/bin.
// That path can be overridden through the environment variable
// KUBEBUILDER_ASSETS.
// It is recommended to download the kubebuilder release binaries
// and point KUBEBUILDER_ASSETS at that path.
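// For illustration only (the path below is an assumption, not part of this repo):
// with a kubebuilder release unpacked under $HOME/kubebuilder, this test could be
// invoked as
//   KUBEBUILDER_ASSETS=$HOME/kubebuilder/bin go test ./pkg/patterns/declarative/ -run TestAddIfNotPresent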
func TestAddIfNotPresent(t *testing.T) {
const defKubectlPath = "/usr/local/kubebuilder/bin"
var kubectlPath string
// Run local kube-apiserver & etcd
testEnv := envtest.Environment{}
restConf, err := testEnv.Start()
if err != nil {
t.Log("Maybe, you have to make sure control plane binaries" + " " +
"(kube-apiserver, etcd & kubectl) reside in" + " " +
"/usr/local/kubebuilder/bin" + " " +
"or have to set environment variable" + " " +
"KUBEBUILDER_ASSETS to the path these binaries reside in")
t.Error(err)
}
// Create manager
mgrOpt := manager.Options{}
mgr, err := manager.New(restConf, mgrOpt)
if err != nil {
t.Error(err)
}
stopC := make(chan struct{})
go func() {
_ = mgr.GetCache().Start(stopC)
}()
// Set up kubectl command
if envPath := os.Getenv("KUBEBUILDER_ASSETS"); envPath != "" {
kubectlPath = filepath.Join(envPath, "kubectl")
} else {
kubectlPath = filepath.Join(defKubectlPath, "kubectl")
}
// kubectl arg for "kubectl apply"
applyArgs := []string{"apply"}
applyArgs = append(applyArgs, "--server="+restConf.Host)
// kubectl arg for "kubectl delete"
deleteArgs := []string{"delete"}
deleteArgs = append(deleteArgs, "--server="+restConf.Host)
// Configure globalObjectTracker
globalObjectTracker.mgr = mgr
testCases := []struct {
subtest string
metricsDuration int
actions []string
defaultNamespace string
objects [][]string
wants []string
}{
// It's better to use a different kind of K8s object for each test case
{
subtest: "Create K8s object",
metricsDuration: 0,
actions: []string{"Create"},
defaultNamespace: "",
objects: [][]string{
{
"kind: Namespace\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: ns1\n",
},
},
wants: []string{
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns1", namespace = ""} 1
`,
},
},
{
subtest: "Update K8s object",
metricsDuration: 0,
actions: []string{"Create", "Update"},
defaultNamespace: "",
objects: [][]string{
{
"kind: Namespace\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: ns2\n",
"kind: Role\n" +
"apiVersion: rbac.authorization.k8s.io/v1\n" +
"metadata:\n" +
" name: r2\n" +
" namespace: ns2\n" +
"rules:\n" +
` - apiGroups: [""]` + "\n" +
` resources: ["pods"]` + "\n" +
` verbs: ["get"]`,
},
{
"kind: Namespace\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: ns2\n",
"kind: Role\n" +
"apiVersion: rbac.authorization.k8s.io/v1\n" +
"metadata:\n" +
" name: r2\n" +
" namespace: ns2\n" +
"rules:\n" +
` - apiGroups: [""]` + "\n" +
` resources: ["pods"]` + "\n" +
` verbs: ["get", "list"]`,
},
},
wants: []string{
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns2", namespace = ""} 1
declarative_reconciler_managed_objects_record {group_version_kind = "rbac.authorization.k8s.io/v1/Role", name = "r2", namespace = "ns2"} 1
`,
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns2", namespace = ""} 1
declarative_reconciler_managed_objects_record {group_version_kind = "rbac.authorization.k8s.io/v1/Role", name = "r2", namespace = "ns2"} 1
`,
},
},
{
subtest: "Delete K8s object",
metricsDuration: 0,
actions: []string{"Create", "Delete"},
defaultNamespace: "ns3",
objects: [][]string{
{
"kind: Namespace\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: ns3\n",
"kind: Secret\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: s3\n" +
"type: Opaque\n" +
"data:\n" +
" name: dGVzdA==\n",
},
{
"kind: Secret\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: s3\n" +
"type: Opaque\n" +
"data:\n" +
" name: dGVzdA==\n",
},
},
wants: []string{
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns3", namespace = ""} 1
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Secret", name = "s3", namespace = "ns3"} 1
`,
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns3", namespace = ""} 1
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Secret", name = "s3", namespace = "ns3"} 0
`,
},
},
{
subtest: "Delete metrics after specified duration(duration=2)",
metricsDuration: 2,
actions: []string{"Create", "Delete", "Create", "Create"},
defaultNamespace: "",
objects: [][]string{
{
"kind: Namespace\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: ns4\n",
"kind: Secret\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: s4\n" +
" namespace: ns4\n" +
"type: Opaque\n" +
"data:\n" +
" name: dGVzdA==\n",
},
{
"kind: Secret\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: s4\n" +
" namespace: ns4\n" +
"type: Opaque\n" +
"data:\n" +
" name: dGVzdA==\n",
},
{
"kind: Namespace\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: ns4\n",
},
{
"kind: Namespace\n" +
"apiVersion: v1\n" +
"metadata:\n" +
" name: ns4\n",
},
},
wants: []string{
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns4", namespace = ""} 1
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Secret", name = "s4", namespace = "ns4"} 1
`,
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns4", namespace = ""} 1
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Secret", name = "s4", namespace = "ns4"} 0
`,
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns4", namespace = ""} 1
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Secret", name = "s4", namespace = "ns4"} 0
`,
`
# HELP declarative_reconciler_managed_objects_record Track the number of objects in manifest
# TYPE declarative_reconciler_managed_objects_record gauge
declarative_reconciler_managed_objects_record {group_version_kind = "v1/Namespace", name = "ns4", namespace = ""} 1
`,
},
},
}
for _, st := range testCases {
t.Run(st.subtest, func(t *testing.T) {
globalObjectTracker.SetMetricsDuration(st.metricsDuration)
for i, yobjList := range st.objects {
var cmd *exec.Cmd
var stdout bytes.Buffer
var stderr bytes.Buffer
var cmdArgs []string
var yobj string
var jobjList = [][]byte{}
var objList = []*manifest.Object{}
for i, yitem := range yobjList {
if i == 0 {
yobj = yitem
} else {
yobj = yobj + "---\n" + yitem
}
}
// YAML to JSON
for _, yitem := range yobjList {
jobj, err := yaml.YAMLToJSON([]byte(yitem))
if err != nil {
t.Error(err)
}
jobjList = append(jobjList, jobj)
}
// JSON to manifest.Object
for _, jobj := range jobjList {
mobj, err := manifest.ParseJSONToObject(jobj)
if err != nil {
t.Error(err)
}
objList = append(objList, mobj)
}
// Run addIfNotPresent
err = globalObjectTracker.addIfNotPresent(objList, st.defaultNamespace)
if err != nil {
t.Error(err)
}
// Set up kubectl command
if st.actions[i] != "Delete" {
if len(st.defaultNamespace) != 0 {
cmdArgs = append(applyArgs, "-n", st.defaultNamespace, "-f", "-")
} else {
cmdArgs = append(applyArgs, "-f", "-")
}
} else {
if len(st.defaultNamespace) != 0 {
cmdArgs = append(deleteArgs, "-n", st.defaultNamespace, "-f", "-")
} else {
cmdArgs = append(deleteArgs, "-f", "-")
}
}
cmd = exec.Command(kubectlPath, cmdArgs...)
cmd.Stdin = strings.NewReader(yobj)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
t.Logf("action: %v\n", st.actions[i])
t.Logf("stdout: %v\n", stdout.String())
t.Logf("stderr: %v\n", stderr.String())
t.Error(err)
}
// Wait until the reflector sees the K8s object change in the API server and adds it to the DeltaFIFO,
// then the controller pops it and the event handler updates the metrics.
// If we omit this, the call to testutil.CollectAndCompare may run too early and fail.
_ = mgr.GetCache().WaitForCacheSync(stopC)
time.Sleep(time.Second * 10)
// Check for metrics
err = testutil.CollectAndCompare(managedObjectsRecord, strings.NewReader(st.wants[i]))
if err != nil {
t.Logf("No. of action in subtest: %v\n", i)
t.Error(err)
}
}
})
managedObjectsRecord.Reset()
}
}
|
[
"\"KUBEBUILDER_ASSETS\""
] |
[] |
[
"KUBEBUILDER_ASSETS"
] |
[]
|
["KUBEBUILDER_ASSETS"]
|
go
| 1 | 0 | |
cmd/provider_cmd_auth0.go
|
// Copyright 2019 The Terraformer Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"errors"
"os"
auth0_terraforming "github.com/GoogleCloudPlatform/terraformer/providers/auth0"
"github.com/GoogleCloudPlatform/terraformer/terraformutils"
"github.com/spf13/cobra"
)
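// newCmdAuth0Importer wires the "auth0" importer. Credentials are taken from the
// AUTH0_DOMAIN, AUTH0_CLIENT_ID and AUTH0_CLIENT_SECRET environment variables.
// Illustrative invocation (the values and resource placeholder below are assumptions,
// not taken from this file):
//   AUTH0_DOMAIN=example.eu.auth0.com AUTH0_CLIENT_ID=abc AUTH0_CLIENT_SECRET=xyz \
//   terraformer import auth0 --resources=<resources>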
func newCmdAuth0Importer(options ImportOptions) *cobra.Command {
cmd := &cobra.Command{
Use: "auth0",
Short: "Import current state to Terraform configuration from Auth0",
Long: "Import current state to Terraform configuration from Auth0",
RunE: func(cmd *cobra.Command, args []string) error {
domain := os.Getenv("AUTH0_DOMAIN")
if len(domain) == 0 {
return errors.New("Domain for Auth0 must be set through `AUTH0_DOMAIN` env var")
}
clientID := os.Getenv("AUTH0_CLIENT_ID")
if len(clientID) == 0 {
return errors.New("Client ID for Auht0 must be set through `AUTH0_CLIENT_ID` env var")
}
clientSecret := os.Getenv("AUTH0_CLIENT_SECRET")
if len(clientSecret) == 0 {
return errors.New("Clien Secret for Auth0 must be set through `AUTH0_CLIENT_SECRET` env var")
}
provider := newAuth0Provider()
err := Import(provider, options, []string{domain, clientID, clientSecret})
if err != nil {
return err
}
return nil
},
}
cmd.AddCommand(listCmd(newAuth0Provider()))
baseProviderFlags(cmd.PersistentFlags(), &options, "action", "action=name1:name2:name3")
return cmd
}
func newAuth0Provider() terraformutils.ProviderGenerator {
return &auth0_terraforming.Auth0Provider{}
}
|
[
"\"AUTH0_DOMAIN\"",
"\"AUTH0_CLIENT_ID\"",
"\"AUTH0_CLIENT_SECRET\""
] |
[] |
[
"AUTH0_DOMAIN",
"AUTH0_CLIENT_SECRET",
"AUTH0_CLIENT_ID"
] |
[]
|
["AUTH0_DOMAIN", "AUTH0_CLIENT_SECRET", "AUTH0_CLIENT_ID"]
|
go
| 3 | 0 | |
system/exec/service.go
|
package exec
import (
"fmt"
"github.com/viant/endly"
"github.com/viant/endly/model"
"github.com/viant/endly/model/criteria"
"github.com/viant/endly/util"
"github.com/viant/toolbox/cred"
"github.com/viant/toolbox/data"
"github.com/viant/toolbox/secret"
"github.com/viant/toolbox/ssh"
"github.com/viant/toolbox/url"
"os"
"path"
"strings"
)
//ServiceID represents the system executor service id
const ServiceID = "exec"
//SudoCredentialKey represents the obfuscated sudo password credentials key (target.Credentials)
const SudoCredentialKey = "**sudo**"
type execService struct {
*endly.AbstractService
credentials map[string]*cred.Config
}
func (s *execService) open(context *endly.Context, request *OpenSessionRequest) (*OpenSessionResponse, error) {
var clientSession, err = s.openSession(context, request)
if err != nil {
return nil, err
}
return &OpenSessionResponse{
SessionID: clientSession.ID,
}, nil
}
func (s *execService) openSSHService(context *endly.Context, request *OpenSessionRequest) (ssh.Service, error) {
if request.ReplayService != nil {
return request.ReplayService, nil
}
target, err := context.ExpandResource(request.Target)
if err != nil {
return nil, err
}
authConfig, err := context.Secrets.GetOrCreate(target.Credentials)
if err != nil {
return nil, err
}
hostname, port := s.GetHostAndSSHPort(target)
return ssh.NewService(hostname, port, authConfig)
}
func (s *execService) isSupportedScheme(target *url.Resource) bool {
return target.ParsedURL.Scheme == "ssh" || target.ParsedURL.Scheme == "scp" || target.ParsedURL.Scheme == "file"
}
func (s *execService) initSession(context *endly.Context, target *url.Resource, session *model.Session, env map[string]string) error {
_, _ = s.changeDirectory(context, session, nil, target.ParsedURL.Path)
for k, v := range env {
if err := s.setEnvVariable(context, session, k, v); err != nil {
return err
}
}
return nil
}
func (s *execService) openSession(context *endly.Context, request *OpenSessionRequest) (*model.Session, error) {
target, err := context.ExpandResource(request.Target)
if err != nil {
return nil, err
}
if !s.isSupportedScheme(target) {
return nil, fmt.Errorf("failed to open sessionID: invalid schema: %v in url: %v", target.ParsedURL.Scheme, target.URL)
}
s.Lock()
sessions := TerminalSessions(context)
s.Unlock()
var replayCommands *ssh.ReplayCommands
if request.Basedir != "" {
replayCommands, err = ssh.NewReplayCommands(request.Basedir)
if err != nil {
return nil, err
}
}
var sessionID = target.Host()
if sessions.Has(sessionID) {
s.Lock()
sshSession := sessions[sessionID]
s.Unlock()
err = s.initSession(context, target, sshSession, request.Env)
if err != nil {
return nil, err
}
return sshSession, err
}
sshService, err := s.openSSHService(context, request)
if err == nil {
err = s.captureCommandIfNeeded(context, replayCommands, sshService)
}
if err != nil {
return nil, err
}
SSHSession, err := model.NewSession(sessionID, sshService)
if err != nil {
return nil, err
}
if !request.Transient {
context.Deffer(func() {
_ = sshService.Close()
})
}
SSHSession.MultiCommandSession, err = SSHSession.Service.OpenMultiCommandSession(request.Config)
if err != nil {
return nil, err
}
if !request.Transient {
context.Deffer(func() {
_, _ = s.closeSession(context, &CloseSessionRequest{
SessionID: sessionID,
})
})
}
err = s.initSession(context, target, SSHSession, request.Env)
if err != nil {
return nil, err
}
s.Lock()
sessions[sessionID] = SSHSession
s.Unlock()
SSHSession.Os, err = s.detectOperatingSystem(SSHSession)
if err != nil {
return nil, err
}
return SSHSession, nil
}
func (s *execService) setEnvVariables(context *endly.Context, session *model.Session, env map[string]string) error {
for k, v := range env {
err := s.setEnvVariable(context, session, k, v)
if err != nil {
return err
}
}
return nil
}
func (s *execService) setEnvVariable(context *endly.Context, session *model.Session, name, newValue string) error {
newValue = context.Expand(newValue)
if actual, has := session.EnvVariables[name]; has {
if newValue == actual {
return nil
}
}
session.EnvVariables[name] = newValue
var err error
newValue = strings.TrimSpace(newValue)
if strings.Contains(newValue, " ") {
_, err = s.rumCommandTemplate(context, session, "export %v='%v'", name, newValue)
} else {
_, err = s.rumCommandTemplate(context, session, "export %v=%v", name, newValue)
}
return err
}
func (s *execService) changeDirectory(context *endly.Context, session *model.Session, commandInfo *RunResponse, directory string) (string, error) {
if directory == "" {
return "", nil
}
parent, name := path.Split(directory)
if path.Ext(name) != "" {
directory = parent
}
if len(directory) > 1 && strings.HasSuffix(directory, "/") {
directory = string(directory[:len(directory)-1])
}
if session.CurrentDirectory == directory {
return "", nil
}
result, err := s.runCommandTemplate(context, session, "cd %v", directory)
if err != nil {
return "", err
}
if !util.CheckNoSuchFileOrDirectory(result) {
session.CurrentDirectory = directory
}
return result, err
}
func (s *execService) run(context *endly.Context, session *model.Session, command string, listener ssh.Listener, timeoutMs int, terminators ...string) (stdout string, err error) {
if stdout, err = session.Run(command, listener, timeoutMs, terminators...); err == nil {
return stdout, err
}
if err == ssh.ErrTerminated {
err := session.Reconnect()
if err != nil {
return "", err
}
currentDirectory := session.CurrentDirectory
env := session.EnvVariables
session.EnvVariables = make(map[string]string)
session.CurrentDirectory = ""
for k, v := range env {
s.setEnvVariable(context, session, k, v)
}
runResponse := &RunResponse{}
s.changeDirectory(context, session, runResponse, currentDirectory)
return session.Run(command, listener, timeoutMs, terminators...)
}
return stdout, err
}
func (s *execService) runCommandTemplate(context *endly.Context, session *model.Session, commandTemplate string, arguments ...interface{}) (string, error) {
command := fmt.Sprintf(commandTemplate, arguments...)
startEvent := s.Begin(context, NewSdtinEvent(session.ID, command))
stdout, err := session.Run(command, nil, 1000)
s.End(context)(startEvent, NewStdoutEvent(session.ID, stdout, err))
return stdout, err
}
func (s *execService) applyCommandOptions(context *endly.Context, options *Options, session *model.Session, info *RunResponse) error {
if len(options.SystemPaths) > 0 {
session.Path.Unshift(options.SystemPaths...)
if err := s.setEnvVariable(context, session, "PATH", session.Path.EnvValue()); err != nil {
return err
}
}
err := s.setEnvVariables(context, session, options.Env)
if err != nil {
return err
}
if options.Directory != "" {
directory := context.Expand(options.Directory)
_, err := s.changeDirectory(context, session, info, directory)
if err != nil {
return err
}
}
return nil
}
func match(stdout string, candidates ...string) string {
if len(candidates) == 0 {
return ""
}
for _, candidate := range candidates {
if util.EscapedContains(stdout, candidate) {
return candidate
}
}
return ""
}
func (s *execService) commandAsSuperUser(session *model.Session, command string) string {
if session.Username == "root" {
return command
}
if len(command) > 1 && !strings.Contains(command, "sudo") {
return "sudo " + command
}
return command
}
func (s *execService) validateStdout(stdout string, command string, execution *ExtractCommand) error {
errorMatch := match(stdout, execution.Errors...)
if errorMatch != "" {
return fmt.Errorf("encounter error fragment: (%v), command:%v, stdout: %v", errorMatch, command, stdout)
}
if len(execution.Success) > 0 {
successMatch := match(stdout, execution.Success...)
if successMatch == "" {
return fmt.Errorf("failed to match any fragment: '%v', command: %v; stdout: %v", strings.Join(execution.Success, ","), command, stdout)
}
}
return nil
}
func (s *execService) authSuperUserIfNeeded(stdout string, context *endly.Context, session *model.Session, extractCommand *ExtractCommand, response *RunResponse, request *ExtractRequest) (err error) {
if session.SuperUSerAuth && !(util.EscapedContains(stdout, "Sorry, try again.") && util.EscapedContains(stdout, "Password")) {
return nil
}
if util.EscapedContains(stdout, "Password") {
session.SuperUSerAuth = true
if len(request.Secrets) == 0 {
request.Secrets = secret.NewSecrets(nil)
request.Secrets[SudoCredentialKey] = secret.Secret(request.Target.Credentials)
}
extractCommand := NewExtractCommand(SudoCredentialKey, "", nil, []string{"Password", util.CommandNotFound})
err = s.executeCommand(context, session, extractCommand, response, request)
}
return err
}
func (s *execService) buildExecutionState(response *RunResponse, context *endly.Context) data.Map {
var state = context.State()
var result = state.Clone()
var commands = data.NewCollection()
for _, log := range response.Cmd {
var cmd = data.NewMap()
cmd.Put("stdin", log.Stdin)
cmd.Put("stdout", log.Stdout)
commands.Push(cmd)
}
result.Put("cmd", commands)
result.Put("output", response.Output)
var stdout = ""
if len(response.Cmd) > 0 {
stdout = response.Cmd[len(response.Cmd)-1].Stdout
}
result.Put("stdout", stdout)
return result
}
func (s *execService) executeCommand(context *endly.Context, session *model.Session, extractCommand *ExtractCommand, response *RunResponse, request *ExtractRequest) (err error) {
var state = context.State()
state.SetValue("os.user", session.Username)
command := context.Expand(extractCommand.Command)
options := request.Options
terminators := getTerminators(options, session, extractCommand)
isSuperUserCmd := strings.Contains(command, "sudo ") || request.SuperUser
if extractCommand.When != "" {
var state = s.buildExecutionState(response, context)
if ok, err := criteria.Evaluate(context, state, extractCommand.When, "Cmd.When", true); !ok {
return err
}
} else if strings.Contains(command, "$") {
var state = s.buildExecutionState(response, context)
command = state.ExpandAsText(command)
}
if isSuperUserCmd {
if !session.SuperUSerAuth {
terminators = append(terminators, "Password")
}
command = s.commandAsSuperUser(session, command)
}
var cmd = command
if cmd, err = context.Secrets.Expand(cmd, request.Secrets); err != nil {
return err
}
var listener ssh.Listener
//troubleshooting secrets - DO NOT USE unless really needed
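//illustrative note (assumed shell usage, not from this file): exporting ENDLY_SECRET_REVEAL=true
//in the environment that runs endly makes the published stdin events carry the expanded command,
//including resolved secrets, instead of the redacted form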
if os.Getenv("ENDLY_SECRET_REVEAL") == "true" {
command = cmd
}
s.Begin(context, NewSdtinEvent(session.ID, command))
listener = func(stdout string, hasMore bool) {
if stdout != "" {
context.Publish(NewStdoutEvent(session.ID, stdout, err))
}
}
stdout, err := s.run(context, session, cmd, listener, options.TimeoutMs, terminators...)
if len(response.Output) > 0 {
if !strings.HasSuffix(response.Output, "\n") {
response.Output += "\n"
}
}
response.Output += stdout
response.Add(NewCommandLog(command, stdout, err))
if err != nil {
return err
}
if err = s.validateStdout(stdout, command, extractCommand); err != nil {
return err
}
if isSuperUserCmd {
err = s.authSuperUserIfNeeded(stdout, context, session, extractCommand, response, request)
if err != nil {
return err
}
}
stdout = response.Cmd[len(response.Cmd)-1].Stdout
return extractCommand.Extract.Extract(context, response.Data, strings.Split(stdout, "\n")...)
}
func getTerminators(options *Options, session *model.Session, execution *ExtractCommand) []string {
var terminators = append([]string{}, options.Terminators...)
terminators = append(terminators, "$ ")
superUserPrompt := string(strings.Replace(session.ShellPrompt(), "$", "#", 1))
if strings.Contains(superUserPrompt, "bash") {
superUserPrompt = string(superUserPrompt[2:])
}
terminators = append(terminators, superUserPrompt)
terminators = append(terminators, execution.Errors...)
return terminators
}
func (s *execService) runCommands(context *endly.Context, request *RunRequest) (*RunResponse, error) {
response, err := s.runExtractCommands(context, request.AsExtractRequest())
if err != nil {
return nil, err
}
if len(request.Extract) > 0 {
if len(response.Data) == 0 {
response.Data = data.NewMap()
}
err = request.Extract.Extract(context, response.Data, strings.Split(response.Output, "\n")...)
}
return response, err
}
func (s *execService) runExtractCommands(context *endly.Context, request *ExtractRequest) (*RunResponse, error) {
target, err := context.ExpandResource(request.Target)
if err != nil {
return nil, err
}
session, err := s.openSession(context, &OpenSessionRequest{Target: target})
if err != nil {
return nil, err
}
response := NewRunResponse(session.ID)
if err = s.applyCommandOptions(context, request.Options, session, response); err != nil {
return nil, err
}
response = NewRunResponse(session.ID)
for _, extractCommand := range request.Commands {
var command = context.Expand(extractCommand.Command)
if strings.Contains(command, "rm ") && strings.Contains(command, session.CurrentDirectory) {
session.CurrentDirectory = "" //reset path
}
if strings.HasPrefix(command, "cd ") {
if !strings.Contains(command, "&&") {
var directory = strings.TrimSpace(string(command[3:]))
stdout, err := s.changeDirectory(context, session, response, directory)
response.Add(NewCommandLog(command, stdout, err))
if err == nil {
err = s.validateStdout(stdout, command, extractCommand)
}
if err != nil {
return nil, err
}
continue
}
session.CurrentDirectory = "" //reset path
}
if strings.HasPrefix(command, "export ") {
if !strings.Contains(command, "&&") {
envVariable := string(command[7:])
keyValuePair := strings.Split(envVariable, "=")
if len(keyValuePair) == 2 {
key := strings.TrimSpace(keyValuePair[0])
value := strings.TrimSpace(keyValuePair[1])
value = strings.Trim(value, "'\"")
err = s.setEnvVariable(context, session, key, value)
response.Add(NewCommandLog(command, "", err))
continue
}
}
session.EnvVariables = make(map[string]string) //reset env variables
}
err = s.executeCommand(context, session, extractCommand, response, request)
if err != nil {
return nil, err
}
}
return response, nil
}
func (s *execService) closeSession(context *endly.Context, request *CloseSessionRequest) (*CloseSessionResponse, error) {
clientSessions := TerminalSessions(context)
if session, has := clientSessions[request.SessionID]; has {
session.MultiCommandSession.Close()
session.Close()
delete(clientSessions, request.SessionID)
}
return &CloseSessionResponse{
SessionID: request.SessionID,
}, nil
}
func isAmd64Architecture(candidate string) bool {
return strings.Contains(candidate, "amd64") || strings.Contains(candidate, "x86_64")
}
func (s *execService) extractOsPath(session *model.Session, os *model.OperatingSystem) error {
output, err := session.Run("echo $PATH", nil, 0)
if err != nil {
return err
}
lines := strings.Split(output, "\r\n")
for i := 0; i < len(lines); i++ {
var line = lines[i]
if !strings.Contains(line, ":") || !strings.Contains(line, "/") {
continue
}
session.Path = model.NewPath(strings.Split(line, ":")...)
break
}
return nil
}
func (s *execService) extractOsUser(session *model.Session, os *model.OperatingSystem) error {
output, err := session.Run("echo $USER", nil, 0)
if err != nil {
return err
}
output = util.EscapeStdout(output)
output = strings.Replace(output, "\n", "", -1)
session.Username = output
return nil
}
func (s *execService) detectOperatingSystem(session *model.Session) (*model.OperatingSystem, error) {
operatingSystem := &model.OperatingSystem{}
session.Path = model.NewPath()
versionCheckCommand := "lsb_release -a"
if session.MultiCommandSession.System() == "darwin" {
versionCheckCommand = "sw_vers"
}
output, err := session.Run(versionCheckCommand, nil, 0)
if err != nil {
return nil, err
}
lines := strings.Split(output, "\r\n")
for i := 0; i < len(lines); i++ {
line := lines[i]
if isAmd64Architecture(line) {
operatingSystem.Architecture = "amd64"
}
pair := strings.Split(line, ":")
if len(pair) != 2 {
continue
}
var key = strings.Replace(strings.ToLower(pair[0]), " ", "", len(pair[0]))
var val = strings.Replace(strings.Trim(pair[1], " \t\r"), " ", "", len(line))
switch key {
case "productname", "distributorid":
operatingSystem.Name = strings.ToLower(val)
case "productversion", "release":
operatingSystem.Version = strings.ToLower(val)
}
}
operatingSystem.Hardware, err = session.Run("uname -m", nil, 0)
if err != nil {
return nil, err
}
operatingSystem.Arch = operatingSystem.Architecture
if isAmd64Architecture(operatingSystem.Hardware) {
operatingSystem.Architecture = "amd64"
operatingSystem.Arch = "x64"
}
operatingSystem.System = session.System()
if err = s.extractOsPath(session, operatingSystem); err == nil {
err = s.extractOsUser(session, operatingSystem)
}
return operatingSystem, err
}
func (s *execService) captureCommandIfNeeded(context *endly.Context, replayCommands *ssh.ReplayCommands, sshService ssh.Service) (err error) {
if replayCommands != nil {
err = replayCommands.Enable(sshService)
if err != nil {
return err
}
context.Deffer(func() {
_ = replayCommands.Store()
})
}
return err
}
const (
execServiceOpenExample = `{
"Target": {
"URL": "scp://127.0.0.1/",
"Credentials": "${env.HOME}/.secret/localhost.json"
},
"SystemPaths": ["/usr/local/bin"],
"Env": {
"GOPATH":"${env.HOME}/go"
}
}`
execServiceRunExample = `{
"Target": {
"URL": "scp://127.0.0.1/",
"Credentials": "${env.HOME}/.secret/localhost.json"
},
"Commands":["mkdir /tmp/app1"]
}`
execServiceRunAndExtractExample = `{
"Target": {
"URL": "scp://127.0.0.1/",
"Credentials": "${env.HOME}/.secret/localhost.json"
},
"SystemPaths": [
"/opt/sdk/go/bin"
],
"Commands": [
{
"Command": "go version",
"Extract": [
{
"RegExpr": "go(\\d\\.\\d)",
"Key": "Version"
}
]
}
]
}`
execServiceManagedCloseExample = `{
"Target": {
"URL": "scp://127.0.0.1/",
"Credentials": "${env.HOME}/.secret/localhost.json"
}
}`
)
func (s *execService) registerRoutes() {
s.Register(&endly.Route{
Action: "open",
RequestInfo: &endly.ActionInfo{
Description: "open SSH session, usually no need for using this action directly since run,extract actions open session if needed",
Examples: []*endly.UseCase{
{
Description: "open session",
Data: execServiceOpenExample,
},
},
},
RequestProvider: func() interface{} {
return &OpenSessionRequest{}
},
ResponseProvider: func() interface{} {
return &OpenSessionResponse{}
},
Handler: func(context *endly.Context, request interface{}) (interface{}, error) {
if req, ok := request.(*OpenSessionRequest); ok {
return s.open(context, req)
}
return nil, fmt.Errorf("unsupported request type: %T", request)
},
})
s.Register(&endly.Route{
Action: "run",
RequestInfo: &endly.ActionInfo{
Description: "run terminal command",
Examples: []*endly.UseCase{
{
Description: "run command",
Data: execServiceRunExample,
},
},
},
RequestProvider: func() interface{} {
return &RunRequest{}
},
ResponseProvider: func() interface{} {
return &RunResponse{}
},
Handler: func(context *endly.Context, request interface{}) (interface{}, error) {
if req, ok := request.(*RunRequest); ok {
return s.runCommands(context, req)
}
return nil, fmt.Errorf("unsupported request type: %T", request)
},
})
s.Register(&endly.Route{
Action: "extract",
RequestInfo: &endly.ActionInfo{
Description: "run terminal command and extract data from the stdout",
Examples: []*endly.UseCase{
{
Description: "run and extract command",
Data: execServiceRunAndExtractExample,
},
},
},
RequestProvider: func() interface{} {
return &ExtractRequest{}
},
ResponseProvider: func() interface{} {
return &RunResponse{}
},
Handler: func(context *endly.Context, request interface{}) (interface{}, error) {
if req, ok := request.(*ExtractRequest); ok {
return s.runExtractCommands(context, req)
}
return nil, fmt.Errorf("unsupported request type: %T", request)
},
})
s.Register(&endly.Route{
Action: "close",
RequestInfo: &endly.ActionInfo{
Description: "close SSH terminal session, if created by run or extract it is scheduled to be closed at the end of endly run context.Close()",
Examples: []*endly.UseCase{
{
Description: "close ",
Data: execServiceManagedCloseExample,
},
},
},
RequestProvider: func() interface{} {
return &CloseSessionRequest{}
},
ResponseProvider: func() interface{} {
return &CloseSessionResponse{}
},
Handler: func(context *endly.Context, request interface{}) (interface{}, error) {
if req, ok := request.(*CloseSessionRequest); ok {
return s.closeSession(context, req)
}
return nil, fmt.Errorf("unsupported request type: %T", request)
},
})
}
//New creates a new execution service
func New() endly.Service {
var result = &execService{
credentials: make(map[string]*cred.Config),
AbstractService: endly.NewAbstractService(ServiceID),
}
result.AbstractService.Service = result
result.registerRoutes()
return result
}
|
[
"\"ENDLY_SECRET_REVEAL\""
] |
[] |
[
"ENDLY_SECRET_REVEAL"
] |
[]
|
["ENDLY_SECRET_REVEAL"]
|
go
| 1 | 0 | |
gradle-plugin/src/main/java/org/springframework/boot/experimental/gradle/ThinLauncherPlugin.java
|
/*
* Copyright 2012-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.boot.experimental.gradle;
import java.io.File;
import java.nio.file.Files;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import org.gradle.api.Action;
import org.gradle.api.InvalidUserDataException;
import org.gradle.api.Plugin;
import org.gradle.api.Project;
import org.gradle.api.Task;
import org.gradle.api.artifacts.Configuration;
import org.gradle.api.plugins.BasePlugin;
import org.gradle.api.plugins.JavaPlugin;
import org.gradle.api.plugins.JavaPluginConvention;
import org.gradle.api.specs.Spec;
import org.gradle.api.tasks.Copy;
import org.gradle.api.tasks.Exec;
import org.gradle.api.tasks.SourceSetContainer;
import org.gradle.api.tasks.TaskContainer;
import org.gradle.internal.jvm.Jvm;
import org.gradle.jvm.tasks.Jar;
import org.springframework.util.StringUtils;
/**
* Gradle {@link Plugin} for Spring Boot's thin launcher.
*
* If the Java plugin is applied to the project, some tasks are added to the project.
* <ul>
* <li>"thinResolve": runs the project jar and download its dependencies. If you have more
* than one jar task then an additional task is created for each one named
* "thinResolve[JarTaskName]" (where "JarTaskName" is the capitalized name of the jar
* task).</li>
* <li>"thinResolvePrepare": copies the project jar to the "root" directory preparing for
* the resolution. The same naming convention applies to multiple jar tasks.</li>
* <li>"thinProperties": calculates thin.properties and puts them in the main build
* output.</li>
* <li>"thinPom": runs automatically if you apply the Maven plugin. Generates a pom.xml
* and puts it in the main build output.</li>
* </ul>
*
* @author Andy Wilkinson
*/
public class ThinLauncherPlugin implements Plugin<Project> {
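// Illustrative usage sketch, not part of the original source (task and property names taken
// from the code below; repository URL hypothetical): with this plugin and the 'java' plugin
// applied, the generated tasks are typically invoked from the command line, e.g.
//
//   ./gradlew thinJar        -> assembles the thin jar with ThinJarWrapper as its Main-Class
//   ./gradlew thinResolve    -> copies the jar to build/thin/root and dry-runs it to cache dependencies
//   ./gradlew thinResolve -Dthin.repo=https://repo.example.com/maven   (hypothetical repository override)
//
// Secondary jar tasks get the capitalized task name appended, e.g. "thinResolveMyJar".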
@Override
public void apply(final Project project) {
project.getTasks().withType(Jar.class, new Action<Jar>() {
@Override
public void execute(Jar jar) {
createCopyTask(project, jar);
createResolveTask(project, jar);
createPropertiesTask(project);
createPomTask(project);
}
});
}
private void createPomTask(final Project project) {
TaskContainer taskContainer = project.getTasks();
create(taskContainer, "thinPom", PomTask.class, new Action<PomTask>() {
@Override
public void execute(final PomTask thin) {
thin.doFirst(new Action<Task>() {
@Override
public void execute(Task task) {
SourceSetContainer sourceSets = project.getConvention()
.getPlugin(JavaPluginConvention.class).getSourceSets();
File resourcesDir = sourceSets.getByName("main").getOutput()
.getResourcesDir();
thin.setOutput(new File(resourcesDir, "META-INF/maven/"
+ project.getGroup() + "/" + project.getName()));
}
});
project.getTasks().withType(Jar.class, new Action<Jar>() {
@Override
public void execute(Jar jar) {
jar.dependsOn(thin);
}
});
}
});
}
private void createPropertiesTask(final Project project) {
TaskContainer taskContainer = project.getTasks();
create(taskContainer, "thinProperties", PropertiesTask.class,
new Action<PropertiesTask>() {
@Override
public void execute(PropertiesTask libPropertiesTask) {
configureLibPropertiesTask(libPropertiesTask, project);
}
});
}
private void configureLibPropertiesTask(PropertiesTask thin, Project project) {
thin.setConfiguration(findRuntimeClasspath(project));
SourceSetContainer sourceSets = project.getConvention()
.getPlugin(JavaPluginConvention.class).getSourceSets();
File resourcesDir = sourceSets.getByName("main").getOutput().getResourcesDir();
thin.setOutput(new File(resourcesDir, "META-INF"));
}
private Configuration findRuntimeClasspath(Project project) {
Configuration configuration = project.getConfigurations()
.getByName("runtimeClasspath");
if (configuration == null) {
configuration = project.getConfigurations()
.getByName(JavaPlugin.RUNTIME_CONFIGURATION_NAME);
}
return configuration;
}
private void createCopyTask(final Project project, final Jar jar) {
String task = "bootRepackage";
Jar thinJar = jar;
if (project.getTasksByName(task, true).isEmpty()) {
task = "thinJar" + suffix(jar);
if (suffix(jar).startsWith("ThinJar")) {
return;
}
if (!project.getTasksByName(task, true).isEmpty()) {
return;
}
if (suffix(jar).isEmpty()) {
thinJar = create(project.getTasks(), task, Jar.class, new Action<Jar>() {
@Override
public void execute(final Jar thin) {
final Jar bootJar;
if (project.getTasks().findByName("bootJar") != null) {
bootJar = (Jar) project.getTasks().getByName("bootJar");
}
else {
bootJar = (Jar) project.getTasks().getByName("jar");
}
thin.dependsOn(bootJar);
project.getTasks().getByName(BasePlugin.ASSEMBLE_TASK_NAME)
.dependsOn(thin);
thin.doFirst(new Action<Task>() {
@Override
public void execute(Task t) {
Map<String, Object> attrs = new HashMap<>();
attrs.put("Main-Class",
"org.springframework.boot.loader.wrapper.ThinJarWrapper");
attrs.put("Start-Class", getMainClass(bootJar));
thin.setManifest(bootJar.getManifest());
thin.getManifest().attributes(attrs);
SourceSetContainer sources = (SourceSetContainer) project
.getProperties().get("sourceSets");
thin.from(project.zipTree(new Callable<File>() {
@Override
public File call() throws Exception {
File file = File.createTempFile("tmp", ".jar",
project.getBuildDir());
file.delete();
Files.copy(getClass().getClassLoader()
.getResourceAsStream(
"META-INF/loader/spring-boot-thin-wrapper.jar"),
file.toPath());
return file;
}
}));
thin.from((Object) sources.findByName("main")
.getRuntimeClasspath().filter(new Spec<File>() {
@Override
public boolean isSatisfiedBy(File element) {
return element.isDirectory();
}
}).getFiles().toArray(new File[0]));
}
private Object getMainClass(Jar bootJar) {
Object result = bootJar.getManifest().getAttributes()
.get("Start-Class");
if (result != null) {
return result;
}
return bootJar.getManifest().getAttributes()
.get("Main-Class");
}
});
thin.setDescription(
"Assembles a thin executable jar archive containing the main"
+ " classes and the thin wrapper.");
thin.setGroup(BasePlugin.BUILD_GROUP);
}
});
}
}
final String bootJarTask = task;
final Jar targetJar = thinJar;
create(project.getTasks(), "thinResolvePrepare" + suffix(jar), Copy.class,
new Action<Copy>() {
@Override
public void execute(Copy copy) {
copy.dependsOn(bootJarTask);
copy.from(targetJar.getOutputs().getFiles());
copy.into(new File(project.getBuildDir(), "thin/root"));
}
});
}
private void createResolveTask(final Project project, final Jar jar) {
create(project.getTasks(), "thinResolve" + suffix(jar), Exec.class,
new Action<Exec>() {
@Override
public void execute(final Exec exec) {
final Jar thinJar;
if (project.getTasks()
.findByName("thinJar" + suffix(jar)) != null) {
thinJar = (Jar) project.getTasks()
.getByName("thinJar" + suffix(jar));
}
else {
thinJar = (Jar) project.getTasks().getByName("jar");
}
final String prepareTask = "thinResolvePrepare" + suffix(jar);
exec.dependsOn(prepareTask);
exec.doFirst(new Action<Task>() {
@SuppressWarnings("unchecked")
@Override
public void execute(Task task) {
Copy copy = (Copy) project.getTasks()
.getByName(prepareTask);
exec.setWorkingDir(
copy.getOutputs().getFiles().getSingleFile());
exec.setCommandLine(Jvm.current().getJavaExecutable());
List<String> args = Arrays.asList("-Dthin.root=.",
"-Dthin.dryrun", "-jar",
thinJar.getArchiveName());
String thinRepo = getThinRepo(project);
if (thinRepo != null) {
args.add(1, "-Dthin.repo=" + thinRepo);
}
exec.args(args);
}
});
}
});
}
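// Descriptive note (not in the original source): the repository override below is resolved
// in order: the 'thin.repo' system property, then the THIN_REPO environment variable,
// then a 'thin.repo' Gradle project property; null leaves the launcher default in place.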
private String getThinRepo(Project project) {
if (System.getProperty("thin.repo") != null) {
return System.getProperty("thin.repo");
}
if (System.getenv("THIN_REPO") != null) {
return System.getProperty("THIN_REPO");
}
Map<String, ?> properties = project.getProperties();
if (properties != null && properties.get("thin.repo") != null) {
return (String) properties.get("thin.repo");
}
return null;
}
private String suffix(Jar jar) {
String name = jar.getName();
return "jar".equals(name) || "bootJar".equals(name) ? ""
: StringUtils.capitalize(name);
}
@SuppressWarnings("unchecked")
private <T extends Task> T create(TaskContainer taskContainer, String name,
Class<T> type, Action<? super T> configuration)
throws InvalidUserDataException {
Task existing = taskContainer.findByName(name);
if (existing != null) {
return (T) existing;
}
return taskContainer.create(name, type, configuration);
}
}
| ["\"THIN_REPO\""] | [] | ["THIN_REPO"] | [] | ["THIN_REPO"] | java | 1 | 0 | |
backend/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'plecothoughts_33879.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
xu_website/wsgi.py
|
"""
WSGI config for xu_website project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'xu_website.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
moto/ec2/models.py
|
from __future__ import unicode_literals
import copy
import itertools
import ipaddress
import json
import os
import re
import six
import warnings
from pkg_resources import resource_filename
import boto.ec2
from collections import defaultdict
import weakref
from datetime import datetime
from boto.ec2.instance import Instance as BotoInstance, Reservation
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.spotinstancerequest import SpotInstanceRequest as BotoSpotRequest
from boto.ec2.launchspecification import LaunchSpecification
from moto.compat import OrderedDict
from moto.core import BaseBackend
from moto.core.models import Model, BaseModel
from moto.core.utils import iso_8601_datetime_with_milliseconds, camelcase_to_underscores
from .exceptions import (
CidrLimitExceeded,
DependencyViolationError,
EC2ClientError,
FilterNotImplementedError,
GatewayNotAttachedError,
InvalidAddressError,
InvalidAllocationIdError,
InvalidAMIIdError,
InvalidAMIAttributeItemValueError,
InvalidAssociationIdError,
InvalidCIDRSubnetError,
InvalidCustomerGatewayIdError,
InvalidDHCPOptionsIdError,
InvalidDomainError,
InvalidID,
InvalidInstanceIdError,
InvalidInternetGatewayIdError,
InvalidKeyPairDuplicateError,
InvalidKeyPairNameError,
InvalidNetworkAclIdError,
InvalidNetworkAttachmentIdError,
InvalidNetworkInterfaceIdError,
InvalidParameterValueError,
InvalidParameterValueErrorTagNull,
InvalidPermissionNotFoundError,
InvalidPermissionDuplicateError,
InvalidRouteTableIdError,
InvalidRouteError,
InvalidSecurityGroupDuplicateError,
InvalidSecurityGroupNotFoundError,
InvalidSnapshotIdError,
InvalidSubnetIdError,
InvalidVolumeIdError,
InvalidVolumeAttachmentError,
InvalidVpcCidrBlockAssociationIdError,
InvalidVPCPeeringConnectionIdError,
InvalidVPCPeeringConnectionStateTransitionError,
InvalidVPCIdError,
InvalidVpnGatewayIdError,
InvalidVpnConnectionIdError,
MalformedAMIIdError,
MalformedDHCPOptionsIdError,
MissingParameterError,
MotoNotImplementedError,
OperationNotPermitted,
ResourceAlreadyAssociatedError,
RulesPerSecurityGroupLimitExceededError,
TagLimitExceeded)
from .utils import (
EC2_RESOURCE_TO_PREFIX,
EC2_PREFIX_TO_RESOURCE,
random_ami_id,
random_dhcp_option_id,
random_eip_allocation_id,
random_eip_association_id,
random_eni_attach_id,
random_eni_id,
random_instance_id,
random_internet_gateway_id,
random_ip,
random_ipv6_cidr,
random_nat_gateway_id,
random_key_pair,
random_private_ip,
random_public_ip,
random_reservation_id,
random_route_table_id,
generate_route_id,
split_route_id,
random_security_group_id,
random_snapshot_id,
random_spot_fleet_request_id,
random_spot_request_id,
random_subnet_id,
random_subnet_association_id,
random_volume_id,
random_vpc_id,
random_vpc_cidr_association_id,
random_vpc_peering_connection_id,
generic_filter,
is_valid_resource_id,
get_prefix,
simple_aws_filter_to_re,
is_valid_cidr,
filter_internet_gateways,
filter_reservations,
random_network_acl_id,
random_network_acl_subnet_association_id,
random_vpn_gateway_id,
random_vpn_connection_id,
random_customer_gateway_id,
is_tag_filter,
tag_filter_matches,
)
INSTANCE_TYPES = json.load(
open(resource_filename(__name__, 'resources/instance_types.json'), 'r')
)
AMIS = json.load(
open(os.environ.get('MOTO_AMIS_PATH') or resource_filename(
__name__, 'resources/amis.json'), 'r')
)
def utc_date_and_time():
return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
def validate_resource_ids(resource_ids):
for resource_id in resource_ids:
if not is_valid_resource_id(resource_id):
raise InvalidID(resource_id=resource_id)
return True
class InstanceState(object):
def __init__(self, name='pending', code=0):
self.name = name
self.code = code
class StateReason(object):
def __init__(self, message="", code=""):
self.message = message
self.code = code
class TaggedEC2Resource(BaseModel):
def get_tags(self, *args, **kwargs):
tags = self.ec2_backend.describe_tags(
filters={'resource-id': [self.id]})
return tags
def add_tag(self, key, value):
self.ec2_backend.create_tags([self.id], {key: value})
def add_tags(self, tag_map):
for key, value in tag_map.items():
self.ec2_backend.create_tags([self.id], {key: value})
def get_filter_value(self, filter_name, method_name=None):
tags = self.get_tags()
if filter_name.startswith('tag:'):
tagname = filter_name.replace('tag:', '', 1)
for tag in tags:
if tag['key'] == tagname:
return tag['value']
return ''
elif filter_name == 'tag-key':
return [tag['key'] for tag in tags]
elif filter_name == 'tag-value':
return [tag['value'] for tag in tags]
else:
raise FilterNotImplementedError(filter_name, method_name)
class NetworkInterface(TaggedEC2Resource):
def __init__(self, ec2_backend, subnet, private_ip_address, device_index=0,
public_ip_auto_assign=True, group_ids=None):
self.ec2_backend = ec2_backend
self.id = random_eni_id()
self.device_index = device_index
self.private_ip_address = private_ip_address
self.subnet = subnet
self.instance = None
self.attachment_id = None
self.public_ip = None
self.public_ip_auto_assign = public_ip_auto_assign
self.start()
self.attachments = []
# Local set to the ENI. When attached to an instance, @property group_set
# returns groups for both self and the attached instance.
self._group_set = []
group = None
if group_ids:
for group_id in group_ids:
group = self.ec2_backend.get_security_group_from_id(group_id)
if not group:
# Create with specific group ID.
group = SecurityGroup(
self.ec2_backend, group_id, group_id, group_id, vpc_id=subnet.vpc_id)
self.ec2_backend.groups[subnet.vpc_id][group_id] = group
if group:
self._group_set.append(group)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
security_group_ids = properties.get('SecurityGroups', [])
ec2_backend = ec2_backends[region_name]
subnet_id = properties.get('SubnetId')
if subnet_id:
subnet = ec2_backend.get_subnet(subnet_id)
else:
subnet = None
private_ip_address = properties.get('PrivateIpAddress', None)
network_interface = ec2_backend.create_network_interface(
subnet,
private_ip_address,
group_ids=security_group_ids
)
return network_interface
def stop(self):
if self.public_ip_auto_assign:
self.public_ip = None
def start(self):
self.check_auto_public_ip()
def check_auto_public_ip(self):
if self.public_ip_auto_assign:
self.public_ip = random_public_ip()
@property
def group_set(self):
if self.instance and self.instance.security_groups:
return set(self._group_set) | set(self.instance.security_groups)
else:
return self._group_set
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'PrimaryPrivateIpAddress':
return self.private_ip_address
elif attribute_name == 'SecondaryPrivateIpAddresses':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "SecondaryPrivateIpAddresses" ]"')
raise UnformattedGetAttTemplateException()
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
if filter_name == 'network-interface-id':
return self.id
elif filter_name in ('addresses.private-ip-address', 'private-ip-address'):
return self.private_ip_address
elif filter_name == 'subnet-id':
return self.subnet.id
elif filter_name == 'vpc-id':
return self.subnet.vpc_id
elif filter_name == 'group-id':
return [group.id for group in self._group_set]
elif filter_name == 'availability-zone':
return self.subnet.availability_zone
else:
return super(NetworkInterface, self).get_filter_value(
filter_name, 'DescribeNetworkInterfaces')
class NetworkInterfaceBackend(object):
def __init__(self):
self.enis = {}
super(NetworkInterfaceBackend, self).__init__()
def create_network_interface(self, subnet, private_ip_address, group_ids=None, **kwargs):
eni = NetworkInterface(
self, subnet, private_ip_address, group_ids=group_ids, **kwargs)
self.enis[eni.id] = eni
return eni
def get_network_interface(self, eni_id):
for eni in self.enis.values():
if eni_id == eni.id:
return eni
raise InvalidNetworkInterfaceIdError(eni_id)
def delete_network_interface(self, eni_id):
deleted = self.enis.pop(eni_id, None)
if not deleted:
raise InvalidNetworkInterfaceIdError(eni_id)
return deleted
def describe_network_interfaces(self, filters=None):
enis = self.enis.values()
if filters:
for (_filter, _filter_value) in filters.items():
if _filter == 'network-interface-id':
_filter = 'id'
enis = [eni for eni in enis if getattr(
eni, _filter) in _filter_value]
elif _filter == 'group-id':
original_enis = enis
enis = []
for eni in original_enis:
for group in eni.group_set:
if group.id in _filter_value:
enis.append(eni)
break
else:
self.raise_not_implemented_error(
"The filter '{0}' for DescribeNetworkInterfaces".format(_filter))
return enis
def attach_network_interface(self, eni_id, instance_id, device_index):
eni = self.get_network_interface(eni_id)
instance = self.get_instance(instance_id)
return instance.attach_eni(eni, device_index)
def detach_network_interface(self, attachment_id):
found_eni = None
for eni in self.enis.values():
if eni.attachment_id == attachment_id:
found_eni = eni
break
else:
raise InvalidNetworkAttachmentIdError(attachment_id)
found_eni.instance.detach_eni(found_eni)
def modify_network_interface_attribute(self, eni_id, group_id):
eni = self.get_network_interface(eni_id)
group = self.get_security_group_from_id(group_id)
eni._group_set = [group]
def get_all_network_interfaces(self, eni_ids=None, filters=None):
enis = self.enis.values()
if eni_ids:
enis = [eni for eni in enis if eni.id in eni_ids]
if len(enis) != len(eni_ids):
invalid_id = list(set(eni_ids).difference(
set([eni.id for eni in enis])))[0]
raise InvalidNetworkInterfaceIdError(invalid_id)
return generic_filter(filters, enis)
class Instance(TaggedEC2Resource, BotoInstance):
def __init__(self, ec2_backend, image_id, user_data, security_groups, **kwargs):
super(Instance, self).__init__()
self.ec2_backend = ec2_backend
self.id = random_instance_id()
self.image_id = image_id
self._state = InstanceState("running", 16)
self._reason = ""
self._state_reason = StateReason()
self.user_data = user_data
self.security_groups = security_groups
self.instance_type = kwargs.get("instance_type", "m1.small")
self.region_name = kwargs.get("region_name", "us-east-1")
placement = kwargs.get("placement", None)
self.vpc_id = None
self.subnet_id = kwargs.get("subnet_id")
in_ec2_classic = not bool(self.subnet_id)
self.key_name = kwargs.get("key_name")
self.ebs_optimized = kwargs.get("ebs_optimized", False)
self.source_dest_check = "true"
self.launch_time = utc_date_and_time()
self.disable_api_termination = kwargs.get("disable_api_termination", False)
self._spot_fleet_id = kwargs.get("spot_fleet_id", None)
associate_public_ip = kwargs.get("associate_public_ip", False)
if in_ec2_classic:
# If we are in EC2-Classic, autoassign a public IP
associate_public_ip = True
amis = self.ec2_backend.describe_images(filters={'image-id': image_id})
ami = amis[0] if amis else None
if ami is None:
warnings.warn('Could not find AMI with image-id:{0}, '
'in the near future this will '
'cause an error.\n'
'Use ec2_backend.describe_images() to'
'find suitable image for your test'.format(image_id),
PendingDeprecationWarning)
self.platform = ami.platform if ami else None
self.virtualization_type = ami.virtualization_type if ami else 'paravirtual'
self.architecture = ami.architecture if ami else 'x86_64'
# handle weird bug around user_data -- something grabs the repr(), so
# it must be clean
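# e.g. a hypothetical user_data of [b'#!/bin/bash'] becomes ['#!/bin/bash'] on Python 3, so its repr() carries no "b" prefix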
if isinstance(self.user_data, list) and len(self.user_data) > 0:
if six.PY3 and isinstance(self.user_data[0], six.binary_type):
# string will have a "b" prefix -- need to get rid of it
self.user_data[0] = self.user_data[0].decode('utf-8')
elif six.PY2 and isinstance(self.user_data[0], six.text_type):
# string will have a "u" prefix -- need to get rid of it
self.user_data[0] = self.user_data[0].encode('utf-8')
if self.subnet_id:
subnet = ec2_backend.get_subnet(self.subnet_id)
self.vpc_id = subnet.vpc_id
self._placement.zone = subnet.availability_zone
if associate_public_ip is None:
# Mapping a public IP hasn't been explicitly enabled or disabled
associate_public_ip = subnet.map_public_ip_on_launch == 'true'
elif placement:
self._placement.zone = placement
else:
self._placement.zone = ec2_backend.region_name + 'a'
self.block_device_mapping = BlockDeviceMapping()
self._private_ips = set()
self.prep_nics(
kwargs.get("nics", {}),
private_ip=kwargs.get("private_ip"),
associate_public_ip=associate_public_ip
)
def __del__(self):
try:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
for ip in self._private_ips:
subnet.del_subnet_ip(ip)
except Exception:
# Its not "super" critical we clean this up, as reset will do this
# worst case we'll get IP address exaustion... rarely
pass
def setup_defaults(self):
# By default an instance has a root volume, should you not wish to
# override it with an attach-volume command.
volume = self.ec2_backend.create_volume(8, 'us-east-1a')
self.ec2_backend.attach_volume(volume.id, self.id, '/dev/sda1')
def teardown_defaults(self):
volume_id = self.block_device_mapping['/dev/sda1'].volume_id
self.ec2_backend.detach_volume(volume_id, self.id, '/dev/sda1')
self.ec2_backend.delete_volume(volume_id)
@property
def get_block_device_mapping(self):
return self.block_device_mapping.items()
@property
def private_ip(self):
return self.nics[0].private_ip_address
@property
def private_dns(self):
formatted_ip = self.private_ip.replace('.', '-')
if self.region_name == "us-east-1":
return "ip-{0}.ec2.internal".format(formatted_ip)
else:
return "ip-{0}.{1}.compute.internal".format(formatted_ip, self.region_name)
@property
def public_ip(self):
return self.nics[0].public_ip
@property
def public_dns(self):
if self.public_ip:
formatted_ip = self.public_ip.replace('.', '-')
if self.region_name == "us-east-1":
return "ec2-{0}.compute-1.amazonaws.com".format(formatted_ip)
else:
return "ec2-{0}.{1}.compute.amazonaws.com".format(formatted_ip, self.region_name)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
security_group_ids = properties.get('SecurityGroups', [])
group_names = [ec2_backend.get_security_group_from_id(
group_id).name for group_id in security_group_ids]
reservation = ec2_backend.add_instances(
image_id=properties['ImageId'],
user_data=properties.get('UserData'),
count=1,
security_group_names=group_names,
instance_type=properties.get("InstanceType", "m1.small"),
subnet_id=properties.get("SubnetId"),
key_name=properties.get("KeyName"),
private_ip=properties.get('PrivateIpAddress'),
)
instance = reservation.instances[0]
for tag in properties.get("Tags", []):
instance.add_tag(tag["Key"], tag["Value"])
return instance
@classmethod
def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
ec2_backend = ec2_backends[region_name]
all_instances = ec2_backend.all_instances()
# the resource_name for instances is the stack name, logical id, and random suffix separated
# by hyphens. So to look up the instances using the 'aws:cloudformation:logical-id' tag, we need to
# extract the logical-id from the resource_name
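# e.g. a hypothetical resource_name "mystack-myinstance-4FWXN2GQW1WZ" yields logical_id "myinstance"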
logical_id = resource_name.split('-')[1]
for instance in all_instances:
instance_tags = instance.get_tags()
for tag in instance_tags:
if tag['key'] == 'aws:cloudformation:logical-id' and tag['value'] == logical_id:
instance.delete(region_name)
@property
def physical_resource_id(self):
return self.id
def start(self, *args, **kwargs):
for nic in self.nics.values():
nic.start()
self._state.name = "running"
self._state.code = 16
self._reason = ""
self._state_reason = StateReason()
def stop(self, *args, **kwargs):
for nic in self.nics.values():
nic.stop()
self._state.name = "stopped"
self._state.code = 80
self._reason = "User initiated ({0})".format(
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
self._state_reason = StateReason("Client.UserInitiatedShutdown: User initiated shutdown",
"Client.UserInitiatedShutdown")
def delete(self, region):
self.terminate()
def terminate(self, *args, **kwargs):
for nic in self.nics.values():
nic.stop()
self.teardown_defaults()
if self._spot_fleet_id:
spot_fleet = self.ec2_backend.get_spot_fleet_request(self._spot_fleet_id)
for spec in spot_fleet.launch_specs:
if spec.instance_type == self.instance_type and spec.subnet_id == self.subnet_id:
break
spot_fleet.fulfilled_capacity -= spec.weighted_capacity
spot_fleet.spot_requests = [req for req in spot_fleet.spot_requests if req.instance != self]
self._state.name = "terminated"
self._state.code = 48
self._reason = "User initiated ({0})".format(
datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC'))
self._state_reason = StateReason("Client.UserInitiatedShutdown: User initiated shutdown",
"Client.UserInitiatedShutdown")
def reboot(self, *args, **kwargs):
self._state.name = "running"
self._state.code = 16
self._reason = ""
self._state_reason = StateReason()
@property
def dynamic_group_list(self):
if self.nics:
groups = []
for nic in self.nics.values():
for group in nic.group_set:
groups.append(group)
return groups
else:
return self.security_groups
def prep_nics(self, nic_spec, private_ip=None, associate_public_ip=None):
self.nics = {}
if self.subnet_id:
subnet = self.ec2_backend.get_subnet(self.subnet_id)
if not private_ip:
private_ip = subnet.get_available_subnet_ip(instance=self)
else:
subnet.request_ip(private_ip, instance=self)
self._private_ips.add(private_ip)
elif private_ip is None:
# Preserve old behaviour if in EC2-Classic mode
private_ip = random_private_ip()
# Primary NIC defaults
primary_nic = {'SubnetId': self.subnet_id,
'PrivateIpAddress': private_ip,
'AssociatePublicIpAddress': associate_public_ip}
primary_nic = dict((k, v) for k, v in primary_nic.items() if v)
# If empty NIC spec but primary NIC values provided, create NIC from
# them.
if primary_nic and not nic_spec:
nic_spec[0] = primary_nic
nic_spec[0]['DeviceIndex'] = 0
# Flesh out data structures and associations
for nic in nic_spec.values():
device_index = int(nic.get('DeviceIndex'))
nic_id = nic.get('NetworkInterfaceId')
if nic_id:
# If existing NIC found, use it.
use_nic = self.ec2_backend.get_network_interface(nic_id)
use_nic.device_index = device_index
use_nic.public_ip_auto_assign = False
else:
# If primary NIC values provided, use them for the primary NIC.
if device_index == 0 and primary_nic:
nic.update(primary_nic)
if 'SubnetId' in nic:
subnet = self.ec2_backend.get_subnet(nic['SubnetId'])
else:
subnet = None
group_id = nic.get('SecurityGroupId')
group_ids = [group_id] if group_id else []
use_nic = self.ec2_backend.create_network_interface(subnet,
nic.get(
'PrivateIpAddress'),
device_index=device_index,
public_ip_auto_assign=nic.get(
'AssociatePublicIpAddress', False),
group_ids=group_ids)
self.attach_eni(use_nic, device_index)
def attach_eni(self, eni, device_index):
device_index = int(device_index)
self.nics[device_index] = eni
# This is used upon associate/disassociate public IP.
eni.instance = self
eni.attachment_id = random_eni_attach_id()
eni.device_index = device_index
return eni.attachment_id
def detach_eni(self, eni):
self.nics.pop(eni.device_index, None)
eni.instance = None
eni.attachment_id = None
eni.device_index = None
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'AvailabilityZone':
return self.placement
elif attribute_name == 'PrivateDnsName':
return self.private_dns
elif attribute_name == 'PublicDnsName':
return self.public_dns
elif attribute_name == 'PrivateIp':
return self.private_ip
elif attribute_name == 'PublicIp':
return self.public_ip
raise UnformattedGetAttTemplateException()
class InstanceBackend(object):
def __init__(self):
self.reservations = OrderedDict()
super(InstanceBackend, self).__init__()
def get_instance(self, instance_id):
for instance in self.all_instances():
if instance.id == instance_id:
return instance
raise InvalidInstanceIdError(instance_id)
def add_instances(self, image_id, count, user_data, security_group_names,
**kwargs):
new_reservation = Reservation()
new_reservation.id = random_reservation_id()
security_groups = [self.get_security_group_from_name(name)
for name in security_group_names]
security_groups.extend(self.get_security_group_from_id(sg_id)
for sg_id in kwargs.pop("security_group_ids", []))
self.reservations[new_reservation.id] = new_reservation
tags = kwargs.pop("tags", {})
instance_tags = tags.get('instance', {})
for index in range(count):
new_instance = Instance(
self,
image_id,
user_data,
security_groups,
**kwargs
)
new_reservation.instances.append(new_instance)
new_instance.add_tags(instance_tags)
new_instance.setup_defaults()
return new_reservation
def start_instances(self, instance_ids):
started_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.start()
started_instances.append(instance)
return started_instances
def stop_instances(self, instance_ids):
stopped_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.stop()
stopped_instances.append(instance)
return stopped_instances
def terminate_instances(self, instance_ids):
terminated_instances = []
if not instance_ids:
raise EC2ClientError(
"InvalidParameterCombination", "No instances specified")
for instance in self.get_multi_instances_by_id(instance_ids):
instance.terminate()
terminated_instances.append(instance)
return terminated_instances
def reboot_instances(self, instance_ids):
rebooted_instances = []
for instance in self.get_multi_instances_by_id(instance_ids):
instance.reboot()
rebooted_instances.append(instance)
return rebooted_instances
def modify_instance_attribute(self, instance_id, key, value):
instance = self.get_instance(instance_id)
setattr(instance, key, value)
return instance
def modify_instance_security_groups(self, instance_id, new_group_list):
instance = self.get_instance(instance_id)
setattr(instance, 'security_groups', new_group_list)
return instance
def describe_instance_attribute(self, instance_id, key):
if key == 'group_set':
key = 'security_groups'
instance = self.get_instance(instance_id)
value = getattr(instance, key)
return instance, value
def all_instances(self):
instances = []
for reservation in self.all_reservations():
for instance in reservation.instances:
instances.append(instance)
return instances
def all_running_instances(self):
instances = []
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.state_code == 16:
instances.append(instance)
return instances
def get_multi_instances_by_id(self, instance_ids):
"""
:param instance_ids: A string list with instance ids
:return: A list with instance objects
"""
result = []
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.id in instance_ids:
result.append(instance)
# TODO: Trim error message down to specific invalid id.
if instance_ids and len(instance_ids) > len(result):
raise InvalidInstanceIdError(instance_ids)
return result
def get_instance_by_id(self, instance_id):
for reservation in self.all_reservations():
for instance in reservation.instances:
if instance.id == instance_id:
return instance
def get_reservations_by_instance_ids(self, instance_ids, filters=None):
""" Go through all of the reservations and filter to only return those
associated with the given instance_ids.
"""
reservations = []
for reservation in self.all_reservations():
reservation_instance_ids = [
instance.id for instance in reservation.instances]
matching_reservation = any(
instance_id in reservation_instance_ids for instance_id in instance_ids)
if matching_reservation:
reservation.instances = [
instance for instance in reservation.instances if instance.id in instance_ids]
reservations.append(reservation)
found_instance_ids = [
instance.id for reservation in reservations for instance in reservation.instances]
if len(found_instance_ids) != len(instance_ids):
invalid_id = list(set(instance_ids).difference(
set(found_instance_ids)))[0]
raise InvalidInstanceIdError(invalid_id)
if filters is not None:
reservations = filter_reservations(reservations, filters)
return reservations
def all_reservations(self, filters=None):
reservations = [copy.copy(reservation) for reservation in self.reservations.values()]
if filters is not None:
reservations = filter_reservations(reservations, filters)
return reservations
class KeyPair(object):
def __init__(self, name, fingerprint, material):
self.name = name
self.fingerprint = fingerprint
self.material = material
def get_filter_value(self, filter_name):
if filter_name == 'key-name':
return self.name
elif filter_name == 'fingerprint':
return self.fingerprint
else:
raise FilterNotImplementedError(filter_name, 'DescribeKeyPairs')
class KeyPairBackend(object):
def __init__(self):
self.keypairs = {}
super(KeyPairBackend, self).__init__()
def create_key_pair(self, name):
if name in self.keypairs:
raise InvalidKeyPairDuplicateError(name)
keypair = KeyPair(name, **random_key_pair())
self.keypairs[name] = keypair
return keypair
def delete_key_pair(self, name):
if name in self.keypairs:
self.keypairs.pop(name)
return True
def describe_key_pairs(self, key_names=None, filters=None):
results = []
if key_names:
results = [keypair for keypair in self.keypairs.values()
if keypair.name in key_names]
if len(key_names) > len(results):
unknown_keys = set(key_names) - set(results)
raise InvalidKeyPairNameError(unknown_keys)
else:
results = self.keypairs.values()
if filters:
return generic_filter(filters, results)
else:
return results
def import_key_pair(self, key_name, public_key_material):
if key_name in self.keypairs:
raise InvalidKeyPairDuplicateError(key_name)
keypair = KeyPair(key_name, **random_key_pair())
self.keypairs[key_name] = keypair
return keypair
class TagBackend(object):
VALID_TAG_FILTERS = ['key',
'resource-id',
'resource-type',
'value']
VALID_TAG_RESOURCE_FILTER_TYPES = ['customer-gateway',
'dhcp-options',
'image',
'instance',
'internet-gateway',
'network-acl',
'network-interface',
'reserved-instances',
'route-table',
'security-group',
'snapshot',
'spot-instances-request',
'subnet',
'volume',
'vpc',
'vpc-peering-connection',
'vpn-connection',
'vpn-gateway']
def __init__(self):
self.tags = defaultdict(dict)
super(TagBackend, self).__init__()
def create_tags(self, resource_ids, tags):
if None in set([tags[tag] for tag in tags]):
raise InvalidParameterValueErrorTagNull()
for resource_id in resource_ids:
if resource_id in self.tags:
if len(self.tags[resource_id]) + len([tag for tag in tags if not tag.startswith("aws:")]) > 50:
raise TagLimitExceeded()
elif len([tag for tag in tags if not tag.startswith("aws:")]) > 50:
raise TagLimitExceeded()
for resource_id in resource_ids:
for tag in tags:
self.tags[resource_id][tag] = tags[tag]
return True
def delete_tags(self, resource_ids, tags):
for resource_id in resource_ids:
for tag in tags:
if tag in self.tags[resource_id]:
if tags[tag] is None:
self.tags[resource_id].pop(tag)
elif tags[tag] == self.tags[resource_id][tag]:
self.tags[resource_id].pop(tag)
return True
def describe_tags(self, filters=None):
import re
results = []
key_filters = []
resource_id_filters = []
resource_type_filters = []
value_filters = []
if filters is not None:
for tag_filter in filters:
if tag_filter in self.VALID_TAG_FILTERS:
if tag_filter == 'key':
for value in filters[tag_filter]:
key_filters.append(re.compile(
simple_aws_filter_to_re(value)))
if tag_filter == 'resource-id':
for value in filters[tag_filter]:
resource_id_filters.append(
re.compile(simple_aws_filter_to_re(value)))
if tag_filter == 'resource-type':
for value in filters[tag_filter]:
resource_type_filters.append(value)
if tag_filter == 'value':
for value in filters[tag_filter]:
value_filters.append(re.compile(
simple_aws_filter_to_re(value)))
for resource_id, tags in self.tags.items():
for key, value in tags.items():
add_result = False
if filters is None:
add_result = True
else:
key_pass = False
id_pass = False
type_pass = False
value_pass = False
if key_filters:
for pattern in key_filters:
if pattern.match(key) is not None:
key_pass = True
else:
key_pass = True
if resource_id_filters:
for pattern in resource_id_filters:
if pattern.match(resource_id) is not None:
id_pass = True
else:
id_pass = True
if resource_type_filters:
for resource_type in resource_type_filters:
if EC2_PREFIX_TO_RESOURCE[get_prefix(resource_id)] == resource_type:
type_pass = True
else:
type_pass = True
if value_filters:
for pattern in value_filters:
if pattern.match(value) is not None:
value_pass = True
else:
value_pass = True
if key_pass and id_pass and type_pass and value_pass:
add_result = True
# If we're not filtering, or we are filtering and this tag matched all of the requested filters
if add_result:
result = {
'resource_id': resource_id,
'key': key,
'value': value,
'resource_type': EC2_PREFIX_TO_RESOURCE[get_prefix(resource_id)],
}
results.append(result)
return results
class Ami(TaggedEC2Resource):
def __init__(self, ec2_backend, ami_id, instance=None, source_ami=None,
name=None, description=None, owner_id=111122223333,
public=False, virtualization_type=None, architecture=None,
state='available', creation_date=None, platform=None,
image_type='machine', image_location=None, hypervisor=None,
root_device_type='standard', root_device_name='/dev/sda1', sriov='simple',
region_name='us-east-1a'
):
self.ec2_backend = ec2_backend
self.id = ami_id
self.state = state
self.name = name
self.image_type = image_type
self.image_location = image_location
self.owner_id = owner_id
self.description = description
self.virtualization_type = virtualization_type
self.architecture = architecture
self.kernel_id = None
self.platform = platform
self.hypervisor = hypervisor
self.root_device_name = root_device_name
self.root_device_type = root_device_type
self.sriov = sriov
self.creation_date = utc_date_and_time() if creation_date is None else creation_date
if instance:
self.instance = instance
self.instance_id = instance.id
self.virtualization_type = instance.virtualization_type
self.architecture = instance.architecture
self.kernel_id = instance.kernel
self.platform = instance.platform
elif source_ami:
"""
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html
"We don't copy launch permissions, user-defined tags, or Amazon S3 bucket permissions from the source AMI to the new AMI."
~ 2014.09.29
"""
self.virtualization_type = source_ami.virtualization_type
self.architecture = source_ami.architecture
self.kernel_id = source_ami.kernel_id
self.platform = source_ami.platform
if not name:
self.name = source_ami.name
if not description:
self.description = source_ami.description
self.launch_permission_groups = set()
self.launch_permission_users = set()
if public:
self.launch_permission_groups.add('all')
# AWS auto-creates these, we should reflect the same.
volume = self.ec2_backend.create_volume(15, region_name)
self.ebs_snapshot = self.ec2_backend.create_snapshot(
volume.id, "Auto-created snapshot for AMI %s" % self.id, owner_id)
self.ec2_backend.delete_volume(volume.id)
@property
def is_public(self):
return 'all' in self.launch_permission_groups
@property
def is_public_string(self):
return str(self.is_public).lower()
def get_filter_value(self, filter_name):
if filter_name == 'virtualization-type':
return self.virtualization_type
elif filter_name == 'kernel-id':
return self.kernel_id
elif filter_name in ['architecture', 'platform']:
return getattr(self, filter_name)
elif filter_name == 'image-id':
return self.id
elif filter_name == 'is-public':
return str(self.is_public)
elif filter_name == 'state':
return self.state
elif filter_name == 'name':
return self.name
elif filter_name == 'owner-id':
return self.owner_id
else:
return super(Ami, self).get_filter_value(
filter_name, 'DescribeImages')
class AmiBackend(object):
AMI_REGEX = re.compile("ami-[a-z0-9]+")
def __init__(self):
self.amis = {}
self._load_amis()
super(AmiBackend, self).__init__()
def _load_amis(self):
for ami in AMIS:
ami_id = ami['ami_id']
self.amis[ami_id] = Ami(self, **ami)
def create_image(self, instance_id, name=None, description=None, context=None):
# TODO: check that instance exists and pull info from it.
ami_id = random_ami_id()
instance = self.get_instance(instance_id)
ami = Ami(self, ami_id, instance=instance, source_ami=None,
name=name, description=description,
owner_id=context.get_current_user() if context else '111122223333')
self.amis[ami_id] = ami
return ami
def copy_image(self, source_image_id, source_region, name=None, description=None):
source_ami = ec2_backends[source_region].describe_images(
ami_ids=[source_image_id])[0]
ami_id = random_ami_id()
ami = Ami(self, ami_id, instance=None, source_ami=source_ami,
name=name, description=description)
self.amis[ami_id] = ami
return ami
def describe_images(self, ami_ids=(), filters=None, exec_users=None, owners=None,
context=None):
images = self.amis.values()
if len(ami_ids):
# boto3 seems to default to just searching based on ami ids if that parameter is passed
# and if no images are found, it raises an error
malformed_ami_ids = [ami_id for ami_id in ami_ids if not ami_id.startswith('ami-')]
if malformed_ami_ids:
raise MalformedAMIIdError(malformed_ami_ids)
images = [ami for ami in images if ami.id in ami_ids]
if len(images) == 0:
raise InvalidAMIIdError(ami_ids)
else:
# Limit images by launch permissions
if exec_users:
tmp_images = []
for ami in images:
for user_id in exec_users:
if user_id in ami.launch_permission_users:
tmp_images.append(ami)
images = tmp_images
# Limit by owner ids
if owners:
# support filtering by Owners=['self']
owners = list(map(
lambda o: context.get_current_user()
if context and o == 'self' else o,
owners))
images = [ami for ami in images if ami.owner_id in owners]
# Generic filters
if filters:
return generic_filter(filters, images)
return images
def deregister_image(self, ami_id):
if ami_id in self.amis:
self.amis.pop(ami_id)
return True
raise InvalidAMIIdError(ami_id)
def get_launch_permission_groups(self, ami_id):
ami = self.describe_images(ami_ids=[ami_id])[0]
return ami.launch_permission_groups
def get_launch_permission_users(self, ami_id):
ami = self.describe_images(ami_ids=[ami_id])[0]
return ami.launch_permission_users
def validate_permission_targets(self, user_ids=None, group=None):
# If anything is invalid, nothing is added. (No partial success.)
if user_ids:
"""
AWS docs:
"The AWS account ID is a 12-digit number, such as 123456789012, that you use to construct Amazon Resource Names (ARNs)."
http://docs.aws.amazon.com/general/latest/gr/acct-identifiers.html
"""
for user_id in user_ids:
if len(user_id) != 12 or not user_id.isdigit():
raise InvalidAMIAttributeItemValueError("userId", user_id)
if group and group != 'all':
raise InvalidAMIAttributeItemValueError("UserGroup", group)
def add_launch_permission(self, ami_id, user_ids=None, group=None):
ami = self.describe_images(ami_ids=[ami_id])[0]
self.validate_permission_targets(user_ids=user_ids, group=group)
if user_ids:
for user_id in user_ids:
ami.launch_permission_users.add(user_id)
if group:
ami.launch_permission_groups.add(group)
return True
def remove_launch_permission(self, ami_id, user_ids=None, group=None):
ami = self.describe_images(ami_ids=[ami_id])[0]
self.validate_permission_targets(user_ids=user_ids, group=group)
if user_ids:
for user_id in user_ids:
ami.launch_permission_users.discard(user_id)
if group:
ami.launch_permission_groups.discard(group)
return True
class Region(object):
def __init__(self, name, endpoint):
self.name = name
self.endpoint = endpoint
class Zone(object):
def __init__(self, name, region_name):
self.name = name
self.region_name = region_name
class RegionsAndZonesBackend(object):
regions = [Region(ri.name, ri.endpoint) for ri in boto.ec2.regions()]
zones = dict(
(region, [Zone(region + c, region) for c in 'abc'])
for region in [r.name for r in regions])
def describe_regions(self, region_names=[]):
if len(region_names) == 0:
return self.regions
ret = []
for name in region_names:
for region in self.regions:
if region.name == name:
ret.append(region)
return ret
def describe_availability_zones(self):
return self.zones[self.region_name]
def get_zone_by_name(self, name):
for zone in self.zones[self.region_name]:
if zone.name == name:
return zone
class SecurityRule(object):
def __init__(self, ip_protocol, from_port, to_port, ip_ranges, source_groups):
self.ip_protocol = ip_protocol
self.from_port = from_port
self.to_port = to_port
self.ip_ranges = ip_ranges or []
self.source_groups = source_groups
@property
def unique_representation(self):
return "{0}-{1}-{2}-{3}-{4}".format(
self.ip_protocol,
self.from_port,
self.to_port,
self.ip_ranges,
self.source_groups
)
def __eq__(self, other):
return self.unique_representation == other.unique_representation
class SecurityGroup(TaggedEC2Resource):
def __init__(self, ec2_backend, group_id, name, description, vpc_id=None):
self.ec2_backend = ec2_backend
self.id = group_id
self.name = name
self.description = description
self.ingress_rules = []
self.egress_rules = [SecurityRule(-1, None, None, ['0.0.0.0/0'], [])]
self.enis = {}
self.vpc_id = vpc_id
self.owner_id = "123456789012"
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
vpc_id = properties.get('VpcId')
security_group = ec2_backend.create_security_group(
name=resource_name,
description=properties.get('GroupDescription'),
vpc_id=vpc_id,
)
for tag in properties.get("Tags", []):
tag_key = tag["Key"]
tag_value = tag["Value"]
security_group.add_tag(tag_key, tag_value)
for ingress_rule in properties.get('SecurityGroupIngress', []):
source_group_id = ingress_rule.get('SourceSecurityGroupId')
ec2_backend.authorize_security_group_ingress(
group_name_or_id=security_group.id,
ip_protocol=ingress_rule['IpProtocol'],
from_port=ingress_rule['FromPort'],
to_port=ingress_rule['ToPort'],
ip_ranges=ingress_rule.get('CidrIp'),
source_group_ids=[source_group_id],
vpc_id=vpc_id,
)
return security_group
@classmethod
def update_from_cloudformation_json(cls, original_resource, new_resource_name, cloudformation_json, region_name):
cls._delete_security_group_given_vpc_id(
original_resource.name, original_resource.vpc_id, region_name)
return cls.create_from_cloudformation_json(new_resource_name, cloudformation_json, region_name)
@classmethod
def delete_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
vpc_id = properties.get('VpcId')
cls._delete_security_group_given_vpc_id(
resource_name, vpc_id, region_name)
@classmethod
def _delete_security_group_given_vpc_id(cls, resource_name, vpc_id, region_name):
ec2_backend = ec2_backends[region_name]
security_group = ec2_backend.get_security_group_from_name(
resource_name, vpc_id)
if security_group:
security_group.delete(region_name)
def delete(self, region_name):
''' Not exposed as part of the ELB API - used for CloudFormation. '''
self.ec2_backend.delete_security_group(group_id=self.id)
@property
def physical_resource_id(self):
return self.id
def matches_filter(self, key, filter_value):
def to_attr(filter_name):
attr = None
if filter_name == 'group-name':
attr = 'name'
elif filter_name == 'group-id':
attr = 'id'
elif filter_name == 'vpc-id':
attr = 'vpc_id'
else:
attr = filter_name.replace('-', '_')
return attr
if key.startswith('ip-permission'):
match = re.search(r"ip-permission.(*)", key)
ingress_attr = to_attr(match.groups()[0])
for ingress in self.ingress_rules:
if getattr(ingress, ingress_attr) in filter_value:
return True
elif is_tag_filter(key):
tag_value = self.get_filter_value(key)
if isinstance(filter_value, list):
return tag_filter_matches(self, key, filter_value)
return tag_value in filter_value
else:
attr_name = to_attr(key)
return getattr(self, attr_name) in filter_value
return False
def matches_filters(self, filters):
for key, value in filters.items():
if not self.matches_filter(key, value):
return False
return True
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'GroupId':
return self.id
raise UnformattedGetAttTemplateException()
def add_ingress_rule(self, rule):
if rule in self.ingress_rules:
raise InvalidPermissionDuplicateError()
else:
self.ingress_rules.append(rule)
def add_egress_rule(self, rule):
self.egress_rules.append(rule)
def get_number_of_ingress_rules(self):
return sum(
len(rule.ip_ranges) + len(rule.source_groups)
for rule in self.ingress_rules)
def get_number_of_egress_rules(self):
return sum(
len(rule.ip_ranges) + len(rule.source_groups)
for rule in self.egress_rules)
class SecurityGroupBackend(object):
def __init__(self):
# the key in the dict group is the vpc_id or None (non-vpc)
self.groups = defaultdict(dict)
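# Illustrative shape with hypothetical ids: {None: {"sg-0123ab": <EC2-Classic group>}, "vpc-12345678": {"sg-4567cd": <VPC group>}}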
# Create the default security group
self.create_security_group("default", "default group")
super(SecurityGroupBackend, self).__init__()
def create_security_group(self, name, description, vpc_id=None, force=False):
if not description:
raise MissingParameterError('GroupDescription')
group_id = random_security_group_id()
if not force:
existing_group = self.get_security_group_from_name(name, vpc_id)
if existing_group:
raise InvalidSecurityGroupDuplicateError(name)
group = SecurityGroup(self, group_id, name, description, vpc_id=vpc_id)
self.groups[vpc_id][group_id] = group
return group
def describe_security_groups(self, group_ids=None, groupnames=None, filters=None):
matches = itertools.chain(*[x.values()
for x in self.groups.values()])
if group_ids:
matches = [grp for grp in matches
if grp.id in group_ids]
if len(group_ids) > len(matches):
unknown_ids = set(group_ids) - set(matches)
raise InvalidSecurityGroupNotFoundError(unknown_ids)
if groupnames:
matches = [grp for grp in matches
if grp.name in groupnames]
if len(groupnames) > len(matches):
unknown_names = set(groupnames) - set(matches)
raise InvalidSecurityGroupNotFoundError(unknown_names)
if filters:
matches = [grp for grp in matches
if grp.matches_filters(filters)]
return matches
def _delete_security_group(self, vpc_id, group_id):
if self.groups[vpc_id][group_id].enis:
raise DependencyViolationError(
"{0} is being utilized by {1}".format(group_id, 'ENIs'))
return self.groups[vpc_id].pop(group_id)
def delete_security_group(self, name=None, group_id=None):
if group_id:
# loop over all the SGs, find the right one
for vpc_id, groups in self.groups.items():
if group_id in groups:
return self._delete_security_group(vpc_id, group_id)
raise InvalidSecurityGroupNotFoundError(group_id)
elif name:
# Group name: only valid in EC2-Classic; VPC groups need to be
# identified by group_id
group = self.get_security_group_from_name(name)
if group:
return self._delete_security_group(None, group.id)
raise InvalidSecurityGroupNotFoundError(name)
def get_security_group_from_id(self, group_id):
# 2 levels of chaining necessary since it's a complex structure
all_groups = itertools.chain.from_iterable(
[x.values() for x in self.groups.values()])
for group in all_groups:
if group.id == group_id:
return group
def get_security_group_from_name(self, name, vpc_id=None):
for group_id, group in self.groups[vpc_id].items():
if group.name == name:
return group
def get_security_group_by_name_or_id(self, group_name_or_id, vpc_id):
# try searching by id, fallbacks to name search
group = self.get_security_group_from_id(group_name_or_id)
if group is None:
group = self.get_security_group_from_name(group_name_or_id, vpc_id)
return group
def authorize_security_group_ingress(self,
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_group_names=None,
source_group_ids=None,
vpc_id=None):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
if ip_ranges and not isinstance(ip_ranges, list):
ip_ranges = [ip_ranges]
if ip_ranges:
for cidr in ip_ranges:
if not is_valid_cidr(cidr):
raise InvalidCIDRSubnetError(cidr=cidr)
self._verify_group_will_respect_rule_count_limit(
group, group.get_number_of_ingress_rules(),
ip_ranges, source_group_names, source_group_ids)
source_group_names = source_group_names if source_group_names else []
source_group_ids = source_group_ids if source_group_ids else []
source_groups = []
for source_group_name in source_group_names:
source_group = self.get_security_group_from_name(
source_group_name, vpc_id)
if source_group:
source_groups.append(source_group)
# for VPCs
for source_group_id in source_group_ids:
source_group = self.get_security_group_from_id(source_group_id)
if source_group:
source_groups.append(source_group)
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups)
group.add_ingress_rule(security_rule)
def revoke_security_group_ingress(self,
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_group_names=None,
source_group_ids=None,
vpc_id=None):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
source_groups = []
for source_group_name in source_group_names:
source_group = self.get_security_group_from_name(
source_group_name, vpc_id)
if source_group:
source_groups.append(source_group)
for source_group_id in source_group_ids:
source_group = self.get_security_group_from_id(source_group_id)
if source_group:
source_groups.append(source_group)
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups)
if security_rule in group.ingress_rules:
group.ingress_rules.remove(security_rule)
return security_rule
raise InvalidPermissionNotFoundError()
def authorize_security_group_egress(self,
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_group_names=None,
source_group_ids=None,
vpc_id=None):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
if ip_ranges and not isinstance(ip_ranges, list):
ip_ranges = [ip_ranges]
if ip_ranges:
for cidr in ip_ranges:
if not is_valid_cidr(cidr):
raise InvalidCIDRSubnetError(cidr=cidr)
self._verify_group_will_respect_rule_count_limit(
group, group.get_number_of_egress_rules(),
ip_ranges, source_group_names, source_group_ids)
source_group_names = source_group_names if source_group_names else []
source_group_ids = source_group_ids if source_group_ids else []
source_groups = []
for source_group_name in source_group_names:
source_group = self.get_security_group_from_name(
source_group_name, vpc_id)
if source_group:
source_groups.append(source_group)
# for VPCs
for source_group_id in source_group_ids:
source_group = self.get_security_group_from_id(source_group_id)
if source_group:
source_groups.append(source_group)
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups)
group.add_egress_rule(security_rule)
def revoke_security_group_egress(self,
group_name_or_id,
ip_protocol,
from_port,
to_port,
ip_ranges,
source_group_names=None,
source_group_ids=None,
vpc_id=None):
group = self.get_security_group_by_name_or_id(group_name_or_id, vpc_id)
source_groups = []
for source_group_name in source_group_names:
source_group = self.get_security_group_from_name(
source_group_name, vpc_id)
if source_group:
source_groups.append(source_group)
for source_group_id in source_group_ids:
source_group = self.get_security_group_from_id(source_group_id)
if source_group:
source_groups.append(source_group)
security_rule = SecurityRule(
ip_protocol, from_port, to_port, ip_ranges, source_groups)
if security_rule in group.egress_rules:
group.egress_rules.remove(security_rule)
return security_rule
raise InvalidPermissionNotFoundError()
def _verify_group_will_respect_rule_count_limit(
self, group, current_rule_nb,
ip_ranges, source_group_names=None, source_group_ids=None):
max_nb_rules = 50 if group.vpc_id else 100
future_group_nb_rules = current_rule_nb
if ip_ranges:
future_group_nb_rules += len(ip_ranges)
if source_group_ids:
future_group_nb_rules += len(source_group_ids)
if source_group_names:
future_group_nb_rules += len(source_group_names)
if future_group_nb_rules > max_nb_rules:
raise RulesPerSecurityGroupLimitExceededError
class SecurityGroupIngress(object):
def __init__(self, security_group, properties):
self.security_group = security_group
self.properties = properties
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
group_name = properties.get('GroupName')
group_id = properties.get('GroupId')
ip_protocol = properties.get("IpProtocol")
cidr_ip = properties.get("CidrIp")
cidr_ipv6 = properties.get("CidrIpv6")
from_port = properties.get("FromPort")
source_security_group_id = properties.get("SourceSecurityGroupId")
source_security_group_name = properties.get("SourceSecurityGroupName")
# source_security_owner_id =
# properties.get("SourceSecurityGroupOwnerId") # IGNORED AT THE MOMENT
to_port = properties.get("ToPort")
assert group_id or group_name
assert source_security_group_name or cidr_ip or cidr_ipv6 or source_security_group_id
assert ip_protocol
if source_security_group_id:
source_security_group_ids = [source_security_group_id]
else:
source_security_group_ids = None
if source_security_group_name:
source_security_group_names = [source_security_group_name]
else:
source_security_group_names = None
if cidr_ip:
ip_ranges = [cidr_ip]
else:
ip_ranges = []
if group_id:
security_group = ec2_backend.describe_security_groups(group_ids=[group_id])[
0]
else:
security_group = ec2_backend.describe_security_groups(
groupnames=[group_name])[0]
ec2_backend.authorize_security_group_ingress(
group_name_or_id=security_group.id,
ip_protocol=ip_protocol,
from_port=from_port,
to_port=to_port,
ip_ranges=ip_ranges,
source_group_ids=source_security_group_ids,
source_group_names=source_security_group_names,
)
return cls(security_group, properties)
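# VolumeAttachment records the link between an EBS Volume and an Instance,
# mirroring the AWS::EC2::VolumeAttachment CloudFormation resource.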
class VolumeAttachment(object):
def __init__(self, volume, instance, device, status):
self.volume = volume
self.attach_time = utc_date_and_time()
self.instance = instance
self.device = device
self.status = status
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
instance_id = properties['InstanceId']
volume_id = properties['VolumeId']
ec2_backend = ec2_backends[region_name]
attachment = ec2_backend.attach_volume(
volume_id=volume_id,
instance_id=instance_id,
device_path=properties['Device'],
)
return attachment
class Volume(TaggedEC2Resource):
def __init__(self, ec2_backend, volume_id, size, zone, snapshot_id=None, encrypted=False):
self.id = volume_id
self.size = size
self.zone = zone
self.create_time = utc_date_and_time()
self.attachment = None
self.snapshot_id = snapshot_id
self.ec2_backend = ec2_backend
self.encrypted = encrypted
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
volume = ec2_backend.create_volume(
size=properties.get('Size'),
zone_name=properties.get('AvailabilityZone'),
)
return volume
@property
def physical_resource_id(self):
return self.id
@property
def status(self):
if self.attachment:
return 'in-use'
else:
return 'available'
def get_filter_value(self, filter_name):
if filter_name.startswith('attachment') and not self.attachment:
return None
elif filter_name == 'attachment.attach-time':
return self.attachment.attach_time
elif filter_name == 'attachment.device':
return self.attachment.device
elif filter_name == 'attachment.instance-id':
return self.attachment.instance.id
elif filter_name == 'attachment.status':
return self.attachment.status
elif filter_name == 'create-time':
return self.create_time
elif filter_name == 'size':
return self.size
elif filter_name == 'snapshot-id':
return self.snapshot_id
elif filter_name == 'status':
return self.status
elif filter_name == 'volume-id':
return self.id
elif filter_name == 'encrypted':
return str(self.encrypted).lower()
elif filter_name == 'availability-zone':
return self.zone.name
else:
return super(Volume, self).get_filter_value(
filter_name, 'DescribeVolumes')
class Snapshot(TaggedEC2Resource):
def __init__(self, ec2_backend, snapshot_id, volume, description, encrypted=False, owner_id='123456789012'):
self.id = snapshot_id
self.volume = volume
self.description = description
self.start_time = utc_date_and_time()
self.create_volume_permission_groups = set()
self.ec2_backend = ec2_backend
self.status = 'completed'
self.encrypted = encrypted
self.owner_id = owner_id
def get_filter_value(self, filter_name):
if filter_name == 'description':
return self.description
elif filter_name == 'snapshot-id':
return self.id
elif filter_name == 'start-time':
return self.start_time
elif filter_name == 'volume-id':
return self.volume.id
elif filter_name == 'volume-size':
return self.volume.size
elif filter_name == 'encrypted':
return str(self.encrypted).lower()
elif filter_name == 'status':
return self.status
else:
return super(Snapshot, self).get_filter_value(
filter_name, 'DescribeSnapshots')
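# A minimal usage sketch for the EBS backend below; the zone name, size and
# description are illustrative only:
#   backend = ec2_backends['us-east-1']
#   volume = backend.create_volume(size=8, zone_name='us-east-1a')
#   snapshot = backend.create_snapshot(volume.id, description='nightly backup')
#   backend.delete_snapshot(snapshot.id)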
class EBSBackend(object):
def __init__(self):
self.volumes = {}
self.attachments = {}
self.snapshots = {}
super(EBSBackend, self).__init__()
def create_volume(self, size, zone_name, snapshot_id=None, encrypted=False):
volume_id = random_volume_id()
zone = self.get_zone_by_name(zone_name)
if snapshot_id:
snapshot = self.get_snapshot(snapshot_id)
if size is None:
size = snapshot.volume.size
if snapshot.encrypted:
encrypted = snapshot.encrypted
volume = Volume(self, volume_id, size, zone, snapshot_id, encrypted)
self.volumes[volume_id] = volume
return volume
def describe_volumes(self, volume_ids=None, filters=None):
matches = self.volumes.values()
if volume_ids:
matches = [vol for vol in matches
if vol.id in volume_ids]
if len(volume_ids) > len(matches):
                unknown_ids = set(volume_ids) - set(vol.id for vol in matches)
raise InvalidVolumeIdError(unknown_ids)
if filters:
matches = generic_filter(filters, matches)
return matches
def get_volume(self, volume_id):
volume = self.volumes.get(volume_id, None)
if not volume:
raise InvalidVolumeIdError(volume_id)
return volume
def delete_volume(self, volume_id):
if volume_id in self.volumes:
return self.volumes.pop(volume_id)
raise InvalidVolumeIdError(volume_id)
def attach_volume(self, volume_id, instance_id, device_path):
volume = self.get_volume(volume_id)
instance = self.get_instance(instance_id)
if not volume or not instance:
return False
volume.attachment = VolumeAttachment(
volume, instance, device_path, 'attached')
# Modify instance to capture mount of block device.
bdt = BlockDeviceType(volume_id=volume_id, status=volume.status, size=volume.size,
attach_time=utc_date_and_time())
instance.block_device_mapping[device_path] = bdt
return volume.attachment
def detach_volume(self, volume_id, instance_id, device_path):
volume = self.get_volume(volume_id)
self.get_instance(instance_id)
old_attachment = volume.attachment
if not old_attachment:
raise InvalidVolumeAttachmentError(volume_id, instance_id)
old_attachment.status = 'detached'
volume.attachment = None
return old_attachment
def create_snapshot(self, volume_id, description, owner_id=None):
snapshot_id = random_snapshot_id()
volume = self.get_volume(volume_id)
params = [self, snapshot_id, volume, description, volume.encrypted]
if owner_id:
params.append(owner_id)
snapshot = Snapshot(*params)
self.snapshots[snapshot_id] = snapshot
return snapshot
def describe_snapshots(self, snapshot_ids=None, filters=None):
matches = self.snapshots.values()
if snapshot_ids:
matches = [snap for snap in matches
if snap.id in snapshot_ids]
if len(snapshot_ids) > len(matches):
                unknown_ids = set(snapshot_ids) - set(snap.id for snap in matches)
raise InvalidSnapshotIdError(unknown_ids)
if filters:
matches = generic_filter(filters, matches)
return matches
def copy_snapshot(self, source_snapshot_id, source_region, description=None):
source_snapshot = ec2_backends[source_region].describe_snapshots(
snapshot_ids=[source_snapshot_id])[0]
snapshot_id = random_snapshot_id()
snapshot = Snapshot(self, snapshot_id, volume=source_snapshot.volume,
description=description, encrypted=source_snapshot.encrypted)
self.snapshots[snapshot_id] = snapshot
return snapshot
def get_snapshot(self, snapshot_id):
snapshot = self.snapshots.get(snapshot_id, None)
if not snapshot:
raise InvalidSnapshotIdError(snapshot_id)
return snapshot
def delete_snapshot(self, snapshot_id):
if snapshot_id in self.snapshots:
return self.snapshots.pop(snapshot_id)
raise InvalidSnapshotIdError(snapshot_id)
def get_create_volume_permission_groups(self, snapshot_id):
snapshot = self.get_snapshot(snapshot_id)
return snapshot.create_volume_permission_groups
def add_create_volume_permission(self, snapshot_id, user_id=None, group=None):
if user_id:
self.raise_not_implemented_error(
"The UserId parameter for ModifySnapshotAttribute")
if group != 'all':
raise InvalidAMIAttributeItemValueError("UserGroup", group)
snapshot = self.get_snapshot(snapshot_id)
snapshot.create_volume_permission_groups.add(group)
return True
def remove_create_volume_permission(self, snapshot_id, user_id=None, group=None):
if user_id:
self.raise_not_implemented_error(
"The UserId parameter for ModifySnapshotAttribute")
if group != 'all':
raise InvalidAMIAttributeItemValueError("UserGroup", group)
snapshot = self.get_snapshot(snapshot_id)
snapshot.create_volume_permission_groups.discard(group)
return True
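# VPC models a Virtual Private Cloud, including its CIDR block associations,
# DNS attributes and attached DHCP options set.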
class VPC(TaggedEC2Resource):
def __init__(self, ec2_backend, vpc_id, cidr_block, is_default, instance_tenancy='default',
amazon_provided_ipv6_cidr_block=False):
self.ec2_backend = ec2_backend
self.id = vpc_id
self.cidr_block = cidr_block
self.cidr_block_association_set = {}
self.dhcp_options = None
self.state = 'available'
self.instance_tenancy = instance_tenancy
self.is_default = 'true' if is_default else 'false'
self.enable_dns_support = 'true'
# This attribute is set to 'true' only for default VPCs
# or VPCs created using the wizard of the VPC console
self.enable_dns_hostnames = 'true' if is_default else 'false'
self.associate_vpc_cidr_block(cidr_block)
if amazon_provided_ipv6_cidr_block:
self.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block=amazon_provided_ipv6_cidr_block)
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
vpc = ec2_backend.create_vpc(
cidr_block=properties['CidrBlock'],
instance_tenancy=properties.get('InstanceTenancy', 'default')
)
for tag in properties.get("Tags", []):
tag_key = tag["Key"]
tag_value = tag["Value"]
vpc.add_tag(tag_key, tag_value)
return vpc
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
if filter_name in ('vpc-id', 'vpcId'):
return self.id
elif filter_name in ('cidr', 'cidr-block', 'cidrBlock'):
return self.cidr_block
elif filter_name in ('cidr-block-association.cidr-block', 'ipv6-cidr-block-association.ipv6-cidr-block'):
return [c['cidr_block'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)]
elif filter_name in ('cidr-block-association.association-id', 'ipv6-cidr-block-association.association-id'):
return self.cidr_block_association_set.keys()
elif filter_name in ('cidr-block-association.state', 'ipv6-cidr-block-association.state'):
return [c['cidr_block_state']['state'] for c in self.get_cidr_block_association_set(ipv6='ipv6' in filter_name)]
elif filter_name in ('instance_tenancy', 'InstanceTenancy'):
return self.instance_tenancy
elif filter_name in ('is-default', 'isDefault'):
return self.is_default
elif filter_name == 'state':
return self.state
elif filter_name in ('dhcp-options-id', 'dhcpOptionsId'):
if not self.dhcp_options:
return None
return self.dhcp_options.id
else:
return super(VPC, self).get_filter_value(filter_name, 'DescribeVpcs')
def associate_vpc_cidr_block(self, cidr_block, amazon_provided_ipv6_cidr_block=False):
max_associations = 5 if not amazon_provided_ipv6_cidr_block else 1
if len(self.get_cidr_block_association_set(amazon_provided_ipv6_cidr_block)) >= max_associations:
raise CidrLimitExceeded(self.id, max_associations)
association_id = random_vpc_cidr_association_id()
association_set = {
'association_id': association_id,
'cidr_block_state': {'state': 'associated', 'StatusMessage': ''}
}
association_set['cidr_block'] = random_ipv6_cidr() if amazon_provided_ipv6_cidr_block else cidr_block
self.cidr_block_association_set[association_id] = association_set
return association_set
def disassociate_vpc_cidr_block(self, association_id):
if self.cidr_block == self.cidr_block_association_set.get(association_id, {}).get('cidr_block'):
raise OperationNotPermitted(association_id)
response = self.cidr_block_association_set.pop(association_id, {})
if response:
response['vpc_id'] = self.id
response['cidr_block_state']['state'] = 'disassociating'
return response
def get_cidr_block_association_set(self, ipv6=False):
return [c for c in self.cidr_block_association_set.values() if ('::/' if ipv6 else '.') in c.get('cidr_block')]
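# A minimal usage sketch for the VPC backend below; the CIDR blocks are
# illustrative only:
#   backend = ec2_backends['us-east-1']
#   vpc = backend.create_vpc('10.0.0.0/16')
#   backend.associate_vpc_cidr_block(vpc.id, '10.1.0.0/16', False)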
class VPCBackend(object):
__refs__ = defaultdict(list)
def __init__(self):
self.vpcs = {}
self.__refs__[self.__class__].append(weakref.ref(self))
super(VPCBackend, self).__init__()
@classmethod
def get_instances(cls):
for inst_ref in cls.__refs__[cls]:
inst = inst_ref()
if inst is not None:
yield inst
def create_vpc(self, cidr_block, instance_tenancy='default', amazon_provided_ipv6_cidr_block=False):
vpc_id = random_vpc_id()
vpc = VPC(self, vpc_id, cidr_block, len(self.vpcs) == 0, instance_tenancy, amazon_provided_ipv6_cidr_block)
self.vpcs[vpc_id] = vpc
# AWS creates a default main route table and security group.
self.create_route_table(vpc_id, main=True)
# AWS creates a default Network ACL
self.create_network_acl(vpc_id, default=True)
default = self.get_security_group_from_name('default', vpc_id=vpc_id)
if not default:
self.create_security_group(
'default', 'default VPC security group', vpc_id=vpc_id)
return vpc
def get_vpc(self, vpc_id):
if vpc_id not in self.vpcs:
raise InvalidVPCIdError(vpc_id)
return self.vpcs.get(vpc_id)
    # Get a VPC by VPC id and AWS region
def get_cross_vpc(self, vpc_id, peer_region):
for vpcs in self.get_instances():
if vpcs.region_name == peer_region:
match_vpc = vpcs.get_vpc(vpc_id)
return match_vpc
def get_all_vpcs(self, vpc_ids=None, filters=None):
matches = self.vpcs.values()
if vpc_ids:
matches = [vpc for vpc in matches
if vpc.id in vpc_ids]
if len(vpc_ids) > len(matches):
                unknown_ids = set(vpc_ids) - set(vpc.id for vpc in matches)
raise InvalidVPCIdError(unknown_ids)
if filters:
matches = generic_filter(filters, matches)
return matches
def delete_vpc(self, vpc_id):
# Delete route table if only main route table remains.
route_tables = self.get_all_route_tables(filters={'vpc-id': vpc_id})
if len(route_tables) > 1:
raise DependencyViolationError(
"The vpc {0} has dependencies and cannot be deleted.".format(vpc_id)
)
for route_table in route_tables:
self.delete_route_table(route_table.id)
# Delete default security group if exists.
default = self.get_security_group_from_name('default', vpc_id=vpc_id)
if default:
self.delete_security_group(group_id=default.id)
# Now delete VPC.
vpc = self.vpcs.pop(vpc_id, None)
if not vpc:
raise InvalidVPCIdError(vpc_id)
if vpc.dhcp_options:
vpc.dhcp_options.vpc = None
self.delete_dhcp_options_set(vpc.dhcp_options.id)
vpc.dhcp_options = None
return vpc
def describe_vpc_attribute(self, vpc_id, attr_name):
vpc = self.get_vpc(vpc_id)
if attr_name in ('enable_dns_support', 'enable_dns_hostnames'):
return getattr(vpc, attr_name)
else:
raise InvalidParameterValueError(attr_name)
def modify_vpc_attribute(self, vpc_id, attr_name, attr_value):
vpc = self.get_vpc(vpc_id)
if attr_name in ('enable_dns_support', 'enable_dns_hostnames'):
setattr(vpc, attr_name, attr_value)
else:
raise InvalidParameterValueError(attr_name)
def disassociate_vpc_cidr_block(self, association_id):
for vpc in self.vpcs.values():
response = vpc.disassociate_vpc_cidr_block(association_id)
if response:
return response
else:
raise InvalidVpcCidrBlockAssociationIdError(association_id)
def associate_vpc_cidr_block(self, vpc_id, cidr_block, amazon_provided_ipv6_cidr_block):
vpc = self.get_vpc(vpc_id)
return vpc.associate_vpc_cidr_block(cidr_block, amazon_provided_ipv6_cidr_block)
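# VPCPeeringConnectionStatus tracks the lifecycle of a peering connection
# (initiating-request -> pending-acceptance -> active / rejected / deleted).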
class VPCPeeringConnectionStatus(object):
def __init__(self, code='initiating-request', message=''):
self.code = code
self.message = message
def deleted(self):
self.code = 'deleted'
self.message = 'Deleted by {deleter ID}'
def initiating(self):
self.code = 'initiating-request'
self.message = 'Initiating Request to {accepter ID}'
def pending(self):
self.code = 'pending-acceptance'
self.message = 'Pending Acceptance by {accepter ID}'
def accept(self):
self.code = 'active'
self.message = 'Active'
def reject(self):
self.code = 'rejected'
self.message = 'Inactive'
class VPCPeeringConnection(TaggedEC2Resource):
def __init__(self, vpc_pcx_id, vpc, peer_vpc):
self.id = vpc_pcx_id
self.vpc = vpc
self.peer_vpc = peer_vpc
self._status = VPCPeeringConnectionStatus()
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
vpc = ec2_backend.get_vpc(properties['VpcId'])
peer_vpc = ec2_backend.get_vpc(properties['PeerVpcId'])
vpc_pcx = ec2_backend.create_vpc_peering_connection(vpc, peer_vpc)
return vpc_pcx
@property
def physical_resource_id(self):
return self.id
class VPCPeeringConnectionBackend(object):
def __init__(self):
self.vpc_pcxs = {}
super(VPCPeeringConnectionBackend, self).__init__()
def create_vpc_peering_connection(self, vpc, peer_vpc):
vpc_pcx_id = random_vpc_peering_connection_id()
vpc_pcx = VPCPeeringConnection(vpc_pcx_id, vpc, peer_vpc)
vpc_pcx._status.pending()
self.vpc_pcxs[vpc_pcx_id] = vpc_pcx
return vpc_pcx
def get_all_vpc_peering_connections(self):
return self.vpc_pcxs.values()
def get_vpc_peering_connection(self, vpc_pcx_id):
if vpc_pcx_id not in self.vpc_pcxs:
raise InvalidVPCPeeringConnectionIdError(vpc_pcx_id)
return self.vpc_pcxs.get(vpc_pcx_id)
def delete_vpc_peering_connection(self, vpc_pcx_id):
deleted = self.get_vpc_peering_connection(vpc_pcx_id)
deleted._status.deleted()
return deleted
def accept_vpc_peering_connection(self, vpc_pcx_id):
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
if vpc_pcx._status.code != 'pending-acceptance':
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.accept()
return vpc_pcx
def reject_vpc_peering_connection(self, vpc_pcx_id):
vpc_pcx = self.get_vpc_peering_connection(vpc_pcx_id)
if vpc_pcx._status.code != 'pending-acceptance':
raise InvalidVPCPeeringConnectionStateTransitionError(vpc_pcx.id)
vpc_pcx._status.reject()
return vpc_pcx
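# Subnet models a VPC subnet and hands out private IPs from its CIDR block;
# the first three host addresses are reserved (see reserved_ips below).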
class Subnet(TaggedEC2Resource):
def __init__(self, ec2_backend, subnet_id, vpc_id, cidr_block, availability_zone, default_for_az,
map_public_ip_on_launch):
self.ec2_backend = ec2_backend
self.id = subnet_id
self.vpc_id = vpc_id
self.cidr_block = cidr_block
self.cidr = ipaddress.ip_network(six.text_type(self.cidr_block))
self._availability_zone = availability_zone
self.default_for_az = default_for_az
self.map_public_ip_on_launch = map_public_ip_on_launch
        # Theory is we assign IPs as we go (there are 16,777,214 usable IPs in a /8)
        self._subnet_ip_generator = self.cidr.hosts()
        self.reserved_ips = [six.next(self._subnet_ip_generator) for _ in range(0, 3)]  # Reserved by AWS
        self._unused_ips = set()  # if an instance is destroyed, hold its IP here for reuse
        self._subnet_ips = {}  # maps IP -> instance
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
vpc_id = properties['VpcId']
cidr_block = properties['CidrBlock']
availability_zone = properties.get('AvailabilityZone')
ec2_backend = ec2_backends[region_name]
subnet = ec2_backend.create_subnet(
vpc_id=vpc_id,
cidr_block=cidr_block,
availability_zone=availability_zone,
)
for tag in properties.get("Tags", []):
tag_key = tag["Key"]
tag_value = tag["Value"]
subnet.add_tag(tag_key, tag_value)
return subnet
@property
def availability_zone(self):
return self._availability_zone
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
"""
API Version 2014-10-01 defines the following filters for DescribeSubnets:
* availabilityZone
* available-ip-address-count
* cidrBlock
* defaultForAz
* state
* subnet-id
* tag:key=value
* tag-key
* tag-value
* vpc-id
Taken from: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html
"""
if filter_name in ('cidr', 'cidrBlock', 'cidr-block'):
return self.cidr_block
elif filter_name in ('vpc-id', 'vpcId'):
return self.vpc_id
elif filter_name == 'subnet-id':
return self.id
elif filter_name in ('availabilityZone', 'availability-zone'):
return self.availability_zone
elif filter_name in ('defaultForAz', 'default-for-az'):
return self.default_for_az
else:
return super(Subnet, self).get_filter_value(
filter_name, 'DescribeSubnets')
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'AvailabilityZone':
raise NotImplementedError(
'"Fn::GetAtt" : [ "{0}" , "AvailabilityZone" ]"')
raise UnformattedGetAttTemplateException()
def get_available_subnet_ip(self, instance):
try:
new_ip = self._unused_ips.pop()
except KeyError:
new_ip = six.next(self._subnet_ip_generator)
            # Skip any IPs that have been manually specified
            while str(new_ip) in self._subnet_ips:
                new_ip = six.next(self._subnet_ip_generator)
            if new_ip == self.cidr.broadcast_address:
                raise StopIteration()  # The broadcast address can't be used
        # TODO: StopIteration will be raised if no IPs are available; not sure how AWS handles this.
new_ip = str(new_ip)
self._subnet_ips[new_ip] = instance
return new_ip
def request_ip(self, ip, instance):
if ipaddress.ip_address(ip) not in self.cidr:
raise Exception('IP does not fall in the subnet CIDR of {0}'.format(self.cidr))
if ip in self._subnet_ips:
raise Exception('IP already in use')
try:
self._unused_ips.remove(ip)
except KeyError:
pass
self._subnet_ips[ip] = instance
return ip
def del_subnet_ip(self, ip):
try:
del self._subnet_ips[ip]
self._unused_ips.add(ip)
except KeyError:
pass # Unknown IP
class SubnetBackend(object):
def __init__(self):
# maps availability zone to dict of (subnet_id, subnet)
self.subnets = defaultdict(dict)
super(SubnetBackend, self).__init__()
def get_subnet(self, subnet_id):
for subnets in self.subnets.values():
if subnet_id in subnets:
return subnets[subnet_id]
raise InvalidSubnetIdError(subnet_id)
def create_subnet(self, vpc_id, cidr_block, availability_zone):
subnet_id = random_subnet_id()
self.get_vpc(vpc_id) # Validate VPC exists
# if this is the first subnet for an availability zone,
# consider it the default
default_for_az = str(availability_zone not in self.subnets).lower()
map_public_ip_on_launch = default_for_az
subnet = Subnet(self, subnet_id, vpc_id, cidr_block, availability_zone,
default_for_az, map_public_ip_on_launch)
# AWS associates a new subnet with the default Network ACL
self.associate_default_network_acl_with_subnet(subnet_id)
self.subnets[availability_zone][subnet_id] = subnet
return subnet
def get_all_subnets(self, subnet_ids=None, filters=None):
# Extract a list of all subnets
matches = itertools.chain(*[x.values()
for x in self.subnets.values()])
if subnet_ids:
matches = [sn for sn in matches
if sn.id in subnet_ids]
if len(subnet_ids) > len(matches):
                unknown_ids = set(subnet_ids) - set(sn.id for sn in matches)
raise InvalidSubnetIdError(unknown_ids)
if filters:
matches = generic_filter(filters, matches)
return matches
def delete_subnet(self, subnet_id):
for subnets in self.subnets.values():
if subnet_id in subnets:
return subnets.pop(subnet_id, None)
raise InvalidSubnetIdError(subnet_id)
def modify_subnet_attribute(self, subnet_id, map_public_ip):
subnet = self.get_subnet(subnet_id)
if map_public_ip not in ('true', 'false'):
raise InvalidParameterValueError(map_public_ip)
subnet.map_public_ip_on_launch = map_public_ip
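# SubnetRouteTableAssociation models the AWS::EC2::SubnetRouteTableAssociation
# CloudFormation resource, linking a subnet to a route table.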
class SubnetRouteTableAssociation(object):
def __init__(self, route_table_id, subnet_id):
self.route_table_id = route_table_id
self.subnet_id = subnet_id
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
route_table_id = properties['RouteTableId']
subnet_id = properties['SubnetId']
ec2_backend = ec2_backends[region_name]
subnet_association = ec2_backend.create_subnet_association(
route_table_id=route_table_id,
subnet_id=subnet_id,
)
return subnet_association
class SubnetRouteTableAssociationBackend(object):
def __init__(self):
self.subnet_associations = {}
super(SubnetRouteTableAssociationBackend, self).__init__()
def create_subnet_association(self, route_table_id, subnet_id):
subnet_association = SubnetRouteTableAssociation(
route_table_id, subnet_id)
self.subnet_associations["{0}:{1}".format(
route_table_id, subnet_id)] = subnet_association
return subnet_association
class RouteTable(TaggedEC2Resource):
def __init__(self, ec2_backend, route_table_id, vpc_id, main=False):
self.ec2_backend = ec2_backend
self.id = route_table_id
self.vpc_id = vpc_id
self.main = main
self.associations = {}
self.routes = {}
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
vpc_id = properties['VpcId']
ec2_backend = ec2_backends[region_name]
route_table = ec2_backend.create_route_table(
vpc_id=vpc_id,
)
return route_table
@property
def physical_resource_id(self):
return self.id
def get_filter_value(self, filter_name):
if filter_name == "association.main":
# Note: Boto only supports 'true'.
# https://github.com/boto/boto/issues/1742
if self.main:
return 'true'
else:
return 'false'
elif filter_name == "route-table-id":
return self.id
elif filter_name == "vpc-id":
return self.vpc_id
elif filter_name == "association.route-table-id":
return self.id
elif filter_name == "association.route-table-association-id":
return self.associations.keys()
elif filter_name == "association.subnet-id":
return self.associations.values()
else:
return super(RouteTable, self).get_filter_value(
filter_name, 'DescribeRouteTables')
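# A minimal usage sketch for the route-table backend below; the zone name and
# CIDR blocks are illustrative only (a local route is added automatically):
#   backend = ec2_backends['us-east-1']
#   vpc = backend.create_vpc('10.0.0.0/16')
#   subnet = backend.create_subnet(vpc.id, '10.0.0.0/24', 'us-east-1a')
#   table = backend.create_route_table(vpc.id)
#   assoc_id = backend.associate_route_table(table.id, subnet.id)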
class RouteTableBackend(object):
def __init__(self):
self.route_tables = {}
super(RouteTableBackend, self).__init__()
def create_route_table(self, vpc_id, main=False):
route_table_id = random_route_table_id()
vpc = self.get_vpc(vpc_id) # Validate VPC exists
route_table = RouteTable(self, route_table_id, vpc_id, main=main)
self.route_tables[route_table_id] = route_table
# AWS creates a default local route.
self.create_route(route_table_id, vpc.cidr_block, local=True)
return route_table
def get_route_table(self, route_table_id):
route_table = self.route_tables.get(route_table_id, None)
if not route_table:
raise InvalidRouteTableIdError(route_table_id)
return route_table
def get_all_route_tables(self, route_table_ids=None, filters=None):
route_tables = self.route_tables.values()
if route_table_ids:
route_tables = [
route_table for route_table in route_tables if route_table.id in route_table_ids]
if len(route_tables) != len(route_table_ids):
invalid_id = list(set(route_table_ids).difference(
set([route_table.id for route_table in route_tables])))[0]
raise InvalidRouteTableIdError(invalid_id)
return generic_filter(filters, route_tables)
def delete_route_table(self, route_table_id):
route_table = self.get_route_table(route_table_id)
if route_table.associations:
raise DependencyViolationError(
"The routeTable '{0}' has dependencies and cannot be deleted.".format(route_table_id)
)
self.route_tables.pop(route_table_id)
return True
def associate_route_table(self, route_table_id, subnet_id):
# Idempotent if association already exists.
route_tables_by_subnet = self.get_all_route_tables(
filters={'association.subnet-id': [subnet_id]})
if route_tables_by_subnet:
for association_id, check_subnet_id in route_tables_by_subnet[0].associations.items():
if subnet_id == check_subnet_id:
return association_id
# Association does not yet exist, so create it.
route_table = self.get_route_table(route_table_id)
self.get_subnet(subnet_id) # Validate subnet exists
association_id = random_subnet_association_id()
route_table.associations[association_id] = subnet_id
return association_id
def disassociate_route_table(self, association_id):
for route_table in self.route_tables.values():
if association_id in route_table.associations:
return route_table.associations.pop(association_id, None)
raise InvalidAssociationIdError(association_id)
def replace_route_table_association(self, association_id, route_table_id):
# Idempotent if association already exists.
new_route_table = self.get_route_table(route_table_id)
if association_id in new_route_table.associations:
return association_id
# Find route table which currently has the association, error if none.
route_tables_by_association_id = self.get_all_route_tables(
filters={'association.route-table-association-id': [association_id]})
if not route_tables_by_association_id:
raise InvalidAssociationIdError(association_id)
# Remove existing association, create new one.
previous_route_table = route_tables_by_association_id[0]
subnet_id = previous_route_table.associations.pop(association_id, None)
return self.associate_route_table(route_table_id, subnet_id)
class Route(object):
def __init__(self, route_table, destination_cidr_block, local=False,
gateway=None, instance=None, interface=None, vpc_pcx=None):
self.id = generate_route_id(route_table.id, destination_cidr_block)
self.route_table = route_table
self.destination_cidr_block = destination_cidr_block
self.local = local
self.gateway = gateway
self.instance = instance
self.interface = interface
self.vpc_pcx = vpc_pcx
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
gateway_id = properties.get('GatewayId')
instance_id = properties.get('InstanceId')
interface_id = properties.get('NetworkInterfaceId')
pcx_id = properties.get('VpcPeeringConnectionId')
route_table_id = properties['RouteTableId']
ec2_backend = ec2_backends[region_name]
route_table = ec2_backend.create_route(
route_table_id=route_table_id,
destination_cidr_block=properties.get('DestinationCidrBlock'),
gateway_id=gateway_id,
instance_id=instance_id,
interface_id=interface_id,
vpc_peering_connection_id=pcx_id,
)
return route_table
class RouteBackend(object):
def __init__(self):
super(RouteBackend, self).__init__()
def create_route(self, route_table_id, destination_cidr_block, local=False,
gateway_id=None, instance_id=None, interface_id=None,
vpc_peering_connection_id=None):
route_table = self.get_route_table(route_table_id)
if interface_id:
self.raise_not_implemented_error(
"CreateRoute to NetworkInterfaceId")
gateway = None
if gateway_id:
if EC2_RESOURCE_TO_PREFIX['vpn-gateway'] in gateway_id:
gateway = self.get_vpn_gateway(gateway_id)
elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id:
gateway = self.get_internet_gateway(gateway_id)
route = Route(route_table, destination_cidr_block, local=local,
gateway=gateway,
instance=self.get_instance(
instance_id) if instance_id else None,
interface=None,
vpc_pcx=self.get_vpc_peering_connection(
vpc_peering_connection_id) if vpc_peering_connection_id else None)
route_table.routes[route.id] = route
return route
def replace_route(self, route_table_id, destination_cidr_block,
gateway_id=None, instance_id=None, interface_id=None,
vpc_peering_connection_id=None):
route_table = self.get_route_table(route_table_id)
route_id = generate_route_id(route_table.id, destination_cidr_block)
route = route_table.routes[route_id]
if interface_id:
self.raise_not_implemented_error(
"ReplaceRoute to NetworkInterfaceId")
route.gateway = None
if gateway_id:
if EC2_RESOURCE_TO_PREFIX['vpn-gateway'] in gateway_id:
route.gateway = self.get_vpn_gateway(gateway_id)
elif EC2_RESOURCE_TO_PREFIX['internet-gateway'] in gateway_id:
route.gateway = self.get_internet_gateway(gateway_id)
route.instance = self.get_instance(
instance_id) if instance_id else None
route.interface = None
route.vpc_pcx = self.get_vpc_peering_connection(
vpc_peering_connection_id) if vpc_peering_connection_id else None
route_table.routes[route.id] = route
return route
def get_route(self, route_id):
route_table_id, destination_cidr_block = split_route_id(route_id)
route_table = self.get_route_table(route_table_id)
        return route_table.routes.get(route_id)
def delete_route(self, route_table_id, destination_cidr_block):
route_table = self.get_route_table(route_table_id)
route_id = generate_route_id(route_table_id, destination_cidr_block)
deleted = route_table.routes.pop(route_id, None)
if not deleted:
raise InvalidRouteError(route_table_id, destination_cidr_block)
return deleted
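# InternetGateway and its backend below model internet gateways and their
# one-to-one attachment to a VPC.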
class InternetGateway(TaggedEC2Resource):
def __init__(self, ec2_backend):
self.ec2_backend = ec2_backend
self.id = random_internet_gateway_id()
self.vpc = None
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
ec2_backend = ec2_backends[region_name]
return ec2_backend.create_internet_gateway()
@property
def physical_resource_id(self):
return self.id
@property
def attachment_state(self):
if self.vpc:
return "available"
else:
return "detached"
class InternetGatewayBackend(object):
def __init__(self):
self.internet_gateways = {}
super(InternetGatewayBackend, self).__init__()
def create_internet_gateway(self):
igw = InternetGateway(self)
self.internet_gateways[igw.id] = igw
return igw
def describe_internet_gateways(self, internet_gateway_ids=None, filters=None):
igws = []
if internet_gateway_ids is None:
igws = self.internet_gateways.values()
else:
for igw_id in internet_gateway_ids:
if igw_id in self.internet_gateways:
igws.append(self.internet_gateways[igw_id])
else:
raise InvalidInternetGatewayIdError(igw_id)
if filters is not None:
igws = filter_internet_gateways(igws, filters)
return igws
def delete_internet_gateway(self, internet_gateway_id):
igw = self.get_internet_gateway(internet_gateway_id)
if igw.vpc:
raise DependencyViolationError(
"{0} is being utilized by {1}".format(internet_gateway_id, igw.vpc.id)
)
self.internet_gateways.pop(internet_gateway_id)
return True
def detach_internet_gateway(self, internet_gateway_id, vpc_id):
igw = self.get_internet_gateway(internet_gateway_id)
if not igw.vpc or igw.vpc.id != vpc_id:
raise GatewayNotAttachedError(internet_gateway_id, vpc_id)
igw.vpc = None
return True
def attach_internet_gateway(self, internet_gateway_id, vpc_id):
igw = self.get_internet_gateway(internet_gateway_id)
if igw.vpc:
raise ResourceAlreadyAssociatedError(internet_gateway_id)
vpc = self.get_vpc(vpc_id)
igw.vpc = vpc
return True
def get_internet_gateway(self, internet_gateway_id):
igw_ids = [internet_gateway_id]
return self.describe_internet_gateways(internet_gateway_ids=igw_ids)[0]
class VPCGatewayAttachment(BaseModel):
def __init__(self, gateway_id, vpc_id):
self.gateway_id = gateway_id
self.vpc_id = vpc_id
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
ec2_backend = ec2_backends[region_name]
attachment = ec2_backend.create_vpc_gateway_attachment(
gateway_id=properties['InternetGatewayId'],
vpc_id=properties['VpcId'],
)
ec2_backend.attach_internet_gateway(
properties['InternetGatewayId'], properties['VpcId'])
return attachment
@property
def physical_resource_id(self):
return self.vpc_id
class VPCGatewayAttachmentBackend(object):
def __init__(self):
self.gateway_attachments = {}
super(VPCGatewayAttachmentBackend, self).__init__()
def create_vpc_gateway_attachment(self, vpc_id, gateway_id):
        attachment = VPCGatewayAttachment(gateway_id=gateway_id, vpc_id=vpc_id)
self.gateway_attachments[gateway_id] = attachment
return attachment
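# SpotInstanceRequest wraps boto's spot request model and launches a backing
# instance as soon as the request is created (see launch_instance below).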
class SpotInstanceRequest(BotoSpotRequest, TaggedEC2Resource):
def __init__(self, ec2_backend, spot_request_id, price, image_id, type,
valid_from, valid_until, launch_group, availability_zone_group,
key_name, security_groups, user_data, instance_type, placement,
kernel_id, ramdisk_id, monitoring_enabled, subnet_id, spot_fleet_id,
**kwargs):
super(SpotInstanceRequest, self).__init__(**kwargs)
ls = LaunchSpecification()
self.ec2_backend = ec2_backend
self.launch_specification = ls
self.id = spot_request_id
self.state = "open"
self.price = price
self.type = type
self.valid_from = valid_from
self.valid_until = valid_until
self.launch_group = launch_group
self.availability_zone_group = availability_zone_group
        self.user_data = user_data  # NOTE: kept on the request; not part of the launch specification
ls.kernel = kernel_id
ls.ramdisk = ramdisk_id
ls.image_id = image_id
ls.key_name = key_name
ls.instance_type = instance_type
ls.placement = placement
ls.monitored = monitoring_enabled
ls.subnet_id = subnet_id
self.spot_fleet_id = spot_fleet_id
if security_groups:
for group_name in security_groups:
group = self.ec2_backend.get_security_group_from_name(
group_name)
if group:
ls.groups.append(group)
else:
            # If no security groups were given, add the default one
default_group = self.ec2_backend.get_security_group_from_name(
"default")
ls.groups.append(default_group)
self.instance = self.launch_instance()
def get_filter_value(self, filter_name):
if filter_name == 'state':
return self.state
elif filter_name == 'spot-instance-request-id':
return self.id
else:
return super(SpotInstanceRequest, self).get_filter_value(
filter_name, 'DescribeSpotInstanceRequests')
def launch_instance(self):
reservation = self.ec2_backend.add_instances(
image_id=self.launch_specification.image_id, count=1, user_data=self.user_data,
instance_type=self.launch_specification.instance_type,
subnet_id=self.launch_specification.subnet_id,
key_name=self.launch_specification.key_name,
security_group_names=[],
security_group_ids=self.launch_specification.groups,
spot_fleet_id=self.spot_fleet_id,
)
instance = reservation.instances[0]
return instance
@six.add_metaclass(Model)
class SpotRequestBackend(object):
def __init__(self):
self.spot_instance_requests = {}
super(SpotRequestBackend, self).__init__()
def request_spot_instances(self, price, image_id, count, type, valid_from,
valid_until, launch_group, availability_zone_group,
key_name, security_groups, user_data,
instance_type, placement, kernel_id, ramdisk_id,
monitoring_enabled, subnet_id, spot_fleet_id=None):
requests = []
for _ in range(count):
spot_request_id = random_spot_request_id()
request = SpotInstanceRequest(self,
spot_request_id, price, image_id, type, valid_from, valid_until,
launch_group, availability_zone_group, key_name, security_groups,
user_data, instance_type, placement, kernel_id, ramdisk_id,
monitoring_enabled, subnet_id, spot_fleet_id)
self.spot_instance_requests[spot_request_id] = request
requests.append(request)
return requests
@Model.prop('SpotInstanceRequest')
def describe_spot_instance_requests(self, filters=None):
requests = self.spot_instance_requests.values()
return generic_filter(filters, requests)
def cancel_spot_instance_requests(self, request_ids):
requests = []
for request_id in request_ids:
requests.append(self.spot_instance_requests.pop(request_id))
return requests
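# SpotFleetLaunchSpec and SpotFleetRequest below model spot fleets: requested
# capacity is spread across launch specifications either 'diversified' or by
# lowest spot price (see get_launch_spec_counts).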
class SpotFleetLaunchSpec(object):
def __init__(self, ebs_optimized, group_set, iam_instance_profile, image_id,
instance_type, key_name, monitoring, spot_price, subnet_id, user_data,
weighted_capacity):
self.ebs_optimized = ebs_optimized
self.group_set = group_set
self.iam_instance_profile = iam_instance_profile
self.image_id = image_id
self.instance_type = instance_type
self.key_name = key_name
self.monitoring = monitoring
self.spot_price = spot_price
self.subnet_id = subnet_id
self.user_data = user_data
self.weighted_capacity = float(weighted_capacity)
class SpotFleetRequest(TaggedEC2Resource):
def __init__(self, ec2_backend, spot_fleet_request_id, spot_price,
target_capacity, iam_fleet_role, allocation_strategy, launch_specs):
self.ec2_backend = ec2_backend
self.id = spot_fleet_request_id
self.spot_price = spot_price
self.target_capacity = int(target_capacity)
self.iam_fleet_role = iam_fleet_role
self.allocation_strategy = allocation_strategy
self.state = "active"
self.fulfilled_capacity = 0.0
self.launch_specs = []
for spec in launch_specs:
self.launch_specs.append(SpotFleetLaunchSpec(
ebs_optimized=spec['ebs_optimized'],
group_set=[val for key, val in spec.items(
) if key.startswith("group_set")],
iam_instance_profile=spec.get('iam_instance_profile._arn'),
image_id=spec['image_id'],
instance_type=spec['instance_type'],
key_name=spec.get('key_name'),
monitoring=spec.get('monitoring._enabled'),
spot_price=spec.get('spot_price', self.spot_price),
subnet_id=spec['subnet_id'],
user_data=spec.get('user_data'),
weighted_capacity=spec['weighted_capacity'],
)
)
self.spot_requests = []
self.create_spot_requests(self.target_capacity)
@property
def physical_resource_id(self):
return self.id
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json[
'Properties']['SpotFleetRequestConfigData']
ec2_backend = ec2_backends[region_name]
spot_price = properties.get('SpotPrice')
target_capacity = properties['TargetCapacity']
iam_fleet_role = properties['IamFleetRole']
allocation_strategy = properties['AllocationStrategy']
launch_specs = properties["LaunchSpecifications"]
launch_specs = [
dict([(camelcase_to_underscores(key), val)
for key, val in launch_spec.items()])
for launch_spec
in launch_specs
]
spot_fleet_request = ec2_backend.request_spot_fleet(spot_price,
target_capacity, iam_fleet_role, allocation_strategy,
launch_specs)
return spot_fleet_request
def get_launch_spec_counts(self, weight_to_add):
weight_map = defaultdict(int)
weight_so_far = 0
if self.allocation_strategy == 'diversified':
launch_spec_index = 0
while True:
launch_spec = self.launch_specs[
launch_spec_index % len(self.launch_specs)]
weight_map[launch_spec] += 1
weight_so_far += launch_spec.weighted_capacity
if weight_so_far >= weight_to_add:
break
launch_spec_index += 1
else: # lowestPrice
cheapest_spec = sorted(
# FIXME: change `+inf` to the on demand price scaled to weighted capacity when it's not present
self.launch_specs, key=lambda spec: float(spec.spot_price or '+inf'))[0]
weight_so_far = weight_to_add + (weight_to_add % cheapest_spec.weighted_capacity)
weight_map[cheapest_spec] = int(
weight_so_far // cheapest_spec.weighted_capacity)
return weight_map, weight_so_far
def create_spot_requests(self, weight_to_add):
weight_map, added_weight = self.get_launch_spec_counts(weight_to_add)
for launch_spec, count in weight_map.items():
requests = self.ec2_backend.request_spot_instances(
price=launch_spec.spot_price,
image_id=launch_spec.image_id,
count=count,
type="persistent",
valid_from=None,
valid_until=None,
launch_group=None,
availability_zone_group=None,
key_name=launch_spec.key_name,
security_groups=launch_spec.group_set,
user_data=launch_spec.user_data,
instance_type=launch_spec.instance_type,
placement=None,
kernel_id=None,
ramdisk_id=None,
monitoring_enabled=launch_spec.monitoring,
subnet_id=launch_spec.subnet_id,
spot_fleet_id=self.id,
)
self.spot_requests.extend(requests)
self.fulfilled_capacity += added_weight
return self.spot_requests
def terminate_instances(self):
instance_ids = []
new_fulfilled_capacity = self.fulfilled_capacity
for req in self.spot_requests:
instance = req.instance
for spec in self.launch_specs:
if spec.instance_type == instance.instance_type and spec.subnet_id == instance.subnet_id:
break
if new_fulfilled_capacity - spec.weighted_capacity < self.target_capacity:
continue
new_fulfilled_capacity -= spec.weighted_capacity
instance_ids.append(instance.id)
self.spot_requests = [req for req in self.spot_requests if req.instance.id not in instance_ids]
self.ec2_backend.terminate_instances(instance_ids)
class SpotFleetBackend(object):
def __init__(self):
self.spot_fleet_requests = {}
super(SpotFleetBackend, self).__init__()
def request_spot_fleet(self, spot_price, target_capacity, iam_fleet_role,
allocation_strategy, launch_specs):
spot_fleet_request_id = random_spot_fleet_request_id()
request = SpotFleetRequest(self, spot_fleet_request_id, spot_price,
target_capacity, iam_fleet_role, allocation_strategy, launch_specs)
self.spot_fleet_requests[spot_fleet_request_id] = request
return request
def get_spot_fleet_request(self, spot_fleet_request_id):
return self.spot_fleet_requests[spot_fleet_request_id]
def describe_spot_fleet_instances(self, spot_fleet_request_id):
spot_fleet = self.get_spot_fleet_request(spot_fleet_request_id)
return spot_fleet.spot_requests
def describe_spot_fleet_requests(self, spot_fleet_request_ids):
requests = self.spot_fleet_requests.values()
if spot_fleet_request_ids:
requests = [
request for request in requests if request.id in spot_fleet_request_ids]
return requests
def cancel_spot_fleet_requests(self, spot_fleet_request_ids, terminate_instances):
spot_requests = []
for spot_fleet_request_id in spot_fleet_request_ids:
spot_fleet = self.spot_fleet_requests[spot_fleet_request_id]
if terminate_instances:
spot_fleet.target_capacity = 0
spot_fleet.terminate_instances()
spot_requests.append(spot_fleet)
del self.spot_fleet_requests[spot_fleet_request_id]
return spot_requests
def modify_spot_fleet_request(self, spot_fleet_request_id, target_capacity, terminate_instances):
if target_capacity < 0:
raise ValueError('Cannot reduce spot fleet capacity below 0')
spot_fleet_request = self.spot_fleet_requests[spot_fleet_request_id]
delta = target_capacity - spot_fleet_request.fulfilled_capacity
spot_fleet_request.target_capacity = target_capacity
if delta > 0:
spot_fleet_request.create_spot_requests(delta)
elif delta < 0 and terminate_instances == 'Default':
spot_fleet_request.terminate_instances()
return True
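# A minimal usage sketch for the Elastic IP backend below ('vpc' selects
# VPC-style addressing, 'standard' selects EC2-Classic):
#   backend = ec2_backends['us-east-1']
#   eip = backend.allocate_address(domain='vpc')
#   backend.describe_addresses(allocation_ids=[eip.allocation_id])
#   backend.release_address(allocation_id=eip.allocation_id)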
class ElasticAddress(object):
def __init__(self, domain, address=None):
if address:
self.public_ip = address
else:
self.public_ip = random_ip()
self.allocation_id = random_eip_allocation_id() if domain == "vpc" else None
self.domain = domain
self.instance = None
self.eni = None
self.association_id = None
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
ec2_backend = ec2_backends[region_name]
properties = cloudformation_json.get('Properties')
instance_id = None
if properties:
domain = properties.get('Domain')
eip = ec2_backend.allocate_address(
domain=domain if domain else 'standard')
instance_id = properties.get('InstanceId')
else:
eip = ec2_backend.allocate_address(domain='standard')
if instance_id:
instance = ec2_backend.get_instance_by_id(instance_id)
ec2_backend.associate_address(instance, address=eip.public_ip)
return eip
@property
def physical_resource_id(self):
return self.public_ip
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'AllocationId':
return self.allocation_id
raise UnformattedGetAttTemplateException()
def get_filter_value(self, filter_name):
if filter_name == 'allocation-id':
return self.allocation_id
elif filter_name == 'association-id':
return self.association_id
elif filter_name == 'domain':
return self.domain
elif filter_name == 'instance-id' and self.instance:
return self.instance.id
elif filter_name == 'network-interface-id' and self.eni:
return self.eni.id
elif filter_name == 'private-ip-address' and self.eni:
return self.eni.private_ip_address
elif filter_name == 'public-ip':
return self.public_ip
else:
# TODO: implement network-interface-owner-id
raise FilterNotImplementedError(filter_name, 'DescribeAddresses')
class ElasticAddressBackend(object):
def __init__(self):
self.addresses = []
super(ElasticAddressBackend, self).__init__()
def allocate_address(self, domain, address=None):
if domain not in ['standard', 'vpc']:
raise InvalidDomainError(domain)
if address:
address = ElasticAddress(domain, address)
else:
address = ElasticAddress(domain)
self.addresses.append(address)
return address
def address_by_ip(self, ips):
eips = [address for address in self.addresses
if address.public_ip in ips]
# TODO: Trim error message down to specific invalid address.
if not eips or len(ips) > len(eips):
raise InvalidAddressError(ips)
return eips
def address_by_allocation(self, allocation_ids):
eips = [address for address in self.addresses
if address.allocation_id in allocation_ids]
# TODO: Trim error message down to specific invalid id.
if not eips or len(allocation_ids) > len(eips):
raise InvalidAllocationIdError(allocation_ids)
return eips
def address_by_association(self, association_ids):
eips = [address for address in self.addresses
if address.association_id in association_ids]
# TODO: Trim error message down to specific invalid id.
if not eips or len(association_ids) > len(eips):
raise InvalidAssociationIdError(association_ids)
return eips
def associate_address(self, instance=None, eni=None, address=None, allocation_id=None, reassociate=False):
eips = []
if address:
eips = self.address_by_ip([address])
elif allocation_id:
eips = self.address_by_allocation([allocation_id])
eip = eips[0]
new_instance_association = bool(instance and (
not eip.instance or eip.instance.id == instance.id))
new_eni_association = bool(
eni and (not eip.eni or eni.id == eip.eni.id))
if new_instance_association or new_eni_association or reassociate:
eip.instance = instance
eip.eni = eni
if not eip.eni and instance:
# default to primary network interface
eip.eni = instance.nics[0]
if eip.eni:
eip.eni.public_ip = eip.public_ip
if eip.domain == "vpc":
eip.association_id = random_eip_association_id()
return eip
raise ResourceAlreadyAssociatedError(eip.public_ip)
def describe_addresses(self, allocation_ids=None, public_ips=None, filters=None):
matches = self.addresses
if allocation_ids:
matches = [addr for addr in matches
if addr.allocation_id in allocation_ids]
if len(allocation_ids) > len(matches):
                unknown_ids = set(allocation_ids) - set(addr.allocation_id for addr in matches)
raise InvalidAllocationIdError(unknown_ids)
if public_ips:
matches = [addr for addr in matches
if addr.public_ip in public_ips]
if len(public_ips) > len(matches):
                unknown_ips = set(public_ips) - set(addr.public_ip for addr in matches)
raise InvalidAddressError(unknown_ips)
if filters:
matches = generic_filter(filters, matches)
return matches
def disassociate_address(self, address=None, association_id=None):
eips = []
if address:
eips = self.address_by_ip([address])
elif association_id:
eips = self.address_by_association([association_id])
eip = eips[0]
if eip.eni:
eip.eni.public_ip = None
if eip.eni.instance and eip.eni.instance._state.name == "running":
eip.eni.check_auto_public_ip()
eip.eni = None
eip.instance = None
eip.association_id = None
return True
def release_address(self, address=None, allocation_id=None):
eips = []
if address:
eips = self.address_by_ip([address])
elif allocation_id:
eips = self.address_by_allocation([allocation_id])
eip = eips[0]
self.disassociate_address(address=eip.public_ip)
eip.allocation_id = None
self.addresses.remove(eip)
return True
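# DHCPOptionsSet models a DHCP options set; create_dhcp_options below accepts
# at most four values per option and NetBIOS node types 1, 2, 4 and 8.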
class DHCPOptionsSet(TaggedEC2Resource):
def __init__(self, ec2_backend, domain_name_servers=None, domain_name=None,
ntp_servers=None, netbios_name_servers=None,
netbios_node_type=None):
self.ec2_backend = ec2_backend
self._options = {
"domain-name-servers": domain_name_servers,
"domain-name": domain_name,
"ntp-servers": ntp_servers,
"netbios-name-servers": netbios_name_servers,
"netbios-node-type": netbios_node_type,
}
self.id = random_dhcp_option_id()
self.vpc = None
def get_filter_value(self, filter_name):
"""
API Version 2015-10-01 defines the following filters for DescribeDhcpOptions:
* dhcp-options-id
* key
* value
* tag:key=value
* tag-key
* tag-value
Taken from: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html
"""
if filter_name == 'dhcp-options-id':
return self.id
elif filter_name == 'key':
return list(self._options.keys())
elif filter_name == 'value':
values = [item for item in list(self._options.values()) if item]
return itertools.chain(*values)
else:
return super(DHCPOptionsSet, self).get_filter_value(
filter_name, 'DescribeDhcpOptions')
@property
def options(self):
return self._options
class DHCPOptionsSetBackend(object):
def __init__(self):
self.dhcp_options_sets = {}
super(DHCPOptionsSetBackend, self).__init__()
def associate_dhcp_options(self, dhcp_options, vpc):
dhcp_options.vpc = vpc
vpc.dhcp_options = dhcp_options
def create_dhcp_options(
self, domain_name_servers=None, domain_name=None,
ntp_servers=None, netbios_name_servers=None,
netbios_node_type=None):
NETBIOS_NODE_TYPES = [1, 2, 4, 8]
for field_value in domain_name_servers, ntp_servers, netbios_name_servers:
if field_value and len(field_value) > 4:
raise InvalidParameterValueError(",".join(field_value))
if netbios_node_type and int(netbios_node_type[0]) not in NETBIOS_NODE_TYPES:
raise InvalidParameterValueError(netbios_node_type)
options = DHCPOptionsSet(
self, domain_name_servers, domain_name, ntp_servers,
netbios_name_servers, netbios_node_type
)
self.dhcp_options_sets[options.id] = options
return options
def describe_dhcp_options(self, options_ids=None):
options_sets = []
for option_id in options_ids or []:
if option_id in self.dhcp_options_sets:
options_sets.append(self.dhcp_options_sets[option_id])
else:
raise InvalidDHCPOptionsIdError(option_id)
return options_sets or self.dhcp_options_sets.values()
def delete_dhcp_options_set(self, options_id):
if not (options_id and options_id.startswith('dopt-')):
raise MalformedDHCPOptionsIdError(options_id)
if options_id in self.dhcp_options_sets:
if self.dhcp_options_sets[options_id].vpc:
raise DependencyViolationError(
"Cannot delete assigned DHCP options.")
self.dhcp_options_sets.pop(options_id)
else:
raise InvalidDHCPOptionsIdError(options_id)
return True
def get_all_dhcp_options(self, dhcp_options_ids=None, filters=None):
dhcp_options_sets = self.dhcp_options_sets.values()
if dhcp_options_ids:
dhcp_options_sets = [
dhcp_options_set for dhcp_options_set in dhcp_options_sets if dhcp_options_set.id in dhcp_options_ids]
if len(dhcp_options_sets) != len(dhcp_options_ids):
invalid_id = list(set(dhcp_options_ids).difference(
set([dhcp_options_set.id for dhcp_options_set in dhcp_options_sets])))[0]
raise InvalidDHCPOptionsIdError(invalid_id)
return generic_filter(filters, dhcp_options_sets)
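# VPNConnection and its backend model site-to-site VPN connections between a
# customer gateway and a VPN gateway.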
class VPNConnection(TaggedEC2Resource):
def __init__(self, ec2_backend, id, type,
customer_gateway_id, vpn_gateway_id):
self.ec2_backend = ec2_backend
self.id = id
self.state = 'available'
self.customer_gateway_configuration = {}
self.type = type
self.customer_gateway_id = customer_gateway_id
self.vpn_gateway_id = vpn_gateway_id
self.tunnels = None
self.options = None
self.static_routes = None
def get_filter_value(self, filter_name):
return super(VPNConnection, self).get_filter_value(
filter_name, 'DescribeVpnConnections')
class VPNConnectionBackend(object):
def __init__(self):
self.vpn_connections = {}
super(VPNConnectionBackend, self).__init__()
def create_vpn_connection(self, type, customer_gateway_id,
vpn_gateway_id,
static_routes_only=None):
vpn_connection_id = random_vpn_connection_id()
if static_routes_only:
pass
vpn_connection = VPNConnection(
self, id=vpn_connection_id, type=type,
customer_gateway_id=customer_gateway_id,
vpn_gateway_id=vpn_gateway_id
)
self.vpn_connections[vpn_connection.id] = vpn_connection
return vpn_connection
def delete_vpn_connection(self, vpn_connection_id):
if vpn_connection_id in self.vpn_connections:
self.vpn_connections.pop(vpn_connection_id)
else:
raise InvalidVpnConnectionIdError(vpn_connection_id)
return True
def describe_vpn_connections(self, vpn_connection_ids=None):
vpn_connections = []
for vpn_connection_id in vpn_connection_ids or []:
if vpn_connection_id in self.vpn_connections:
vpn_connections.append(self.vpn_connections[vpn_connection_id])
else:
raise InvalidVpnConnectionIdError(vpn_connection_id)
return vpn_connections or self.vpn_connections.values()
def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None):
vpn_connections = self.vpn_connections.values()
if vpn_connection_ids:
vpn_connections = [vpn_connection for vpn_connection in vpn_connections
if vpn_connection.id in vpn_connection_ids]
if len(vpn_connections) != len(vpn_connection_ids):
invalid_id = list(set(vpn_connection_ids).difference(
set([vpn_connection.id for vpn_connection in vpn_connections])))[0]
raise InvalidVpnConnectionIdError(invalid_id)
return generic_filter(filters, vpn_connections)
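# NetworkAclBackend manages network ACLs, their entries and their subnet
# associations; each new subnet is associated with a default ACL
# (see associate_default_network_acl_with_subnet).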
class NetworkAclBackend(object):
def __init__(self):
self.network_acls = {}
super(NetworkAclBackend, self).__init__()
def get_network_acl(self, network_acl_id):
network_acl = self.network_acls.get(network_acl_id, None)
if not network_acl:
raise InvalidNetworkAclIdError(network_acl_id)
return network_acl
def create_network_acl(self, vpc_id, default=False):
network_acl_id = random_network_acl_id()
self.get_vpc(vpc_id)
network_acl = NetworkAcl(self, network_acl_id, vpc_id, default)
self.network_acls[network_acl_id] = network_acl
return network_acl
def get_all_network_acls(self, network_acl_ids=None, filters=None):
network_acls = self.network_acls.values()
if network_acl_ids:
network_acls = [network_acl for network_acl in network_acls
if network_acl.id in network_acl_ids]
if len(network_acls) != len(network_acl_ids):
invalid_id = list(set(network_acl_ids).difference(
set([network_acl.id for network_acl in network_acls])))[0]
raise InvalidRouteTableIdError(invalid_id)
return generic_filter(filters, network_acls)
def delete_network_acl(self, network_acl_id):
deleted = self.network_acls.pop(network_acl_id, None)
if not deleted:
raise InvalidNetworkAclIdError(network_acl_id)
return deleted
def create_network_acl_entry(self, network_acl_id, rule_number,
protocol, rule_action, egress, cidr_block,
icmp_code, icmp_type, port_range_from,
port_range_to):
network_acl_entry = NetworkAclEntry(self, network_acl_id, rule_number,
protocol, rule_action, egress,
cidr_block, icmp_code, icmp_type,
port_range_from, port_range_to)
network_acl = self.get_network_acl(network_acl_id)
network_acl.network_acl_entries.append(network_acl_entry)
return network_acl_entry
def delete_network_acl_entry(self, network_acl_id, rule_number, egress):
network_acl = self.get_network_acl(network_acl_id)
entry = next(entry for entry in network_acl.network_acl_entries
if entry.egress == egress and entry.rule_number == rule_number)
if entry is not None:
network_acl.network_acl_entries.remove(entry)
return entry
def replace_network_acl_entry(self, network_acl_id, rule_number, protocol, rule_action, egress,
cidr_block, icmp_code, icmp_type, port_range_from, port_range_to):
self.delete_network_acl_entry(network_acl_id, rule_number, egress)
network_acl_entry = self.create_network_acl_entry(network_acl_id, rule_number,
protocol, rule_action, egress,
cidr_block, icmp_code, icmp_type,
port_range_from, port_range_to)
return network_acl_entry
def replace_network_acl_association(self, association_id,
network_acl_id):
# lookup existing association for subnet and delete it
default_acl = next(value for key, value in self.network_acls.items()
if association_id in value.associations.keys())
subnet_id = None
for key, value in default_acl.associations.items():
if key == association_id:
subnet_id = default_acl.associations[key].subnet_id
del default_acl.associations[key]
break
new_assoc_id = random_network_acl_subnet_association_id()
association = NetworkAclAssociation(self,
new_assoc_id,
subnet_id,
network_acl_id)
new_acl = self.get_network_acl(network_acl_id)
new_acl.associations[new_assoc_id] = association
return association
def associate_default_network_acl_with_subnet(self, subnet_id):
association_id = random_network_acl_subnet_association_id()
acl = next(acl for acl in self.network_acls.values() if acl.default)
acl.associations[association_id] = NetworkAclAssociation(self, association_id,
subnet_id, acl.id)
class NetworkAclAssociation(object):
def __init__(self, ec2_backend, new_association_id,
subnet_id, network_acl_id):
self.ec2_backend = ec2_backend
self.id = new_association_id
self.new_association_id = new_association_id
self.subnet_id = subnet_id
self.network_acl_id = network_acl_id
super(NetworkAclAssociation, self).__init__()
class NetworkAcl(TaggedEC2Resource):
def __init__(self, ec2_backend, network_acl_id, vpc_id, default=False):
self.ec2_backend = ec2_backend
self.id = network_acl_id
self.vpc_id = vpc_id
self.network_acl_entries = []
self.associations = {}
self.default = 'true' if default is True else 'false'
def get_filter_value(self, filter_name):
if filter_name == "default":
return self.default
elif filter_name == "vpc-id":
return self.vpc_id
elif filter_name == "association.network-acl-id":
return self.id
elif filter_name == "association.subnet-id":
return [assoc.subnet_id for assoc in self.associations.values()]
else:
return super(NetworkAcl, self).get_filter_value(
filter_name, 'DescribeNetworkAcls')
class NetworkAclEntry(TaggedEC2Resource):
def __init__(self, ec2_backend, network_acl_id, rule_number,
protocol, rule_action, egress, cidr_block,
icmp_code, icmp_type, port_range_from,
port_range_to):
self.ec2_backend = ec2_backend
self.network_acl_id = network_acl_id
self.rule_number = rule_number
self.protocol = protocol
self.rule_action = rule_action
self.egress = egress
self.cidr_block = cidr_block
self.icmp_code = icmp_code
self.icmp_type = icmp_type
self.port_range_from = port_range_from
self.port_range_to = port_range_to
class VpnGateway(TaggedEC2Resource):
def __init__(self, ec2_backend, id, type):
self.ec2_backend = ec2_backend
self.id = id
self.type = type
self.attachments = {}
super(VpnGateway, self).__init__()
def get_filter_value(self, filter_name):
return super(VpnGateway, self).get_filter_value(
filter_name, 'DescribeVpnGateways')
class VpnGatewayAttachment(object):
def __init__(self, vpc_id, state):
self.vpc_id = vpc_id
self.state = state
super(VpnGatewayAttachment, self).__init__()
class VpnGatewayBackend(object):
def __init__(self):
self.vpn_gateways = {}
super(VpnGatewayBackend, self).__init__()
def create_vpn_gateway(self, type='ipsec.1'):
vpn_gateway_id = random_vpn_gateway_id()
vpn_gateway = VpnGateway(self, vpn_gateway_id, type)
self.vpn_gateways[vpn_gateway_id] = vpn_gateway
return vpn_gateway
def get_all_vpn_gateways(self, filters=None):
vpn_gateways = self.vpn_gateways.values()
return generic_filter(filters, vpn_gateways)
def get_vpn_gateway(self, vpn_gateway_id):
vpn_gateway = self.vpn_gateways.get(vpn_gateway_id, None)
if not vpn_gateway:
raise InvalidVpnGatewayIdError(vpn_gateway_id)
return vpn_gateway
def attach_vpn_gateway(self, vpn_gateway_id, vpc_id):
vpn_gateway = self.get_vpn_gateway(vpn_gateway_id)
self.get_vpc(vpc_id)
attachment = VpnGatewayAttachment(vpc_id, state='attached')
vpn_gateway.attachments[vpc_id] = attachment
return attachment
def delete_vpn_gateway(self, vpn_gateway_id):
deleted = self.vpn_gateways.pop(vpn_gateway_id, None)
if not deleted:
raise InvalidVpnGatewayIdError(vpn_gateway_id)
return deleted
def detach_vpn_gateway(self, vpn_gateway_id, vpc_id):
vpn_gateway = self.get_vpn_gateway(vpn_gateway_id)
self.get_vpc(vpc_id)
detached = vpn_gateway.attachments.pop(vpc_id, None)
if not detached:
raise InvalidVPCIdError(vpc_id)
return detached
class CustomerGateway(TaggedEC2Resource):
def __init__(self, ec2_backend, id, type, ip_address, bgp_asn):
self.ec2_backend = ec2_backend
self.id = id
self.type = type
self.ip_address = ip_address
self.bgp_asn = bgp_asn
self.attachments = {}
super(CustomerGateway, self).__init__()
def get_filter_value(self, filter_name):
return super(CustomerGateway, self).get_filter_value(
filter_name, 'DescribeCustomerGateways')
class CustomerGatewayBackend(object):
def __init__(self):
self.customer_gateways = {}
super(CustomerGatewayBackend, self).__init__()
def create_customer_gateway(self, type='ipsec.1', ip_address=None, bgp_asn=None):
customer_gateway_id = random_customer_gateway_id()
customer_gateway = CustomerGateway(
self, customer_gateway_id, type, ip_address, bgp_asn)
self.customer_gateways[customer_gateway_id] = customer_gateway
return customer_gateway
def get_all_customer_gateways(self, filters=None):
customer_gateways = self.customer_gateways.values()
return generic_filter(filters, customer_gateways)
def get_customer_gateway(self, customer_gateway_id):
customer_gateway = self.customer_gateways.get(
customer_gateway_id, None)
if not customer_gateway:
raise InvalidCustomerGatewayIdError(customer_gateway_id)
return customer_gateway
def delete_customer_gateway(self, customer_gateway_id):
deleted = self.customer_gateways.pop(customer_gateway_id, None)
if not deleted:
raise InvalidCustomerGatewayIdError(customer_gateway_id)
return deleted
class NatGateway(object):
def __init__(self, backend, subnet_id, allocation_id):
# public properties
self.id = random_nat_gateway_id()
self.subnet_id = subnet_id
self.allocation_id = allocation_id
self.state = 'available'
self.private_ip = random_private_ip()
# protected properties
self._created_at = datetime.utcnow()
self._backend = backend
# NOTE: this is the core of NAT Gateways creation
self._eni = self._backend.create_network_interface(
backend.get_subnet(self.subnet_id), self.private_ip)
# associate allocation with ENI
self._backend.associate_address(
eni=self._eni, allocation_id=self.allocation_id)
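        # The network_interface_id and public_ip properties below are resolved
        # from this ENI / Elastic IP association.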
@property
def vpc_id(self):
subnet = self._backend.get_subnet(self.subnet_id)
return subnet.vpc_id
@property
def create_time(self):
return iso_8601_datetime_with_milliseconds(self._created_at)
@property
def network_interface_id(self):
return self._eni.id
@property
def public_ip(self):
eips = self._backend.address_by_allocation([self.allocation_id])
return eips[0].public_ip
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
ec2_backend = ec2_backends[region_name]
nat_gateway = ec2_backend.create_nat_gateway(
cloudformation_json['Properties']['SubnetId'],
cloudformation_json['Properties']['AllocationId'],
)
return nat_gateway
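    # Illustrative only -- a minimal CloudFormation payload this hook can consume
    # (the logical name and resource IDs below are hypothetical):
    #
    #   NatGateway.create_from_cloudformation_json(
    #       "MyNatGateway",
    #       {"Properties": {"SubnetId": "subnet-12345678",
    #                       "AllocationId": "eipalloc-12345678"}},
    #       "us-east-1",
    #   )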
class NatGatewayBackend(object):
def __init__(self):
self.nat_gateways = {}
super(NatGatewayBackend, self).__init__()
def get_all_nat_gateways(self, filters):
return self.nat_gateways.values()
def create_nat_gateway(self, subnet_id, allocation_id):
nat_gateway = NatGateway(self, subnet_id, allocation_id)
self.nat_gateways[nat_gateway.id] = nat_gateway
return nat_gateway
def delete_nat_gateway(self, nat_gateway_id):
return self.nat_gateways.pop(nat_gateway_id)
class EC2Backend(BaseBackend, InstanceBackend, TagBackend, EBSBackend,
RegionsAndZonesBackend, SecurityGroupBackend, AmiBackend,
VPCBackend, SubnetBackend, SubnetRouteTableAssociationBackend,
NetworkInterfaceBackend, VPNConnectionBackend,
VPCPeeringConnectionBackend,
RouteTableBackend, RouteBackend, InternetGatewayBackend,
VPCGatewayAttachmentBackend, SpotFleetBackend,
SpotRequestBackend, ElasticAddressBackend, KeyPairBackend,
DHCPOptionsSetBackend, NetworkAclBackend, VpnGatewayBackend,
CustomerGatewayBackend, NatGatewayBackend):
def __init__(self, region_name):
self.region_name = region_name
super(EC2Backend, self).__init__()
# Default VPC exists by default, which is the current behavior
# of EC2-VPC. See for detail:
#
# docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html
#
if not self.vpcs:
vpc = self.create_vpc('172.31.0.0/16')
else:
# For now this is included for potential
# backward-compatibility issues
            vpc = list(self.vpcs.values())[0]  # dict views are not indexable on Python 3
# Create default subnet for each availability zone
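        # Each default subnet is a /20 (16 contiguous /24 blocks), so the third
        # octet advances by 16 per zone: 172.31.0.0/20, 172.31.16.0/20, ...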
ip, _ = vpc.cidr_block.split('/')
ip = ip.split('.')
ip[2] = 0
for zone in self.describe_availability_zones():
az_name = zone.name
cidr_block = '.'.join(str(i) for i in ip) + '/20'
self.create_subnet(vpc.id, cidr_block, availability_zone=az_name)
ip[2] += 16
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
# Use this to generate a proper error template response when in a response
# handler.
def raise_error(self, code, message):
raise EC2ClientError(code, message)
def raise_not_implemented_error(self, blurb):
raise MotoNotImplementedError(blurb)
def do_resources_exist(self, resource_ids):
for resource_id in resource_ids:
resource_prefix = get_prefix(resource_id)
if resource_prefix == EC2_RESOURCE_TO_PREFIX['customer-gateway']:
self.get_customer_gateway(customer_gateway_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['dhcp-options']:
self.describe_dhcp_options(options_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['image']:
self.describe_images(ami_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['instance']:
self.get_instance_by_id(instance_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['internet-gateway']:
self.describe_internet_gateways(
internet_gateway_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['network-acl']:
self.get_all_network_acls()
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['network-interface']:
self.describe_network_interfaces(
filters={'network-interface-id': resource_id})
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['reserved-instance']:
self.raise_not_implemented_error('DescribeReservedInstances')
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['route-table']:
self.get_route_table(route_table_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['security-group']:
self.describe_security_groups(group_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['snapshot']:
self.get_snapshot(snapshot_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['spot-instance-request']:
self.describe_spot_instance_requests(
filters={'spot-instance-request-id': resource_id})
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['subnet']:
self.get_subnet(subnet_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['volume']:
self.get_volume(volume_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['vpc']:
self.get_vpc(vpc_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['vpc-peering-connection']:
self.get_vpc_peering_connection(vpc_pcx_id=resource_id)
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['vpn-connection']:
self.describe_vpn_connections(vpn_connection_ids=[resource_id])
elif resource_prefix == EC2_RESOURCE_TO_PREFIX['vpn-gateway']:
self.get_vpn_gateway(vpn_gateway_id=resource_id)
return True
ec2_backends = {region.name: EC2Backend(region.name)
for region in RegionsAndZonesBackend.regions}
|
[] |
[] |
[
"MOTO_AMIS_PATH"
] |
[]
|
["MOTO_AMIS_PATH"]
|
python
| 1 | 0 | |
e2e/iam/suite_test.go
|
/*
Copyright © 2019 AWS Controller authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_test
import (
"context"
"os"
"path/filepath"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"go.awsctrl.io/manager/aws"
"go.awsctrl.io/manager/controllers/cloudformation"
"go.awsctrl.io/manager/controllers/controllermanager"
"go.awsctrl.io/manager/controllers/self"
"go.awsctrl.io/manager/testutils"
"go.awsctrl.io/manager/token"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
cloudformationv1alpha1 "go.awsctrl.io/manager/apis/cloudformation/v1alpha1"
metav1alpha1 "go.awsctrl.io/manager/apis/meta/v1alpha1"
selfv1alpha1 "go.awsctrl.io/manager/apis/self/v1alpha1"
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var (
cfg *rest.Config
k8sclient client.Client
k8smanager ctrl.Manager
testenv *envtest.Environment
awsclient aws.AWS
configname string = "config"
podnamespace string = "default"
timeout = time.Second * 300
interval = time.Second * 1
capabilityIAM string = "CAPABILITY_IAM"
capabilityNamedIAM string = "CAPABILITY_NAMED_IAM"
)
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{envtest.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
By("bootstrapping test environment")
testenv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
}
var err error
cfg, err = testenv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = scheme.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = selfv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = cloudformationv1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = controllermanager.AddAllSchemes(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
k8smanager, err = ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
if os.Getenv("USE_AWS_CLIENT") == "true" {
awsclient = aws.New()
} else {
awsclient = testutils.NewAWS()
}
err = (&self.ConfigReconciler{
Client: k8smanager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("self").WithName("config"),
Scheme: k8smanager.GetScheme(),
ConfigName: configname,
PodNamespace: podnamespace,
AWSClient: awsclient,
}).SetupWithManager(k8smanager)
Expect(err).ToNot(HaveOccurred())
err = (&cloudformation.StackReconciler{
Client: k8smanager.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("cloudformation").WithName("stack"),
Scheme: k8smanager.GetScheme(),
ConfigName: configname,
PodNamespace: podnamespace,
AWSClient: awsclient,
TokenClient: token.New(),
}).SetupWithManager(k8smanager)
Expect(err).ToNot(HaveOccurred())
var dynclient dynamic.Interface
if os.Getenv("USE_EXISTING_CLUSTER") == "true" {
dynclient, err = dynamic.NewForConfig(k8smanager.GetConfig())
Expect(err).ToNot(HaveOccurred())
} else {
dynclient = fake.NewSimpleDynamicClient(scheme.Scheme, []runtime.Object{}...)
}
_, err = controllermanager.SetupControllers(k8smanager, dynclient)
Expect(err).ToNot(HaveOccurred())
go func() {
err = k8smanager.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
k8sclient = k8smanager.GetClient()
Expect(k8sclient).ToNot(BeNil())
configkey := types.NamespacedName{
Name: configname,
Namespace: podnamespace,
}
config := &selfv1alpha1.Config{
ObjectMeta: metav1.ObjectMeta{
Name: configkey.Name,
Namespace: configkey.Namespace,
},
Spec: selfv1alpha1.ConfigSpec{
ClusterName: "test-cluster",
Resources: []string{},
AWS: selfv1alpha1.ConfigAWS{
DefaultRegion: "us-west-2",
AccountID: os.Getenv("AWS_ACCOUNT_ID"),
SupportedRegions: []string{"us-west-2"},
},
},
}
Expect(k8sclient.Create(context.Background(), config)).Should(Succeed())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
Eventually(func() bool {
stackList := cloudformationv1alpha1.StackList{}
if err := k8sclient.List(context.Background(), &stackList); err != nil {
return false
}
if len(stackList.Items) == 0 {
return true
}
for _, stack := range stackList.Items {
if os.Getenv("USE_AWS_CLIENT") == "true" {
awsclient.SetClient("us-west-2", testutils.NewCFN("DELETE_COMPLETE"))
}
if stack.Status.Status == metav1alpha1.DeleteInProgressStatus {
continue
}
if err := k8sclient.Delete(context.Background(), &stack); err != nil {
return false
}
}
return false
}, (time.Second * 60), time.Second*1).Should(BeTrue())
config := &selfv1alpha1.Config{
ObjectMeta: metav1.ObjectMeta{
Name: configname,
Namespace: podnamespace,
},
}
Expect(k8sclient.Delete(context.Background(), config)).Should(Succeed())
gexec.KillAndWait(5 * time.Second)
err := testenv.Stop()
Expect(err).ToNot(HaveOccurred())
})
|
[
"\"USE_AWS_CLIENT\"",
"\"USE_EXISTING_CLUSTER\"",
"\"AWS_ACCOUNT_ID\"",
"\"USE_AWS_CLIENT\""
] |
[] |
[
"USE_EXISTING_CLUSTER",
"USE_AWS_CLIENT",
"AWS_ACCOUNT_ID"
] |
[]
|
["USE_EXISTING_CLUSTER", "USE_AWS_CLIENT", "AWS_ACCOUNT_ID"]
|
go
| 3 | 0 | |
awx/plugins/inventory/ec2.py
|
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
optional region environment variable if region is 'auto'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
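# A minimal usage sketch (assuming the Boto credentials above are exported and an
# ec2.ini sits alongside this script; the playbook name is hypothetical):
#
#   ./ec2.py --list                      # full inventory as JSON
#   ./ec2.py --host <address>            # variables for a single instance
#   ./ec2.py --refresh-cache --list      # bypass the local cache
#   ansible-playbook -i ec2.py site.yml  # hypothetical playbook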
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
import six
from ansible.module_utils import ec2 as ec2_utils
HAS_BOTO3 = False
try:
import boto3
HAS_BOTO3 = True
except ImportError:
pass
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
self.aws_account_id = None
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# AWS credentials.
self.credentials = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
        ''' Determines if the cache files have expired, or if they are still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
scriptbasename = __file__
scriptbasename = os.path.basename(scriptbasename)
scriptbasename = scriptbasename.replace('.py', '')
defaults = {'ec2': {
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
}
}
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_ini_path = os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path'])
ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
if 'auto' in self.regions:
env_region = os.environ.get('AWS_REGION')
if env_region is None:
env_region = os.environ.get('AWS_DEFAULT_REGION')
self.regions = [ env_region ]
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'hostname_variable'):
self.hostname_variable = config.get('ec2', 'hostname_variable')
else:
self.hostname_variable = None
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
if config.has_option('ec2', 'route53_hostnames'):
self.route53_hostnames = config.get('ec2', 'route53_hostnames')
else:
self.route53_hostnames = None
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
config.get('ec2', 'route53_excluded_zones', '').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include RDS cluster instances?
if config.has_option('ec2', 'include_rds_clusters'):
self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
else:
self.include_rds_clusters = False
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
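        # Illustrative only -- a hypothetical ec2.ini fragment exercising the
        # options above (state names outside ec2_valid_instance_states are skipped):
        #
        #   [ec2]
        #   all_instances = False
        #   instance_states = running, stopped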
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument then environment variables then config file)
self.boto_profile = self.args.boto_profile or os.environ.get('AWS_PROFILE')
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# AWS credentials (prefer environment variables)
if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
os.environ.get('AWS_PROFILE')):
if config.has_option('credentials', 'aws_access_key_id'):
aws_access_key_id = config.get('credentials', 'aws_access_key_id')
else:
aws_access_key_id = None
if config.has_option('credentials', 'aws_secret_access_key'):
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
else:
aws_secret_access_key = None
if config.has_option('credentials', 'aws_security_token'):
aws_security_token = config.get('credentials', 'aws_security_token')
else:
aws_security_token = None
if aws_access_key_id:
self.credentials = {
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key
}
if aws_security_token:
self.credentials['security_token'] = aws_security_token
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_name = 'ansible-ec2'
cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id'))
if cache_id:
cache_name = '%s-%s' % (cache_name, cache_id)
self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name)
self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name)
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if config.has_option('ec2', 'expand_csv_tags'):
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
else:
self.expand_csv_tags = False
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_instance_state',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
'group_by_aws_account',
]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
pattern_exclude = config.get('ec2', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Do we want to stack multiple filters?
if config.has_option('ec2', 'stack_filters'):
self.stack_filters = config.getboolean('ec2', 'stack_filters')
else:
self.stack_filters = False
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
for instance_filter in filters:
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
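        # Illustrative only -- a hypothetical line for the parser above
        # (comma-separated key=value pairs; values for a repeated key accumulate):
        #
        #   [ec2]
        #   instance_filters = tag:env=staging, instance-state-name=running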
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
if self.include_rds_clusters:
self.include_rds_clusters_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = self.credentials
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to get the list of instances in a
        particular region '''
try:
conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
if self.stack_filters:
filters_dict = {}
for filter_key, filter_values in self.ec2_instance_filters.items():
filters_dict[filter_key] = filter_values
reservations.extend(conn.get_all_instances(filters = filters_dict))
else:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
# Pull the tags back in a second step
# AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
# reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
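            # The resource-id filter below is therefore chunked into groups of
            # max_filter_value IDs so a single get_all_tags call never grows too large.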
instance_ids = []
for reservation in reservations:
instance_ids.extend([instance.id for instance in reservation.instances])
max_filter_value = 199
tags = []
for i in range(0, len(instance_ids), max_filter_value):
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
tags_by_instance_id = defaultdict(dict)
for tag in tags:
tags_by_instance_id[tag.res_id][tag.name] = tag.value
if (not self.aws_account_id) and reservations:
self.aws_account_id = reservations[0].owner_id
for reservation in reservations:
for instance in reservation.instances:
instance.tags = tags_by_instance_id[instance.id]
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
        ''' Makes an AWS API call to get the list of RDS instances in a
        particular region '''
try:
conn = self.connect_to_aws(rds, region)
if conn:
marker = None
while True:
instances = conn.get_all_dbinstances(marker=marker)
marker = instances.marker
for instance in instances:
self.add_rds_instance(instance, region)
if not marker:
break
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def include_rds_clusters_by_region(self, region):
if not HAS_BOTO3:
self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
"getting RDS clusters")
client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
marker, clusters = '', []
while marker is not None:
resp = client.describe_db_clusters(Marker=marker)
clusters.extend(resp["DBClusters"])
marker = resp.get('Marker', None)
account_id = boto.connect_iam().get_user().arn.split(':')[4]
c_dict = {}
for c in clusters:
# remove these datetime objects as there is no serialisation to json
# currently in place and we don't need the data yet
if 'EarliestRestorableTime' in c:
del c['EarliestRestorableTime']
if 'LatestRestorableTime' in c:
del c['LatestRestorableTime']
if self.ec2_instance_filters == {}:
matches_filter = True
else:
matches_filter = False
try:
# arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
tags = client.list_tags_for_resource(
ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
c['Tags'] = tags['TagList']
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
# get AWS tag key e.g. tag:env will be 'env'
tag_name = filter_key.split(":", 1)[1]
# Filter values is a list (if you put multiple values for the same tag name)
matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
if matches_filter:
# it matches a filter, so stop looking for further matches
break
except Exception as e:
if e.message.find('DBInstanceNotFound') >= 0:
                    # AWS RDS bug (2016-01-06) means deletion does not fully complete and leaves an 'empty' cluster.
# Ignore errors when trying to find tags for these
pass
# ignore empty clusters caused by AWS bug
if len(c['DBClusterMembers']) == 0:
continue
elif matches_filter:
c_dict[c['DBClusterIdentifier']] = c
self.inventory['db_clusters'] = c_dict
def get_elasticache_clusters_by_region(self, region):
        ''' Makes an AWS API call to get the list of ElastiCache clusters (with
        nodes' info) in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
# CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
        ''' Makes an AWS API call to get the list of ElastiCache replication groups
        in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
# Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
dest = getattr(instance, self.destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# set the hostname from route53
if self.route53_enabled and self.route53_hostnames:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
if name.endswith(self.route53_hostnames):
hostname = name
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
# to_safe strips hostname characters like dots, so don't strip route53 hostnames
elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames):
hostname = hostname.lower()
else:
hostname = self.to_safe(hostname).lower()
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(hostname):
return
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.placement, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
self.push_group(self.inventory, 'zones', instance.placement)
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
self.push(self.inventory, ami_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by instance state
if self.group_by_instance_state:
state_name = self.to_safe('instance_state_' + instance.state)
self.push(self.inventory, state_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'instance_states', state_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by AWS account ID
if self.group_by_aws_account:
self.push(self.inventory, self.aws_account_id, dest)
if self.nested_groups:
self.push_group(self.inventory, 'accounts', self.aws_account_id)
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
hostname = self.to_safe(hostname).lower()
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.availability_zone, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
self.push_group(self.inventory, 'zones', instance.availability_zone)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
        its nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
            # Redis single-node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Skip clusters we cannot address (e.g. private VPC subnet or clustered redis)
if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \
replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None:
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
# AWS API version for replication groups)
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
if self.boto_profile:
r53_conn = route53.Route53Connection(profile_name=self.boto_profile)
else:
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
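# Illustrative sketch (hypothetical values): after this loop, self.route53_records maps
# each resource record value to the set of record names pointing at it, e.g.
#   {'203.0.113.10': {'www.example.com', 'api.example.com'}}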
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif value is None:
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
if self.expand_csv_tags and ',' in v:
v = list(map(lambda x: x.strip(), v.split(',')))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
elif key == 'ec2_block_device_mapping':
instance_vars["ec2_block_devices"] = {}
for k, v in value.items():
instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id
return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat dictionary
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
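# For example (illustrative input): a describe key such as 'CacheNodeType' becomes
# 'ec2_cache_node_type' after uncammelize() plus the 'ec2_' prefix applied below.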
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif value is None:
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
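# Sketch of the resulting inventory shapes (hypothetical group/host names):
# push() produces plain host lists such as {'us-east-1': ['10.0.0.1']}, while
# push_group() upgrades the parent to a dict with nested children, e.g.
# {'regions': {'children': ['us-east-1']}}.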
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
with open(self.cache_path_cache, 'r') as f:
json_inventory = f.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file and sets self.index '''
with open(self.cache_path_index, 'rb') as f:
self.index = json.load(f)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
with open(filename, 'w') as f:
f.write(json_data)
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
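# e.g. uncammelize('CacheClusterId') == 'cache_cluster_id' (illustrative input).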
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
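# e.g. to_safe('type_cache.m1.small') == 'type_cache_m1_small' (illustrative input);
# dashes are preserved only when replace_dash_in_groups is False.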
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
if __name__ == '__main__':
# Run the script
Ec2Inventory()
|
[] |
[] |
[
"AWS_DEFAULT_REGION",
"EC2_INI_PATH",
"AWS_REGION",
"AWS_SECRET_ACCESS_KEY",
"AWS_ACCESS_KEY_ID",
"AWS_PROFILE"
] |
[]
|
["AWS_DEFAULT_REGION", "EC2_INI_PATH", "AWS_REGION", "AWS_SECRET_ACCESS_KEY", "AWS_ACCESS_KEY_ID", "AWS_PROFILE"]
|
python
| 6 | 0 | |
configure.py
|
# Copyright 2019-2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import sys
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
_COMPAT_TENSORFLOW_VERSION = "1.15.0"
_COMPAT_PYTHON_VERSION = "Python 3.7"
_COMPAT_SWIG_VERSION = "SWIG Version "
_ASCEND_INSTALL_PATH_ENV = "ASCEND_CUSTOM_PATH"
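# setup_ascend() reads this environment variable; when it is unset, the default
# install path /usr/local/Ascend is used instead (see setup_ascend below).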
def run_command(cmd):
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question)
except EOFError:
answer = ''
return answer
def real_config_path(file):
return os.path.join("tools", file)
def setup_python():
"""Get python install path."""
default_python_bin_path = which('python3')
custom_python_bin_path = ''
ask_python_bin_path = ''
if default_python_bin_path:
custom_python_bin_path = default_python_bin_path
compile_args = run_command([
custom_python_bin_path, '--version'])
if _COMPAT_PYTHON_VERSION not in compile_args:
print('Invalid default python version: %s, only Python 3.7 is supported.' % compile_args)
ask_python_bin_path = ('Please specify the location of python with valid '
'tensorflow 1.15.0 site-packages installed. [Default '
'is %s]\n(Please enter the correct python path: ') % default_python_bin_path
custom_python_bin_path = ''
else:
ask_python_bin_path = ('Please specify the location of python with valid '
'tensorflow 1.15.0 site-packages installed. [Default '
'is %s]\n(Please enter the correct python path: ') % default_python_bin_path
while True:
if not custom_python_bin_path:
python_bin_path = get_input(ask_python_bin_path)
else:
python_bin_path = custom_python_bin_path
custom_python_bin_path = None
if not python_bin_path:
python_bin_path = default_python_bin_path
pass
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
pass
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
continue
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
continue
try:
compile_args = run_command([
python_bin_path, '-c',
'import distutils.sysconfig; import tensorflow as tf; print(tf.__version__ + "|" + tf.sysconfig.get_lib('
') + "|" + "|".join(tf.sysconfig.get_compile_flags()) + "|" + distutils.sysconfig.get_python_inc())'
]).split("|")
if not compile_args[0].startswith(_COMPAT_TENSORFLOW_VERSION):
print('Invalid python path: %s compat tensorflow version is %s'
' got %s.' % (python_bin_path, _COMPAT_TENSORFLOW_VERSION,
compile_args[0]))
continue
except subprocess.CalledProcessError:
print('Invalid python path: %s tensorflow not installed.' %
python_bin_path)
continue
# Write tools/python_bin_path.sh
with open(real_config_path('PYTHON_BIN_PATH'), 'w') as f:
f.write(python_bin_path)
with open(real_config_path('COMPILE_FLAGS'), 'w') as f:
for flag in compile_args[2:-1]:
f.write(flag + '\n')
f.write("-I" + compile_args[-1] + '\n')
print('tensorflow path: %s.' % compile_args[1])
with open(real_config_path('LINK_FLAGS'), 'w') as f:
f.write(os.path.join(compile_args[1], 'libtensorflow_framework.so.1\n'))
f.write(os.path.join(compile_args[1], 'python', '_pywrap_tensorflow_internal.so\n'))
break
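# Summary of the files written above: tools/PYTHON_BIN_PATH holds the chosen
# interpreter path, tools/COMPILE_FLAGS holds the TensorFlow compile flags plus a
# -I line for the Python include directory, and tools/LINK_FLAGS points at
# libtensorflow_framework.so.1 and python/_pywrap_tensorflow_internal.so.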
def setup_ascend(env_path):
"""Get ascend install path."""
default_ascend_path = "/usr/local/Ascend"
custom_ascend_path = env_path
while True:
if not custom_ascend_path:
ascend_path = default_ascend_path
else:
ascend_path = custom_ascend_path
# Check if the path is valid
if os.path.isdir(ascend_path) and os.access(ascend_path, os.X_OK):
break
elif not os.path.exists(ascend_path):
print('Invalid ascend path: %s cannot be found.' % ascend_path)
sys.exit(1)
else:
print('Invalid ascend path: %s is not an accessible directory.' % ascend_path)
sys.exit(1)
print('ascend path: %s.' % ascend_path)
with open(real_config_path('LINK_FLAGS'), 'a') as f:
f.write(os.path.join(ascend_path, "fwkacllib", "lib64", "libtsdclient.so\n"))
f.write(os.path.join(ascend_path, "fwkacllib", "lib64", "libge_runner.so\n"))
f.write(os.path.join(ascend_path, "fwkacllib", "lib64", "libfmk_parser.so\n"))
f.write(os.path.join(ascend_path, "fwkacllib", "lib64", "libdatatransfer.so\n"))
f.write(os.path.join(ascend_path, "fwkacllib", "lib64", "libindextransform.so\n"))
f.write(os.path.join(ascend_path, "atc", "lib64", "libalog.so\n"))
def setup_swig():
"""Get swig install path."""
default_swig_path = which('swig')
custom_swig_path = ''
ask_swig_path = ''
if default_swig_path:
custom_swig_path = default_swig_path
compile_args = run_command([
custom_swig_path, '-version'])
if _COMPAT_SWIG_VERSION not in compile_args:
print('Invalid default swig version: %s.' % compile_args)
ask_swig_path = ('Please specify the location of swig. [Default is '
'%s]\n(Please enter the correct swig path: ') % default_swig_path
custom_swig_path = ''
else:
ask_swig_path = ('Please specify the location of swig. [Default is '
'%s]\n(Please enter the correct swig path: ') % default_swig_path
while True:
if not custom_swig_path:
swig_path = get_input(ask_swig_path)
else:
swig_path = custom_swig_path
custom_swig_path = None
if not swig_path:
swig_path = default_swig_path
# Check if the path is valid
if os.path.isfile(swig_path) and os.access(swig_path, os.X_OK):
break
elif not os.path.exists(swig_path):
print('Invalid swig path: %s cannot be found.' % swig_path)
continue
else:
print('%s is not executable. Is it the swig binary?' % swig_path)
continue
with open(real_config_path('SWIG_BIN_PATH'), 'w') as f:
f.write(swig_path)
def main():
env_snapshot = dict(os.environ)
setup_python()
setup_ascend(env_snapshot.get(_ASCEND_INSTALL_PATH_ENV))
setup_swig()
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
src/main/java/com/cttexpress/Launcher.java
|
package com.cttexpress;
import org.apache.catalina.Context;
import org.apache.catalina.Service;
import org.apache.catalina.connector.Connector;
import org.apache.catalina.startup.Tomcat;
import org.glassfish.jersey.servlet.ServletContainer;
public class Launcher {
private static final String JERSEY_SERVLET_NAME = "jersey-container-servlet";
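// Configuration is read from environment variables (see below): HTTP_SCHEME selects
// HTTP or HTTPS, PORT sets the listener port (defaults 8080 for HTTP, 9090 for HTTPS),
// and for HTTPS the keystore is described by KEYALIAS, KEYSTOREPASS and KEYSTOREFILE.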
public static void main(String[] args) throws Exception {
new Launcher().start();
}
void start() throws Exception {
String httpScheme = System.getenv("HTTP_SCHEME");
System.out.println("Env HTTP_SCHEME[" + httpScheme + "]");
if (httpScheme != null && httpScheme.trim().equalsIgnoreCase("HTTP")) {
httpScheme = "HTTP";
} else if (httpScheme != null && httpScheme.trim().equalsIgnoreCase("HTTPS")) {
httpScheme = "HTTPS";
} else {
httpScheme = "HTTP";
System.out.println("Default HTTP_SCHEME[" + httpScheme + "]");
}
String contextPath = "";
String appBase = ".";
Tomcat tomcat = new Tomcat();
Service service = tomcat.getService();
if (httpScheme.equalsIgnoreCase("HTTPS")) {
service.addConnector(getSslConnector());
} else {
service.addConnector(getNoSslConnector());
}
tomcat.getHost().setAppBase(appBase);
Context context = tomcat.addContext(contextPath, appBase);
Tomcat.addServlet(context, JERSEY_SERVLET_NAME,
new ServletContainer(new JerseyConfiguration()));
context.addServletMappingDecoded("/oauth2/default/v1/*", JERSEY_SERVLET_NAME);
context.addApplicationListener(CommonAppServletContextListener.class.getName());
tomcat.start();
tomcat.getServer().await();
}
private static Connector getSslConnector() {
String portAsString = System.getenv("PORT");
System.out.println("Env PORT[" + portAsString + "]");
if (portAsString == null || portAsString.trim().length() == 0) {
portAsString = "9090";
System.out.println("Default PORT[" + portAsString + "]");
}
String keyAlias = System.getenv("KEYALIAS");
System.out.println("Env KEYALIAS[" + keyAlias + "]");
String keystorePass = System.getenv("KEYSTOREPASS");
System.out.println("Env KEYSTOREPASS[" + keystorePass + "]");
String keystoreFile = System.getenv("KEYSTOREFILE");
System.out.println("Env KEYSTOREFILE[" + keystoreFile + "]");
Integer port = Integer.valueOf(portAsString);
Connector connector = new Connector();
connector.setPort(port.intValue());
connector.setSecure(true);
connector.setScheme("https");
connector.setAttribute("keyAlias", keyAlias);
connector.setAttribute("keystorePass", keystorePass);
connector.setAttribute("keystoreType", "JKS");
connector.setAttribute("keystoreFile", keystoreFile);
connector.setAttribute("clientAuth", "false");
connector.setAttribute("protocol", "HTTP/1.1");
connector.setAttribute("sslProtocol", "TLS");
connector.setAttribute("maxThreads", "200");
connector.setAttribute("protocol", "org.apache.coyote.http11.Http11AprProtocol");
connector.setAttribute("SSLEnabled", true);
return connector;
}
private static Connector getNoSslConnector() {
String portAsString = System.getenv("PORT");
System.out.println("Env PORT[" + portAsString + "]");
if (portAsString == null || portAsString.trim().length() == 0) {
portAsString = "8080";
System.out.println("Default PORT[" + portAsString + "]");
}
Integer port = Integer.valueOf(portAsString);
Connector connector = new Connector();
connector.setPort(port.intValue());
connector.setSecure(false);
connector.setScheme("http");
connector.setAttribute("protocol", "HTTP/1.1");
connector.setAttribute("maxThreads", "200");
connector.setAttribute("protocol", "org.apache.coyote.http11.Http11AprProtocol");
connector.setAttribute("SSLEnabled", false);
return connector;
}
}
|
[
"\"HTTP_SCHEME\"",
"\"PORT\"",
"\"KEYALIAS\"",
"\"KEYSTOREPASS\"",
"\"KEYSTOREFILE\"",
"\"PORT\""
] |
[] |
[
"PORT",
"KEYSTOREFILE",
"HTTP_SCHEME",
"KEYALIAS",
"KEYSTOREPASS"
] |
[]
|
["PORT", "KEYSTOREFILE", "HTTP_SCHEME", "KEYALIAS", "KEYSTOREPASS"]
|
java
| 5 | 0 | |
test/functional/p2p_unrequested_blocks.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2017-2020 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: three nodes, node0+node1+node2, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test. Node2 will have nMinimumChainWork set to 0x10, so it won't process
low-work unrequested blocks.
We have one NodeConn connection to each, test_node, white_node, and min_work_node,
respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0 and node1, but node2 should skip processing
due to nMinimumChainWork.
Node2 is unused in tests 3-7:
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b. Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that it is missing the height 2 block and send a
getdata for it.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Test Node2 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
import time
from test_framework.mininode import NodeConn, NodeConnCB, NetworkThread, MsgBlock, MsgHeaders, CBlockHeader, mininode_lock, MsgInv, CInv
from test_framework.test_framework import RavenTestFramework
from test_framework.util import os, p2p_port, assert_equal, assert_raises_rpc_error, connect_nodes, sync_blocks
from test_framework.blocktools import create_block, create_coinbase
class AcceptBlockTest(RavenTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("RAVEND", "ravend"),
help="ravend binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-whitelist=127.0.0.1"], ["-minimumchainwork=0x10"]]
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
# Node2 will be used for non-whitelisted peers to test the interaction
# with nMinimumChainWork.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = NodeConnCB() # connects to node0 (not whitelisted)
white_node = NodeConnCB() # connects to node1 (whitelisted)
min_work_node = NodeConnCB() # connects to node2 (not whitelisted)
connections = [NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node),
NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node),
NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], min_work_node)]
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
min_work_node.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (nodes1/2 leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by nodes 1/2
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(3):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(MsgBlock(blocks_h2[0]))
white_node.send_message(MsgBlock(blocks_h2[1]))
min_work_node.send_message(MsgBlock(blocks_h2[2]))
for x in [test_node, white_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
assert_equal(self.nodes[2].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0/node1; correctly rejected by node2")
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in range(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(MsgBlock(blocks_h2f[0]))
white_node.send_message(MsgBlock(blocks_h2f[1]))
for x in [test_node, white_node]:
x.sync_with_ping()
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
self.log.info("Second height 2 block accepted only from whitelisted peer")
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in range(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(MsgBlock(blocks_h3[0]))
white_node.send_message(MsgBlock(blocks_h3[1]))
for x in [test_node, white_node]:
x.sync_with_ping()
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
self.nodes[0].getblock(blocks_h3[0].hash)
self.log.info("Unrequested more-work block accepted from non-whitelisted peer")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
self.log.info("Successfully reorged to length 3 chain from whitelisted peer")
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = MsgHeaders()
all_blocks = [] # node0's blocks
for j in range(2):
for i in range(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(MsgBlock(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(MsgBlock(tips[1])) # Now deliver the tip
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
self.log.info("Unrequested block far ahead of tip accepted from whitelisted peer")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(MsgBlock(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(MsgInv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(MsgBlock(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Connect node2 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 2)
sync_blocks([self.nodes[0], self.nodes[2]])
self.log.info("Successfully synced nodes 2 and 0")
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
[] |
[] |
[
"RAVEND"
] |
[]
|
["RAVEND"]
|
python
| 1 | 0 | |
contrib/gitian-build.py
|
#!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
import os
import subprocess
import sys
def setup():
global args, workdir
programs = ['ruby', 'git', 'make', 'wget', 'curl']
if args.kvm:
programs += ['apt-cacher-ng', 'python-vm-builder', 'qemu-kvm', 'qemu-utils']
elif args.docker and not os.path.isfile('/lib/systemd/system/docker.service'):
dockers = ['docker.io', 'docker-ce']
for i in dockers:
return_code = subprocess.call(['sudo', 'apt-get', 'install', '-qq', i])
if return_code == 0:
break
if return_code != 0:
print('Cannot find any way to install Docker.', file=sys.stderr)
sys.exit(1)
else:
programs += ['apt-cacher-ng', 'lxc', 'debootstrap']
subprocess.check_call(['sudo', 'apt-get', 'install', '-qq'] + programs)
if not os.path.isdir('gitian.sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/mindblockchain-core/gitian.sigs.git'])
if not os.path.isdir('mindblockchain-detached-sigs'):
subprocess.check_call(['git', 'clone', 'https://github.com/mindblockchain-core/mindblockchain-detached-sigs.git'])
if not os.path.isdir('gitian-builder'):
subprocess.check_call(['git', 'clone', 'https://github.com/devrandom/gitian-builder.git'])
if not os.path.isdir('mindblockchain'):
subprocess.check_call(['git', 'clone', 'https://github.com/mindblockchain/mindblockchain.git'])
os.chdir('gitian-builder')
make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64']
if args.docker:
make_image_prog += ['--docker']
elif not args.kvm:
make_image_prog += ['--lxc']
subprocess.check_call(make_image_prog)
os.chdir(workdir)
if args.is_bionic and not args.kvm and not args.docker:
subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net'])
print('Reboot is required')
sys.exit(0)
def build():
global args, workdir
os.makedirs('mindblockchain-binaries/' + args.version, exist_ok=True)
print('\nBuilding Dependencies\n')
os.chdir('gitian-builder')
os.makedirs('inputs', exist_ok=True)
subprocess.check_call(['wget', '-O', 'inputs/osslsigncode-2.0.tar.gz', 'https://github.com/mtrojnar/osslsigncode/archive/2.0.tar.gz'])
subprocess.check_call(["echo '5a60e0a4b3e0b4d655317b2f12a810211c50242138322b16e7e01c6fbb89d92f inputs/osslsigncode-2.0.tar.gz' | sha256sum -c"], shell=True)
subprocess.check_call(['make', '-C', '../mindblockchain/depends', 'download', 'SOURCES_PATH=' + os.getcwd() + '/cache/common'])
if args.linux:
print('\nCompiling ' + args.version + ' Linux')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'mindblockchain='+args.commit, '--url', 'mindblockchain='+args.url, '../mindblockchain/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-linux', '--destination', '../gitian.sigs/', '../mindblockchain/contrib/gitian-descriptors/gitian-linux.yml'])
subprocess.check_call('mv build/out/mindblockchain-*.tar.gz build/out/src/mindblockchain-*.tar.gz ../mindblockchain-binaries/'+args.version, shell=True)
if args.windows:
print('\nCompiling ' + args.version + ' Windows')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'mindblockchain='+args.commit, '--url', 'mindblockchain='+args.url, '../mindblockchain/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-unsigned', '--destination', '../gitian.sigs/', '../mindblockchain/contrib/gitian-descriptors/gitian-win.yml'])
subprocess.check_call('mv build/out/mindblockchain-*-win-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/mindblockchain-*.zip build/out/mindblockchain-*.exe build/out/src/mindblockchain-*.tar.gz ../mindblockchain-binaries/'+args.version, shell=True)
if args.macos:
print('\nCompiling ' + args.version + ' MacOS')
subprocess.check_call(['bin/gbuild', '-j', args.jobs, '-m', args.memory, '--commit', 'mindblockchain='+args.commit, '--url', 'mindblockchain='+args.url, '../mindblockchain/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-unsigned', '--destination', '../gitian.sigs/', '../mindblockchain/contrib/gitian-descriptors/gitian-osx.yml'])
subprocess.check_call('mv build/out/mindblockchain-*-osx-unsigned.tar.gz inputs/', shell=True)
subprocess.check_call('mv build/out/mindblockchain-*.tar.gz build/out/mindblockchain-*.dmg build/out/src/mindblockchain-*.tar.gz ../mindblockchain-binaries/'+args.version, shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Unsigned Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-linux/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-win-unsigned/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-unsigned/'+args.signer])
subprocess.check_call(['git', 'commit', '-m', 'Add '+args.version+' unsigned sigs for '+args.signer])
os.chdir(workdir)
def sign():
global args, workdir
os.chdir('gitian-builder')
if args.windows:
print('\nSigning ' + args.version + ' Windows')
subprocess.check_call('cp inputs/mindblockchain-' + args.version + '-win-unsigned.tar.gz inputs/mindblockchain-win-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../mindblockchain/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-win-signed', '--destination', '../gitian.sigs/', '../mindblockchain/contrib/gitian-descriptors/gitian-win-signer.yml'])
subprocess.check_call('mv build/out/mindblockchain-*win64-setup.exe ../mindblockchain-binaries/'+args.version, shell=True)
if args.macos:
print('\nSigning ' + args.version + ' MacOS')
subprocess.check_call('cp inputs/mindblockchain-' + args.version + '-osx-unsigned.tar.gz inputs/mindblockchain-osx-unsigned.tar.gz', shell=True)
subprocess.check_call(['bin/gbuild', '--skip-image', '--upgrade', '--commit', 'signature='+args.commit, '../mindblockchain/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call(['bin/gsign', '-p', args.sign_prog, '--signer', args.signer, '--release', args.version+'-osx-signed', '--destination', '../gitian.sigs/', '../mindblockchain/contrib/gitian-descriptors/gitian-osx-signer.yml'])
subprocess.check_call('mv build/out/mindblockchain-osx-signed.dmg ../mindblockchain-binaries/'+args.version+'/mindblockchain-'+args.version+'-osx.dmg', shell=True)
os.chdir(workdir)
if args.commit_files:
print('\nCommitting '+args.version+' Signed Sigs\n')
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'add', args.version+'-win-signed/'+args.signer])
subprocess.check_call(['git', 'add', args.version+'-osx-signed/'+args.signer])
subprocess.check_call(['git', 'commit', '-a', '-m', 'Add '+args.version+' signed binary sigs for '+args.signer])
os.chdir(workdir)
def verify():
global args, workdir
rc = 0
os.chdir('gitian-builder')
print('\nVerifying v'+args.version+' Linux\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-linux', '../mindblockchain/contrib/gitian-descriptors/gitian-linux.yml']):
print('Verifying v'+args.version+' Linux FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-unsigned', '../mindblockchain/contrib/gitian-descriptors/gitian-win.yml']):
print('Verifying v'+args.version+' Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-unsigned', '../mindblockchain/contrib/gitian-descriptors/gitian-osx.yml']):
print('Verifying v'+args.version+' MacOS FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Signed Windows\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-win-signed', '../mindblockchain/contrib/gitian-descriptors/gitian-win-signer.yml']):
print('Verifying v'+args.version+' Signed Windows FAILED\n')
rc = 1
print('\nVerifying v'+args.version+' Signed MacOS\n')
if subprocess.call(['bin/gverify', '-v', '-d', '../gitian.sigs/', '-r', args.version+'-osx-signed', '../mindblockchain/contrib/gitian-descriptors/gitian-osx-signer.yml']):
print('Verifying v'+args.version+' Signed MacOS FAILED\n')
rc = 1
os.chdir(workdir)
return rc
def main():
global args, workdir
parser = argparse.ArgumentParser(description='Script for running full Gitian builds.')
parser.add_argument('-c', '--commit', action='store_true', dest='commit', help='Indicate that the version argument is for a commit or branch')
parser.add_argument('-p', '--pull', action='store_true', dest='pull', help='Indicate that the version argument is the number of a github repository pull request')
parser.add_argument('-u', '--url', dest='url', default='https://github.com/mindblockchain/mindblockchain', help='Specify the URL of the repository. Default is %(default)s')
parser.add_argument('-v', '--verify', action='store_true', dest='verify', help='Verify the Gitian build')
parser.add_argument('-b', '--build', action='store_true', dest='build', help='Do a Gitian build')
parser.add_argument('-s', '--sign', action='store_true', dest='sign', help='Make signed binaries for Windows and MacOS')
parser.add_argument('-B', '--buildsign', action='store_true', dest='buildsign', help='Build both signed and unsigned binaries')
parser.add_argument('-o', '--os', dest='os', default='lwm', help='Specify which Operating Systems the build is for. Default is %(default)s. l for Linux, w for Windows, m for MacOS')
parser.add_argument('-j', '--jobs', dest='jobs', default='2', help='Number of processes to use. Default %(default)s')
parser.add_argument('-m', '--memory', dest='memory', default='2000', help='Memory to allocate in MiB. Default %(default)s')
parser.add_argument('-k', '--kvm', action='store_true', dest='kvm', help='Use KVM instead of LXC')
parser.add_argument('-d', '--docker', action='store_true', dest='docker', help='Use Docker instead of LXC')
parser.add_argument('-S', '--setup', action='store_true', dest='setup', help='Set up the Gitian building environment. Only works on Debian-based systems (Ubuntu, Debian)')
parser.add_argument('-D', '--detach-sign', action='store_true', dest='detach_sign', help='Create the assert file for detached signing. Will not commit anything.')
parser.add_argument('-n', '--no-commit', action='store_false', dest='commit_files', help='Do not commit anything to git')
parser.add_argument('signer', nargs='?', help='GPG signer to sign each build assert file')
parser.add_argument('version', nargs='?', help='Version number, commit, or branch to build. If building a commit or branch, the -c option must be specified')
args = parser.parse_args()
workdir = os.getcwd()
args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs'])
if args.kvm and args.docker:
raise Exception('Error: cannot have both kvm and docker')
# Ensure no more than one environment variable for gitian-builder (USE_LXC, USE_VBOX, USE_DOCKER) is set as they
# can interfere (e.g., USE_LXC being set shadows USE_DOCKER; for details see gitian-builder/libexec/make-clean-vm).
os.environ['USE_LXC'] = ''
os.environ['USE_VBOX'] = ''
os.environ['USE_DOCKER'] = ''
if args.docker:
os.environ['USE_DOCKER'] = '1'
elif not args.kvm:
os.environ['USE_LXC'] = '1'
if 'GITIAN_HOST_IP' not in os.environ.keys():
os.environ['GITIAN_HOST_IP'] = '10.0.3.1'
if 'LXC_GUEST_IP' not in os.environ.keys():
os.environ['LXC_GUEST_IP'] = '10.0.3.5'
if args.setup:
setup()
if args.buildsign:
args.build = True
args.sign = True
if not args.build and not args.sign and not args.verify:
sys.exit(0)
args.linux = 'l' in args.os
args.windows = 'w' in args.os
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
if args.macos and not os.path.isfile('gitian-builder/inputs/Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
args.sign_prog = 'true' if args.detach_sign else 'gpg --detach-sign'
script_name = os.path.basename(sys.argv[0])
if not args.signer:
print(script_name+': Missing signer')
print('Try '+script_name+' --help for more information')
sys.exit(1)
if not args.version:
print(script_name+': Missing version')
print('Try '+script_name+' --help for more information')
sys.exit(1)
# Add leading 'v' for tags
if args.commit and args.pull:
raise Exception('Cannot have both commit and pull')
args.commit = ('' if args.commit else 'v') + args.version
os.chdir('mindblockchain')
if args.pull:
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
os.chdir('../gitian-builder/inputs/mindblockchain')
subprocess.check_call(['git', 'fetch', args.url, 'refs/pull/'+args.version+'/merge'])
args.commit = subprocess.check_output(['git', 'show', '-s', '--format=%H', 'FETCH_HEAD'], universal_newlines=True, encoding='utf8').strip()
args.version = 'pull-' + args.version
print(args.commit)
subprocess.check_call(['git', 'fetch'])
subprocess.check_call(['git', 'checkout', args.commit])
os.chdir(workdir)
os.chdir('gitian-builder')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
if args.build:
build()
if args.sign:
sign()
if args.verify:
os.chdir('gitian.sigs')
subprocess.check_call(['git', 'pull'])
os.chdir(workdir)
sys.exit(verify())
if __name__ == '__main__':
main()
|
[] |
[] |
[
"USE_DOCKER",
"USE_LXC",
"USE_VBOX",
"GITIAN_HOST_IP",
"LXC_GUEST_IP"
] |
[]
|
["USE_DOCKER", "USE_LXC", "USE_VBOX", "GITIAN_HOST_IP", "LXC_GUEST_IP"]
|
python
| 5 | 0 | |
vendor/github.com/openshift/generic-admission-server/pkg/cmd/cmd.go
|
package cmd
import (
"flag"
"os"
"runtime"
"github.com/golang/glog"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/util/logs"
"github.com/openshift/generic-admission-server/pkg/apiserver"
"github.com/openshift/generic-admission-server/pkg/cmd/server"
)
// AdmissionHook is what callers provide: the mutating variant, the validating variant, or a type implementing both interfaces.
// We define it here to limit how much of the import tree callers have to deal with for this plugin. This means that
// callers need to match levels of apimachinery, api, client-go, and apiserver.
type AdmissionHook apiserver.AdmissionHook
type ValidatingAdmissionHook apiserver.ValidatingAdmissionHook
type MutatingAdmissionHook apiserver.MutatingAdmissionHook
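// Typical usage (illustrative sketch; myValidatingHook is a hypothetical type that
// satisfies the validating and/or mutating hook interfaces above):
//
//	cmd.RunAdmissionServer(&myValidatingHook{})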
func RunAdmissionServer(admissionHooks ...AdmissionHook) {
logs.InitLogs()
defer logs.FlushLogs()
if len(os.Getenv("GOMAXPROCS")) == 0 {
runtime.GOMAXPROCS(runtime.NumCPU())
}
stopCh := genericapiserver.SetupSignalHandler()
// done to avoid cannot use admissionHooks (type []AdmissionHook) as type []apiserver.AdmissionHook in argument to "github.com/openshift/kubernetes-namespace-reservation/pkg/genericadmissionserver/cmd/server".NewCommandStartAdmissionServer
var castSlice []apiserver.AdmissionHook
for i := range admissionHooks {
castSlice = append(castSlice, admissionHooks[i])
}
cmd := server.NewCommandStartAdmissionServer(os.Stdout, os.Stderr, stopCh, castSlice...)
cmd.Flags().AddGoFlagSet(flag.CommandLine)
if err := cmd.Execute(); err != nil {
glog.Fatal(err)
}
}
|
[
"\"GOMAXPROCS\""
] |
[] |
[
"GOMAXPROCS"
] |
[]
|
["GOMAXPROCS"]
|
go
| 1 | 0 | |
Python-3.7.12/Lib/test/test_c_locale_coercion.py
|
# Tests the attempted automatic coercion of the C locale to a UTF-8 locale
import locale
import os
import shutil
import subprocess
import sys
import sysconfig
import unittest
from collections import namedtuple
import test.support
from test.support.script_helper import (
run_python_until_end,
interpreter_requires_environment,
)
# Set the list of ways we expect to be able to ask for the "C" locale
EXPECTED_C_LOCALE_EQUIVALENTS = ["C", "invalid.ascii"]
# Set our expectation for the default encoding used in the C locale
# for the filesystem encoding and the standard streams
EXPECTED_C_LOCALE_STREAM_ENCODING = "ascii"
EXPECTED_C_LOCALE_FS_ENCODING = "ascii"
# Set our expectation for the default locale used when none is specified
EXPECT_COERCION_IN_DEFAULT_LOCALE = True
TARGET_LOCALES = ["C.UTF-8", "C.utf8", "UTF-8"]
# Apply some platform dependent overrides
if sys.platform.startswith("linux"):
if test.support.is_android:
# Android defaults to using UTF-8 for all system interfaces
EXPECTED_C_LOCALE_STREAM_ENCODING = "utf-8"
EXPECTED_C_LOCALE_FS_ENCODING = "utf-8"
else:
# Linux distros typically alias the POSIX locale directly to the C
# locale.
# TODO: Once https://bugs.python.org/issue30672 is addressed, we'll be
# able to check this case unconditionally
EXPECTED_C_LOCALE_EQUIVALENTS.append("POSIX")
elif sys.platform.startswith("aix"):
# AIX uses iso8859-1 in the C locale, other *nix platforms use ASCII
EXPECTED_C_LOCALE_STREAM_ENCODING = "iso8859-1"
EXPECTED_C_LOCALE_FS_ENCODING = "iso8859-1"
elif sys.platform == "darwin":
# FS encoding is UTF-8 on macOS
EXPECTED_C_LOCALE_FS_ENCODING = "utf-8"
elif sys.platform == "cygwin":
# Cygwin defaults to using C.UTF-8
# TODO: Work out a robust dynamic test for this that doesn't rely on
# CPython's own locale handling machinery
EXPECT_COERCION_IN_DEFAULT_LOCALE = False
# Note that the above expectations are still wrong in some cases, such as:
# * Windows when PYTHONLEGACYWINDOWSFSENCODING is set
# * Any platform other than AIX that uses latin-1 in the C locale
# * Any Linux distro where POSIX isn't a simple alias for the C locale
# * Any Linux distro where the default locale is something other than "C"
#
# Options for dealing with this:
# * Don't set the PY_COERCE_C_LOCALE preprocessor definition on
# such platforms (e.g. it isn't set on Windows)
# * Fix the test expectations to match the actual platform behaviour
# In order to get the warning messages to match up as expected, the candidate
# order here must match the target locale order in Python/pylifecycle.c
_C_UTF8_LOCALES = ("C.UTF-8", "C.utf8", "UTF-8")
# There's no reliable cross-platform way of checking locale alias
# lists, so the only way of knowing which of these locales will work
# is to try them with locale.setlocale(). We do that in a subprocess
# in setUpModule() below to avoid altering the locale of the test runner.
#
# If the relevant locale module attributes exist, and we're not on a platform
# where we expect it to always succeed, we also check that
# `locale.nl_langinfo(locale.CODESET)` works, as if it fails, the interpreter
# will skip locale coercion for that particular target locale
_check_nl_langinfo_CODESET = bool(
sys.platform not in ("darwin", "linux") and
hasattr(locale, "nl_langinfo") and
hasattr(locale, "CODESET")
)
def _set_locale_in_subprocess(locale_name):
cmd_fmt = "import locale; print(locale.setlocale(locale.LC_CTYPE, '{}'))"
if _check_nl_langinfo_CODESET:
# If there's no valid CODESET, we expect coercion to be skipped
cmd_fmt += "; import sys; sys.exit(not locale.nl_langinfo(locale.CODESET))"
cmd = cmd_fmt.format(locale_name)
result, py_cmd = run_python_until_end("-c", cmd, PYTHONCOERCECLOCALE='')
return result.rc == 0
_fields = "fsencoding stdin_info stdout_info stderr_info lang lc_ctype lc_all"
_EncodingDetails = namedtuple("EncodingDetails", _fields)
class EncodingDetails(_EncodingDetails):
# XXX (ncoghlan): Using JSON for child state reporting may be less fragile
CHILD_PROCESS_SCRIPT = ";".join([
"import sys, os, codecs",
"print(codecs.lookup(sys.getfilesystemencoding()).name)",
"print(codecs.lookup(sys.stdin.encoding).name + ':' + sys.stdin.errors)",
"print(codecs.lookup(sys.stdout.encoding).name + ':' + sys.stdout.errors)",
"print(codecs.lookup(sys.stderr.encoding).name + ':' + sys.stderr.errors)",
"print(os.environ.get('LANG', 'not set'))",
"print(os.environ.get('LC_CTYPE', 'not set'))",
"print(os.environ.get('LC_ALL', 'not set'))",
])
@classmethod
def get_expected_details(cls, coercion_expected, fs_encoding, stream_encoding, env_vars):
"""Returns expected child process details for a given encoding"""
_stream = stream_encoding + ":{}"
# stdin and stdout should use surrogateescape either because the
# coercion triggered, or because the C locale was detected
stream_info = 2*[_stream.format("surrogateescape")]
# stderr should always use backslashreplace
stream_info.append(_stream.format("backslashreplace"))
expected_lang = env_vars.get("LANG", "not set")
if coercion_expected:
expected_lc_ctype = CLI_COERCION_TARGET
else:
expected_lc_ctype = env_vars.get("LC_CTYPE", "not set")
expected_lc_all = env_vars.get("LC_ALL", "not set")
env_info = expected_lang, expected_lc_ctype, expected_lc_all
return dict(cls(fs_encoding, *stream_info, *env_info)._asdict())
@classmethod
def get_child_details(cls, env_vars):
"""Retrieves fsencoding and standard stream details from a child process
Returns (encoding_details, stderr_lines):
- encoding_details: EncodingDetails for eager decoding
- stderr_lines: result of calling splitlines() on the stderr output
The child is run in isolated mode if the current interpreter supports
that.
"""
result, py_cmd = run_python_until_end(
"-X", "utf8=0", "-c", cls.CHILD_PROCESS_SCRIPT,
**env_vars
)
if result.rc != 0:
result.fail(py_cmd)
# All subprocess outputs in this test case should be pure ASCII
stdout_lines = result.out.decode("ascii").splitlines()
child_encoding_details = dict(cls(*stdout_lines)._asdict())
stderr_lines = result.err.decode("ascii").rstrip().splitlines()
return child_encoding_details, stderr_lines
# Details of the shared library warning emitted at runtime
LEGACY_LOCALE_WARNING = (
"Python runtime initialized with LC_CTYPE=C (a locale with default ASCII "
"encoding), which may cause Unicode compatibility problems. Using C.UTF-8, "
"C.utf8, or UTF-8 (if available) as alternative Unicode-compatible "
"locales is recommended."
)
# Details of the CLI locale coercion warning emitted at runtime
CLI_COERCION_WARNING_FMT = (
"Python detected LC_CTYPE=C: LC_CTYPE coerced to {} (set another locale "
"or PYTHONCOERCECLOCALE=0 to disable this locale coercion behavior)."
)
AVAILABLE_TARGETS = None
CLI_COERCION_TARGET = None
CLI_COERCION_WARNING = None
def setUpModule():
global AVAILABLE_TARGETS
global CLI_COERCION_TARGET
global CLI_COERCION_WARNING
if AVAILABLE_TARGETS is not None:
# initialization already done
return
AVAILABLE_TARGETS = []
# Find the target locales available in the current system
for target_locale in _C_UTF8_LOCALES:
if _set_locale_in_subprocess(target_locale):
AVAILABLE_TARGETS.append(target_locale)
if AVAILABLE_TARGETS:
# Coercion is expected to use the first available target locale
CLI_COERCION_TARGET = AVAILABLE_TARGETS[0]
CLI_COERCION_WARNING = CLI_COERCION_WARNING_FMT.format(CLI_COERCION_TARGET)
class _LocaleHandlingTestCase(unittest.TestCase):
# Base class to check expected locale handling behaviour
def _check_child_encoding_details(self,
env_vars,
expected_fs_encoding,
expected_stream_encoding,
expected_warnings,
coercion_expected):
"""Check the C locale handling for the given process environment
Parameters:
expected_fs_encoding: expected sys.getfilesystemencoding() result
expected_stream_encoding: expected encoding for standard streams
            expected_warnings: stderr output to expect (if any)
"""
result = EncodingDetails.get_child_details(env_vars)
encoding_details, stderr_lines = result
expected_details = EncodingDetails.get_expected_details(
coercion_expected,
expected_fs_encoding,
expected_stream_encoding,
env_vars
)
self.assertEqual(encoding_details, expected_details)
if expected_warnings is None:
expected_warnings = []
self.assertEqual(stderr_lines, expected_warnings)
class LocaleConfigurationTests(_LocaleHandlingTestCase):
# Test explicit external configuration via the process environment
@classmethod
def setUpClass(cls):
# This relies on setUpModule() having been run, so it can't be
# handled via the @unittest.skipUnless decorator
if not AVAILABLE_TARGETS:
raise unittest.SkipTest("No C-with-UTF-8 locale available")
def test_external_target_locale_configuration(self):
# Explicitly setting a target locale should give the same behaviour as
# is seen when implicitly coercing to that target locale
self.maxDiff = None
expected_fs_encoding = "utf-8"
expected_stream_encoding = "utf-8"
base_var_dict = {
"LANG": "",
"LC_CTYPE": "",
"LC_ALL": "",
"PYTHONCOERCECLOCALE": "",
}
for env_var in ("LANG", "LC_CTYPE"):
for locale_to_set in AVAILABLE_TARGETS:
# XXX (ncoghlan): LANG=UTF-8 doesn't appear to work as
# expected, so skip that combination for now
# See https://bugs.python.org/issue30672 for discussion
if env_var == "LANG" and locale_to_set == "UTF-8":
continue
with self.subTest(env_var=env_var,
configured_locale=locale_to_set):
var_dict = base_var_dict.copy()
var_dict[env_var] = locale_to_set
self._check_child_encoding_details(var_dict,
expected_fs_encoding,
expected_stream_encoding,
expected_warnings=None,
coercion_expected=False)
@test.support.cpython_only
@unittest.skipUnless(sysconfig.get_config_var("PY_COERCE_C_LOCALE"),
"C locale coercion disabled at build time")
class LocaleCoercionTests(_LocaleHandlingTestCase):
# Test implicit reconfiguration of the environment during CLI startup
def _check_c_locale_coercion(self,
fs_encoding, stream_encoding,
coerce_c_locale,
expected_warnings=None,
coercion_expected=True,
**extra_vars):
"""Check the C locale handling for various configurations
Parameters:
fs_encoding: expected sys.getfilesystemencoding() result
stream_encoding: expected encoding for standard streams
coerce_c_locale: setting to use for PYTHONCOERCECLOCALE
None: don't set the variable at all
str: the value set in the child's environment
expected_warnings: expected warning lines on stderr
extra_vars: additional environment variables to set in subprocess
"""
self.maxDiff = None
if not AVAILABLE_TARGETS:
# Locale coercion is disabled when there aren't any target locales
fs_encoding = EXPECTED_C_LOCALE_FS_ENCODING
stream_encoding = EXPECTED_C_LOCALE_STREAM_ENCODING
coercion_expected = False
if expected_warnings:
expected_warnings = [LEGACY_LOCALE_WARNING]
base_var_dict = {
"LANG": "",
"LC_CTYPE": "",
"LC_ALL": "",
"PYTHONCOERCECLOCALE": "",
}
base_var_dict.update(extra_vars)
if coerce_c_locale is not None:
base_var_dict["PYTHONCOERCECLOCALE"] = coerce_c_locale
# Check behaviour for the default locale
with self.subTest(default_locale=True,
PYTHONCOERCECLOCALE=coerce_c_locale):
if EXPECT_COERCION_IN_DEFAULT_LOCALE:
_expected_warnings = expected_warnings
_coercion_expected = coercion_expected
else:
_expected_warnings = None
_coercion_expected = False
# On Android CLI_COERCION_WARNING is not printed when all the
# locale environment variables are undefined or empty. When
# this code path is run with environ['LC_ALL'] == 'C', then
# LEGACY_LOCALE_WARNING is printed.
if (test.support.is_android and
_expected_warnings == [CLI_COERCION_WARNING]):
_expected_warnings = None
self._check_child_encoding_details(base_var_dict,
fs_encoding,
stream_encoding,
_expected_warnings,
_coercion_expected)
# Check behaviour for explicitly configured locales
for locale_to_set in EXPECTED_C_LOCALE_EQUIVALENTS:
for env_var in ("LANG", "LC_CTYPE"):
with self.subTest(env_var=env_var,
nominal_locale=locale_to_set,
PYTHONCOERCECLOCALE=coerce_c_locale):
var_dict = base_var_dict.copy()
var_dict[env_var] = locale_to_set
# Check behaviour on successful coercion
self._check_child_encoding_details(var_dict,
fs_encoding,
stream_encoding,
expected_warnings,
coercion_expected)
def test_PYTHONCOERCECLOCALE_not_set(self):
# This should coerce to the first available target locale by default
self._check_c_locale_coercion("utf-8", "utf-8", coerce_c_locale=None)
def test_PYTHONCOERCECLOCALE_not_zero(self):
# *Any* string other than "0" is considered "set" for our purposes
# and hence should result in the locale coercion being enabled
for setting in ("", "1", "true", "false"):
self._check_c_locale_coercion("utf-8", "utf-8", coerce_c_locale=setting)
def test_PYTHONCOERCECLOCALE_set_to_warn(self):
# PYTHONCOERCECLOCALE=warn enables runtime warnings for legacy locales
self._check_c_locale_coercion("utf-8", "utf-8",
coerce_c_locale="warn",
expected_warnings=[CLI_COERCION_WARNING])
def test_PYTHONCOERCECLOCALE_set_to_zero(self):
# The setting "0" should result in the locale coercion being disabled
self._check_c_locale_coercion(EXPECTED_C_LOCALE_FS_ENCODING,
EXPECTED_C_LOCALE_STREAM_ENCODING,
coerce_c_locale="0",
coercion_expected=False)
# Setting LC_ALL=C shouldn't make any difference to the behaviour
self._check_c_locale_coercion(EXPECTED_C_LOCALE_FS_ENCODING,
EXPECTED_C_LOCALE_STREAM_ENCODING,
coerce_c_locale="0",
LC_ALL="C",
coercion_expected=False)
def test_LC_ALL_set_to_C(self):
# Setting LC_ALL should render the locale coercion ineffective
self._check_c_locale_coercion(EXPECTED_C_LOCALE_FS_ENCODING,
EXPECTED_C_LOCALE_STREAM_ENCODING,
coerce_c_locale=None,
LC_ALL="C",
coercion_expected=False)
# And result in a warning about a lack of locale compatibility
self._check_c_locale_coercion(EXPECTED_C_LOCALE_FS_ENCODING,
EXPECTED_C_LOCALE_STREAM_ENCODING,
coerce_c_locale="warn",
LC_ALL="C",
expected_warnings=[LEGACY_LOCALE_WARNING],
coercion_expected=False)
def test_PYTHONCOERCECLOCALE_set_to_one(self):
# skip the test if the LC_CTYPE locale is C or coerced
old_loc = locale.setlocale(locale.LC_CTYPE, None)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, old_loc)
loc = locale.setlocale(locale.LC_CTYPE, "")
if loc == "C":
self.skipTest("test requires LC_CTYPE locale different than C")
        if loc in TARGET_LOCALES:
self.skipTest("coerced LC_CTYPE locale: %s" % loc)
# bpo-35336: PYTHONCOERCECLOCALE=1 must not coerce the LC_CTYPE locale
# if it's not equal to "C"
code = 'import locale; print(locale.setlocale(locale.LC_CTYPE, None))'
env = dict(os.environ, PYTHONCOERCECLOCALE='1')
cmd = subprocess.run([sys.executable, '-c', code],
stdout=subprocess.PIPE,
env=env,
text=True)
self.assertEqual(cmd.stdout.rstrip(), loc)
def test_main():
test.support.run_unittest(
LocaleConfigurationTests,
LocaleCoercionTests
)
test.support.reap_children()
if __name__ == "__main__":
test_main()
|
[] |
[] |
[
"LC_ALL",
"LANG",
"LC_CTYPE"
] |
[]
|
["LC_ALL", "LANG", "LC_CTYPE"]
|
python
| 3 | 0 | |
airbyte-cdk/python/airbyte_cdk/entrypoint.py
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import argparse
import importlib
import os.path
import sys
import tempfile
from typing import Iterable, List
from airbyte_cdk.logger import init_logger
from airbyte_cdk.models import AirbyteMessage, Status, Type
from airbyte_cdk.sources import Source
from airbyte_cdk.sources.utils.schema_helpers import check_config_against_spec_or_exit, split_config
class AirbyteEntrypoint(object):
def __init__(self, source: Source):
self.source = source
self.logger = init_logger(getattr(source, "name", "source"))
def parse_args(self, args: List[str]) -> argparse.Namespace:
# set up parent parsers
parent_parser = argparse.ArgumentParser(add_help=False)
main_parser = argparse.ArgumentParser()
subparsers = main_parser.add_subparsers(title="commands", dest="command")
# spec
subparsers.add_parser("spec", help="outputs the json configuration specification", parents=[parent_parser])
# check
check_parser = subparsers.add_parser("check", help="checks the config can be used to connect", parents=[parent_parser])
required_check_parser = check_parser.add_argument_group("required named arguments")
required_check_parser.add_argument("--config", type=str, required=True, help="path to the json configuration file")
# discover
discover_parser = subparsers.add_parser(
"discover", help="outputs a catalog describing the source's schema", parents=[parent_parser]
)
required_discover_parser = discover_parser.add_argument_group("required named arguments")
required_discover_parser.add_argument("--config", type=str, required=True, help="path to the json configuration file")
# read
read_parser = subparsers.add_parser("read", help="reads the source and outputs messages to STDOUT", parents=[parent_parser])
read_parser.add_argument("--state", type=str, required=False, help="path to the json-encoded state file")
required_read_parser = read_parser.add_argument_group("required named arguments")
required_read_parser.add_argument("--config", type=str, required=True, help="path to the json configuration file")
required_read_parser.add_argument(
"--catalog", type=str, required=True, help="path to the catalog used to determine which data to read"
)
return main_parser.parse_args(args)
def run(self, parsed_args: argparse.Namespace) -> Iterable[str]:
cmd = parsed_args.command
if not cmd:
raise Exception("No command passed")
# todo: add try catch for exceptions with different exit codes
source_spec = self.source.spec(self.logger)
with tempfile.TemporaryDirectory() as temp_dir:
if cmd == "spec":
message = AirbyteMessage(type=Type.SPEC, spec=source_spec)
yield message.json(exclude_unset=True)
else:
raw_config = self.source.read_config(parsed_args.config)
config = self.source.configure(raw_config, temp_dir)
# Remove internal flags from config before validating so
                # jsonschema's additionalProperties flag won't fail the validation
config, internal_config = split_config(config)
if self.source.check_config_against_spec or cmd == "check":
check_config_against_spec_or_exit(config, source_spec, self.logger)
# Put internal flags back to config dict
config.update(internal_config.dict())
if cmd == "check":
check_result = self.source.check(self.logger, config)
if check_result.status == Status.SUCCEEDED:
self.logger.info("Check succeeded")
else:
self.logger.error("Check failed")
output_message = AirbyteMessage(type=Type.CONNECTION_STATUS, connectionStatus=check_result).json(exclude_unset=True)
yield output_message
elif cmd == "discover":
catalog = self.source.discover(self.logger, config)
yield AirbyteMessage(type=Type.CATALOG, catalog=catalog).json(exclude_unset=True)
elif cmd == "read":
config_catalog = self.source.read_catalog(parsed_args.catalog)
state = self.source.read_state(parsed_args.state)
generator = self.source.read(self.logger, config, config_catalog, state)
for message in generator:
yield message.json(exclude_unset=True)
else:
raise Exception("Unexpected command " + cmd)
def launch(source: Source, args: List[str]):
source_entrypoint = AirbyteEntrypoint(source)
parsed_args = source_entrypoint.parse_args(args)
for message in source_entrypoint.run(parsed_args):
print(message)
def main():
impl_module = os.environ.get("AIRBYTE_IMPL_MODULE", Source.__module__)
impl_class = os.environ.get("AIRBYTE_IMPL_PATH", Source.__name__)
module = importlib.import_module(impl_module)
impl = getattr(module, impl_class)
# set up and run entrypoint
source = impl()
if not isinstance(source, Source):
raise Exception("Source implementation provided does not implement Source class!")
launch(source, sys.argv[1:])
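# Illustrative usage (not part of this module): a connector's own entry script
# typically constructs its Source and delegates to launch(). SourceExample is a
# hypothetical class name used only for the sketch below.
#
#   if __name__ == "__main__":
#       launch(SourceExample(), sys.argv[1:])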
|
[] |
[] |
[
"AIRBYTE_IMPL_PATH",
"AIRBYTE_IMPL_MODULE"
] |
[]
|
["AIRBYTE_IMPL_PATH", "AIRBYTE_IMPL_MODULE"]
|
python
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python3
import os
import sys
from ndh.utils import get_env
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "caracole.settings")
get_env()
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError("Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?") from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pkg/image/image_test.go
|
// Copyright (c) 2018-2019 Sylabs, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package image
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/stretchr/testify/require"
"github.com/sylabs/singularity-cri/pkg/singularity"
k8s "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
func TestPullImage(t *testing.T) {
privateServer := os.Getenv("PRIVATE_SERVER")
privateUsername := os.Getenv("PRIVATE_USERNAME")
privatePassword := os.Getenv("PRIVATE_PASSWORD")
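	// The private-registry cases below run only when the PRIVATE_SERVER,
	// PRIVATE_USERNAME and PRIVATE_PASSWORD environment variables are set;
	// otherwise the corresponding skip fields mark them skipped.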
tt := []struct {
name string
skip bool
ref *Reference
auth *k8s.AuthConfig
expectImage *Info
expectError string
}{
{
name: "unknown registry",
ref: &Reference{
uri: "foo.io",
tags: []string{"foo.io/cri-tools/test-image-latest"},
},
expectImage: nil,
expectError: "could not pull image: unknown image registry: foo.io",
},
{
name: "docker image",
ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"busybox:1.31"},
},
expectImage: &Info{
Size: 782336,
Ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"busybox:1.31"},
},
OciConfig: &specs.ImageConfig{
Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
Cmd: []string{"sh"},
},
},
},
{
name: "custom docker server address",
ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"cri-tools/test-image-latest"},
},
auth: &k8s.AuthConfig{
ServerAddress: "gcr.io",
},
expectImage: &Info{
Size: 782336,
Ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"cri-tools/test-image-latest"},
},
OciConfig: &specs.ImageConfig{
Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
Cmd: []string{"sh"},
},
},
},
{
name: "library by digest",
ref: &Reference{
uri: singularity.LibraryDomain,
digests: []string{
"cloud.sylabs.io/sylabs/tests/busybox:sha256.8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
},
},
expectImage: &Info{
ID: "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
Sha256: "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
Size: 672699,
Path: filepath.Join(os.TempDir(), "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba"),
Ref: &Reference{
uri: singularity.LibraryDomain,
digests: []string{
"cloud.sylabs.io/sylabs/tests/busybox:sha256.8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
},
},
},
},
{
name: "library by tag",
ref: &Reference{
uri: singularity.LibraryDomain,
tags: []string{
"cloud.sylabs.io/sylabs/tests/busybox:1.0.0",
},
},
expectImage: &Info{
ID: "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
Sha256: "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
Size: 672699,
Path: filepath.Join(os.TempDir(), "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba"),
Ref: &Reference{
uri: singularity.LibraryDomain,
tags: []string{
"cloud.sylabs.io/sylabs/tests/busybox:1.0.0",
},
},
},
},
{
name: "private docker image without creds",
ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"sylabs/test:latest"},
},
auth: &k8s.AuthConfig{
ServerAddress: privateServer,
},
skip: privateServer == "",
expectError: "unauthorized: authentication required",
},
{
name: "private docker image",
ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"sylabs/test:latest"},
},
auth: &k8s.AuthConfig{
ServerAddress: privateServer,
Username: privateUsername,
Password: privatePassword,
},
skip: privateServer == "" && privatePassword == "",
expectImage: &Info{
Size: 2723840,
Ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"sylabs/test:latest"},
},
OciConfig: &specs.ImageConfig{
Env: []string{"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"},
Cmd: []string{"/bin/sh"},
},
},
},
{
			name: "local SIF not found",
ref: &Reference{
uri: singularity.LocalFileDomain,
tags: []string{"local.file/tmp/not-found.sif"},
},
expectError: "no such file or directory",
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
if tc.skip {
t.Skip()
}
image, err := Pull(context.Background(), os.TempDir(), tc.ref, tc.auth)
if tc.expectError == "" {
require.NoError(t, err, "unexpected error")
} else {
require.Error(t, err, "expected error, but got nil")
require.Contains(t, err.Error(), tc.expectError, "unexpected pull error")
}
if image != nil {
require.NoError(t, image.Remove(), "could not remove image")
}
if image != nil && tc.ref.URI() == singularity.DockerDomain {
image.ID = ""
image.Sha256 = ""
image.Path = ""
}
require.Equal(t, tc.expectImage, image, "image mismatch")
})
}
}
func TestLibraryInfo(t *testing.T) {
tt := []struct {
name string
ref *Reference
expectImage *Info
expectError error
}{
{
name: "unknown registry",
ref: &Reference{
uri: "foo.io",
tags: []string{"foo.io/cri-tools/test-image-latest"},
},
expectError: ErrNotLibrary,
},
{
name: "docker image",
ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"gcr.io/cri-tools/test-image-latest"},
},
expectError: ErrNotLibrary,
},
{
name: "library by digest",
ref: &Reference{
uri: singularity.LibraryDomain,
digests: []string{
"cloud.sylabs.io/sylabs/tests/busybox:sha256.8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
},
},
expectImage: &Info{
ID: "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
Sha256: "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
Size: 672699,
Ref: &Reference{
uri: singularity.LibraryDomain,
digests: []string{
"cloud.sylabs.io/sylabs/tests/busybox:sha256.8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
},
},
},
},
{
name: "library by tag",
ref: &Reference{
uri: singularity.LibraryDomain,
tags: []string{
"cloud.sylabs.io/sylabs/tests/busybox:1.0.0",
},
},
expectImage: &Info{
ID: "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
Sha256: "8b5478b0f2962eba3982be245986eb0ea54f5164d90a65c078af5b83147009ba",
Size: 672699,
Ref: &Reference{
uri: singularity.LibraryDomain,
tags: []string{
"cloud.sylabs.io/sylabs/tests/busybox:1.0.0",
},
},
},
},
{
name: "library not found",
ref: &Reference{
uri: singularity.LibraryDomain,
digests: []string{"cloud.sylabs.io/sylabs/tests/not-found"},
},
expectError: ErrNotFound,
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
image, err := LibraryInfo(context.Background(), tc.ref, nil)
require.Equal(t, tc.expectError, err, "could not get library image info")
require.Equal(t, tc.expectImage, image, "image mismatch")
})
}
}
func TestInfo_Verify(t *testing.T) {
tt := []struct {
name string
imgRef *Reference
image *Info
expectError string
}{
{
name: "docker image",
imgRef: &Reference{
uri: singularity.DockerDomain,
tags: []string{"gcr.io/cri-tools/test-image-latest"},
},
expectError: "",
},
{
name: "signed SIF",
imgRef: &Reference{
uri: singularity.LibraryDomain,
tags: []string{"sylabs/tests/verify_success:1.0.1"},
},
expectError: "",
},
{
name: "non-signed SIF",
imgRef: &Reference{
uri: singularity.LibraryDomain,
tags: []string{"sylabs/tests/unsigned:1.0.0"},
},
expectError: "",
},
{
name: "broken signature SIF",
imgRef: &Reference{
uri: singularity.LibraryDomain,
tags: []string{"sylabs/tests/verify_corrupted:1.0.1"},
},
expectError: "verification failed",
},
{
name: "broken image info",
image: &Info{
Path: "/foo/bar",
Ref: &Reference{
uri: singularity.LibraryDomain,
},
},
expectError: "open /foo/bar: no such file or directory",
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
var err error
img := tc.image
if img == nil {
img, err = Pull(context.Background(), os.TempDir(), tc.imgRef, nil)
require.NoError(t, err, "could not pull SIF")
defer func() {
require.NoError(t, img.Remove(), "could not remove SIF")
}()
}
err = img.Verify()
if tc.expectError == "" {
require.NoError(t, err, "unexpected error")
} else {
require.Error(t, err, "expected error, but got nil")
require.Contains(t, err.Error(), tc.expectError, "unexpected verify error")
}
})
}
}
func TestInfo_BorrowReturn(t *testing.T) {
tt := []struct {
name string
borrow []string
ret []string
expectUsedBy []string
}{
{
name: "not used",
},
{
name: "used and returned",
borrow: []string{"first_container"},
ret: []string{"first_container"},
},
{
name: "used and not returned",
borrow: []string{"first_container"},
expectUsedBy: []string{"first_container"},
},
{
name: "multiple return",
borrow: []string{"first_container", "second_container"},
ret: []string{"first_container", "second_container"},
},
{
name: "multiple without return",
borrow: []string{"first_container", "second_container"},
ret: []string{"second_container"},
expectUsedBy: []string{"first_container"},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
var image Info
for _, b := range tc.borrow {
image.Borrow(b)
}
for _, r := range tc.ret {
image.Return(r)
}
actual := image.UsedBy()
require.ElementsMatch(t, tc.expectUsedBy, actual)
})
}
}
func TestInfo_Remove(t *testing.T) {
f, err := ioutil.TempFile("", "")
require.NoError(t, err, "could not create temp image file")
require.NoError(t, f.Close())
defer os.Remove(f.Name())
tt := []struct {
name string
image *Info
expectError error
}{
{
name: "non existent file",
image: &Info{
Path: "/foo/bar",
},
expectError: fmt.Errorf("could not remove image: remove /foo/bar: no such file or directory"),
},
{
name: "image is used",
image: &Info{
Path: "/foo/bar",
usedBy: []string{"container_id"},
},
expectError: ErrIsUsed,
},
{
name: "all ok",
image: &Info{
Path: f.Name(),
},
expectError: nil,
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
err = tc.image.Remove()
require.Equal(t, tc.expectError, err)
})
}
}
func TestInfo_Matches(t *testing.T) {
tt := []struct {
name string
img *Info
filter *k8s.ImageFilter
expect bool
}{
{
name: "no filter",
img: &Info{
ID: "7b0178cb4bac7227f83a56d62d3fdf9900645b6d53578aaad25a7df61ae15b39",
Ref: &Reference{
tags: []string{"gcr.io/cri-tools/test-image-tags:1", "gcr.io/cri-tools/test-image-tags:2"},
digests: []string{},
},
},
filter: &k8s.ImageFilter{},
expect: true,
},
{
name: "id match",
img: &Info{
ID: "7b0178cb4bac7227f83a56d62d3fdf9900645b6d53578aaad25a7df61ae15b39",
Ref: &Reference{
tags: []string{"gcr.io/cri-tools/test-image-tags:1", "gcr.io/cri-tools/test-image-tags:2"},
digests: []string{},
},
},
filter: &k8s.ImageFilter{
Image: &k8s.ImageSpec{
Image: "7b0178cb4bac7227f83a56d62d3fdf9900645b6d53578aaad25a7df61ae15b39",
},
},
expect: true,
},
{
name: "tag match",
img: &Info{
ID: "7b0178cb4bac7227f83a56d62d3fdf9900645b6d53578aaad25a7df61ae15b39",
Ref: &Reference{
tags: []string{"gcr.io/cri-tools/test-image-tags:1", "gcr.io/cri-tools/test-image-tags:2"},
digests: []string{},
},
},
filter: &k8s.ImageFilter{
Image: &k8s.ImageSpec{
Image: "gcr.io/cri-tools/test-image-tags",
},
},
expect: true,
},
{
name: "digest match",
img: &Info{
ID: "7b0178cb4bac7227f83a56d62d3fdf9900645b6d53578aaad25a7df61ae15b39",
Ref: &Reference{
tags: []string{},
digests: []string{"gcr.io/cri-tools/test-image-digest@sha256:9179135b4b4cc5a8721e09379244807553c318d92fa3111a65133241551ca343"},
},
},
filter: &k8s.ImageFilter{
Image: &k8s.ImageSpec{
Image: "gcr.io/cri-tools/test-image-digest",
},
},
expect: true,
},
{
name: "empty filter",
img: &Info{
ID: "7b0178cb4bac7227f83a56d62d3fdf9900645b6d53578aaad25a7df61ae15b39",
Ref: &Reference{
tags: []string{},
digests: []string{"gcr.io/cri-tools/test-image-digest@sha256:9179135b4b4cc5a8721e09379244807553c318d92fa3111a65133241551ca343"},
},
},
filter: &k8s.ImageFilter{
Image: &k8s.ImageSpec{
Image: "",
},
},
expect: true,
},
{
name: "no match",
img: &Info{
ID: "7b0178cb4bac7227f83a56d62d3fdf9900645b6d53578aaad25a7df61ae15b39",
Ref: &Reference{
tags: []string{},
digests: []string{"gcr.io/cri-tools/test-image-digest@sha256:9179135b4b4cc5a8721e09379244807553c318d92fa3111a65133241551ca343"},
},
},
filter: &k8s.ImageFilter{
Image: &k8s.ImageSpec{
Image: "gcr.io/cri-tools/test-image-tags",
},
},
expect: false,
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
require.Equal(t, tc.expect, tc.img.Matches(tc.filter))
})
}
}
func TestInfo_UnmarshalJSON(t *testing.T) {
tt := []struct {
name string
input string
expect *Info
}{
{
name: "all filled",
input: `
{
"id":"0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"sha256":"0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"size":741376,
"path":"/var/lib/singularity/0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"ref":{
"uri":"docker.io",
"tags":["busybox:1.28"],
"digests":null
},
"ociConfig":{
"User":"sasha",
"WorkingDir":"/opt/go",
"Cmd":["./my-server"]
}
}`,
expect: &Info{
ID: "0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Sha256: "0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Size: 741376,
Path: "/var/lib/singularity/0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"busybox:1.28"},
},
OciConfig: &specs.ImageConfig{
User: "sasha",
Cmd: []string{"./my-server"},
WorkingDir: "/opt/go",
},
},
},
{
name: "no oci config",
input: `
{
"id":"0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"sha256":"0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"size":741376,
"path":"/var/lib/singularity/0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"ref":{
"uri":"docker.io",
"tags":["busybox:1.28"],
"digests":null
}
}`,
expect: &Info{
ID: "0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Sha256: "0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Size: 741376,
Path: "/var/lib/singularity/0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"busybox:1.28"},
},
},
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
var info *Info
err := json.Unmarshal([]byte(tc.input), &info)
require.NoError(t, err, "could not unmarshal image")
require.Equal(t, tc.expect, info)
})
}
}
func TestInfo_MarshalJSON(t *testing.T) {
tt := []struct {
name string
input *Info
expect string
}{
{
name: "all filled",
input: &Info{
ID: "0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Sha256: "0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Size: 741376,
Path: "/var/lib/singularity/0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"busybox:1.28"},
},
OciConfig: &specs.ImageConfig{
User: "sasha",
Cmd: []string{"./my-server"},
WorkingDir: "/opt/go",
},
usedBy: []string{"should-not-marshal"},
},
expect: `
{
"id":"0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"sha256":"0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"size":741376,
"path":"/var/lib/singularity/0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"ref":{
"uri":"docker.io",
"tags":["busybox:1.28"],
"digests":null
},
"ociConfig":{
"User":"sasha",
"WorkingDir":"/opt/go",
"Cmd":["./my-server"]
}
}`,
},
{
name: "no oci config",
input: &Info{
ID: "0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Sha256: "0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Size: 741376,
Path: "/var/lib/singularity/0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
Ref: &Reference{
uri: singularity.DockerDomain,
tags: []string{"busybox:1.28"},
},
usedBy: []string{"should-not-marshal"},
},
expect: `
{
"id":"0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"sha256":"0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"size":741376,
"path":"/var/lib/singularity/0d408f32cc56b16509f30ae3dfa56ffb01269b2100036991d49af645a7b717a0",
"ref":{
"uri":"docker.io",
"tags":["busybox:1.28"],
"digests":null
}
}`,
},
}
for _, tc := range tt {
t.Run(tc.name, func(t *testing.T) {
res, err := json.Marshal(tc.input)
require.NoError(t, err, "could not marshal image")
require.JSONEq(t, tc.expect, string(res))
})
}
}
|
[
"\"PRIVATE_SERVER\"",
"\"PRIVATE_USERNAME\"",
"\"PRIVATE_PASSWORD\""
] |
[] |
[
"PRIVATE_PASSWORD",
"PRIVATE_SERVER",
"PRIVATE_USERNAME"
] |
[]
|
["PRIVATE_PASSWORD", "PRIVATE_SERVER", "PRIVATE_USERNAME"]
|
go
| 3 | 0 | |
main.go
|
package main
import (
"github.com/codegangsta/cli"
"github.com/mgutz/logxi/v1"
"os"
"path/filepath"
)
var logger log.Logger
var (
BasePath string
SrcPath string
TargetPath string
)
var (
flSrcPath = cli.StringFlag{
Name: "src,s",
Value: ".",
		Usage: "source directory",
}
flTargetPath = cli.StringFlag{
Name: "target,t",
Value: "./target",
		Usage: "target (output) directory",
}
flBuild = cli.BoolFlag{
Name: "build,b",
		Usage: "build the site before starting the preview server",
}
flAddr = cli.StringFlag{
Name: "addr,a",
Value: ":8080",
Usage: "preview address",
}
)
func init() {
if os.Getenv("LOGXI") == "" {
log.ProcessLogxiEnv("*=INF")
}
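	// When LOGXI is unset, logging defaults to the "*=INF" spec applied above;
	// a LOGXI value set in the environment takes precedence.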
logger = log.New("linen")
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
if err != nil {
logger.Error(err.Error())
}
BasePath = dir
}
func main() {
app := cli.NewApp()
app.Name = "linen"
	app.Usage = "a simple static page(s) generator"
app.Version = VERSION
app.Commands = []cli.Command{
{
Name: "build",
ShortName: "b",
Usage: "build source pages",
Flags: []cli.Flag{flSrcPath, flTargetPath},
Action: BuildAction,
},
{
Name: "preview",
ShortName: "p",
Usage: "preview",
Flags: []cli.Flag{flSrcPath, flTargetPath, flBuild, flAddr},
Action: PreviewAction,
},
}
if os.Getenv("LOGXI") == "" {
os.Setenv("LOGXI", "*=INF")
}
app.Run(os.Args)
}
func BuildAction(c *cli.Context) {
SrcPath = c.String("src")
TargetPath = c.String("target")
Build(SrcPath, TargetPath)
logger.Info("DONE")
}
func PreviewAction(c *cli.Context) {
var err error
SrcPath, err = filepath.Abs(c.String("src"))
if err != nil {
logger.Error("srcPath", "path", SrcPath, "err", err)
os.Exit(1)
}
TargetPath, err = filepath.Abs(c.String("target"))
if err != nil {
logger.Error("targetPath", "path", TargetPath, "err", err)
os.Exit(1)
}
build := c.Bool("build")
addr := c.String("addr")
previewServe(addr, build)
}
|
[
"\"LOGXI\"",
"\"LOGXI\""
] |
[] |
[
"LOGXI"
] |
[]
|
["LOGXI"]
|
go
| 1 | 0 | |
selfdrive/controls/lib/lateral_planner.py
|
import os
import math
import numpy as np
from common.params import Params
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp, clip
from selfdrive.car.hyundai.values import CAR
from selfdrive.ntune import ntune_common_get, ntune_common_enabled
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS
from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE
from selfdrive.config import Conversions as CV
import cereal.messaging as messaging
from cereal import log
AUTO_LCA_START_TIME = 1.0
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
LOG_MPC = os.environ.get('LOG_MPC', False)
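# Note: os.environ.get returns the raw string when LOG_MPC is set, so any
# non-empty value (including "0") enables MPC logging; the False default only
# applies when the variable is unset.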
LANE_CHANGE_SPEED_MIN = 60 * CV.KPH_TO_MS
LANE_CHANGE_TIME_MAX = 10.
DESIRES = {
LaneChangeDirection.none: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,
},
LaneChangeDirection.left: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,
},
LaneChangeDirection.right: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,
},
}
class LateralPlanner():
def __init__(self, CP, use_lanelines=True, wide_camera=False):
self.use_lanelines = use_lanelines
self.LP = LanePlanner(wide_camera)
self.last_cloudlog_t = 0
self.setup_mpc()
self.solution_invalid_cnt = 0
self.lane_change_enabled = Params().get_bool('LaneChangeEnabled')
self.auto_lane_change_enabled = Params().get_bool('AutoLaneChangeEnabled')
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
self.lane_change_timer = 0.0
self.lane_change_ll_prob = 1.0
self.keep_pulse_timer = 0.0
self.prev_one_blinker = False
self.desire = log.LateralPlan.Desire.none
self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))
self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))
self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
self.t_idxs = np.arange(TRAJECTORY_SIZE)
self.y_pts = np.zeros(TRAJECTORY_SIZE)
self.auto_lane_change_timer = 0.0
self.prev_torque_applied = False
self.steerRatio = 0.0
self.wide_camera = wide_camera
def setup_mpc(self):
self.libmpc = libmpc_py.libmpc
self.libmpc.init()
self.mpc_solution = libmpc_py.ffi.new("log_t *")
self.cur_state = libmpc_py.ffi.new("state_t *")
self.cur_state[0].x = 0.0
self.cur_state[0].y = 0.0
self.cur_state[0].psi = 0.0
self.cur_state[0].curvature = 0.0
self.desired_curvature = 0.0
self.safe_desired_curvature = 0.0
self.desired_curvature_rate = 0.0
self.safe_desired_curvature_rate = 0.0
def update(self, sm, CP):
v_ego = sm['carState'].vEgo
active = sm['controlsState'].active
measured_curvature = sm['controlsState'].curvature
md = sm['modelV2']
self.LP.parse_model(sm['modelV2'])
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
cameraOffset = ntune_common_get("cameraOffset") + 0.08 if self.wide_camera else ntune_common_get("cameraOffset")
self.path_xyz[:, 1] -= cameraOffset
self.t_idxs = np.array(md.position.t)
self.plan_yaw = list(md.orientation.z)
if len(md.orientation.xStd) == TRAJECTORY_SIZE:
self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])
# Lane change logic
one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
below_lane_change_speed = v_ego < LANE_CHANGE_SPEED_MIN
if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX) or (not one_blinker) or (not self.lane_change_enabled):
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
else:
torque_applied = sm['carState'].steeringPressed and \
((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right)) or \
self.auto_lane_change_enabled and \
(AUTO_LCA_START_TIME+0.25) > self.auto_lane_change_timer > AUTO_LCA_START_TIME
blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))
lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
# State transitions
# off
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
if sm['carState'].leftBlinker:
self.lane_change_direction = LaneChangeDirection.left
elif sm['carState'].rightBlinker:
self.lane_change_direction = LaneChangeDirection.right
self.lane_change_state = LaneChangeState.preLaneChange
self.lane_change_ll_prob = 1.0
# pre
elif self.lane_change_state == LaneChangeState.preLaneChange:
if not one_blinker or below_lane_change_speed:
self.lane_change_state = LaneChangeState.off
elif torque_applied and (not blindspot_detected or self.prev_torque_applied):
self.lane_change_state = LaneChangeState.laneChangeStarting
elif torque_applied and blindspot_detected and self.auto_lane_change_timer != 10.0:
self.auto_lane_change_timer = 10.0
elif not torque_applied and self.auto_lane_change_timer == 10.0 and not self.prev_torque_applied:
self.prev_torque_applied = True
# starting
elif self.lane_change_state == LaneChangeState.laneChangeStarting:
# fade out over .5s
self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)
# 98% certainty
if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:
self.lane_change_state = LaneChangeState.laneChangeFinishing
# finishing
elif self.lane_change_state == LaneChangeState.laneChangeFinishing:
# fade in laneline over 1s
self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)
if one_blinker and self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.preLaneChange
elif self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.off
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
self.lane_change_timer = 0.0
else:
self.lane_change_timer += DT_MDL
if self.lane_change_state == LaneChangeState.off:
self.auto_lane_change_timer = 0.0
self.prev_torque_applied = False
    elif self.auto_lane_change_timer < (AUTO_LCA_START_TIME+0.25):  # stop after 3 sec; resume from 10 when torque applied
self.auto_lane_change_timer += DT_MDL
self.prev_one_blinker = one_blinker
self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]
    # Send keep pulse once per second during LaneChangeState.preLaneChange
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:
self.keep_pulse_timer = 0.0
elif self.lane_change_state == LaneChangeState.preLaneChange:
self.keep_pulse_timer += DT_MDL
if self.keep_pulse_timer > 1.0:
self.keep_pulse_timer = 0.0
elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:
self.desire = log.LateralPlan.Desire.none
# Turn off lanes during lane change
if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:
self.LP.lll_prob *= self.lane_change_ll_prob
self.LP.rll_prob *= self.lane_change_ll_prob
if self.use_lanelines:
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, ntune_common_get('steerRateCost'))
else:
d_path_xyz = self.path_xyz
path_cost = np.clip(abs(self.path_xyz[0, 1] / self.path_xyz_stds[0, 1]), 0.5, 5.0) * MPC_COST_LAT.PATH
# Heading cost is useful at low speed, otherwise end of plan can be off-heading
heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
self.libmpc.set_weights(path_cost, heading_cost, ntune_common_get('steerRateCost'))
y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])
heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
self.y_pts = y_pts
assert len(y_pts) == LAT_MPC_N + 1
assert len(heading_pts) == LAT_MPC_N + 1
# for now CAR_ROTATION_RADIUS is disabled
# to use it, enable it in the MPC
assert abs(CAR_ROTATION_RADIUS) < 1e-3
self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
float(v_ego),
CAR_ROTATION_RADIUS,
list(y_pts),
list(heading_pts))
# init state for next
self.cur_state.x = 0.0
self.cur_state.y = 0.0
self.cur_state.psi = 0.0
self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.mpc_solution.curvature)
    # Check for infeasible MPC solution
mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)
t = sec_since_boot()
if mpc_nans:
self.libmpc.init()
self.cur_state.curvature = measured_curvature
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.mpc_solution[0].cost > 20000. or mpc_nans: # TODO: find a better way to detect when MPC did not converge
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
def publish(self, sm, pm):
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message('lateralPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2'])
plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)
plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]
plan_send.lateralPlan.psis = [float(x) for x in self.mpc_solution.psi[0:CONTROL_N]]
plan_send.lateralPlan.curvatures = [float(x) for x in self.mpc_solution.curvature[0:CONTROL_N]]
plan_send.lateralPlan.curvatureRates = [float(x) for x in self.mpc_solution.curvature_rate[0:CONTROL_N-1]] +[0.0]
plan_send.lateralPlan.lProb = float(self.LP.lll_prob)
plan_send.lateralPlan.rProb = float(self.LP.rll_prob)
plan_send.lateralPlan.dProb = float(self.LP.d_prob)
plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
plan_send.lateralPlan.desire = self.desire
plan_send.lateralPlan.laneChangeState = self.lane_change_state
plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction
plan_send.lateralPlan.autoLaneChangeEnabled = self.auto_lane_change_enabled
plan_send.lateralPlan.autoLaneChangeTimer = int(AUTO_LCA_START_TIME) - int(self.auto_lane_change_timer)
pm.send('lateralPlan', plan_send)
if LOG_MPC:
dat = messaging.new_message('liveMpc')
dat.liveMpc.x = list(self.mpc_solution.x)
dat.liveMpc.y = list(self.mpc_solution.y)
dat.liveMpc.psi = list(self.mpc_solution.psi)
dat.liveMpc.curvature = list(self.mpc_solution.curvature)
dat.liveMpc.cost = self.mpc_solution.cost
pm.send('liveMpc', dat)
|
[] |
[] |
[
"LOG_MPC"
] |
[]
|
["LOG_MPC"]
|
python
| 1 | 0 | |
components/authn-service/tokens/tokens_test.go
|
package token
import (
"context"
"math/rand"
"os"
"reflect"
"runtime"
"strings"
"testing"
"time"
"github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"github.com/chef/automate/components/authn-service/constants"
"github.com/chef/automate/components/authn-service/tokens/mock"
"github.com/chef/automate/components/authn-service/tokens/pg"
"github.com/chef/automate/components/authn-service/tokens/pg/testconstants"
tokens "github.com/chef/automate/components/authn-service/tokens/types"
tutil "github.com/chef/automate/components/authn-service/tokens/util"
"github.com/chef/automate/lib/grpc/auth_context"
uuid "github.com/chef/automate/lib/uuid4"
)
var logger *zap.Logger
func init() {
cfg := zap.NewProductionConfig()
cfg.Level.SetLevel(zap.ErrorLevel)
logger, _ = cfg.Build()
rand.Seed(time.Now().Unix())
}
type adapterTestFunc func(context.Context, *testing.T, tokens.Storage)
// TestToken tests the mock and pg adapters via their implemented adapter
// interface
func TestToken(t *testing.T) {
pgURLGiven := false
// Note: this matches CI
pgCfg := pg.Config{
PGURL: constants.TestPgURL,
MigrationsPath: "pg/sql/",
}
if v, found := os.LookupEnv("PG_URL"); found {
pgCfg.PGURL = v
pgURLGiven = true
}
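	// For example, the pg tests can be pointed at a local database with something like
	// (hypothetical URL; adjust to your environment):
	//   PG_URL="postgresql://postgres@127.0.0.1:5432/authn_test?sslmode=disable" go test ./...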
adapters := map[string]tokens.TokenConfig{
"mock": &mock.Config{},
"pg": &pgCfg,
}
// Note: because the pg adapter doesn't let us set the stage so easily,
// these overlap a bit: most _create_ 1+ tokens first
// (any failures in these "setup creates" are triggering a test failure,
	// i.e., they're t.Fatal'ing out).
tests := []adapterTestFunc{
testGetTokens,
testGetToken,
testGetTokenIDWithValue,
testGetTokenIDWithValueNotFound,
testCreateToken,
testCreateTokenWithInvalidValueFails,
testCreateTokenWithValue,
testCreateLegacyTokenWithInvalidValueFails,
testCreateLegacyTokenWithValue,
testDeleteToken,
testDeleteTokenNotFound,
testUpdateTokenActiveOnly,
testUpdateTokenNotFound,
} // Note: if a "not found" case is last, we'll leave a tidy test database
for adpName, adpCfg := range adapters {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
t.Run(adpName, func(t *testing.T) {
for _, test := range tests {
// TODO 2017/09/02 sr: this is pretty inefficient, we'll run the pg
// migrations for each and every test case. Since the overall
// performance still isn't that bad, I'll leave it at that for now.
adp, err := adpCfg.Open(nil, logger)
if err != nil {
// The logic to determine if we want to ignore this PG connection
// failure is as follows:
// - if the developer has passed PG_URL, we assume they want to run
// the pg tests (for testing migrations, etc)
// - if this is running on CI, never skip
// Why bother skipping? -- We don't want our test suite to require
					// a running postgres instance, as that would be annoying.
if pgURLGiven || os.Getenv("CI") == "true" {
t.Fatalf("opening connector: %s", err)
} else {
t.Logf("opening database: %s", err)
t.Logf(testconstants.SkipPGMessageFmt, pgCfg.PGURL)
t.SkipNow()
}
}
require.Nil(t, err, "opening connector: %s", err)
if r, ok := adp.(tokens.Resetter); ok {
err := r.Reset(ctx)
require.Nil(t, err, "reset adapter: %s", err)
}
// use the function name to identify the test case
name := strings.Split(runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name(), ".")[2]
t.Run(name, func(t *testing.T) {
test(ctx, t, adp)
})
}
})
}
}
// TODO (bs): we should insert these w/ sql
func testGetTokens(ctx context.Context, t *testing.T, ta tokens.Storage) {
tok, err := ta.CreateToken(ctx, "id0", "node1", true, []string{"project-1"})
require.Nil(t, err, "expected no error, got err=%v", err)
tok2, err := ta.CreateToken(ctx, "id1", "node2", true, []string{"project-1"})
require.Nil(t, err, "expected no error, got err=%v", err)
actualToks, err := ta.GetTokens(ctx)
assert.NoError(t, err)
assert.ElementsMatch(t, []*tokens.Token{tok, tok2}, actualToks)
}
func testGetToken(ctx context.Context, t *testing.T, ta tokens.Storage) {
id := "id0"
expectedTok, err := ta.CreateToken(ctx, id, "node1", true, []string{"project-1"})
require.NoError(t, err)
actualTok, err := ta.GetToken(ctx, id)
assert.NoError(t, err)
assert.Equal(t, expectedTok, actualTok)
}
func testGetTokenIDWithValueNotFound(ctx context.Context, t *testing.T, ta tokens.Storage) {
_, err := ta.GetTokenIDWithValue(ctx, "not-found")
assert.Error(t, err)
if err != nil {
if _, ok := errors.Cause(err).(*tokens.NotFoundError); !ok {
t.Errorf("expected token.NotFoundError, got %s", err)
}
}
}
func testGetTokenIDWithValue(ctx context.Context, t *testing.T, ta tokens.Storage) {
expectedID := "id0"
expectedTok, err := ta.CreateToken(ctx, expectedID, "description", true, []string{"project-1"})
require.NoError(t, err)
tokID, err := ta.GetTokenIDWithValue(ctx, expectedTok.Value)
assert.NoError(t, err)
assert.Equal(t, expectedID, tokID)
}
func testCreateToken(ctx context.Context, t *testing.T, ta tokens.Storage) {
id := "id0"
expectedTok, err := ta.CreateToken(ctx, id, "node1", true, []string{"project-1"})
require.Nil(t, err, "expected no error, got err=%v", err)
// TODO use SQL or this is the same as GetToken
actualTok, err := ta.GetToken(ctx, id)
assert.NoError(t, err)
assert.Equal(t, expectedTok, actualTok)
}
func testCreateTokenWithValue(ctx context.Context, t *testing.T, ta tokens.Storage) {
id := "id0"
value := generateRandomTokenString(tutil.MinimumTokenLength())
tok, err := ta.CreateTokenWithValue(ctx,
id, value, "node3", true, []string{"project-1"})
assert.NoError(t, err)
assert.Equal(t, value, tok.Value)
tok2, err := ta.GetToken(ctx, tok.ID)
require.NoError(t, err)
assert.Equal(t, tok, tok2)
}
func testCreateTokenWithInvalidValueFails(ctx context.Context, t *testing.T, ta tokens.Storage) {
badValue := generateRandomTokenString(tutil.MinimumTokenLength() - 1)
tok, err := ta.CreateTokenWithValue(ctx,
"id0", badValue, "node3", true, []string{"project-1"})
assert.Error(t, err)
assert.Nil(t, tok)
}
func testCreateLegacyTokenWithValue(ctx context.Context, t *testing.T, ta tokens.Storage) {
value := generateRandomTokenString(tutil.MinimumTokenLength())
tok, err := ta.CreateLegacyTokenWithValue(ctx, value)
assert.NoError(t, err)
assert.NotNil(t, tok)
assert.Equal(t, value, tok.Value)
tok2, err := ta.GetToken(ctx, tok.ID)
require.NoError(t, err)
assert.Equal(t, tok, tok2)
}
func testCreateLegacyTokenWithInvalidValueFails(ctx context.Context, t *testing.T, ta tokens.Storage) {
badValue := generateRandomTokenString(tutil.MinimumLegacyTokenLength - 1)
tok, err := ta.CreateLegacyTokenWithValue(ctx, badValue)
assert.Error(t, err)
assert.Nil(t, tok)
}
func generateRandomTokenString(length int) string {
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ=")
b := make([]rune, length)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
func testDeleteToken(ctx context.Context, t *testing.T, ta tokens.Storage) {
id := "id0"
_, err := ta.CreateToken(ctx, id, "node1", true, []string{"project-1"})
require.NoError(t, err)
err = ta.DeleteToken(ctx, id)
assert.NoError(t, err)
_, err = ta.GetToken(ctx, id)
assert.Error(t, err)
if _, ok := errors.Cause(err).(*tokens.NotFoundError); !ok {
t.Errorf("expected not found token error, got err=%v", err)
}
}
func testDeleteTokenNotFound(ctx context.Context, t *testing.T, ta tokens.Storage) {
err := ta.DeleteToken(ctx, uuid.Must(uuid.NewV4()).String())
if err != nil {
if _, ok := errors.Cause(err).(*tokens.NotFoundError); !ok {
t.Errorf("expected not found token 'node1', got err=%v", err)
}
}
}
func testUpdateTokenActiveOnly(ctx context.Context, t *testing.T, ta tokens.Storage) {
id := "id0"
desc := "node1"
projs := []string{"project-1"}
tok0, err := ta.CreateToken(ctx, id, desc, true, projs)
require.NoError(t, err)
tok, err := ta.UpdateToken(ctx, id, desc, false, projs)
assert.NoError(t, err)
assert.NotNil(t, tok)
assert.Equal(t, tok0.Description, tok.Description)
assert.Equal(t, false, tok.Active)
assert.Equal(t, tok0.Created, tok.Created)
assert.True(t, tok.Updated.After(tok.Created))
assert.ElementsMatch(t, tok0.Projects, tok.Projects)
}
func testUpdateTokenUpdatesAll(ctx context.Context, t *testing.T, ta tokens.Storage) {
id := "id0"
tok, err := ta.CreateToken(ctx, id, "node1", true, []string{"project-1"})
require.NoError(t, err)
newDesc := "newDesc"
newProj := []string{"project-2"}
_, err = ta.UpdateToken(ctx, id, newDesc, false, newProj)
require.NoError(t, err)
updatedTok, err := ta.GetToken(ctx, id)
assert.Equal(t, newDesc, updatedTok.Description)
assert.Equal(t, false, updatedTok.Active)
assert.Equal(t, tok.Created, updatedTok.Created)
assert.True(t, tok.Updated.After(tok.Created))
assert.ElementsMatch(t, newProj, updatedTok.Projects)
}
func testUpdateTokenNotFound(ctx context.Context, t *testing.T, ta tokens.Storage) {
_, err := ta.UpdateToken(ctx, uuid.Must(uuid.NewV4()).String(), "desc", true, []string{"project-1"})
assert.Error(t, err)
if err != nil {
if _, ok := errors.Cause(err).(*tokens.NotFoundError); !ok {
t.Errorf("expected not found token 'node1', got err=%v", err)
}
}
}
func insertProjectsIntoNewContext(projects []string) context.Context {
return auth_context.NewOutgoingProjectsContext(auth_context.NewContext(context.Background(),
[]string{}, projects, "resource", "action", "pol"))
}
|
[
"\"CI\""
] |
[] |
[
"CI"
] |
[]
|
["CI"]
|
go
| 1 | 0 | |
mantle/cmd/kola/options.go
|
// Copyright 2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/coreos/mantle/auth"
"github.com/coreos/mantle/fcos"
"github.com/coreos/mantle/kola"
"github.com/coreos/mantle/platform"
"github.com/coreos/mantle/sdk"
"github.com/coreos/mantle/system"
)
var (
outputDir string
kolaPlatform string
kolaArchitectures = []string{"amd64"}
kolaPlatforms = []string{"aws", "azure", "do", "esx", "gce", "openstack", "packet", "qemu", "qemu-unpriv", "qemu-iso"}
kolaDistros = []string{"fcos", "rhcos"}
kolaIgnitionVersionDefaults = map[string]string{
"cl": "v2",
"fcos": "v3",
"rhcos": "v3",
}
)
func init() {
sv := root.PersistentFlags().StringVar
bv := root.PersistentFlags().BoolVar
ss := root.PersistentFlags().StringSlice
ssv := root.PersistentFlags().StringSliceVar
// general options
sv(&outputDir, "output-dir", "", "Temporary output directory for test data and logs")
root.PersistentFlags().StringVarP(&kolaPlatform, "platform", "p", "", "VM platform: "+strings.Join(kolaPlatforms, ", "))
root.PersistentFlags().StringVarP(&kola.Options.Distribution, "distro", "b", "", "Distribution: "+strings.Join(kolaDistros, ", "))
root.PersistentFlags().IntVarP(&kola.TestParallelism, "parallel", "j", 1, "number of tests to run in parallel")
sv(&kola.TAPFile, "tapfile", "", "file to write TAP results to")
root.PersistentFlags().BoolVarP(&kola.Options.NoTestExitError, "no-test-exit-error", "T", false, "Don't exit with non-zero if tests fail")
sv(&kola.Options.BaseName, "basename", "kola", "Cluster name prefix")
ss("debug-systemd-unit", []string{}, "full-unit-name.service to enable SYSTEMD_LOG_LEVEL=debug on. Can be specified multiple times.")
sv(&kola.Options.IgnitionVersion, "ignition-version", "", "Ignition version override: v2, v3")
ssv(&kola.DenylistedTests, "denylist-test", []string{}, "Test pattern to add to denylist. Can be specified multiple times.")
bv(&kola.NoNet, "no-net", false, "Don't run tests that require an Internet connection")
ssv(&kola.Tags, "tag", []string{}, "Test tag to run. Can be specified multiple times.")
bv(&kola.Options.SSHOnTestFailure, "ssh-on-test-failure", false, "SSH into a machine when tests fail")
sv(&kola.Options.Stream, "stream", "", "CoreOS stream ID (e.g. for Fedora CoreOS: stable, testing, next)")
sv(&kola.Options.CosaWorkdir, "workdir", "", "coreos-assembler working directory")
sv(&kola.Options.CosaBuildId, "build", "", "coreos-assembler build ID")
// rhcos-specific options
sv(&kola.Options.OSContainer, "oscontainer", "", "oscontainer image pullspec for pivot (RHCOS only)")
// aws-specific options
defaultRegion := os.Getenv("AWS_REGION")
if defaultRegion == "" {
defaultRegion = "us-east-1"
}
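	// e.g. exporting AWS_REGION=us-west-2 changes the default region; an explicit
	// --aws-region flag value still overrides it.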
sv(&kola.AWSOptions.CredentialsFile, "aws-credentials-file", "", "AWS credentials file (default \"~/.aws/credentials\")")
sv(&kola.AWSOptions.Region, "aws-region", defaultRegion, "AWS region")
sv(&kola.AWSOptions.Profile, "aws-profile", "default", "AWS profile name")
sv(&kola.AWSOptions.AMI, "aws-ami", "alpha", `AWS AMI ID, or (alpha|beta|stable) to use the latest image`)
// See https://github.com/openshift/installer/issues/2919 for example
sv(&kola.AWSOptions.InstanceType, "aws-type", "m5.large", "AWS instance type")
sv(&kola.AWSOptions.SecurityGroup, "aws-sg", "kola", "AWS security group name")
sv(&kola.AWSOptions.IAMInstanceProfile, "aws-iam-profile", "kola", "AWS IAM instance profile name")
// azure-specific options
sv(&kola.AzureOptions.AzureProfile, "azure-profile", "", "Azure profile (default \"~/"+auth.AzureProfilePath+"\")")
sv(&kola.AzureOptions.AzureAuthLocation, "azure-auth", "", "Azure auth location (default \"~/"+auth.AzureAuthPath+"\")")
sv(&kola.AzureOptions.DiskURI, "azure-disk-uri", "", "Azure disk uri (custom images)")
sv(&kola.AzureOptions.Publisher, "azure-publisher", "CoreOS", "Azure image publisher (default \"CoreOS\")")
sv(&kola.AzureOptions.Offer, "azure-offer", "CoreOS", "Azure image offer (default \"CoreOS\")")
sv(&kola.AzureOptions.Sku, "azure-sku", "alpha", "Azure image sku/channel (default \"alpha\")")
sv(&kola.AzureOptions.Version, "azure-version", "", "Azure image version")
sv(&kola.AzureOptions.Location, "azure-location", "westus", "Azure location (default \"westus\")")
sv(&kola.AzureOptions.Size, "azure-size", "Standard_D2_v2", "Azure machine size (default \"Standard_D2_v2\")")
// do-specific options
sv(&kola.DOOptions.ConfigPath, "do-config-file", "", "DigitalOcean config file (default \"~/"+auth.DOConfigPath+"\")")
sv(&kola.DOOptions.Profile, "do-profile", "", "DigitalOcean profile (default \"default\")")
sv(&kola.DOOptions.AccessToken, "do-token", "", "DigitalOcean access token (overrides config file)")
sv(&kola.DOOptions.Region, "do-region", "sfo2", "DigitalOcean region slug")
sv(&kola.DOOptions.Size, "do-size", "1gb", "DigitalOcean size slug")
sv(&kola.DOOptions.Image, "do-image", "alpha", "DigitalOcean image ID, {alpha, beta, stable}, or user image name")
// esx-specific options
sv(&kola.ESXOptions.ConfigPath, "esx-config-file", "", "ESX config file (default \"~/"+auth.ESXConfigPath+"\")")
sv(&kola.ESXOptions.Server, "esx-server", "", "ESX server")
sv(&kola.ESXOptions.Profile, "esx-profile", "", "ESX profile (default \"default\")")
sv(&kola.ESXOptions.BaseVMName, "esx-base-vm", "", "ESX base VM name")
// gce-specific options
sv(&kola.GCEOptions.Image, "gce-image", "projects/coreos-cloud/global/images/family/coreos-alpha", "GCE image, full api endpoints names are accepted if resource is in a different project")
sv(&kola.GCEOptions.Project, "gce-project", "coreos-gce-testing", "GCE project name")
sv(&kola.GCEOptions.Zone, "gce-zone", "us-central1-a", "GCE zone name")
sv(&kola.GCEOptions.MachineType, "gce-machinetype", "n1-standard-1", "GCE machine type")
sv(&kola.GCEOptions.DiskType, "gce-disktype", "pd-ssd", "GCE disk type")
sv(&kola.GCEOptions.Network, "gce-network", "default", "GCE network")
bv(&kola.GCEOptions.ServiceAuth, "gce-service-auth", false, "for non-interactive auth when running within GCE")
sv(&kola.GCEOptions.JSONKeyFile, "gce-json-key", "", "use a service account's JSON key for authentication")
// openstack-specific options
sv(&kola.OpenStackOptions.ConfigPath, "openstack-config-file", "", "Path to a clouds.yaml formatted OpenStack config file. The underlying library defaults to ./clouds.yaml")
sv(&kola.OpenStackOptions.Profile, "openstack-profile", "", "OpenStack profile within clouds.yaml (default \"openstack\")")
sv(&kola.OpenStackOptions.Region, "openstack-region", "", "OpenStack region")
sv(&kola.OpenStackOptions.Image, "openstack-image", "", "OpenStack image ref")
sv(&kola.OpenStackOptions.Flavor, "openstack-flavor", "1", "OpenStack flavor ref")
sv(&kola.OpenStackOptions.Network, "openstack-network", "", "OpenStack network")
sv(&kola.OpenStackOptions.Domain, "openstack-domain", "", "OpenStack domain ID")
sv(&kola.OpenStackOptions.FloatingIPNetwork, "openstack-floating-ip-network", "", "OpenStack network to use when creating a floating IP")
// packet-specific options
sv(&kola.PacketOptions.ConfigPath, "packet-config-file", "", "Packet config file (default \"~/"+auth.PacketConfigPath+"\")")
sv(&kola.PacketOptions.Profile, "packet-profile", "", "Packet profile (default \"default\")")
sv(&kola.PacketOptions.ApiKey, "packet-api-key", "", "Packet API key (overrides config file)")
sv(&kola.PacketOptions.Project, "packet-project", "", "Packet project UUID (overrides config file)")
sv(&kola.PacketOptions.Facility, "packet-facility", "sjc1", "Packet facility code")
sv(&kola.PacketOptions.Plan, "packet-plan", "", "Packet plan slug (default arch-dependent, e.g. \"t1.small.x86\")")
sv(&kola.PacketOptions.Architecture, "packet-architecture", "x86_64", "Packet CPU architecture")
sv(&kola.PacketOptions.IPXEURL, "packet-ipxe-url", "", "iPXE script URL (default arch-dependent, e.g. \"https://raw.githubusercontent.com/coreos/coreos-assembler/master/mantle/platform/api/packet/fcos-x86_64.ipxe\")")
sv(&kola.PacketOptions.ImageURL, "packet-image-url", "", "image URL (default arch-dependent, e.g. \"https://builds.coreos.fedoraproject.org/prod/streams/stable/builds/31.20200223.3.0/x86_64/fedora-coreos-31.20200223.3.0-metal.x86_64.raw.xz\")")
// QEMU-specific options
sv(&kola.QEMUOptions.Firmware, "qemu-firmware", "", "Boot firmware: bios,uefi,uefi-secure (default bios)")
sv(&kola.QEMUOptions.DiskImage, "qemu-image", "", "path to CoreOS disk image")
sv(&kola.QEMUOptions.DiskSize, "qemu-size", "", "Resize target disk via qemu-img resize [+]SIZE")
sv(&kola.QEMUOptions.Memory, "qemu-memory", "", "Default memory size in MB")
bv(&kola.QEMUOptions.NbdDisk, "qemu-nbd-socket", false, "Present the disks over NBD socket to qemu")
bv(&kola.QEMUOptions.MultiPathDisk, "qemu-multipath", false, "Enable multiple paths for the main disk")
bv(&kola.QEMUOptions.Native4k, "qemu-native-4k", false, "Force 4k sectors for main disk")
bv(&kola.QEMUOptions.Nvme, "qemu-nvme", false, "Use NVMe for main disk")
bv(&kola.QEMUOptions.Swtpm, "qemu-swtpm", true, "Create temporary software TPM")
sv(&kola.QEMUIsoOptions.IsoPath, "qemu-iso", "", "path to CoreOS ISO image")
}
// Sync up the command line options if there is a dependency
func syncOptionsImpl(useCosa bool) error {
validateOption := func(name, item string, valid []string) error {
for _, v := range valid {
if v == item {
return nil
}
}
return fmt.Errorf("unsupported %v %q", name, item)
}
// TODO: Could also auto-synchronize if e.g. --aws-ami is passed
if kolaPlatform == "" {
if kola.QEMUIsoOptions.IsoPath != "" {
kolaPlatform = "qemu-iso"
} else {
kolaPlatform = "qemu-unpriv"
}
}
// There used to be a "privileged" qemu path, it is no longer supported.
// Alias qemu to qemu-unpriv.
if kolaPlatform == "qemu" {
kolaPlatform = "qemu-unpriv"
}
// native 4k requires a UEFI bootloader
if kola.QEMUOptions.Native4k && kola.QEMUOptions.Firmware == "bios" {
return fmt.Errorf("native 4k requires uefi firmware")
}
// default to BIOS; use UEFI on aarch64, and on x86_64 only when native 4k is enabled
if kola.QEMUOptions.Firmware == "" {
if system.RpmArch() == "aarch64" {
kola.QEMUOptions.Firmware = "uefi"
} else if system.RpmArch() == "x86_64" && kola.QEMUOptions.Native4k {
kola.QEMUOptions.Firmware = "uefi"
} else {
kola.QEMUOptions.Firmware = "bios"
}
}
if err := validateOption("platform", kolaPlatform, kolaPlatforms); err != nil {
return err
}
// if no external dirs were given, automatically add the working directory;
// does nothing if ./tests/kola/ doesn't exist
if len(runExternals) == 0 {
runExternals = []string{"."}
}
foundCosa := false
if kola.Options.CosaBuildId != "" {
// specified --build? fetch that build. in this path we *require* a
// cosa workdir, either assumed as PWD or via --workdir.
if kola.Options.CosaWorkdir == "" {
kola.Options.CosaWorkdir = "."
}
localbuild, err := sdk.GetLocalBuild(kola.Options.CosaWorkdir, kola.Options.CosaBuildId)
if err != nil {
return err
}
kola.CosaBuild = localbuild
foundCosa = true
} else if kola.Options.Stream != "" {
if err := syncStreamOptions(); err != nil {
return err
}
} else {
if kola.Options.CosaWorkdir == "" {
// specified neither --build nor --workdir; only opportunistically
// try to use the PWD as the workdir, but don't error out if it's
// not
if isroot, err := sdk.IsCosaRoot("."); err != nil {
return err
} else if isroot {
kola.Options.CosaWorkdir = "."
}
}
if kola.Options.CosaWorkdir != "" && kola.Options.CosaWorkdir != "none" {
localbuild, err := sdk.GetLatestLocalBuild(kola.Options.CosaWorkdir)
if err != nil {
if !os.IsNotExist(errors.Cause(err)) {
return err
}
} else {
kola.Options.CosaBuildId = localbuild.Meta.BuildID
kola.CosaBuild = localbuild
foundCosa = true
}
}
}
if foundCosa && useCosa {
if err := syncCosaOptions(); err != nil {
return err
}
}
if kola.Options.IgnitionVersion == "" && kola.QEMUOptions.DiskImage != "" {
kola.Options.IgnitionVersion = sdk.TargetIgnitionVersionFromName(kola.QEMUOptions.DiskImage)
}
units, _ := root.PersistentFlags().GetStringSlice("debug-systemd-unit")
for _, unit := range units {
kola.Options.SystemdDropins = append(kola.Options.SystemdDropins, platform.SystemdDropin{
Unit: unit,
Name: "10-debug.conf",
Contents: "[Service]\nEnvironment=SYSTEMD_LOG_LEVEL=debug",
})
}
if kola.Options.OSContainer != "" && kola.Options.Distribution != "rhcos" {
return fmt.Errorf("oscontainer is only supported on rhcos")
}
if kola.Options.Distribution == "" {
kola.Options.Distribution = kolaDistros[0]
} else if err := validateOption("distro", kola.Options.Distribution, kolaDistros); err != nil {
return err
}
if kola.Options.IgnitionVersion == "" {
var ok bool
kola.Options.IgnitionVersion, ok = kolaIgnitionVersionDefaults[kola.Options.Distribution]
if !ok {
return fmt.Errorf("Distribution %q has no default Ignition version", kola.Options.Distribution)
}
}
return nil
}
// syncOptions updates default values of options based on provided ones
func syncOptions() error {
return syncOptionsImpl(true)
}
// syncCosaOptions sets unset platform-specific
// options that can be derived from the cosa build metadata
func syncCosaOptions() error {
switch kolaPlatform {
case "qemu-unpriv", "qemu":
if kola.QEMUOptions.DiskImage == "" && kola.CosaBuild.Meta.BuildArtifacts.Qemu != nil {
kola.QEMUOptions.DiskImage = filepath.Join(kola.CosaBuild.Dir, kola.CosaBuild.Meta.BuildArtifacts.Qemu.Path)
}
case "qemu-iso":
if kola.QEMUIsoOptions.IsoPath == "" && kola.CosaBuild.Meta.BuildArtifacts.LiveIso != nil {
kola.QEMUIsoOptions.IsoPath = filepath.Join(kola.CosaBuild.Dir, kola.CosaBuild.Meta.BuildArtifacts.LiveIso.Path)
}
}
if kola.Options.IgnitionVersion == "" && kola.QEMUOptions.DiskImage == "" {
if kola.CosaBuild != nil {
kola.Options.IgnitionVersion = sdk.TargetIgnitionVersion(kola.CosaBuild.Meta)
}
}
if kola.Options.Distribution == "" {
distro, err := sdk.TargetDistro(kola.CosaBuild.Meta)
if err != nil {
return err
}
kola.Options.Distribution = distro
}
runExternals = append(runExternals, filepath.Join(kola.Options.CosaWorkdir, "src/config"))
return nil
}
// syncStreamOptions sets the underlying raw options based on a stream
// Currently this only handles AWS to demonstrate the idea; we'll
// add generic code to map between streams and cosa builds soon.
func syncStreamOptions() error {
if kola.Options.Stream == "" {
return nil
}
switch kola.Options.Distribution {
case "":
return fmt.Errorf("Must specify -b/--distro with --stream")
case "fcos":
break
default:
return fmt.Errorf("Unhandled stream for distribution %s", kola.Options.Distribution)
}
artifacts, err := fcos.FetchStreamThisArchitecture(kola.Options.Stream)
if err != nil {
return errors.Wrapf(err, "failed to fetch stream")
}
release := ""
switch kolaPlatform {
case "aws":
regionimg := artifacts.Images.Aws.Regions[kola.AWSOptions.Region]
release = regionimg.Release
kola.AWSOptions.AMI = regionimg.Image
default:
return fmt.Errorf("Unhandled platform %s for stream", kolaPlatform)
}
fmt.Printf("Resolved stream %s for platform %s to release %s\n", kola.Options.Stream, kolaPlatform, release)
return nil
}
| environment: ["\"AWS_REGION\""] | variablearg: [] | constarg: ["AWS_REGION"] | variableargjson: [] | constargjson: ["AWS_REGION"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
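The row above flags AWS_REGION as a constant getenv lookup: main.go reads it once in init() and substitutes a hard-coded default when it is empty. As a minimal, self-contained illustration of that read-with-fallback pattern (the helper name getenvDefault is invented for this sketch and is not part of the record above):

package main

import (
    "fmt"
    "os"
)

// getenvDefault returns the value of the environment variable key,
// or fallback when the variable is unset or empty.
func getenvDefault(key, fallback string) string {
    if v := os.Getenv(key); v != "" {
        return v
    }
    return fallback
}

func main() {
    // Mirrors the AWS_REGION handling above: unset or empty falls back to "us-east-1".
    fmt.Println(getenvDefault("AWS_REGION", "us-east-1"))
}

Run it with AWS_REGION=eu-west-1 to see the override, or with the variable unset to get the default.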
main.go
|
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"net/http"
"os"
"os/signal"
"sync"
"syscall"
"time"
"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/viper"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // enables using kubeconfig for GCP auth
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// addr tells us what address to have the Prometheus metrics listen on.
var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
// set up a signal handler to gracefully exit
func sigHandler() <-chan struct{} {
stop := make(chan struct{})
go func() {
c := make(chan os.Signal, 1)
signal.Notify(c,
syscall.SIGINT, // Ctrl+C
syscall.SIGTERM, // Termination Request
syscall.SIGSEGV, // FullDerp
syscall.SIGABRT, // Abnormal termination
syscall.SIGILL, // illegal instruction
syscall.SIGFPE) // floating point - this is why we can't have nice things
sig := <-c
glog.Warningf("Signal (%v) Detected, Shutting Down", sig)
close(stop)
}()
return stop
}
// loadConfig will parse input + config file and return a clientset
func loadConfig() kubernetes.Interface {
var config *rest.Config
var err error
flag.Parse()
// leverages a file|(ConfigMap)
// to be located at /etc/eventrouter/config
viper.SetConfigType("json")
viper.SetConfigName("config")
viper.AddConfigPath("/etc/eventrouter/")
viper.AddConfigPath(".")
viper.SetDefault("kubeconfig", "")
viper.SetDefault("sink", "glog")
viper.SetDefault("resync-interval", time.Minute*30)
viper.SetDefault("enable-prometheus", true)
if err = viper.ReadInConfig(); err != nil {
panic(err.Error())
}
viper.BindEnv("kubeconfig") // Allows the KUBECONFIG env var to override where the kubeconfig is
// Allow specifying a custom config file via the EVENTROUTER_CONFIG env var
if forceCfg := os.Getenv("EVENTROUTER_CONFIG"); forceCfg != "" {
viper.SetConfigFile(forceCfg)
}
kubeconfig := viper.GetString("kubeconfig")
if len(kubeconfig) > 0 {
config, err = clientcmd.BuildConfigFromFlags("", kubeconfig)
} else {
config, err = rest.InClusterConfig()
}
if err != nil {
panic(err.Error())
}
// creates the clientset from kubeconfig
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err.Error())
}
return clientset
}
// main entry point of the program
func main() {
var wg sync.WaitGroup
clientset := loadConfig()
sharedInformers := informers.NewSharedInformerFactory(clientset, viper.GetDuration("resync-interval"))
eventsInformer := sharedInformers.Core().V1().Events()
// TODO: Support locking for HA https://github.com/kubernetes/kubernetes/pull/42666
eventRouter := NewEventRouter(clientset, eventsInformer)
stop := sigHandler()
// Startup the http listener for Prometheus Metrics endpoint.
if viper.GetBool("enable-prometheus") {
go func() {
glog.Info("Starting prometheus metrics.")
http.Handle("/metrics", promhttp.Handler())
glog.Warning(http.ListenAndServe(*addr, nil))
}()
}
// Startup the EventRouter
wg.Add(1)
go func() {
defer wg.Done()
eventRouter.Run(stop)
}()
// Startup the Informer(s)
glog.Infof("Starting shared Informer(s)")
sharedInformers.Start(stop)
wg.Wait()
glog.Warningf("Exiting main()")
os.Exit(1)
}
| environment: ["\"EVENTROUTER_CONFIG\""] | variablearg: [] | constarg: ["EVENTROUTER_CONFIG"] | variableargjson: [] | constargjson: ["EVENTROUTER_CONFIG"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nospoil.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: python | constargcount: 0 | variableargcount: 0 | sentence: |
go/oasis-test-runner/oasis/byzantine.go
|
package oasis
import (
"fmt"
beacon "github.com/oasisprotocol/oasis-core/go/beacon/api"
"github.com/oasisprotocol/oasis-core/go/common"
"github.com/oasisprotocol/oasis-core/go/common/crypto/signature"
"github.com/oasisprotocol/oasis-core/go/common/node"
registry "github.com/oasisprotocol/oasis-core/go/registry/api"
scheduler "github.com/oasisprotocol/oasis-core/go/scheduler/api"
)
// Byzantine is an Oasis byzantine node.
type Byzantine struct {
*Node
script string
extraArgs []Argument
runtime int
consensusPort uint16
p2pPort uint16
activationEpoch beacon.EpochTime
}
// ByzantineCfg is the Oasis byzantine node configuration.
type ByzantineCfg struct {
NodeCfg
Script string
ExtraArgs []Argument
ForceElectParams *scheduler.ForceElectCommitteeRole
IdentitySeed string
ActivationEpoch beacon.EpochTime
Runtime int
}
func (worker *Byzantine) AddArgs(args *argBuilder) error {
args.debugDontBlameOasis().
debugAllowTestKeys().
debugSetRlimit().
debugEnableProfiling(worker.Node.pprofPort).
tendermintDebugAllowDuplicateIP().
tendermintCoreAddress(worker.consensusPort).
tendermintDebugAddrBookLenient().
tendermintSubmissionGasPrice(worker.consensus.SubmissionGasPrice).
workerP2pPort(worker.p2pPort).
appendSeedNodes(worker.net.seeds).
appendEntity(worker.entity).
byzantineActivationEpoch(worker.activationEpoch)
if worker.runtime > 0 {
args.byzantineRuntimeID(worker.net.runtimes[worker.runtime].id)
}
for _, v := range worker.net.Runtimes() {
if v.kind == registry.KindCompute && v.teeHardware == node.TEEHardwareIntelSGX {
args.byzantineFakeSGX()
args.byzantineVersionFakeEnclaveID(v)
}
}
args.vec = append(args.vec, worker.extraArgs...)
return nil
}
func (worker *Byzantine) CustomStart(args *argBuilder) error {
if err := worker.net.startOasisNode(worker.Node, []string{"debug", "byzantine", worker.script}, args); err != nil {
return fmt.Errorf("oasis/byzantine: failed to launch node %s: %w", worker.Name, err)
}
return nil
}
// NewByzantine provisions a new byzantine node and adds it to the network.
func (net *Network) NewByzantine(cfg *ByzantineCfg) (*Byzantine, error) {
byzantineName := fmt.Sprintf("byzantine-%d", len(net.byzantine))
host, err := net.GetNamedNode(byzantineName, &cfg.NodeCfg)
if err != nil {
return nil, err
}
if cfg.Script == "" {
return nil, fmt.Errorf("oasis/byzantine: empty script name")
}
// Generate a deterministic identity as the Byzantine node scripts usually
// require specific roles in the first round.
if cfg.IdentitySeed == "" {
return nil, fmt.Errorf("oasis/byzantine: empty identity seed")
}
// Pre-provision the node identity so that we can update the entity.
host.nodeSigner, host.p2pSigner, host.sentryCert, err = net.provisionNodeIdentity(host.dir, cfg.IdentitySeed, false)
if err != nil {
return nil, fmt.Errorf("oasis/byzantine: failed to provision node identity: %w", err)
}
if err := cfg.Entity.addNode(host.nodeSigner); err != nil {
return nil, err
}
worker := &Byzantine{
Node: host,
script: cfg.Script,
extraArgs: cfg.ExtraArgs,
consensusPort: host.getProvisionedPort(nodePortConsensus),
p2pPort: host.getProvisionedPort(nodePortP2P),
activationEpoch: cfg.ActivationEpoch,
runtime: cfg.Runtime,
}
copy(worker.NodeID[:], host.nodeSigner[:])
net.byzantine = append(net.byzantine, worker)
host.features = append(host.features, worker)
if cfg.Runtime >= 0 {
rt := net.runtimes[cfg.Runtime].id
pk := host.nodeSigner
if net.cfg.SchedulerForceElect == nil {
net.cfg.SchedulerForceElect = make(map[common.Namespace]map[signature.PublicKey]*scheduler.ForceElectCommitteeRole)
}
if net.cfg.SchedulerForceElect[rt] == nil {
net.cfg.SchedulerForceElect[rt] = make(map[signature.PublicKey]*scheduler.ForceElectCommitteeRole)
}
if params := cfg.ForceElectParams; params != nil {
tmpParams := *params
net.cfg.SchedulerForceElect[rt][pk] = &tmpParams
}
}
return worker, nil
}
| environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: go | constargcount: null | variableargcount: null | sentence: null
client_led_button.py
|
#!/usr/bin/python3
import socket
import time
import sys
from PyQt5.QtWidgets import (QWidget, QMainWindow, QGridLayout, QPushButton, QApplication)
HOST = '192.168.86.214' # The remote host
PORT = 50007 # The same port as used by the server
sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((HOST, PORT))
class Client_led_button(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.widget = QWidget()
self.setCentralWidget(self.widget)
btn1 = QPushButton("On", self)
btn2 = QPushButton("Off", self)
grid = QGridLayout(self.widget)
grid.setSpacing(10)
grid.addWidget(btn1, 0, 0)
grid.addWidget(btn2, 0, 1)
self.widget.setLayout(grid)
btn1.clicked.connect(self.buttonClicked)
btn2.clicked.connect(self.buttonClicked)
self.statusBar()
self.setGeometry(300, 300, 290, 150)
self.setWindowTitle('client led button')
self.show()
def buttonClicked(self):
sender = self.sender()
if sender.text() == "On":
sock.sendall(b"1")
data = sock.recv(1024)
self.statusBar().showMessage(repr(data))
elif sender.text() == "Off":
sock.sendall(b"0")
data = sock.recv(1024)
self.statusBar().showMessage(repr(data))
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Client_led_button()
sys.exit(app.exec_())
| environment: [] | variablearg: [] | constarg: [] | variableargjson: [] | constargjson: [] | lang: python | constargcount: null | variableargcount: null | sentence: null
materials/lab/3/main/main.go
|
// Build and Use this File to interact with the shodan package
// In this directory lab/3/shodan/main:
// go build main.go
// SHODAN_API_KEY=YOURAPIKEYHERE ./main <search term>
package main
import (
"fmt"
"log"
"os"
"encoding/json"
"shodan/shodan"
)
func main() {
if len(os.Args) < 2 {
log.Fatalln("Usage: main <ports|search>")
}
apiKey := os.Getenv("SHODAN_API_KEY")
s := shodan.New(apiKey)
info, err := s.APIInfo()
if err != nil {
log.Panicln(err)
}
fmt.Printf(
"Query Credits: %d\nScan Credits: %d\n\n",
info.QueryCredits,
info.ScanCredits)
if os.Args[1] == "ports" {
portList(s)
} else{
search(s)
}
}
// I would pipe this out to some junk file, it's a long list
func portList(client *shodan.Client){
ports, err := client.ListPorts()
if err != nil {
log.Panicln(err)
}
fmt.Println("Ports them crawlerz be looking for:")
for _, port := range ports {
fmt.Println(port)
}
}
func search(client *shodan.Client) {
if len(os.Args) != 2 {
log.Fatalln("Usage: main search <searchterm>")
}
hostSearch, err := client.HostSearch(os.Args[1])
if err != nil {
log.Panicln(err)
}
fmt.Printf("Host Data Dump\n")
for _, host := range hostSearch.Matches {
fmt.Println("==== start ",host.IPString,"====")
h,_ := json.Marshal(host)
fmt.Println(string(h))
fmt.Println("==== end ",host.IPString,"====")
//fmt.Println("Press the Enter Key to continue.")
//fmt.Scanln()
}
fmt.Printf("IP, Port\n")
for _, host := range hostSearch.Matches {
fmt.Printf("%s, %d\n", host.IPString, host.Port)
}
}
| environment: ["\"SHODAN_API_KEY\""] | variablearg: [] | constarg: ["SHODAN_API_KEY"] | variableargjson: [] | constargjson: ["SHODAN_API_KEY"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
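The Shodan example above passes SHODAN_API_KEY straight to shodan.New without checking it, so a missing key only surfaces later as an API error. A hedged sketch of the usual fail-fast variant (mustGetenv is an illustrative helper, not something the lab code defines):

package main

import (
    "fmt"
    "log"
    "os"
)

// mustGetenv returns the value of key or terminates the program
// with a clear message when the variable is not set.
func mustGetenv(key string) string {
    v := os.Getenv(key)
    if v == "" {
        log.Fatalf("required environment variable %s is not set", key)
    }
    return v
}

func main() {
    apiKey := mustGetenv("SHODAN_API_KEY")
    fmt.Println("key length:", len(apiKey))
}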
cmd/server/search_async.go
|
// Copyright 2018 The Moov Authors
// Use of this source code is governed by an Apache License
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/go-kit/kit/log"
)
var (
watchResearchBatchSize = 100
)
func init() {
watchResearchBatchSize = readWebhookBatchSize(os.Getenv("WEBHOOK_BATCH_SIZE"))
}
func readWebhookBatchSize(str string) int {
if str == "" {
return watchResearchBatchSize
}
d, _ := strconv.Atoi(str)
if d > 0 {
return d
}
return watchResearchBatchSize
}
// spawnResearching will block and select on updates for when to re-inspect all watches setup.
// Since watches are used to post OFAC data via webhooks they are used as catalysts in other systems.
func (s *searcher) spawnResearching(logger log.Logger, companyRepo companyRepository, custRepo customerRepository, watchRepo watchRepository, webhookRepo webhookRepository, updates chan *downloadStats) {
for {
select {
case <-updates:
s.logger.Log("search", "async: starting re-search of watches")
cursor := watchRepo.getWatchesCursor(logger, watchResearchBatchSize)
for {
watches, _ := cursor.Next()
if len(watches) == 0 {
break
}
for i := range watches {
var body *bytes.Buffer
var err error
// Perform a query (ID watches) or search (name watches) and encode the model in JSON for calling the webhook.
switch {
case watches[i].customerID != "":
s.logger.Log("search", fmt.Sprintf("async: watch %s for customer %s found", watches[i].id, watches[i].customerID))
body, err = getCustomerBody(s, watches[i].id, watches[i].customerID, 1.0, custRepo)
case watches[i].customerName != "":
s.logger.Log("search", fmt.Sprintf("async: name watch '%s' for customer %s found", watches[i].customerName, watches[i].id))
sdns := s.TopSDNs(5, watches[i].customerName)
for i := range sdns {
if strings.EqualFold(sdns[i].SDNType, "individual") {
body, err = getCustomerBody(s, watches[i].id, sdns[i].EntityID, sdns[i].match, custRepo)
break
}
}
case watches[i].companyID != "":
s.logger.Log("search", fmt.Sprintf("async: watch %s for company %s found", watches[i].id, watches[i].companyID))
body, err = getCompanyBody(s, watches[i].id, watches[i].companyID, 1.0, companyRepo)
case watches[i].companyName != "":
s.logger.Log("search", fmt.Sprintf("async: name watch '%s' for company %s found", watches[i].companyName, watches[i].id))
sdns := s.TopSDNs(5, watches[i].companyName)
for i := range sdns {
if !strings.EqualFold(sdns[i].SDNType, "individual") {
body, err = getCompanyBody(s, watches[i].id, sdns[i].EntityID, sdns[i].match, companyRepo)
break
}
}
}
if err != nil {
s.logger.Log("search", fmt.Sprintf("async: watch %s: %v", watches[i].id, err))
continue // skip to next watch since we failed somewhere
}
// Send HTTP webhook
now := time.Now()
status, err := callWebhook(watches[i].id, body, watches[i].webhook, watches[i].authToken)
if err != nil {
s.logger.Log("search", fmt.Errorf("async: problem calling webhook for watch (%s): %v", watches[i].id, err))
}
if err := webhookRepo.recordWebhook(watches[i].id, now, status); err != nil {
s.logger.Log("search", fmt.Errorf("async: problem writing watch (%s) webhook status: %v", watches[i].id, err))
}
}
}
}
}
}
// getCustomerBody returns the JSON encoded form of a given customer by their EntityID
func getCustomerBody(s *searcher, watchID string, customerID string, match float64, repo customerRepository) (*bytes.Buffer, error) {
customer, _ := getCustomerByID(customerID, s, repo)
if customer == nil {
return nil, fmt.Errorf("async: watch %s customer %v not found", watchID, customerID)
}
customer.Match = match
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(customer); err != nil {
return nil, fmt.Errorf("problem creating JSON for customer watch %s: %v", watchID, err)
}
return &buf, nil
}
// getCompanyBody returns the JSON encoded form of a given company by their EntityID
func getCompanyBody(s *searcher, watchID string, companyID string, match float64, repo companyRepository) (*bytes.Buffer, error) {
company, _ := getCompanyByID(companyID, s, repo)
if company == nil {
return nil, fmt.Errorf("async: watch %s company %v not found", watchID, companyID)
}
company.Match = match
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(company); err != nil {
return nil, fmt.Errorf("problem creating JSON for company watch %s: %v", watchID, err)
}
return &buf, nil
}
| environment: ["\"WEBHOOK_BATCH_SIZE\""] | variablearg: [] | constarg: ["WEBHOOK_BATCH_SIZE"] | variableargjson: [] | constargjson: ["WEBHOOK_BATCH_SIZE"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
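readWebhookBatchSize above shows the common pattern of parsing an integer from the environment and silently keeping the default on bad input. A compact, self-contained sketch of the same idea (intFromEnv and the default of 100 are illustrative assumptions, not part of the record):

package main

import (
    "fmt"
    "os"
    "strconv"
)

// intFromEnv parses an integer environment variable, returning fallback
// when the variable is unset, empty, or not a positive integer.
func intFromEnv(key string, fallback int) int {
    raw := os.Getenv(key)
    if raw == "" {
        return fallback
    }
    n, err := strconv.Atoi(raw)
    if err != nil || n <= 0 {
        return fallback
    }
    return n
}

func main() {
    fmt.Println(intFromEnv("WEBHOOK_BATCH_SIZE", 100))
}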
test/cli_misc_test.go
|
package test
import (
"fmt"
"log"
"net/url"
"os"
"strings"
"testing"
"github.com/fnproject/cli/testharness"
)
func TestFnVersion(t *testing.T) {
t.Parallel()
tctx := testharness.Create(t)
res := tctx.Fn("version")
res.AssertSuccess()
}
// this is messy and nasty as we generate different potential values for FN_API_URL based on its type
func fnApiUrlVariations(t *testing.T) []string {
srcUrl := os.Getenv("FN_API_URL")
if srcUrl == "" {
srcUrl = "http://localhost:8080/"
}
if !strings.HasPrefix(srcUrl, "http:") && !strings.HasPrefix(srcUrl, "https:") {
srcUrl = "http://" + srcUrl
}
parsed, err := url.Parse(srcUrl)
if err != nil {
t.Fatalf("Invalid/unparsable FN_API_URL %s: %s", srcUrl, err)
}
var cases []string
if parsed.Scheme == "http" {
cases = append(cases, "http://"+parsed.Host+parsed.Path)
cases = append(cases, parsed.Host+parsed.Path)
cases = append(cases, parsed.Host)
} else if parsed.Scheme == "https" {
cases = append(cases, "https://"+parsed.Host+parsed.Path)
cases = append(cases, "https://"+parsed.Host)
} else {
log.Fatalf("Unsupported url scheme for testing %s: %s", srcUrl, parsed.Scheme)
}
return cases
}
func TestFnApiUrlSupportsDifferentFormats(t *testing.T) {
t.Parallel()
h := testharness.Create(t)
defer h.Cleanup()
for _, candidateUrl := range fnApiUrlVariations(t) {
h.WithEnv("FN_API_URL", candidateUrl)
h.Fn("list", "apps").AssertSuccess()
}
}
// Not sure what this test was intending (copied from old test.sh)
func TestSettingTimeoutWorks(t *testing.T) {
t.Parallel()
h := testharness.Create(t)
defer h.Cleanup()
h.WithEnv("FN_REGISTRY", "some_random_registry")
appName := h.NewAppName()
h.Fn("create", "app", appName).AssertSuccess()
res := h.Fn("list", "apps")
if !strings.Contains(res.Stdout, fmt.Sprintf("%s\n", appName)) {
t.Fatalf("Expecting list apps to contain app name , got %v", res)
}
funcName := h.NewFuncName(appName)
h.MkDir(funcName)
h.Cd(funcName)
h.WithMinimalFunctionSource()
h.FileAppend("func.yaml", "\ntimeout: 50\n\nschema_version: 20180708\n")
h.Fn("--verbose", "deploy", "--app", appName, "--local").AssertSuccess()
h.Fn("invoke", appName, funcName).AssertSuccess()
inspectRes := h.Fn("inspect", "fn", appName, funcName)
inspectRes.AssertSuccess()
if !strings.Contains(inspectRes.Stdout, `"timeout": 50`) {
t.Errorf("Expecting fn inspect to contain timeout %v", inspectRes)
}
h.Fn("create", "fn", appName, "another", "some_random_registry/"+funcName+":0.0.2").AssertSuccess()
h.Fn("invoke", appName, "another").AssertSuccess()
}
//Memory doesn't seem to get persisted/returned
func TestSettingMemoryWorks(t *testing.T) {
t.Parallel()
h := testharness.Create(t)
defer h.Cleanup()
h.WithEnv("FN_REGISTRY", "some_random_registry")
appName := h.NewAppName()
h.Fn("create", "app", appName).AssertSuccess()
res := h.Fn("list", "apps")
if !strings.Contains(res.Stdout, fmt.Sprintf("%s\n", appName)) {
t.Fatalf("Expecting list apps to contain app name , got %v", res)
}
funcName := h.NewFuncName(appName)
h.MkDir(funcName)
h.Cd(funcName)
h.WithMinimalFunctionSource()
h.FileAppend("func.yaml", "memory: 100\nschema_version: 20180708\n")
h.Fn("--verbose", "deploy", "--app", appName, "--local").AssertSuccess()
h.Fn("invoke", appName, funcName).AssertSuccess()
inspectRes := h.Fn("inspect", "fn", appName, funcName)
inspectRes.AssertSuccess()
if !strings.Contains(inspectRes.Stdout, `"memory": 100`) {
t.Errorf("Expecting fn inspect to contain memory %v", inspectRes)
}
h.Fn("create", "fn", appName, "another", "some_random_registry/"+funcName+":0.0.2").AssertSuccess()
h.Fn("invoke", appName, "another").AssertSuccess()
}
func TestAllMainCommandsExist(t *testing.T) {
t.Parallel()
h := testharness.Create(t)
defer h.Cleanup()
testCommands := []string{
"build",
"bump",
"call",
"create",
"delete",
"deploy",
"get",
"init",
"inspect",
"list",
"push",
"run",
"set",
"start",
"test",
"unset",
"update",
"use",
"version",
}
for _, cmd := range testCommands {
res := h.Fn(cmd)
if strings.Contains(res.Stderr, "command not found") {
t.Errorf("Expected command %s to exist", cmd)
}
}
}
func TestAppYamlDeploy(t *testing.T) {
t.Parallel()
h := testharness.Create(t)
defer h.Cleanup()
appName := h.NewAppName()
fnName := h.NewFuncName(appName)
h.WithFile("app.yaml", fmt.Sprintf(`name: %s`, appName), 0644)
h.MkDir(fnName)
h.Cd(fnName)
h.WithMinimalFunctionSource()
h.Cd("")
h.Fn("deploy", "--all", "--local").AssertSuccess()
h.Fn("invoke", appName, fnName).AssertSuccess()
h.Fn("deploy", "--all", "--local").AssertSuccess()
h.Fn("invoke", appName, fnName).AssertSuccess()
}
func TestBump(t *testing.T) {
t.Parallel()
h := testharness.Create(t)
defer h.Cleanup()
expectFuncYamlVersion := func(v string) {
funcYaml := h.GetFile("func.yaml")
if !strings.Contains(funcYaml, fmt.Sprintf("version: %s", v)) {
t.Fatalf("Expected version to be %s but got %s", v, funcYaml)
}
}
appName := h.NewAppName()
fnName := h.NewFuncName(appName)
h.MkDir(fnName)
h.Cd(fnName)
h.WithMinimalFunctionSource()
expectFuncYamlVersion("0.0.1")
h.Fn("bump").AssertSuccess()
expectFuncYamlVersion("0.0.2")
h.Fn("bump", "--major").AssertSuccess()
expectFuncYamlVersion("1.0.0")
h.Fn("bump").AssertSuccess()
expectFuncYamlVersion("1.0.1")
h.Fn("bump", "--minor").AssertSuccess()
expectFuncYamlVersion("1.1.0")
h.Fn("deploy", "--local", "--app", appName).AssertSuccess()
expectFuncYamlVersion("1.1.1")
h.Fn("i", "function", appName, fnName).AssertSuccess().AssertStdoutContains(fmt.Sprintf(`%s:1.1.1`, fnName))
h.Fn("deploy", "--local", "--no-bump", "--app", appName).AssertSuccess()
expectFuncYamlVersion("1.1.1")
h.Fn("i", "function", appName, fnName).AssertSuccess().AssertStdoutContains(fmt.Sprintf(`%s:1.1.1`, fnName))
}
| environment: ["\"FN_API_URL\""] | variablearg: [] | constarg: ["FN_API_URL"] | variableargjson: [] | constargjson: ["FN_API_URL"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
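fnApiUrlVariations above first normalizes FN_API_URL (default value, then a forced http scheme) before deriving test cases. A standalone sketch of just that normalization step, using only the standard library (normalizeAPIURL is an illustrative name, not part of the fn CLI tests):

package main

import (
    "fmt"
    "net/url"
    "os"
    "strings"
)

// normalizeAPIURL applies the same defaulting used in the test above:
// fall back to localhost, force an http scheme, then parse.
func normalizeAPIURL() (*url.URL, error) {
    raw := os.Getenv("FN_API_URL")
    if raw == "" {
        raw = "http://localhost:8080/"
    }
    if !strings.HasPrefix(raw, "http:") && !strings.HasPrefix(raw, "https:") {
        raw = "http://" + raw
    }
    return url.Parse(raw)
}

func main() {
    u, err := normalizeAPIURL()
    if err != nil {
        fmt.Println("invalid FN_API_URL:", err)
        return
    }
    fmt.Println(u.Scheme, u.Host, u.Path)
}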
marathon-java/marathon-java-driver/src/test/java/net/sourceforge/marathon/javadriver/cmdlinelauncher/LaunchWebStartTest.java
|
/*******************************************************************************
* Copyright 2016 Jalian Systems Pvt. Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package net.sourceforge.marathon.javadriver.cmdlinelauncher;
import java.io.File;
import java.util.List;
import java.util.function.Function;
import org.openqa.selenium.By;
import org.openqa.selenium.NoSuchWindowException;
import org.openqa.selenium.Platform;
import org.openqa.selenium.WebDriver;
import org.openqa.selenium.WebElement;
import org.openqa.selenium.os.CommandLine;
import org.openqa.selenium.support.ui.WebDriverWait;
import org.testng.AssertJUnit;
import org.testng.SkipException;
import org.testng.annotations.AfterMethod;
import org.testng.annotations.Test;
import net.sourceforge.marathon.javadriver.JavaDriver;
import net.sourceforge.marathon.javadriver.JavaProfile;
import net.sourceforge.marathon.javadriver.JavaProfile.LaunchMode;
@Test
public class LaunchWebStartTest {
private JavaDriver driver;
@SuppressWarnings("unused")
private void createDriver(String title) {
if (true) {
throw new SkipException("WebStartTest skipped due to unreachable application online.");
}
JavaProfile profile = new JavaProfile(LaunchMode.JAVA_WEBSTART);
File f = findFile();
profile.setJNLPPath(f.getAbsolutePath());
profile.setStartWindowTitle(title);
driver = new JavaDriver(profile);
}
@AfterMethod
public void quitDriver() {
if (driver != null) {
driver.quit();
}
}
public void getDriverWithProfile() throws Throwable {
createDriver("SwingSet3");
new WebDriverWait(driver, 60).until(new Function<WebDriver, Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
driver.switchTo().window("SwingSet3");
} catch (NoSuchWindowException e) {
System.out.println("LaunchWebStartTest.get_driver_with_profile(): window not found");
System.out.println(driver.getTitle());
return false;
}
List<WebElement> buttons = driver.findElements(By.cssSelector("toggle-button"));
return buttons.size() > 0;
}
});
List<WebElement> buttons = driver.findElements(By.cssSelector("toggle-button"));
AssertJUnit.assertTrue(buttons.size() > 0);
buttons.get(3).click();
buttons.get(0).click();
}
public void getDriverWithProfileUsingRegexForTitle() throws Throwable {
createDriver("/S.*3");
new WebDriverWait(driver, 60).until(new Function<WebDriver, Boolean>() {
@Override
public Boolean apply(WebDriver driver) {
try {
driver.switchTo().window("SwingSet3");
} catch (NoSuchWindowException e) {
System.out.println("LaunchWebStartTest.get_driver_with_profile(): window not found");
System.out.println(driver.getTitle());
return false;
}
List<WebElement> buttons = driver.findElements(By.cssSelector("toggle-button"));
return buttons.size() > 0;
}
});
List<WebElement> buttons = driver.findElements(By.cssSelector("toggle-button"));
AssertJUnit.assertTrue(buttons.size() > 0);
buttons.get(3).click();
buttons.get(0).click();
}
private static File findFile() {
File f = new File(System.getProperty("SwingSet3.jnlp", "../marathon-test-helpers/swingset3/SwingSet3.jnlp"));
if (f.exists()) {
return f;
}
return null;
}
public void checkForArguments() throws Throwable {
JavaProfile profile = new JavaProfile(LaunchMode.JAVA_WEBSTART);
File f = findFile();
profile.setJNLPPath(f.getAbsolutePath());
profile.setStartWindowTitle("SwingSet3");
profile.addVMArgument("-Dhello=world");
CommandLine commandLine = profile.getCommandLine();
System.out.println(commandLine);
AssertJUnit.assertTrue(commandLine.toString().matches(".*JAVA_TOOL_OPTIONS=.*-Dhello=world.*"));
}
public void checkGivenExecutableIsUsed() throws Throwable {
JavaProfile profile = new JavaProfile(LaunchMode.JAVA_WEBSTART);
profile.setJavaCommand("java");
File f = findFile();
profile.setJNLPPath(f.getAbsolutePath());
profile.setStartWindowTitle("SwingSet3");
profile.addVMArgument("-Dhello=world");
CommandLine commandLine = profile.getCommandLine();
String exec = findExecutableOnPath("java");
AssertJUnit.assertTrue(commandLine.toString(), commandLine.toString().contains(exec));
}
public static String findExecutableOnPath(String name) {
if (!Platform.getCurrent().is(Platform.WINDOWS) || name.endsWith(".exe") || name.endsWith(".bat")) {
return getPathTo(name);
}
String path;
path = getPathTo(name + ".exe");
if (path != null)
return path;
path = getPathTo(name + ".cmd");
if (path != null)
return path;
return getPathTo(name + ".bat");
}
public static String getPathTo(String name) throws AssertionError {
String path = System.getenv("Path");
if (path == null)
path = System.getenv("PATH");
for (String dirname : path.split(File.pathSeparator)) {
File file = new File(dirname, name);
if (file.isFile() && file.canExecute()) {
return file.getAbsolutePath();
}
}
return null;
}
public static void main(String[] args) throws InterruptedException {
JavaProfile profile = new JavaProfile(LaunchMode.JAVA_WEBSTART);
File f = findFile();
profile.setJNLPPath(f.getAbsolutePath());
profile.setStartWindowTitle("SwingSet3");
CommandLine commandLine = profile.getCommandLine();
commandLine.copyOutputTo(System.err);
System.out.println(commandLine);
commandLine.execute();
}
}
| environment: ["\"Path\"", "\"PATH\""] | variablearg: [] | constarg: ["PATH", "Path"] | variableargjson: [] | constargjson: ["PATH", "Path"] | lang: java | constargcount: 2 | variableargcount: 0 | sentence: |
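findExecutableOnPath and getPathTo above walk the Path/PATH variables by hand because the test needs the executable's location, not just to run it. For comparison, a short Go sketch of the same PATH search, shown both with exec.LookPath and with a manual walk (illustrative only, not part of the Marathon test):

package main

import (
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
)

func main() {
    // exec.LookPath consults the PATH environment variable, the same lookup
    // the Java test above performs by hand with File.pathSeparator.
    if p, err := exec.LookPath("java"); err == nil {
        fmt.Println("found via exec.LookPath:", p)
    }

    // The manual equivalent: split PATH and probe each directory.
    for _, dir := range filepath.SplitList(os.Getenv("PATH")) {
        candidate := filepath.Join(dir, "java")
        if info, err := os.Stat(candidate); err == nil && !info.IsDir() {
            fmt.Println("found by walking PATH:", candidate)
            break
        }
    }
}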
dot_commands/install.py
|
import os
import subprocess
path = os.getcwd()
def updatePermissions(filename):
command = ["chmod", "+x", filename]
try:
command = subprocess.Popen(command, stdout=subprocess.PIPE)
output = command.communicate()[0]
print(output)
except OSError as e:
print(e)
def generateBin():
commands = ["info", "install", "setup"]
if not os.path.isdir(os.path.join(path, "bin")):
os.makedirs(path+"/bin")
for action in commands:
filename = open(path+"/bin/."+action, "w")
filename.write("python " + path + "/main.py " + action + " $*")
filename.close()
print("updating permissions for " + action)
updatePermissions(path+"/bin/."+action)
def updatePath():
home = os.environ["HOME"]
bashprofile = open(os.path.join(home,".bash_profile"), 'a')
bashprofile.write("\nexport PATH=\"" + path + "/bin:$PATH\"")
bashprofile.close()
print("Generating binaries")
generateBin()
print("Updating Path")
updatePath()
| environment: [] | variablearg: [] | constarg: ["HOME"] | variableargjson: [] | constargjson: ["HOME"] | lang: python | constargcount: 1 | variableargcount: 0 | sentence: |
settings/__init__.py
|
""" core app configuration """
import os
environment = os.getenv('LAMBTASTIC_ENV', 'development')
if environment == 'testing':
from .testing import *
elif environment == 'production':
from .production import *
else:
from .development import *
| environment: [] | variablearg: [] | constarg: ["LAMBTASTIC_ENV"] | variableargjson: [] | constargjson: ["LAMBTASTIC_ENV"] | lang: python | constargcount: 1 | variableargcount: 0 | sentence: |
autoscale_cloudroast/test_repo/autoscale/fixtures.py
|
"""
:summary: Base Classes for Autoscale Test Suites (Collections of Test Cases)
"""
from cafe.drivers.unittest.fixtures import BaseTestFixture
from autoscale.behaviors import AutoscaleBehaviors
from cloudcafe.common.resources import ResourcePool
from cloudcafe.common.tools.datagen import rand_name
from autoscale.config import AutoscaleConfig
from cloudcafe.auth.config import UserAuthConfig, UserConfig
from autoscale.client import AutoscalingAPIClient
from cloudcafe.auth.provider import AuthProvider
from cloudcafe.compute.servers_api.client import ServersClient
from autoscale.otter_constants import OtterConstants
import os
from time import sleep
class AutoscaleFixture(BaseTestFixture):
"""
:summary: Fixture for an Autoscale test.
"""
@classmethod
def setUpClass(cls):
"""
Initialize autoscale configs, behaviors and client
"""
super(AutoscaleFixture, cls).setUpClass()
cls.resources = ResourcePool()
cls.autoscale_config = AutoscaleConfig()
cls.endpoint_config = UserAuthConfig()
user_config = UserConfig()
access_data = AuthProvider.get_access_data(cls.endpoint_config,
user_config)
server_service = access_data.get_service(
cls.autoscale_config.server_endpoint_name)
server_url = server_service.get_endpoint(
cls.autoscale_config.region).public_url
cls.tenant_id = cls.autoscale_config.tenant_id
cls.otter_endpoint = cls.autoscale_config.server_endpoint
env = os.environ['OSTNG_CONFIG_FILE']
if ('prod.ord' in env.lower()) or ('prod.dfw' in env.lower()):
autoscale_service = access_data.get_service(
cls.autoscale_config.autoscale_endpoint_name)
cls.url = autoscale_service.get_endpoint(
cls.autoscale_config.region).public_url
else:
cls.url = str(cls.otter_endpoint) + '/' + str(cls.tenant_id)
cls.autoscale_client = AutoscalingAPIClient(
cls.url, access_data.token.id_,
'json', 'json')
cls.server_client = ServersClient(
server_url, access_data.token.id_,
'json', 'json')
cls.autoscale_behaviors = AutoscaleBehaviors(cls.autoscale_config,
cls.autoscale_client)
cls.gc_name = cls.autoscale_config.gc_name
cls.gc_cooldown = int(cls.autoscale_config.gc_cooldown)
cls.gc_min_entities = int(cls.autoscale_config.gc_min_entities)
cls.gc_min_entities_alt = int(cls.autoscale_config.gc_min_entities_alt)
cls.gc_max_entities = int(cls.autoscale_config.gc_max_entities)
cls.lc_name = cls.autoscale_config.lc_name
cls.lc_flavor_ref = cls.autoscale_config.lc_flavor_ref
cls.lc_image_ref = cls.autoscale_config.lc_image_ref
cls.lc_image_ref_alt = cls.autoscale_config.lc_image_ref_alt
cls.sp_name = rand_name(cls.autoscale_config.sp_name)
cls.sp_cooldown = int(cls.autoscale_config.sp_cooldown)
cls.sp_change = int(cls.autoscale_config.sp_change)
cls.sp_change_percent = int(cls.autoscale_config.sp_change_percent)
cls.sp_desired_capacity = int(cls.autoscale_config.sp_desired_capacity)
cls.sp_policy_type = cls.autoscale_config.sp_policy_type
cls.upd_sp_change = int(cls.autoscale_config.upd_sp_change)
cls.lc_load_balancers = cls.autoscale_config.lc_load_balancers
cls.sp_list = cls.autoscale_config.sp_list
cls.wb_name = rand_name(cls.autoscale_config.wb_name)
cls.interval_time = int(cls.autoscale_config.interval_time)
cls.timeout = int(cls.autoscale_config.timeout)
cls.scheduler_interval = OtterConstants.SCHEDULER_INTERVAL
cls.scheduler_batch = OtterConstants.SCHEDULER_BATCH
cls.max_maxentities = OtterConstants.MAX_MAXENTITIES
cls.max_cooldown = OtterConstants.MAX_COOLDOWN
def validate_headers(self, headers):
"""
Module to validate headers
"""
self.assertTrue(headers is not None,
msg='No headers returned')
if headers.get('transfer-encoding'):
self.assertEqual(headers['transfer-encoding'], 'chunked',
msg='Response header transfer-encoding is not chunked')
self.assertTrue(headers['server'] is not None,
msg='Response header server is not available')
self.assertEquals(headers['content-type'], 'application/json',
msg='Response header content-type is None')
self.assertTrue(headers['date'] is not None,
msg='Time not included')
self.assertTrue(headers['x-response-id'] is not None,
msg='No x-response-id')
def empty_scaling_group(self, group):
"""
Given the group, updates the group to be of 0 minentities and maxentities.
"""
self.autoscale_client.update_group_config(
group_id=group.id,
name="delete_me_please",
cooldown=0,
min_entities=0,
max_entities=0,
metadata={})
def verify_group_state(self, group_id, desired_capacity):
"""
Given the group id and the expected desired capacity,
asserts if the desired capacity is being met by the scaling group
through the list group status call
"""
group_state_response = self.autoscale_client.list_status_entities_sgroups(
group_id)
self.assertEquals(group_state_response.status_code, 200)
group_state = group_state_response.entity
self.assertEquals(
group_state.pendingCapacity + group_state.activeCapacity,
desired_capacity,
msg='Active + Pending servers ({0}) != ({1}) minentities on the group {2}'
.format((group_state.pendingCapacity + group_state.activeCapacity),
desired_capacity, group_id))
self.assertEquals(group_state.desiredCapacity, desired_capacity,
msg='Desired capacity ({0}) != ({1}) minentities on the group {2}'
.format(group_state.desiredCapacity, desired_capacity, group_id))
def assert_get_policy(self, created_policy, get_policy, args=False):
"""
Given the newly created policy dict and the response object from the get
policy call, asserts all the attributes are equal. args can be at_style,
cron_style or maas
"""
self.assertEquals(
get_policy.id, created_policy['id'],
msg='Policy Id upon get is not as when created')
self.assertEquals(
get_policy.links, created_policy['links'],
msg='Links for the scaling policy upon get is not as when created')
self.assertEquals(
get_policy.name, created_policy['name'],
msg='Name of the policy upon get is not as when was created')
self.assertEquals(
get_policy.cooldown, created_policy['cooldown'],
msg='Cooldown of the policy upon get != when created')
if created_policy.get('change'):
self.assertEquals(
get_policy.change, created_policy['change'],
msg='Change in the policy is not as expected')
elif created_policy.get('change_percent'):
self.assertEquals(
get_policy.changePercent, created_policy['change_percent'],
msg='Change percent in the policy is not as expected')
elif created_policy.get('desired_capacity'):
self.assertEquals(
get_policy.desiredCapacity, created_policy['desired_capacity'],
msg='Desired capacity in the policy is not as expected')
else:
self.fail(msg='Policy does not have a change type')
if args == 'at_style':
self.assertEquals(get_policy.args.at, created_policy['schedule_value'],
msg='At style schedule policy value not as expected')
if args == 'cron_style':
self.assertEquals(get_policy.args.cron, created_policy['schedule_value'],
msg='Cron style schedule policy value not as expected')
def create_default_at_style_policy_wait_for_execution(self, group_id, delay=3,
change=None,
scale_down=None):
"""
Creates an at style scale up/scale down policy to execute at utcnow() + delay and waits
the scheduler config seconds + delay, so that the policy is picked
"""
if change is None:
change = self.sp_change
if scale_down is True:
change = -change
self.autoscale_behaviors.create_schedule_policy_given(
group_id=group_id,
sp_cooldown=0,
sp_change=change,
schedule_at=self.autoscale_behaviors.get_time_in_utc(delay))
sleep(self.scheduler_interval + delay)
@classmethod
def tearDownClass(cls):
"""
Deletes the added resources
"""
super(AutoscaleFixture, cls).tearDownClass()
cls.resources.release()
class ScalingGroupFixture(AutoscaleFixture):
"""
:summary: Creates a scaling group using the default from
the test data
"""
@classmethod
def setUpClass(cls, gc_name=None, gc_cooldown=None, gc_min_entities=None,
gc_max_entities=None, gc_metadata=None, lc_name=None,
lc_image_ref=None, lc_flavor_ref=None,
lc_personality=None, lc_metadata=None,
lc_disk_config=None, lc_networks=None,
lc_load_balancers=None):
"""
Creates a scaling group with config values
"""
super(ScalingGroupFixture, cls).setUpClass()
if gc_name is None:
gc_name = rand_name('test_sgroup_fixt_')
if gc_cooldown is None:
gc_cooldown = cls.gc_cooldown
if gc_min_entities is None:
gc_min_entities = cls.gc_min_entities
if lc_name is None:
lc_name = rand_name('test_sg_fixt_srv')
if lc_flavor_ref is None:
lc_flavor_ref = cls.lc_flavor_ref
if lc_image_ref is None:
lc_image_ref = cls.lc_image_ref
cls.create_group_response = cls.autoscale_client.\
create_scaling_group(
gc_name, gc_cooldown,
gc_min_entities,
lc_name, lc_image_ref,
lc_flavor_ref,
gc_max_entities=gc_max_entities,
gc_metadata=gc_metadata,
lc_personality=lc_personality,
lc_metadata=lc_metadata,
lc_disk_config=lc_disk_config,
lc_networks=lc_networks,
lc_load_balancers=lc_load_balancers)
cls.group = cls.create_group_response.entity
cls.resources.add(cls.group.id,
cls.autoscale_client.delete_scaling_group)
@classmethod
def tearDownClass(cls):
"""
Deletes the scaling group
"""
super(ScalingGroupFixture, cls).tearDownClass()
class ScalingGroupPolicyFixture(ScalingGroupFixture):
"""
:summary: Creates a scaling group with policy using the default from
the test data
"""
@classmethod
def setUpClass(cls, name=None, cooldown=None, change=None,
change_percent=None, desired_capacity=None,
policy_type=None):
"""
Creates a scaling policy
"""
super(ScalingGroupPolicyFixture, cls).setUpClass()
if name is None:
name = cls.sp_name
if cooldown is None:
cooldown = cls.sp_cooldown
if policy_type is None:
policy_type = cls.sp_policy_type
if change:
cls.create_policy_response = cls.autoscale_client.create_policy(
group_id=cls.group.id,
name=name, cooldown=cooldown, change=change, policy_type=policy_type)
elif change_percent:
cls.create_policy_response = cls.autoscale_client.create_policy(
group_id=cls.group.id,
name=name, cooldown=cooldown, change_percent=change_percent,
policy_type=policy_type)
elif desired_capacity:
cls.create_policy_response = cls.autoscale_client.create_policy(
group_id=cls.group.id,
name=name, cooldown=cooldown, desired_capacity=desired_capacity,
policy_type=policy_type)
else:
change = cls.sp_change
cls.create_policy_response = cls.autoscale_client.create_policy(
group_id=cls.group.id,
name=name, cooldown=cooldown, change=change, policy_type=policy_type)
cls.create_policy = cls.create_policy_response.entity
cls.policy = cls.autoscale_behaviors.get_policy_properties(
cls.create_policy)
@classmethod
def tearDownClass(cls):
"""
Deletes the scaling policy
"""
super(ScalingGroupPolicyFixture, cls).tearDownClass()
class ScalingGroupWebhookFixture(ScalingGroupPolicyFixture):
"""
:summary: Creates a scaling group with a scaling policy
and webhook using the default from the test data
"""
@classmethod
def setUpClass(cls, webhook=None, metadata=None):
"""
Create a webhook
"""
super(ScalingGroupWebhookFixture, cls).setUpClass()
if webhook is None:
webhook = cls.wb_name
cls.create_webhook_response = cls.autoscale_client.create_webhook(
group_id=cls.group.id,
policy_id=cls.policy['id'],
name=webhook,
metadata=metadata)
cls.create_webhook = cls.create_webhook_response.entity
cls.webhook = cls.autoscale_behaviors.get_webhooks_properties(
cls.create_webhook)
@classmethod
def tearDownClass(cls):
"""
Delete the webhook
"""
super(ScalingGroupWebhookFixture, cls).tearDownClass()
| environment: [] | variablearg: [] | constarg: ["OSTNG_CONFIG_FILE"] | variableargjson: [] | constargjson: ["OSTNG_CONFIG_FILE"] | lang: python | constargcount: 1 | variableargcount: 0 | sentence: |
pkg/resourcemanager/config/env.go
|
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package config
import (
"fmt"
"log"
"os"
"strconv"
"github.com/Azure/azure-service-operator/pkg/helpers"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/gobuffalo/envy"
)
type ConfigRequirementType int
const (
RequireClientID ConfigRequirementType = iota
RequireClientSecret
RequireTenantID
RequireSubscriptionID
)
// ParseEnvironment loads a sibling `.env` file then looks through all environment
// variables to set global configuration.
func ParseEnvironment() error {
azcloud := os.Getenv("AZURE_CLOUD_ENV")
envy.Load()
if azcloud == "" {
azcloud = "AzurePublicCloud"
}
allowed := []string{
"AzurePublicCloud",
"AzureUSGovernmentCloud",
"AzureChinaCloud",
"AzureGermanCloud",
}
if !helpers.ContainsString(allowed, azcloud) {
return fmt.Errorf("Invalid Cloud chosen: AZURE_CLOUD_ENV set to '%s'", azcloud)
}
cloudName = azcloud
azureEnv, _ := azure.EnvironmentFromName(azcloud) // shouldn't fail
authorizationServerURL = azureEnv.ActiveDirectoryEndpoint
baseURI = azureEnv.ResourceManagerEndpoint // BaseURI()
locationDefault = envy.Get("AZURE_LOCATION_DEFAULT", "southcentralus") // DefaultLocation()
useDeviceFlow = ParseBoolFromEnvironment("AZURE_USE_DEVICEFLOW") // UseDeviceFlow()
useMI = ParseBoolFromEnvironment("AZURE_USE_MI") // UseMI()
keepResources = ParseBoolFromEnvironment("AZURE_SAMPLES_KEEP_RESOURCES") // KeepResources()
operatorKeyvault = envy.Get("AZURE_OPERATOR_KEYVAULT", "") // operatorKeyvault()
testResourcePrefix = envy.Get("TEST_RESOURCE_PREFIX", "t-"+helpers.RandomString(6))
var err error
for _, requirement := range GetRequiredConfigs() {
switch requirement {
case RequireClientID:
clientID, err = envy.MustGet("AZURE_CLIENT_ID") // ClientID()
if err != nil {
return fmt.Errorf("expected env vars not provided (AZURE_CLIENT_ID): %s\n", err)
}
case RequireClientSecret:
clientSecret, err = envy.MustGet("AZURE_CLIENT_SECRET") // ClientSecret()
if err != nil {
return fmt.Errorf("expected env vars not provided (AZURE_CLIENT_SECRET): %s\n", err)
}
case RequireTenantID:
tenantID, err = envy.MustGet("AZURE_TENANT_ID") // TenantID()
if err != nil {
return fmt.Errorf("expected env vars not provided (AZURE_TENANT_ID): %s\n", err)
}
case RequireSubscriptionID:
subscriptionID, err = envy.MustGet("AZURE_SUBSCRIPTION_ID") // SubscriptionID()
if err != nil {
return fmt.Errorf("expected env vars not provided (AZURE_SUBSCRIPTION_ID): %s\n", err)
}
}
}
return nil
}
func GetRequiredConfigs() []ConfigRequirementType {
if useDeviceFlow {
// Device flow required Configs
return []ConfigRequirementType{RequireClientID, RequireTenantID, RequireSubscriptionID}
}
if useMI {
// Managed Service Identity required Configs
return []ConfigRequirementType{RequireTenantID, RequireSubscriptionID}
}
// Default required Configs
return []ConfigRequirementType{RequireClientID, RequireClientSecret, RequireTenantID, RequireSubscriptionID}
}
func ParseBoolFromEnvironment(variable string) bool {
raw := envy.Get(variable, "0")
value, err := strconv.ParseBool(raw)
if err != nil {
log.Printf("WARNING: invalid input value specified for bool %v: %q, disabling\n", variable, raw)
return false
}
return value
}
| environment: ["\"AZURE_CLOUD_ENV\""] | variablearg: [] | constarg: ["AZURE_CLOUD_ENV"] | variableargjson: [] | constargjson: ["AZURE_CLOUD_ENV"] | lang: go | constargcount: 1 | variableargcount: 0 | sentence: |
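ParseEnvironment above defaults AZURE_CLOUD_ENV and rejects values outside a fixed allow-list before deriving endpoints. A minimal sketch of that default-then-validate step on its own (cloudFromEnv is an invented helper; the real code also wires in envy and the Azure SDK):

package main

import (
    "fmt"
    "os"
)

// cloudFromEnv reads AZURE_CLOUD_ENV, defaults it, and rejects values
// outside the allow-list, mirroring the validation in ParseEnvironment above.
func cloudFromEnv() (string, error) {
    cloud := os.Getenv("AZURE_CLOUD_ENV")
    if cloud == "" {
        cloud = "AzurePublicCloud"
    }
    allowed := []string{"AzurePublicCloud", "AzureUSGovernmentCloud", "AzureChinaCloud", "AzureGermanCloud"}
    for _, a := range allowed {
        if a == cloud {
            return cloud, nil
        }
    }
    return "", fmt.Errorf("invalid cloud: AZURE_CLOUD_ENV set to %q", cloud)
}

func main() {
    cloud, err := cloudFromEnv()
    if err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("using cloud:", cloud)
}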
bin/substrates.py
|
# substrates Tab
import os, math
from pathlib import Path
from shutil import copyfile
from ipywidgets import Layout, Label, Text, Checkbox, Button, BoundedIntText, HBox, VBox, Box, \
FloatText, Dropdown, interactive
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle, Ellipse, Rectangle
from matplotlib.collections import PatchCollection
import matplotlib.colors as mplc
from collections import deque
import numpy as np
import scipy.io
import xml.etree.ElementTree as ET # https://docs.python.org/2/library/xml.etree.elementtree.html
import glob
import platform
import zipfile
from debug import debug_view
import warnings
hublib_flag = True
if platform.system() != 'Windows':
try:
# print("Trying to import hublib.ui")
from hublib.ui import Download
except:
hublib_flag = False
else:
hublib_flag = False
#warnings.warn(message, mplDeprecation, stacklevel=1)
warnings.filterwarnings("ignore")
class SubstrateTab(object):
def __init__(self, fury_tab):
self.fury_tab = fury_tab
self.output_dir = '.'
# self.output_dir = 'tmpdir'
self.figsize_width_substrate = 15.0 # allow extra for colormap
self.figsize_height_substrate = 12.5
self.figsize_width_svg = 12.0
self.figsize_height_svg = 12.0
# self.fig = plt.figure(figsize=(7.2,6)) # this strange figsize results in a ~square contour plot
self.first_time = True
self.modulo = 1
self.use_defaults = True
self.svg_delta_t = 1
self.substrate_delta_t = 1
self.svg_frame = 1
self.substrate_frame = 1
self.customized_output_freq = False
self.therapy_activation_time = 1000000
self.max_svg_frame_pre_therapy = 1000000
self.max_substrate_frame_pre_therapy = 1000000
self.svg_xmin = 0
# Probably don't want to hardwire these if we allow changing the domain size
# self.svg_xrange = 2000
# self.xmin = -1000.
# self.xmax = 1000.
# self.ymin = -1000.
# self.ymax = 1000.
# self.x_range = 2000.
# self.y_range = 2000.
self.show_nucleus = False
self.show_edge = True
# initial value
self.field_index = 4
# self.field_index = self.mcds_field.value + 4
self.skip_cb = False
# define dummy size of mesh (set in the tool's primary module)
self.numx = 0
self.numy = 0
self.title_str = ''
tab_height = '600px'
tab_height = '500px'
constWidth = '180px'
constWidth2 = '150px'
tab_layout = Layout(width='900px', # border='2px solid black',
height=tab_height, ) #overflow_y='scroll')
max_frames = 1
# self.mcds_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False)
# self.i_plot = interactive(self.plot_plots, frame=(0, max_frames), continuous_update=False)
self.i_plot = interactive(self.plot_substrate, frame=(0, max_frames), continuous_update=False)
# "plot_size" controls the size of the tab height, not the plot (rf. figsize for that)
# NOTE: the Substrates Plot tab has an extra row of widgets at the top of it (cf. Cell Plots tab)
svg_plot_size = '700px'
svg_plot_size = '600px'
svg_plot_size = '700px'
svg_plot_size = '900px'
self.i_plot.layout.width = svg_plot_size
self.i_plot.layout.height = svg_plot_size
self.fontsize = 20
# description='# cell frames',
self.max_frames = BoundedIntText(
min=0, max=99999, value=max_frames,
description='# frames',
layout=Layout(width='160px'),
)
self.max_frames.observe(self.update_max_frames)
# self.field_min_max = {'dummy': [0., 1., False]}
# NOTE: manually setting these for now (vs. parsing them out of data/initial.xml)
self.field_min_max = {'director signal':[0.,1.,False], 'cargo signal':[0.,1.,False] }
# hacky I know, but make a dict that's got (key,value) reversed from the dict in the Dropdown below
# self.field_dict = {0:'dummy'}
self.field_dict = {0:'director signal', 1:'cargo signal'}
self.mcds_field = Dropdown(
options={'director signal': 0, 'cargo signal':1},
disabled=True,
value=0,
# description='Field',
layout=Layout(width=constWidth)
)
# print("substrate __init__: self.mcds_field.value=",self.mcds_field.value)
# self.mcds_field.observe(self.mcds_field_cb)
self.mcds_field.observe(self.mcds_field_changed_cb)
self.field_cmap = Dropdown(
options=['viridis', 'jet', 'YlOrRd'],
value='YlOrRd',
disabled=True,
# description='Field',
layout=Layout(width=constWidth)
)
# self.field_cmap.observe(self.plot_substrate)
self.field_cmap.observe(self.mcds_field_cb)
self.cmap_fixed_toggle = Checkbox(
description='Fix',
disabled=True,
# layout=Layout(width=constWidth2),
)
self.cmap_fixed_toggle.observe(self.mcds_field_cb)
# def cmap_fixed_toggle_cb(b):
# # self.update()
# # self.field_min_max = {'oxygen': [0., 30.,True], 'glucose': [0., 1.,False]}
# field_name = self.field_dict[self.mcds_field.value]
# if (self.cmap_fixed_toggle.value):
# self.field_min_max[field_name][0] = self.cmap_min.value
# self.field_min_max[field_name][1] = self.cmap_max.value
# self.field_min_max[field_name][2] = True
# else:
# # self.field_min_max[field_name][0] = self.cmap_min.value
# # self.field_min_max[field_name][1] = self.cmap_max.value
# self.field_min_max[field_name][2] = False
# self.i_plot.update()
# self.cmap_fixed_toggle.observe(cmap_fixed_toggle_cb)
# self.save_min_max= Button(
# description='Save', #style={'description_width': 'initial'},
# button_style='success', # 'success', 'info', 'warning', 'danger' or ''
# tooltip='Save min/max for this substrate',
# disabled=True,
# layout=Layout(width='90px')
# )
# def save_min_max_cb(b):
# # field_name = self.mcds_field.options[]
# # field_name = next(key for key, value in self.mcds_field.options.items() if value == self.mcds_field.value)
# field_name = self.field_dict[self.mcds_field.value]
# # print(field_name)
# # self.field_min_max = {'oxygen': [0., 30.], 'glucose': [0., 1.], 'H+ ions': [0., 1.], 'ECM': [0., 1.], 'NP1': [0., 1.], 'NP2': [0., 1.]}
# self.field_min_max[field_name][0] = self.cmap_min.value
# self.field_min_max[field_name][1] = self.cmap_max.value
# # print(self.field_min_max)
# self.save_min_max.on_click(save_min_max_cb)
self.cmap_min = FloatText(
description='Min',
value=0,
step = 0.1,
disabled=True,
layout=Layout(width=constWidth2),
)
self.cmap_min.observe(self.mcds_field_cb)
self.cmap_max = FloatText(
description='Max',
value=38,
step = 0.1,
disabled=True,
layout=Layout(width=constWidth2),
)
self.cmap_max.observe(self.mcds_field_cb)
def cmap_fixed_toggle_cb(b):
field_name = self.field_dict[self.mcds_field.value]
# print(self.cmap_fixed_toggle.value)
if (self.cmap_fixed_toggle.value): # toggle on fixed range
self.cmap_min.disabled = False
self.cmap_max.disabled = False
self.field_min_max[field_name][0] = self.cmap_min.value
self.field_min_max[field_name][1] = self.cmap_max.value
self.field_min_max[field_name][2] = True
# self.save_min_max.disabled = False
else: # toggle off fixed range
self.cmap_min.disabled = True
self.cmap_max.disabled = True
self.field_min_max[field_name][2] = False
# self.save_min_max.disabled = True
# self.mcds_field_cb()
self.i_plot.update()
self.cmap_fixed_toggle.observe(cmap_fixed_toggle_cb)
field_cmap_row2 = HBox([self.field_cmap, self.cmap_fixed_toggle])
# field_cmap_row3 = HBox([self.save_min_max, self.cmap_min, self.cmap_max])
items_auto = [
# self.save_min_max, #layout=Layout(flex='3 1 auto', width='auto'),
self.cmap_min,
self.cmap_max,
]
box_layout = Layout(display='flex',
flex_flow='row',
align_items='stretch',
width='80%')
field_cmap_row3 = Box(children=items_auto, layout=box_layout)
# self.debug_str = Text(
# value='debug info',
# description='Debug:',
# disabled=True,
# layout=Layout(width='600px'), #constWidth = '180px'
# )
#---------------------
self.cell_nucleus_toggle = Checkbox(
description='nuclei',
disabled=False,
value = self.show_nucleus,
# layout=Layout(width=constWidth2),
)
def cell_nucleus_toggle_cb(b):
# self.update()
if (self.cell_nucleus_toggle.value):
self.show_nucleus = True
else:
self.show_nucleus = False
self.i_plot.update()
self.cell_nucleus_toggle.observe(cell_nucleus_toggle_cb)
#----
self.cell_edges_toggle = Checkbox(
description='edges',
disabled=False,
value=self.show_edge,
# layout=Layout(width=constWidth2),
)
def cell_edges_toggle_cb(b):
# self.update()
if (self.cell_edges_toggle.value):
self.show_edge = True
else:
self.show_edge = False
self.i_plot.update()
self.cell_edges_toggle.observe(cell_edges_toggle_cb)
self.cells_toggle = Checkbox(
description='Cells',
disabled=False,
value=True,
# layout=Layout(width=constWidth2),
)
def cells_toggle_cb(b):
# self.update()
self.i_plot.update()
if (self.cells_toggle.value):
self.cell_edges_toggle.disabled = False
self.cell_nucleus_toggle.disabled = False
else:
self.cell_edges_toggle.disabled = True
self.cell_nucleus_toggle.disabled = True
self.cells_toggle.observe(cells_toggle_cb)
#---------------------
self.substrates_toggle = Checkbox(
description='Substrates',
disabled=True,
value=False,
# layout=Layout(width=constWidth2),
)
def substrates_toggle_cb(b):
if (self.substrates_toggle.value): # seems bass-ackwards
self.cmap_fixed_toggle.disabled = False
self.cmap_min.disabled = False
self.cmap_max.disabled = False
self.mcds_field.disabled = False
self.field_cmap.disabled = False
else:
self.cmap_fixed_toggle.disabled = True
self.cmap_min.disabled = True
self.cmap_max.disabled = True
self.mcds_field.disabled = True
self.field_cmap.disabled = True
self.substrates_toggle.observe(substrates_toggle_cb)
self.grid_toggle = Checkbox(
description='grid',
disabled=False,
value=True,
# layout=Layout(width=constWidth2),
)
def grid_toggle_cb(b):
# self.update()
self.i_plot.update()
self.grid_toggle.observe(grid_toggle_cb)
# field_cmap_row3 = Box([self.save_min_max, self.cmap_min, self.cmap_max])
# mcds_tab = widgets.VBox([mcds_dir, mcds_plot, mcds_play], layout=tab_layout)
# mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3, self.max_frames]) # mcds_dir
# mcds_params = VBox([self.mcds_field, field_cmap_row2, field_cmap_row3,]) # mcds_dir
# self.tab = HBox([mcds_params, self.mcds_plot], layout=tab_layout)
help_label = Label('select slider: drag or left/right arrows')
# row1 = Box([help_label, Box( [self.max_frames, self.mcds_field, self.field_cmap], layout=Layout(border='0px solid black',
row1a = Box( [self.max_frames, self.mcds_field, self.field_cmap], layout=Layout(border='1px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
row1b = Box( [self.cells_toggle, self.cell_nucleus_toggle, self.cell_edges_toggle], layout=Layout(border='1px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
row1 = HBox( [row1a, Label('.....'), row1b])
row2a = Box([self.cmap_fixed_toggle, self.cmap_min, self.cmap_max], layout=Layout(border='1px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
# row2b = Box( [self.substrates_toggle, self.grid_toggle], layout=Layout(border='1px solid black',
row2b = Box( [self.substrates_toggle, ], layout=Layout(border='1px solid black',
width='50%',
height='',
align_items='stretch',
flex_direction='row',
display='flex'))
# row2 = HBox( [row2a, self.substrates_toggle, self.grid_toggle])
row2 = HBox( [row2a, Label('.....'), row2b])
if (hublib_flag):
self.fury_button= Button(
description="Send current frame's 3D data to Fury", #style={'description_width': 'initial'},
button_style='success', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click to send data to the Fury GPU server',
disabled=False,
layout=Layout(width='280px')
)
self.fury_feedback_str = Label(value='')
self.fury_reset_button= Button(
description="Reset Fury", #style={'description_width': 'initial'},
button_style='success', # 'success', 'info', 'warning', 'danger' or ''
disabled=False,
layout=Layout(width='180px')
)
def send_to_fury_cb(b):
self.fury_feedback_str.value = "working..."
session_dir = os.getenv('SESSIONDIR')
print('session_dir = ',session_dir)
session_id = os.getenv('SESSION')
print('session_id = ',session_id)
user_id = os.getenv('USER')
print('user_id = ',user_id)
fury_data_path_str = "/data/tools/shared/" + user_id + "/fury/" + session_id
# updated, based on email from Serge (1/19/21)
fury_data_path_str2 = "/srv/nanohub/data/tools/shared/" + user_id + "/fury/" + session_id
# dummy to test locally
# fury_data_path_str = "/tmp/" + user_id + "/fury"
print("fury_data_path_str = ",fury_data_path_str)
print("fury_data_path_str2 = ",fury_data_path_str2)
os.makedirs(fury_data_path_str, exist_ok=True)
# data_file = "output00000001_cells_physicell.mat"
# we need to copy 3(?) files (for any one frame)
mesh_file = "initial_mesh0.mat"
xml_file = "output%08d.xml" % self.svg_frame
data_file = "output%08d_cells_physicell.mat" % self.svg_frame
# from the app's root directory
# print("self.output_dir = ",self.output_dir)
# from_file = "tmpdir/" + data_file
from_file = self.output_dir + "/" + mesh_file
to_file = fury_data_path_str + "/" + mesh_file
copyfile(from_file, to_file)
from_file = self.output_dir + "/" + xml_file
to_file = fury_data_path_str + "/" + xml_file
copyfile(from_file, to_file)
from_file = self.output_dir + "/" + data_file
print("from: ",from_file)
to_file = fury_data_path_str + "/" + data_file
print("to: ",to_file)
copyfile(from_file, to_file)
# time.sleep(3)
file = Path(to_file)
while not file.exists():
time.sleep(2)
# copyfile("tmpdir/" + data_file, fury_data_path_str + "/" + "output00000001_cells_physicell.mat")
# Send signal to Fury that new data is ready: (folder, filename)
self.fury_tab.send_data(fury_data_path_str2, xml_file)
self.fury_feedback_str.value = ""
self.fury_button.on_click(send_to_fury_cb)
fury_row = HBox([self.fury_button, self.fury_feedback_str])
#--------
def fury_reset_cb(b):
self.fury_tab.reset()
self.fury_reset_button.on_click(fury_reset_cb)
# self.fury_button = Button(description='random_seed', disabled=True, layout=name_button_layout)
# param_name1.style.button_color = 'lightgreen'
self.download_button = Download('mcds.zip', style='warning', icon='cloud-download',
tooltip='Download data', cb=self.download_cb)
self.download_svg_button = Download('svg.zip', style='warning', icon='cloud-download',
tooltip='You need to allow pop-ups in your browser', cb=self.download_svg_cb)
download_row = HBox([self.download_button.w, self.download_svg_button.w, Label("Download all cell plots (browser must allow pop-ups).")])
# box_layout = Layout(border='0px solid')
controls_box = VBox([row1, row2]) # ,width='50%', layout=box_layout)
self.tab = VBox([controls_box, self.i_plot, fury_row, self.fury_reset_button, download_row])
# self.tab = VBox([controls_box, self.debug_str, self.i_plot, download_row])
else:
# self.tab = VBox([row1, row2])
self.tab = VBox([row1, row2, self.i_plot])
#---------------------------------------------------
def update_dropdown_fields(self, data_dir):
# print('update_dropdown_fields called --------')
self.output_dir = data_dir
tree = None
try:
fname = os.path.join(self.output_dir, "initial.xml")
tree = ET.parse(fname)
xml_root = tree.getroot()
except:
print("Cannot open ",fname," to read info, e.g., names of substrate fields.")
return
self.field_min_max = {}
self.field_dict = {}
dropdown_options = {}
uep = xml_root.find('.//variables')
comment_str = ""
field_idx = 0
        if uep is not None:   # an Element with no children is falsy, so test against None explicitly
for elm in uep.findall('variable'):
# print("-----> ",elm.attrib['name'])
field_name = elm.attrib['name']
self.field_min_max[field_name] = [0., 1., False]
self.field_dict[field_idx] = field_name
dropdown_options[field_name] = field_idx
self.field_min_max[field_name][0] = 0
self.field_min_max[field_name][1] = 1
# self.field_min_max[field_name][0] = field_idx #rwh: helps debug
# self.field_min_max[field_name][1] = field_idx+1
self.field_min_max[field_name][2] = False
field_idx += 1
# constWidth = '180px'
# print('options=',dropdown_options)
# print(self.field_min_max) # debug
self.mcds_field.value = 0
self.mcds_field.options = dropdown_options
# self.mcds_field = Dropdown(
# # options={'oxygen': 0, 'glucose': 1},
# options=dropdown_options,
# value=0,
# # description='Field',
# layout=Layout(width=constWidth)
# )
# def update_max_frames_expected(self, value): # called when beginning an interactive Run
# self.max_frames.value = value # assumes naming scheme: "snapshot%08d.svg"
# self.mcds_plot.children[0].max = self.max_frames.value
#------------------------------------------------------------------------------
def update_params(self, config_tab, user_params_tab):
# xml_root.find(".//x_min").text = str(self.xmin.value)
# xml_root.find(".//x_max").text = str(self.xmax.value)
# xml_root.find(".//dx").text = str(self.xdelta.value)
# xml_root.find(".//y_min").text = str(self.ymin.value)
# xml_root.find(".//y_max").text = str(self.ymax.value)
# xml_root.find(".//dy").text = str(self.ydelta.value)
# xml_root.find(".//z_min").text = str(self.zmin.value)
# xml_root.find(".//z_max").text = str(self.zmax.value)
# xml_root.find(".//dz").text = str(self.zdelta.value)
self.xmin = config_tab.xmin.value
self.xmax = config_tab.xmax.value
self.x_range = self.xmax - self.xmin
self.svg_xrange = self.xmax - self.xmin
self.ymin = config_tab.ymin.value
self.ymax = config_tab.ymax.value
self.y_range = self.ymax - self.ymin
self.numx = math.ceil( (self.xmax - self.xmin) / config_tab.xdelta.value)
self.numy = math.ceil( (self.ymax - self.ymin) / config_tab.ydelta.value)
if (self.x_range > self.y_range):
ratio = self.y_range / self.x_range
self.figsize_width_substrate = 15.0 # allow extra for colormap
self.figsize_height_substrate = 12.5 * ratio
self.figsize_width_svg = 12.0
self.figsize_height_svg = 12.0 * ratio
else: # x < y
ratio = self.x_range / self.y_range
self.figsize_width_substrate = 15.0 * ratio
self.figsize_height_substrate = 12.5
self.figsize_width_svg = 12.0 * ratio
self.figsize_height_svg = 12.0
self.svg_flag = config_tab.toggle_svg.value
self.substrates_flag = config_tab.toggle_mcds.value
# print("substrates: update_params(): svg_flag, toggle=",self.svg_flag,config_tab.toggle_svg.value)
# print("substrates: update_params(): self.substrates_flag = ",self.substrates_flag)
self.svg_delta_t = config_tab.svg_interval.value
self.substrate_delta_t = config_tab.mcds_interval.value
self.modulo = int(self.substrate_delta_t / self.svg_delta_t)
# print("substrates: update_params(): modulo=",self.modulo)
if self.customized_output_freq:
# self.therapy_activation_time = user_params_tab.therapy_activation_time.value # NOTE: edit for user param name
# print("substrates: update_params(): therapy_activation_time=",self.therapy_activation_time)
self.max_svg_frame_pre_therapy = int(self.therapy_activation_time/self.svg_delta_t)
self.max_substrate_frame_pre_therapy = int(self.therapy_activation_time/self.substrate_delta_t)
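        # Illustrative example (not from the original source): with
        # svg_interval = 6 (min) and mcds_interval = 60 (min) in the config tab,
        # self.modulo = int(60 / 6) = 10, i.e. only every 10th cell (SVG) frame
        # has a matching substrate frame to draw underneath it.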
#------------------------------------------------------------------------------
# def update(self, rdir):
# Called from driver module (e.g., pc4*.py) (among other places?)
def update(self, rdir=''):
# with debug_view:
# print("substrates: update rdir=", rdir)
# print("substrates: update rdir=", rdir)
if rdir:
self.output_dir = rdir
# print('update(): self.output_dir = ', self.output_dir)
if self.first_time:
# if True:
self.first_time = False
full_xml_filename = Path(os.path.join(self.output_dir, 'config.xml'))
# print("substrates: update(), config.xml = ",full_xml_filename)
# self.num_svgs = len(glob.glob(os.path.join(self.output_dir, 'snap*.svg')))
# self.num_substrates = len(glob.glob(os.path.join(self.output_dir, 'output*.xml')))
# print("substrates: num_svgs,num_substrates =",self.num_svgs,self.num_substrates)
# argh - no! If no files created, then denom = -1
# self.modulo = int((self.num_svgs - 1) / (self.num_substrates - 1))
# print("substrates: update(): modulo=",self.modulo)
if full_xml_filename.is_file():
tree = ET.parse(str(full_xml_filename)) # this file cannot be overwritten; part of tool distro
xml_root = tree.getroot()
self.svg_delta_t = float(xml_root.find(".//SVG//interval").text)
self.substrate_delta_t = float(xml_root.find(".//full_data//interval").text)
# print("substrates: svg,substrate delta_t values=",self.svg_delta_t,self.substrate_delta_t)
self.modulo = int(self.substrate_delta_t / self.svg_delta_t)
# print("substrates: update(): modulo=",self.modulo)
# all_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml'))) # if the substrates/MCDS
all_files = sorted(glob.glob(os.path.join(self.output_dir, 'snap*.svg'))) # if .svg
if len(all_files) > 0:
last_file = all_files[-1]
self.max_frames.value = int(last_file[-12:-4]) # assumes naming scheme: "snapshot%08d.svg"
else:
substrate_files = sorted(glob.glob(os.path.join(self.output_dir, 'output*.xml')))
if len(substrate_files) > 0:
last_file = substrate_files[-1]
self.max_frames.value = int(last_file[-12:-4])
def download_svg_cb(self):
file_str = os.path.join(self.output_dir, '*.svg')
# print('zip up all ',file_str)
with zipfile.ZipFile('svg.zip', 'w') as myzip:
for f in glob.glob(file_str):
myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive
def download_cb(self):
file_xml = os.path.join(self.output_dir, '*.xml')
file_mat = os.path.join(self.output_dir, '*.mat')
# print('zip up all ',file_str)
with zipfile.ZipFile('mcds.zip', 'w') as myzip:
for f in glob.glob(file_xml):
myzip.write(f, os.path.basename(f)) # 2nd arg avoids full filename path in the archive
for f in glob.glob(file_mat):
myzip.write(f, os.path.basename(f))
def update_max_frames(self,_b):
self.i_plot.children[0].max = self.max_frames.value
# called if user selected different substrate in dropdown
def mcds_field_changed_cb(self, b):
# print("mcds_field_changed_cb: self.mcds_field.value=",self.mcds_field.value)
        if (self.mcds_field.value is None):
return
self.field_index = self.mcds_field.value + 4
field_name = self.field_dict[self.mcds_field.value]
# print('mcds_field_changed_cb: field_name='+ field_name)
# print(self.field_min_max[field_name])
# self.debug_str.value = 'mcds_field_changed_cb: '+ field_name + str(self.field_min_max[field_name])
# self.debug_str.value = 'cb1: '+ str(self.field_min_max)
# BEWARE of these triggering the mcds_field_cb() callback! Hence, the "skip_cb"
self.skip_cb = True
self.cmap_min.value = self.field_min_max[field_name][0]
self.cmap_max.value = self.field_min_max[field_name][1]
self.cmap_fixed_toggle.value = bool(self.field_min_max[field_name][2])
self.skip_cb = False
self.i_plot.update()
# called if user provided different min/max values for colormap, or a different colormap
def mcds_field_cb(self, b):
if self.skip_cb:
return
self.field_index = self.mcds_field.value + 4
field_name = self.field_dict[self.mcds_field.value]
# print('mcds_field_cb: field_name='+ field_name)
# print('mcds_field_cb: '+ field_name)
self.field_min_max[field_name][0] = self.cmap_min.value
self.field_min_max[field_name][1] = self.cmap_max.value
self.field_min_max[field_name][2] = self.cmap_fixed_toggle.value
# print(self.field_min_max[field_name])
# self.debug_str.value = 'mcds_field_cb: ' + field_name + str(self.field_min_max[field_name])
# self.debug_str.value = 'cb2: '+ str(self.field_min_max)
# print('--- cb2: '+ str(self.field_min_max)) #rwh2
# self.cmap_fixed_toggle.value = self.field_min_max[field_name][2]
# field_name = self.mcds_field.options[self.mcds_field.value]
# self.cmap_min.value = self.field_min_max[field_name][0] # oxygen, etc
# self.cmap_max.value = self.field_min_max[field_name][1] # oxygen, etc
# self.field_index = self.mcds_field.value + 4
# print('field_index=',self.field_index)
self.i_plot.update()
#---------------------------------------------------------------------------
def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):
"""
See https://gist.github.com/syrte/592a062c562cd2a98a83
Make a scatter plot of circles.
Similar to plt.scatter, but the size of circles are in data scale.
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
plt.colorbar()
License
--------
This code is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause)
"""
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if 'fc' in kwargs:
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if 'ec' in kwargs:
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if 'ls' in kwargs:
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if 'lw' in kwargs:
kwargs.setdefault('linewidth', kwargs.pop('lw'))
# You can set `facecolor` with an array for each patch,
# while you can only set `facecolors` with a value for all.
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_)
for x_, y_, s_ in zipped]
collection = PatchCollection(patches, **kwargs)
if c is not None:
c = np.broadcast_to(c, zipped.shape).ravel()
collection.set_array(c)
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
# plt.draw_if_interactive()
if c is not None:
plt.sci(collection)
# return collection
#------------------------------------------------------------
# def plot_svg(self, frame, rdel=''):
def plot_svg(self, frame):
# global current_idx, axes_max
global current_frame
current_frame = frame
fname = "snapshot%08d.svg" % frame
full_fname = os.path.join(self.output_dir, fname)
# with debug_view:
# print("plot_svg:", full_fname)
# print("-- plot_svg:", full_fname)
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.")
return
xlist = deque()
ylist = deque()
rlist = deque()
rgb_list = deque()
# print('\n---- ' + fname + ':')
# tree = ET.parse(fname)
tree = ET.parse(full_fname)
root = tree.getroot()
# print('--- root.tag ---')
# print(root.tag)
# print('--- root.attrib ---')
# print(root.attrib)
# print('--- child.tag, child.attrib ---')
numChildren = 0
for child in root:
# print(child.tag, child.attrib)
# print("keys=",child.attrib.keys())
if self.use_defaults and ('width' in child.attrib.keys()):
self.axes_max = float(child.attrib['width'])
# print("debug> found width --> axes_max =", axes_max)
if child.text and "Current time" in child.text:
svals = child.text.split()
# remove the ".00" on minutes
self.title_str += " cells: " + svals[2] + "d, " + svals[4] + "h, " + svals[7][:-3] + "m"
# self.cell_time_mins = int(svals[2])*1440 + int(svals[4])*60 + int(svals[7][:-3])
# self.title_str += " cells: " + str(self.cell_time_mins) + "m" # rwh
# print("width ",child.attrib['width'])
# print('attrib=',child.attrib)
# if (child.attrib['id'] == 'tissue'):
if ('id' in child.attrib.keys()):
# print('-------- found tissue!!')
tissue_parent = child
break
# print('------ search tissue')
cells_parent = None
for child in tissue_parent:
# print('attrib=',child.attrib)
if (child.attrib['id'] == 'cells'):
# print('-------- found cells, setting cells_parent')
cells_parent = child
break
numChildren += 1
num_cells = 0
# print('------ search cells')
for child in cells_parent:
# print(child.tag, child.attrib)
# print('attrib=',child.attrib)
for circle in child: # two circles in each child: outer + nucleus
# circle.attrib={'cx': '1085.59','cy': '1225.24','fill': 'rgb(159,159,96)','r': '6.67717','stroke': 'rgb(159,159,96)','stroke-width': '0.5'}
# print(' --- cx,cy=',circle.attrib['cx'],circle.attrib['cy'])
xval = float(circle.attrib['cx'])
# map SVG coords into comp domain
# xval = (xval-self.svg_xmin)/self.svg_xrange * self.x_range + self.xmin
xval = xval/self.x_range * self.x_range + self.xmin
s = circle.attrib['fill']
# print("s=",s)
# print("type(s)=",type(s))
if (s[0:3] == "rgb"): # if an rgb string, e.g. "rgb(175,175,80)"
rgb = list(map(int, s[4:-1].split(",")))
rgb[:] = [x / 255. for x in rgb]
else: # otherwise, must be a color name
rgb_tuple = mplc.to_rgb(mplc.cnames[s]) # a tuple
rgb = [x for x in rgb_tuple]
# test for bogus x,y locations (rwh TODO: use max of domain?)
too_large_val = 10000.
if (np.fabs(xval) > too_large_val):
print("bogus xval=", xval)
break
yval = float(circle.attrib['cy'])
# yval = (yval - self.svg_xmin)/self.svg_xrange * self.y_range + self.ymin
yval = yval/self.y_range * self.y_range + self.ymin
if (np.fabs(yval) > too_large_val):
print("bogus xval=", xval)
break
rval = float(circle.attrib['r'])
# if (rgb[0] > rgb[1]):
# print(num_cells,rgb, rval)
xlist.append(xval)
ylist.append(yval)
rlist.append(rval)
rgb_list.append(rgb)
# For .svg files with cells that *have* a nucleus, there will be a 2nd
if (not self.show_nucleus):
#if (not self.show_nucleus):
break
num_cells += 1
# if num_cells > 3: # for debugging
# print(fname,': num_cells= ',num_cells," --- debug exit.")
# sys.exit(1)
# break
# print(fname,': num_cells= ',num_cells)
xvals = np.array(xlist)
yvals = np.array(ylist)
rvals = np.array(rlist)
rgbs = np.array(rgb_list)
# print("xvals[0:5]=",xvals[0:5])
# print("rvals[0:5]=",rvals[0:5])
# print("rvals.min, max=",rvals.min(),rvals.max())
# rwh - is this where I change size of render window?? (YES - yipeee!)
# plt.figure(figsize=(6, 6))
# plt.cla()
# if (self.substrates_toggle.value):
self.title_str += " (" + str(num_cells) + " agents)"
# title_str = " (" + str(num_cells) + " agents)"
# else:
# mins= round(int(float(root.find(".//current_time").text))) # TODO: check units = mins
# hrs = int(mins/60)
# days = int(hrs/24)
# title_str = '%dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))
plt.title(self.title_str)
plt.xlim(self.xmin, self.xmax)
plt.ylim(self.ymin, self.ymax)
# plt.xlim(axes_min,axes_max)
# plt.ylim(axes_min,axes_max)
# plt.scatter(xvals,yvals, s=rvals*scale_radius, c=rgbs)
# TODO: make figsize a function of plot_size? What about non-square plots?
# self.fig = plt.figure(figsize=(9, 9))
# axx = plt.axes([0, 0.05, 0.9, 0.9]) # left, bottom, width, height
# axx = fig.gca()
# print('fig.dpi=',fig.dpi) # = 72
# im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])
# ax.xlim(axes_min,axes_max)
# ax.ylim(axes_min,axes_max)
# convert radii to radii in pixels
# ax2 = self.fig.gca()
# N = len(xvals)
# rr_pix = (ax2.transData.transform(np.vstack([rvals, rvals]).T) -
# ax2.transData.transform(np.vstack([np.zeros(N), np.zeros(N)]).T))
# rpix, _ = rr_pix.T
# markers_size = (144. * rpix / self.fig.dpi)**2 # = (2*rpix / fig.dpi * 72)**2
# markers_size = markers_size/4000000.
# print('max=',markers_size.max())
#rwh - temp fix - Ah, error only occurs when "edges" is toggled on
if (self.show_edge):
try:
# plt.scatter(xvals,yvals, s=markers_size, c=rgbs, edgecolor='black', linewidth=0.5)
self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)
# cell_circles = self.circles(xvals,yvals, s=rvals, color=rgbs, edgecolor='black', linewidth=0.5)
# plt.sci(cell_circles)
except (ValueError):
pass
else:
# plt.scatter(xvals,yvals, s=markers_size, c=rgbs)
self.circles(xvals,yvals, s=rvals, color=rgbs)
# if (self.show_tracks):
# for key in self.trackd.keys():
# xtracks = self.trackd[key][:,0]
# ytracks = self.trackd[key][:,1]
# plt.plot(xtracks[0:frame],ytracks[0:frame], linewidth=5)
# plt.xlim(self.axes_min, self.axes_max)
# plt.ylim(self.axes_min, self.axes_max)
# ax.grid(False)
# axx.set_title(title_str)
# plt.title(title_str)
#---------------------------------------------------------------------------
# assume "frame" is cell frame #, unless Cells is togggled off, then it's the substrate frame #
# def plot_substrate(self, frame, grid):
def plot_substrate(self, frame):
# global current_idx, axes_max, gFileId, field_index
# print("plot_substrate(): frame*self.substrate_delta_t = ",frame*self.substrate_delta_t)
# print("plot_substrate(): frame*self.svg_delta_t = ",frame*self.svg_delta_t)
self.title_str = ''
# Recall:
# self.svg_delta_t = config_tab.svg_interval.value
# self.substrate_delta_t = config_tab.mcds_interval.value
# self.modulo = int(self.substrate_delta_t / self.svg_delta_t)
# self.therapy_activation_time = user_params_tab.therapy_activation_time.value
# print("plot_substrate(): pre_therapy: max svg, substrate frames = ",max_svg_frame_pre_therapy, max_substrate_frame_pre_therapy)
# Assume: # .svg files >= # substrate files
# if (self.cells_toggle.value):
# if (self.substrates_toggle.value and frame*self.substrate_delta_t <= self.svg_frame*self.svg_delta_t):
# if (self.substrates_toggle.value and (frame % self.modulo == 0)):
if (self.substrates_toggle.value):
# self.fig = plt.figure(figsize=(14, 15.6))
# self.fig = plt.figure(figsize=(15.0, 12.5))
self.fig = plt.figure(figsize=(self.figsize_width_substrate, self.figsize_height_substrate))
# rwh - funky way to figure out substrate frame for pc4cancerbots (due to user-defined "save_interval*")
# self.cell_time_mins
# self.substrate_frame = int(frame / self.modulo)
if (self.customized_output_freq and (frame > self.max_svg_frame_pre_therapy)):
# max_svg_frame_pre_therapy = int(self.therapy_activation_time/self.svg_delta_t)
# max_substrate_frame_pre_therapy = int(self.therapy_activation_time/self.substrate_delta_t)
self.substrate_frame = self.max_substrate_frame_pre_therapy + (frame - self.max_svg_frame_pre_therapy)
else:
self.substrate_frame = int(frame / self.modulo)
# print("plot_substrate(): self.substrate_frame=",self.substrate_frame)
# if (self.substrate_frame > (self.num_substrates-1)):
# self.substrate_frame = self.num_substrates-1
# print('self.substrate_frame = ',self.substrate_frame)
# if (self.cells_toggle.value):
# self.modulo = int((self.num_svgs - 1) / (self.num_substrates - 1))
# self.substrate_frame = frame % self.modulo
# else:
# self.substrate_frame = frame
fname = "output%08d_microenvironment0.mat" % self.substrate_frame
xml_fname = "output%08d.xml" % self.substrate_frame
# fullname = output_dir_str + fname
# fullname = fname
full_fname = os.path.join(self.output_dir, fname)
# print("--- plot_substrate(): full_fname=",full_fname)
full_xml_fname = os.path.join(self.output_dir, xml_fname)
# self.output_dir = '.'
# if not os.path.isfile(fullname):
if not os.path.isfile(full_fname):
print("Once output files are generated, click the slider.") # No: output00000000_microenvironment0.mat
return
# tree = ET.parse(xml_fname)
tree = ET.parse(full_xml_fname)
xml_root = tree.getroot()
mins = round(int(float(xml_root.find(".//current_time").text))) # TODO: check units = mins
self.substrate_mins= round(int(float(xml_root.find(".//current_time").text))) # TODO: check units = mins
hrs = int(mins/60)
days = int(hrs/24)
self.title_str = 'substrate: %dd, %dh, %dm' % (int(days),(hrs%24), mins - (hrs*60))
# self.title_str = 'substrate: %dm' % (mins ) # rwh
info_dict = {}
# scipy.io.loadmat(fullname, info_dict)
scipy.io.loadmat(full_fname, info_dict)
M = info_dict['multiscale_microenvironment']
# global_field_index = int(mcds_field.value)
# print('plot_substrate: field_index =',field_index)
f = M[self.field_index, :] # 4=tumor cells field, 5=blood vessel density, 6=growth substrate
# plt.clf()
# my_plot = plt.imshow(f.reshape(400,400), cmap='jet', extent=[0,20, 0,20])
# self.fig = plt.figure(figsize=(18.0,15)) # this strange figsize results in a ~square contour plot
# plt.subplot(grid[0:1, 0:1])
# main_ax = self.fig.add_subplot(grid[0:1, 0:1]) # works, but tiny upper-left region
#main_ax = self.fig.add_subplot(grid[0:2, 0:2])
# main_ax = self.fig.add_subplot(grid[0:, 0:2])
#main_ax = self.fig.add_subplot(grid[:-1, 0:]) # nrows, ncols
#main_ax = self.fig.add_subplot(grid[0:, 0:]) # nrows, ncols
#main_ax = self.fig.add_subplot(grid[0:4, 0:]) # nrows, ncols
# main_ax = self.fig.add_subplot(grid[0:3, 0:]) # nrows, ncols
# main_ax = self.fig.add_subplot(111) # nrows, ncols
# plt.rc('font', size=10) # TODO: does this affect the Cell plots fonts too? YES. Not what we want.
# fig.set_tight_layout(True)
# ax = plt.axes([0, 0.05, 0.9, 0.9 ]) #left, bottom, width, height
# ax = plt.axes([0, 0.0, 1, 1 ])
# cmap = plt.cm.viridis # Blues, YlOrBr, ...
# im = ax.imshow(f.reshape(100,100), interpolation='nearest', cmap=cmap, extent=[0,20, 0,20])
# ax.grid(False)
# print("substrates.py: ------- numx, numy = ", self.numx, self.numy )
# if (self.numx == 0): # need to parse vals from the config.xml
# # print("--- plot_substrate(): full_fname=",full_fname)
# fname = os.path.join(self.output_dir, "config.xml")
# tree = ET.parse(fname)
# xml_root = tree.getroot()
# self.xmin = float(xml_root.find(".//x_min").text)
# self.xmax = float(xml_root.find(".//x_max").text)
# dx = float(xml_root.find(".//dx").text)
# self.ymin = float(xml_root.find(".//y_min").text)
# self.ymax = float(xml_root.find(".//y_max").text)
# dy = float(xml_root.find(".//dy").text)
# self.numx = math.ceil( (self.xmax - self.xmin) / dx)
# self.numy = math.ceil( (self.ymax - self.ymin) / dy)
try:
xgrid = M[0, :].reshape(self.numy, self.numx)
ygrid = M[1, :].reshape(self.numy, self.numx)
except:
print("substrates.py: mismatched mesh size for reshape: numx,numy=",self.numx, self.numy)
pass
# xgrid = M[0, :].reshape(self.numy, self.numx)
# ygrid = M[1, :].reshape(self.numy, self.numx)
num_contours = 15
levels = MaxNLocator(nbins=num_contours).tick_values(self.cmap_min.value, self.cmap_max.value)
contour_ok = True
if (self.cmap_fixed_toggle.value):
try:
# substrate_plot = main_ax.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.field_cmap.value, fontsize=self.fontsize)
substrate_plot = plt.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy, self.numx), levels=levels, extend='both', cmap=self.field_cmap.value, fontsize=self.fontsize)
except:
contour_ok = False
# print('got error on contourf 1.')
else:
try:
# substrate_plot = main_ax.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.field_cmap.value)
substrate_plot = plt.contourf(xgrid, ygrid, M[self.field_index, :].reshape(self.numy,self.numx), num_contours, cmap=self.field_cmap.value)
except:
contour_ok = False
# print('got error on contourf 2.')
if (contour_ok):
# main_ax.set_title(self.title_str, fontsize=self.fontsize)
plt.title(self.title_str, fontsize=self.fontsize)
# main_ax.tick_params(labelsize=self.fontsize)
# cbar = plt.colorbar(my_plot)
# cbar = self.fig.colorbar(substrate_plot, ax=main_ax)
cbar = self.fig.colorbar(substrate_plot)
cbar.ax.tick_params(labelsize=self.fontsize)
# cbar = main_ax.colorbar(my_plot)
# cbar.ax.tick_params(labelsize=self.fontsize)
# axes_min = 0
# axes_max = 2000
# main_ax.set_xlim([self.xmin, self.xmax])
# main_ax.set_ylim([self.ymin, self.ymax])
plt.xlim(self.xmin, self.xmax)
plt.ylim(self.ymin, self.ymax)
# if (frame == 0): # maybe allow substrate grid display later
# xs = np.linspace(self.xmin,self.xmax,self.numx)
# ys = np.linspace(self.ymin,self.ymax,self.numy)
# hlines = np.column_stack(np.broadcast_arrays(xs[0], ys, xs[-1], ys))
# vlines = np.column_stack(np.broadcast_arrays(xs, ys[0], xs, ys[-1]))
# grid_lines = np.concatenate([hlines, vlines]).reshape(-1, 2, 2)
# line_collection = LineCollection(grid_lines, color="gray", linewidths=0.5)
# # ax = main_ax.gca()
# main_ax.add_collection(line_collection)
# # ax.set_xlim(xs[0], xs[-1])
# # ax.set_ylim(ys[0], ys[-1])
# Now plot the cells (possibly on top of the substrate)
if (self.cells_toggle.value):
if (not self.substrates_toggle.value):
# self.fig = plt.figure(figsize=(12, 12))
self.fig = plt.figure(figsize=(self.figsize_width_svg, self.figsize_height_svg))
# self.plot_svg(frame)
self.svg_frame = frame
# print('plot_svg with frame=',self.svg_frame)
self.plot_svg(self.svg_frame)
# plt.subplot(grid[2, 0])
# oxy_ax = self.fig.add_subplot(grid[2:, 0:1])
#oxy_ax = self.fig.add_subplot(grid[:2, 2:])
#oxy_ax = self.fig.add_subplot(grid[:-1, 0:2]) # nrows, ncols
#oxy_ax = self.fig.add_subplot(grid[2:3, 0:1]) # nrows, ncols
# oxy_ax = self.fig.add_subplot(grid[4:4, 0:1]) # invalid
# main_ax = self.fig.add_subplot(grid[0:1, 0:1])
# experiment with small plot of oxygen (or whatever)
# oxy_ax = self.fig.add_subplot(grid[3:4, 0:1]) # nrows, ncols
# x = np.linspace(0, 500)
# oxy_ax.plot(x, 300*np.sin(x))
#---------------------------------------------------------------------------
# def plot_plots(self, frame):
# # if (self.first_time):
# # self.svg_delta_t = 1
# # self.substrate_delta_t = 1
# # self.first_time = False
# if (self.substrates_toggle.value):
# self.fig = plt.figure(figsize=(14, 15.6))
# else: # only cells being displayed (maybe)
# self.fig = plt.figure(figsize=(12, 12))
# # grid = plt.GridSpec(4, 3, wspace=0.10, hspace=0.2) # (nrows, ncols)
# # self.plot_substrate(frame, grid)
# self.plot_substrate(frame)
# # self.plot_svg(frame)
|
[] |
[] |
[
"USER",
"SESSIONDIR",
"SESSION"
] |
[]
|
["USER", "SESSIONDIR", "SESSION"]
|
python
| 3 | 0 | |
vendor/github.com/coreos/etcd/etcdctl/ctlv3/command/watch_command.go
|
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package command
import (
"bufio"
"context"
"errors"
"fmt"
"os"
"os/exec"
"strings"
"github.com/coreos/etcd/clientv3"
"github.com/spf13/cobra"
)
var (
errBadArgsNum = errors.New("bad number of arguments")
errBadArgsNumConflictEnv = errors.New("bad number of arguments (found conflicting environment key)")
errBadArgsNumSeparator = errors.New("bad number of arguments (found separator --, but no commands)")
errBadArgsInteractiveWatch = errors.New("args[0] must be 'watch' for interactive calls")
)
var (
watchRev int64
watchPrefix bool
watchInteractive bool
watchPrevKey bool
)
// NewWatchCommand returns the cobra command for "watch".
func NewWatchCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "watch [options] [key or prefix] [range_end] [--] [exec-command arg1 arg2 ...]",
Short: "Watches events stream on keys or prefixes",
Run: watchCommandFunc,
}
cmd.Flags().BoolVarP(&watchInteractive, "interactive", "i", false, "Interactive mode")
cmd.Flags().BoolVar(&watchPrefix, "prefix", false, "Watch on a prefix if prefix is set")
cmd.Flags().Int64Var(&watchRev, "rev", 0, "Revision to start watching")
cmd.Flags().BoolVar(&watchPrevKey, "prev-kv", false, "get the previous key-value pair before the event happens")
return cmd
}
// watchCommandFunc executes the "watch" command.
func watchCommandFunc(cmd *cobra.Command, args []string) {
envKey, envRange := os.Getenv("ETCDCTL_WATCH_KEY"), os.Getenv("ETCDCTL_WATCH_RANGE_END")
if envKey == "" && envRange != "" {
ExitWithError(ExitBadArgs, fmt.Errorf("ETCDCTL_WATCH_KEY is empty but got ETCDCTL_WATCH_RANGE_END=%q", envRange))
}
if watchInteractive {
watchInteractiveFunc(cmd, os.Args, envKey, envRange)
return
}
watchArgs, execArgs, err := parseWatchArgs(os.Args, args, envKey, envRange, false)
if err != nil {
ExitWithError(ExitBadArgs, err)
}
c := mustClientFromCmd(cmd)
wc, err := getWatchChan(c, watchArgs)
if err != nil {
ExitWithError(ExitBadArgs, err)
}
printWatchCh(c, wc, execArgs)
if err = c.Close(); err != nil {
ExitWithError(ExitBadConnection, err)
}
ExitWithError(ExitInterrupted, fmt.Errorf("watch is canceled by the server"))
}
func watchInteractiveFunc(cmd *cobra.Command, osArgs []string, envKey, envRange string) {
c := mustClientFromCmd(cmd)
reader := bufio.NewReader(os.Stdin)
for {
l, err := reader.ReadString('\n')
if err != nil {
ExitWithError(ExitInvalidInput, fmt.Errorf("Error reading watch request line: %v", err))
}
l = strings.TrimSuffix(l, "\n")
args := argify(l)
if len(args) < 2 && envKey == "" {
fmt.Fprintf(os.Stderr, "Invalid command %s (command type or key is not provided)\n", l)
continue
}
if args[0] != "watch" {
fmt.Fprintf(os.Stderr, "Invalid command %s (only support watch)\n", l)
continue
}
watchArgs, execArgs, perr := parseWatchArgs(osArgs, args, envKey, envRange, true)
if perr != nil {
ExitWithError(ExitBadArgs, perr)
}
ch, err := getWatchChan(c, watchArgs)
if err != nil {
fmt.Fprintf(os.Stderr, "Invalid command %s (%v)\n", l, err)
continue
}
go printWatchCh(c, ch, execArgs)
}
}
func getWatchChan(c *clientv3.Client, args []string) (clientv3.WatchChan, error) {
if len(args) < 1 {
return nil, errBadArgsNum
}
key := args[0]
opts := []clientv3.OpOption{clientv3.WithRev(watchRev)}
if len(args) == 2 {
if watchPrefix {
return nil, fmt.Errorf("`range_end` and `--prefix` are mutually exclusive")
}
opts = append(opts, clientv3.WithRange(args[1]))
}
if watchPrefix {
opts = append(opts, clientv3.WithPrefix())
}
if watchPrevKey {
opts = append(opts, clientv3.WithPrevKV())
}
return c.Watch(clientv3.WithRequireLeader(context.Background()), key, opts...), nil
}
func printWatchCh(c *clientv3.Client, ch clientv3.WatchChan, execArgs []string) {
for resp := range ch {
if resp.Canceled {
fmt.Fprintf(os.Stderr, "watch was canceled (%v)\n", resp.Err())
}
display.Watch(resp)
if len(execArgs) > 0 {
for _, ev := range resp.Events {
cmd := exec.CommandContext(c.Ctx(), execArgs[0], execArgs[1:]...)
cmd.Env = os.Environ()
cmd.Env = append(cmd.Env, fmt.Sprintf("ETCD_WATCH_REVISION=%d", resp.Header.Revision))
cmd.Env = append(cmd.Env, fmt.Sprintf("ETCD_WATCH_EVENT_TYPE=%q", ev.Type))
cmd.Env = append(cmd.Env, fmt.Sprintf("ETCD_WATCH_KEY=%q", ev.Kv.Key))
cmd.Env = append(cmd.Env, fmt.Sprintf("ETCD_WATCH_VALUE=%q", ev.Kv.Value))
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
if err := cmd.Run(); err != nil {
fmt.Fprintf(os.Stderr, "command %q error (%v)\n", execArgs, err)
os.Exit(1)
}
}
}
}
}
// "commandArgs" is the command arguments after "spf13/cobra" parses
// all "watch" command flags, strips out special characters (e.g. "--").
// "orArgs" is the raw arguments passed to "watch" command
// (e.g. ./bin/etcdctl watch foo --rev 1 bar).
// "--" characters are invalid arguments for "spf13/cobra" library,
// so no need to handle such cases.
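// Illustrative example (not part of the original comment): for
//   ./bin/etcdctl watch foo bar -- echo hello
// this is expected to return watchArgs=["foo","bar"] and execArgs=["echo","hello"];
// flags such as --rev are consumed by the cobra flag parsing (and re-parsed
// here in interactive mode) rather than returned in watchArgs.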
func parseWatchArgs(osArgs, commandArgs []string, envKey, envRange string, interactive bool) (watchArgs []string, execArgs []string, err error) {
rawArgs := make([]string, len(osArgs))
copy(rawArgs, osArgs)
watchArgs = make([]string, len(commandArgs))
copy(watchArgs, commandArgs)
// remove preceding commands (e.g. ./bin/etcdctl watch)
// handle "./bin/etcdctl watch foo -- echo watch event"
for idx := range rawArgs {
if rawArgs[idx] == "watch" {
rawArgs = rawArgs[idx+1:]
break
}
}
// remove preceding commands (e.g. "watch foo bar" in interactive mode)
// handle "./bin/etcdctl watch foo -- echo watch event"
if interactive {
if watchArgs[0] != "watch" {
// "watch" not found
watchPrefix, watchRev, watchPrevKey = false, 0, false
return nil, nil, errBadArgsInteractiveWatch
}
watchArgs = watchArgs[1:]
}
execIdx, execExist := 0, false
if !interactive {
for execIdx = range rawArgs {
if rawArgs[execIdx] == "--" {
execExist = true
break
}
}
if execExist && execIdx == len(rawArgs)-1 {
// "watch foo bar --" should error
return nil, nil, errBadArgsNumSeparator
}
// "watch" with no argument should error
if !execExist && len(rawArgs) < 1 && envKey == "" {
return nil, nil, errBadArgsNum
}
if execExist && envKey != "" {
// "ETCDCTL_WATCH_KEY=foo watch foo -- echo 1" should error
// (watchArgs==["foo","echo","1"])
widx, ridx := len(watchArgs)-1, len(rawArgs)-1
for ; widx >= 0; widx-- {
if watchArgs[widx] == rawArgs[ridx] {
ridx--
continue
}
// watchArgs has extra:
// ETCDCTL_WATCH_KEY=foo watch foo -- echo 1
// watchArgs: foo echo 1
if ridx == execIdx {
return nil, nil, errBadArgsNumConflictEnv
}
}
}
// check conflicting arguments
// e.g. "watch --rev 1 -- echo Hello World" has no conflict
if !execExist && len(watchArgs) > 0 && envKey != "" {
// "ETCDCTL_WATCH_KEY=foo watch foo" should error
// (watchArgs==["foo"])
return nil, nil, errBadArgsNumConflictEnv
}
} else {
for execIdx = range watchArgs {
if watchArgs[execIdx] == "--" {
execExist = true
break
}
}
if execExist && execIdx == len(watchArgs)-1 {
// "watch foo bar --" should error
watchPrefix, watchRev, watchPrevKey = false, 0, false
return nil, nil, errBadArgsNumSeparator
}
flagset := NewWatchCommand().Flags()
if err := flagset.Parse(watchArgs); err != nil {
watchPrefix, watchRev, watchPrevKey = false, 0, false
return nil, nil, err
}
pArgs := flagset.Args()
// "watch" with no argument should error
if !execExist && envKey == "" && len(pArgs) < 1 {
watchPrefix, watchRev, watchPrevKey = false, 0, false
return nil, nil, errBadArgsNum
}
// check conflicting arguments
// e.g. "watch --rev 1 -- echo Hello World" has no conflict
if !execExist && len(pArgs) > 0 && envKey != "" {
// "ETCDCTL_WATCH_KEY=foo watch foo" should error
// (watchArgs==["foo"])
watchPrefix, watchRev, watchPrevKey = false, 0, false
return nil, nil, errBadArgsNumConflictEnv
}
}
argsWithSep := rawArgs
if interactive {
// interactive mode directly passes "--" to the command args
argsWithSep = watchArgs
}
idx, foundSep := 0, false
for idx = range argsWithSep {
if argsWithSep[idx] == "--" {
foundSep = true
break
}
}
if foundSep {
execArgs = argsWithSep[idx+1:]
}
if interactive {
flagset := NewWatchCommand().Flags()
if err := flagset.Parse(argsWithSep); err != nil {
return nil, nil, err
}
watchArgs = flagset.Args()
watchPrefix, err = flagset.GetBool("prefix")
if err != nil {
return nil, nil, err
}
watchRev, err = flagset.GetInt64("rev")
if err != nil {
return nil, nil, err
}
watchPrevKey, err = flagset.GetBool("prev-kv")
if err != nil {
return nil, nil, err
}
}
// "ETCDCTL_WATCH_KEY=foo watch -- echo hello"
// should translate "watch foo -- echo hello"
// (watchArgs=["echo","hello"] should be ["foo","echo","hello"])
if envKey != "" {
ranges := []string{envKey}
if envRange != "" {
ranges = append(ranges, envRange)
}
watchArgs = append(ranges, watchArgs...)
}
if !foundSep {
return watchArgs, nil, nil
}
// "watch foo bar --rev 1 -- echo hello" or "watch foo --rev 1 bar -- echo hello",
// then "watchArgs" is "foo bar echo hello"
// so need ignore args after "argsWithSep[idx]", which is "--"
endIdx := 0
for endIdx = len(watchArgs) - 1; endIdx >= 0; endIdx-- {
if watchArgs[endIdx] == argsWithSep[idx+1] {
break
}
}
watchArgs = watchArgs[:endIdx]
return watchArgs, execArgs, nil
}
|
[
"\"ETCDCTL_WATCH_KEY\"",
"\"ETCDCTL_WATCH_RANGE_END\""
] |
[] |
[
"ETCDCTL_WATCH_KEY",
"ETCDCTL_WATCH_RANGE_END"
] |
[]
|
["ETCDCTL_WATCH_KEY", "ETCDCTL_WATCH_RANGE_END"]
|
go
| 2 | 0 | |
app/app/settings.py
|
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 4.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-+g=^5wpckkb07&t_8@5yu@v6tqi*@wsy$5oop3#ki#y50mqnnn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
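# Example (illustrative only, not part of the original settings): the database
# connection is supplied entirely through environment variables, e.g. exported
# by a docker-compose service definition:
#   DB_HOST=db
#   DB_NAME=app
#   DB_USER=someuser
#   DB_PASS=changeme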
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
AUTH_USER_MODEL = 'core.User'
|
[] |
[] |
[
"DB_PASS",
"DB_USER",
"DB_NAME",
"DB_HOST"
] |
[]
|
["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"]
|
python
| 4 | 0 | |
app/interface/main/growup/dao/newbiedao/dao_test.go
|
package newbiedao
import (
"flag"
"go-common/app/interface/main/growup/conf"
"os"
"testing"
)
var (
d *Dao
)
func TestMain(m *testing.M) {
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "mobile.studio.growup-interface")
flag.Set("conf_token", "c68ad4f01bc8c39a3fa6242623e79ffb")
flag.Set("tree_id", "13584")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../../cmd/growup-interface.toml")
}
if os.Getenv("UT_LOCAL_TEST") != "" {
flag.Set("conf", "../../cmd/growup-interface.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
os.Exit(m.Run())
}
|
[
"\"DEPLOY_ENV\"",
"\"UT_LOCAL_TEST\""
] |
[] |
[
"DEPLOY_ENV",
"UT_LOCAL_TEST"
] |
[]
|
["DEPLOY_ENV", "UT_LOCAL_TEST"]
|
go
| 2 | 0 | |
plugin/weather.go
|
package plugin
import (
"encoding/json"
"io/ioutil"
"net/http"
"os"
"text/template"
)
var (
URL = "https://devapi.qweather.com/v7/"
Key = os.Getenv("QWEATHER_KEY")
)
type WeatherPlugin struct {
key string
}
func NewWeatherPlugin(key string) *WeatherPlugin {
return &WeatherPlugin{
key: key,
}
}
type WeatherResult struct {
Code string `json:"code"`
UpdateTime string `json:"updateTime"`
FxLink string `json:"fxLink"`
Now struct {
ObsTime string `json:"obsTime"`
Temp string `json:"temp"`
FeelsLike string `json:"feelsLike"`
Icon string `json:"icon"`
Text string `json:"text"`
Wind360 string `json:"wind360"`
WindDir string `json:"windDir"`
WindScale string `json:"windScale"`
WindSpeed string `json:"windSpeed"`
Humidity string `json:"humidity"`
Precip string `json:"precip"`
Pressure string `json:"pressure"`
Vis string `json:"vis"`
Cloud string `json:"cloud"`
Dew string `json:"dew"`
} `json:"now"`
Refer struct {
Sources []string `json:"sources"`
License []string `json:"license"`
} `json:"refer"`
}
func (plugin *WeatherPlugin) Name() string {
return "Weather Plugin"
}
func (plugin *WeatherPlugin) Funcs() template.FuncMap {
return template.FuncMap{
"weather": plugin.Weather,
}
}
func (plugin *WeatherPlugin) Weather(location string) *WeatherResult {
client := &http.Client{}
req, _ := http.NewRequest(http.MethodGet, URL+"weather/now", nil)
q := req.URL.Query()
q.Add("key", Key)
q.Add("location", location)
req.URL.RawQuery = q.Encode()
resp, err := client.Do(req)
	if err != nil {
		return nil
	}
	// Close the response body so the underlying connection is not leaked.
	defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil
}
var result WeatherResult
err = json.Unmarshal(body, &result)
if err != nil {
return nil
}
return &result
}
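// Usage sketch (illustrative, not part of the original file): Funcs() registers
// this method under the template function name "weather", so a template could
// render, for example:
//   {{ with weather "101010100" }}{{ .Now.Temp }}°C, {{ .Now.Text }}{{ end }}
// The location argument format (here a QWeather location ID) is an assumption
// and depends on what the QWeather "weather/now" endpoint expects.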
|
[
"\"QWEATHER_KEY\""
] |
[] |
[
"QWEATHER_KEY"
] |
[]
|
["QWEATHER_KEY"]
|
go
| 1 | 0 | |
buildbox/s3_uploader.go
|
package buildbox
import (
"io/ioutil"
"github.com/crowdmob/goamz/s3"
"github.com/crowdmob/goamz/aws"
"os"
"fmt"
"strings"
"errors"
)
type S3Uploader struct {
// The destination which includes the S3 bucket name
// and the path.
// s3://my-bucket-name/foo/bar
Destination string
// The S3 Bucket we're uploading these files to
Bucket *s3.Bucket
}
func (u *S3Uploader) Setup(destination string) (error) {
u.Destination = destination
// Setup the AWS authentication
auth, err := aws.EnvAuth()
if err != nil {
fmt.Printf("Error loading AWS credentials: %s", err)
os.Exit(1)
}
// Decide what region to use
// https://github.com/crowdmob/goamz/blob/master/aws/regions.go
// I think S3 defaults to us-east-1
regionName := "us-east-1"
if os.Getenv("AWS_DEFAULT_REGION") != "" {
regionName = os.Getenv("AWS_DEFAULT_REGION")
}
// Check to make sure the region exists
region, ok := aws.Regions[regionName]
if ok == false {
return errors.New("Unknown AWS Region `" + regionName + "`")
}
// Find the bucket
s3Client := s3.New(auth, region)
bucket := s3Client.Bucket(u.bucketName())
// If the list doesn't return an error, then we've got our
// bucket
_, err = bucket.List("", "", "", 0)
if err != nil {
return errors.New("Could not find bucket `" + u.bucketName() + "` in region `" + region.Name + "` (" + err.Error() + ")")
}
u.Bucket = bucket
return nil
}
func (u *S3Uploader) URL(artifact *Artifact) (string) {
return "http://" + u.bucketName() + ".s3.amazonaws.com/" + u.artifactPath(artifact)
}
func (u *S3Uploader) Upload(artifact *Artifact) (error) {
Perms := s3.ACL("public-read")
data, err := ioutil.ReadFile(artifact.AbsolutePath)
if err != nil {
return errors.New("Failed to read file " + artifact.AbsolutePath + " (" + err.Error() + ")")
}
err = u.Bucket.Put(u.artifactPath(artifact), data, artifact.MimeType(), Perms, s3.Options{})
if err != nil {
return errors.New("Failed to PUT file " + u.artifactPath(artifact) + " (" + err.Error() + ")")
}
return nil
}
// func (u S3Uploader) Download(file string, bucket *s3.Bucket, path string) {
// data, err := bucket.Get(path)
// if err != nil {
// panic(err.Error())
// }
// perms := os.FileMode(0644)
//
// err = ioutil.WriteFile(file, data, perms)
// if err != nil {
// panic(err.Error())
// }
// }
func (u *S3Uploader) artifactPath(artifact *Artifact) (string) {
parts := []string{u.bucketPath(), artifact.Path}
return strings.Join(parts, "/")
}
func (u *S3Uploader) bucketPath() string {
return strings.Join(u.destinationParts()[1:len(u.destinationParts())], "/")
}
func (u *S3Uploader) bucketName() (string) {
return u.destinationParts()[0]
}
func (u *S3Uploader) destinationParts() []string {
// TrimPrefix removes the literal "s3://" scheme; TrimLeft would strip any
// leading 's', '3', ':' or '/' characters and could mangle bucket names.
trimmed := strings.TrimPrefix(u.Destination, "s3://")
return strings.Split(trimmed, "/")
}
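// Worked example (illustrative, derived from the helpers above): with
// Destination = "s3://my-bucket-name/foo/bar" the accessors resolve to
//
//	destinationParts() -> ["my-bucket-name", "foo", "bar"]
//	bucketName()       -> "my-bucket-name"
//	bucketPath()       -> "foo/bar"
//	artifactPath(a)    -> "foo/bar/" + a.Path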
|
[
"\"AWS_DEFAULT_REGION\"",
"\"AWS_DEFAULT_REGION\""
] |
[] |
[
"AWS_DEFAULT_REGION"
] |
[]
|
["AWS_DEFAULT_REGION"]
|
go
| 1 | 0 | |
calico_cni/tests/unit/test_ipam.py
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import unittest
from mock import patch, MagicMock, Mock, call, ANY
from netaddr import IPAddress, IPNetwork
from subprocess32 import CalledProcessError, Popen, PIPE
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
from StringIO import StringIO
import pycalico.netns
from pycalico.ipam import IPAMClient
from pycalico.datastore_datatypes import IPPool, Endpoint
from pycalico.datastore_errors import MultipleEndpointsMatch
from calico_cni.constants import *
from calico_cni.ipam import IpamPlugin, _exit_on_error, main
class CniIpamTest(unittest.TestCase):
"""
Test class for IPAM plugin.
"""
def setUp(self):
"""
Per-test setup method.
"""
self.container_id = "ff3afbd1-17ad-499d-b514-72438c009e81"
self.network_config = {
"name": "ut-network",
"type": "calico",
"ipam": {
"type": "calico-ipam",
"subnet": "10.22.0.0/16",
"routes": [{"dst": "0.0.0.0/0"}],
"range-start": "",
"range-end": ""
}
}
self.env = {
CNI_CONTAINERID_ENV: self.container_id,
CNI_IFNAME_ENV: "eth0",
CNI_ARGS_ENV: "",
CNI_COMMAND_ENV: CNI_CMD_ADD,
CNI_PATH_ENV: "/usr/bin/rkt/",
CNI_NETNS_ENV: "netns",
}
# Create the CniPlugin to test.
self.plugin = IpamPlugin(self.network_config, self.env)
# Mock out the datastore client.
self.m_datastore_client = MagicMock(spec=IPAMClient)
self.plugin.datastore_client = self.m_datastore_client
@patch('sys.stdout', new_callable=StringIO)
def test_execute_add_mainline(self, m_stdout):
# Mock
self.plugin.command = CNI_CMD_ADD
ip4 = IPNetwork("1.2.3.4/32")
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin._assign_address = MagicMock(spec=self.plugin._assign_address)
self.plugin._assign_address.return_value = ip4, ip6
# Call
self.plugin.execute()
# Assert
expected = json.dumps({"ip4": {"ip": "1.2.3.4/32"},
"ip6": {"ip": "ba:ad::be:ef/128"}})
assert_equal(m_stdout.getvalue().strip(), expected)
@patch('sys.stdout', new_callable=StringIO)
def test_execute_del_mainline(self, m_stdout):
# Mock
self.plugin.command = CNI_CMD_DELETE
# Call
self.plugin.execute()
# Assert
expected = ''
assert_equal(m_stdout.getvalue().strip(), expected)
self.plugin.datastore_client.release_ip_by_handle.assert_called_once_with(handle_id=self.plugin.container_id)
@patch('sys.stdout', new_callable=StringIO)
def test_execute_del_not_assigned(self, m_stdout):
# Mock
self.plugin.command = CNI_CMD_DELETE
self.plugin.datastore_client.release_ip_by_handle.side_effect = KeyError
# Call
self.plugin.execute()
# Assert
expected = ''
assert_equal(m_stdout.getvalue().strip(), expected)
def test_assign_address_mainline(self):
# Mock
ip4 = IPNetwork("1.2.3.4/32")
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [ip4], [ip6]
# Args
handle_id = "abcdef12345"
# Call
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
# Assert
assert_equal(ip4, ret_ip4)
assert_equal(ip6, ret_ip6)
def test_assign_address_runtime_err(self):
# Mock
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.side_effect = RuntimeError
# Args
handle_id = "abcdef12345"
# Call
with assert_raises(SystemExit) as err:
self.plugin._assign_address(handle_id)
e = err.exception
assert_equal(e.code, ERR_CODE_FAILED_ASSIGNMENT)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_assign_address_no_ipv4(self, m_exit):
# Mock
ip6 = IPNetwork("ba:ad::be:ef/128")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [], [ip6]
# Args
handle_id = "abcdef12345"
# Call
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
# Assert
m_exit.assert_called_once_with(code=ERR_CODE_FAILED_ASSIGNMENT, message=ANY, details=ANY)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_assign_address_no_ipv6(self, m_exit):
# Mock
ip4 = IPNetwork("1.2.3.4/32")
self.plugin.datastore_client.auto_assign_ips = MagicMock(spec=self.plugin._assign_address)
self.plugin.datastore_client.auto_assign_ips.return_value = [ip4], []
# Args
handle_id = "abcdef12345"
# Call
ret_ip4, ret_ip6 = self.plugin._assign_address(handle_id)
# Assert
m_exit.assert_called_once_with(code=ERR_CODE_FAILED_ASSIGNMENT, message=ANY, details=ANY)
def test_parse_config_no_command(self):
# Delete command.
del self.plugin.env[CNI_COMMAND_ENV]
# Call
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_parse_config_invalid_command(self):
# Change command.
self.plugin.env[CNI_COMMAND_ENV] = "invalid"
# Call
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_parse_config_invalid_container_id(self):
# Delete container ID.
del self.plugin.env[CNI_CONTAINERID_ENV]
# Call
with assert_raises(SystemExit) as err:
self.plugin._parse_config()
e = err.exception
assert_equal(e.code, ERR_CODE_INVALID_ARGUMENT)
def test_exit_on_error(self):
with assert_raises(SystemExit) as err:
_exit_on_error(1, "message", "details")
e = err.exception
assert_equal(e.code, 1)
@patch("calico_cni.ipam.os", autospec=True)
@patch("calico_cni.ipam.sys", autospec=True)
@patch("calico_cni.ipam.IpamPlugin", autospec=True)
@patch("calico_cni.ipam.configure_logging", autospec=True)
def test_main(self, m_conf_log, m_plugin, m_sys, m_os):
# Mock
m_os.environ = self.env
m_sys.stdin.readlines.return_value = json.dumps(self.network_config)
m_plugin.reset_mock()
# Call
main()
# Assert
m_plugin.assert_called_once_with(self.network_config, self.env)
m_plugin(self.env, self.network_config).execute.assert_called_once_with()
@patch("calico_cni.ipam.os", autospec=True)
@patch("calico_cni.ipam.sys", autospec=True)
@patch("calico_cni.ipam.IpamPlugin", autospec=True)
@patch("calico_cni.ipam.configure_logging", autospec=True)
@patch("calico_cni.ipam._exit_on_error", autospec=True)
def test_main_execute_error(self, m_exit, m_conf_log, m_plugin, m_sys, m_os):
# Mock
m_os.environ = self.env
m_sys.stdin.readlines.return_value = json.dumps(self.network_config)
m_plugin.reset_mock()
m_plugin(self.network_config, self.env).execute.side_effect = Exception
# Call
main()
# Assert
m_exit.assert_called_once_with(ERR_CODE_UNHANDLED, message=ANY, details=ANY)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tests/helper_functions.py
|
import os
import random
from unittest import mock
import requests
import string
import time
import signal
import socket
import subprocess
import uuid
import sys
import yaml
import pandas as pd
import pytest
import mlflow
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
import mlflow.pyfunc
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.file_utils import read_yaml, write_yaml
from mlflow.utils.environment import _get_pip_deps, _CONSTRAINTS_FILE_NAME
LOCALHOST = "127.0.0.1"
def get_safe_port():
"""Returns an ephemeral port that is very likely to be free to bind to."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((LOCALHOST, 0))
port = sock.getsockname()[1]
sock.close()
return port
def random_int(lo=1, hi=1e10):
return random.randint(lo, hi)
def random_str(size=10, chars=string.ascii_uppercase + string.digits):
return "".join(random.choice(chars) for _ in range(size))
def random_file(ext):
return "temp_test_%d.%s" % (random_int(), ext)
def score_model_in_sagemaker_docker_container(
model_uri,
data,
content_type,
flavor=mlflow.pyfunc.FLAVOR_NAME,
activity_polling_timeout_seconds=500,
):
"""
:param model_uri: URI to the model to be served.
:param data: The data to send to the docker container for testing. This is either a
Pandas dataframe or string of the format specified by `content_type`.
:param content_type: The type of the data to send to the docker container for testing. This is
one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`.
:param flavor: Model flavor to be deployed.
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
proc = _start_scoring_proc(
cmd=["mlflow", "sagemaker", "run-local", "-m", model_uri, "-p", "5000", "-f", flavor],
env=env,
)
return _evaluate_scoring_proc(proc, 5000, data, content_type, activity_polling_timeout_seconds)
def pyfunc_build_image(model_uri, extra_args=None):
"""
Builds a docker image containing the specified model, returning the name of the image.
:param model_uri: URI of model, e.g. runs:/some-run-id/run-relative/path/to/model
:param extra_args: List of extra args to pass to `mlflow models build-docker` command
"""
name = uuid.uuid4().hex
cmd = ["mlflow", "models", "build-docker", "-m", model_uri, "-n", name]
if extra_args:
cmd += extra_args
p = subprocess.Popen(cmd,)
assert p.wait() == 0, "Failed to build docker image to serve model from %s" % model_uri
return name
def pyfunc_serve_from_docker_image(image_name, host_port, extra_args=None):
"""
Serves a model from a docker container, exposing it as an endpoint at the specified port
on the host machine. Returns a handle (Popen object) to the server process.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
scoring_cmd = ["docker", "run", "-p", "%s:8080" % host_port, image_name]
if extra_args is not None:
scoring_cmd += extra_args
return _start_scoring_proc(cmd=scoring_cmd, env=env)
def pyfunc_serve_from_docker_image_with_env_override(
image_name, host_port, gunicorn_opts, extra_args=None
):
"""
Serves a model from a docker container, exposing it as an endpoint at the specified port
on the host machine. Returns a handle (Popen object) to the server process.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
scoring_cmd = [
"docker",
"run",
"-e",
"GUNICORN_CMD_ARGS=%s" % gunicorn_opts,
"-p",
"%s:8080" % host_port,
image_name,
]
if extra_args is not None:
scoring_cmd += extra_args
return _start_scoring_proc(cmd=scoring_cmd, env=env)
def pyfunc_serve_and_score_model(
model_uri,
data,
content_type,
activity_polling_timeout_seconds=500,
extra_args=None,
stdout=sys.stdout,
):
"""
:param model_uri: URI to the model to be served.
:param data: The data to send to the pyfunc server for testing. This is either a
Pandas dataframe or string of the format specified by `content_type`.
:param content_type: The type of the data to send to the pyfunc server for testing. This is
one of `mlflow.pyfunc.scoring_server.CONTENT_TYPES`.
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
:param extra_args: A list of extra arguments to pass to the pyfunc scoring server command. For
example, passing ``extra_args=["--no-conda"]`` will pass the ``--no-conda``
flag to the scoring server to ensure that conda environment activation
is skipped.
"""
env = dict(os.environ)
env.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
env.update(MLFLOW_TRACKING_URI=mlflow.get_tracking_uri())
env.update(MLFLOW_HOME=_get_mlflow_home())
port = get_safe_port()
scoring_cmd = [
"mlflow",
"models",
"serve",
"-m",
model_uri,
"-p",
str(port),
"--install-mlflow",
]
if extra_args is not None:
scoring_cmd += extra_args
proc = _start_scoring_proc(cmd=scoring_cmd, env=env, stdout=stdout, stderr=stdout)
return _evaluate_scoring_proc(proc, port, data, content_type, activity_polling_timeout_seconds)
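# Usage sketch (hypothetical model URI and data, for illustration only):
#
#   df = pd.DataFrame({"x": [1, 2, 3]})
#   resp = pyfunc_serve_and_score_model(
#       model_uri="runs:/<run-id>/model",
#       data=df,
#       content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
#       extra_args=["--no-conda"],
#   )
#   assert resp.status_code == 200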
def _get_mlflow_home():
"""
:return: The path to the MLflow installation root directory
"""
mlflow_module_path = os.path.dirname(os.path.abspath(mlflow.__file__))
# The MLflow root directory is one level above the mlflow module location
return os.path.join(mlflow_module_path, os.pardir)
def _start_scoring_proc(cmd, env, stdout=sys.stdout, stderr=sys.stderr):
if os.name != "nt":
return subprocess.Popen(
cmd,
stdout=stdout,
stderr=stderr,
universal_newlines=True,
env=env,
# Assign the scoring process to a process group. All child processes of the
# scoring process will be assigned to this group as well. This allows child
# processes of the scoring process to be terminated successfully
preexec_fn=os.setsid,
)
else:
return subprocess.Popen(
cmd,
stdout=stdout,
stderr=stderr,
universal_newlines=True,
env=env,
# On Windows, `os.setsid` and `preexec_fn` are unavailable
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
)
class RestEndpoint:
def __init__(self, proc, port, activity_polling_timeout_seconds=250):
self._proc = proc
self._port = port
self._activity_polling_timeout_seconds = activity_polling_timeout_seconds
def __enter__(self):
ping_status = None
for i in range(0, int(self._activity_polling_timeout_seconds / 5)):
assert self._proc.poll() is None, "scoring process died"
time.sleep(5)
# noinspection PyBroadException
try:
ping_status = requests.get(url="http://localhost:%d/ping" % self._port)
print("connection attempt", i, "server is up! ping status", ping_status)
if ping_status.status_code == 200:
break
except Exception:
print("connection attempt", i, "failed, server is not up yet")
if ping_status is None or ping_status.status_code != 200:
raise Exception("ping failed, server is not happy")
print("server up, ping status", ping_status)
return self
def __exit__(self, tp, val, traceback):
if self._proc.poll() is None:
# Terminate the process group containing the scoring process.
# This will terminate all child processes of the scoring process
if os.name != "nt":
pgrp = os.getpgid(self._proc.pid)
os.killpg(pgrp, signal.SIGTERM)
else:
# https://stackoverflow.com/questions/47016723/windows-equivalent-for-spawning-and-killing-separate-process-group-in-python-3 # noqa
self._proc.send_signal(signal.CTRL_BREAK_EVENT)
self._proc.kill()
def invoke(self, data, content_type):
if type(data) == pd.DataFrame:
if content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_RECORDS_ORIENTED:
data = data.to_json(orient="records")
elif (
content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON
or content_type == pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED
):
data = data.to_json(orient="split")
elif content_type == pyfunc_scoring_server.CONTENT_TYPE_CSV:
data = data.to_csv(index=False)
else:
raise Exception(
"Unexpected content type for Pandas dataframe input %s" % content_type
)
response = requests.post(
url="http://localhost:%d/invocations" % self._port,
data=data,
headers={"Content-Type": content_type},
)
return response
def _evaluate_scoring_proc(proc, port, data, content_type, activity_polling_timeout_seconds=250):
"""
:param activity_polling_timeout_seconds: The amount of time, in seconds, to wait before
declaring the scoring process to have failed.
"""
with RestEndpoint(proc, port, activity_polling_timeout_seconds) as endpoint:
return endpoint.invoke(data, content_type)
@pytest.fixture(scope="module", autouse=True)
def set_boto_credentials():
os.environ["AWS_ACCESS_KEY_ID"] = "NotARealAccessKey"
os.environ["AWS_SECRET_ACCESS_KEY"] = "NotARealSecretAccessKey"
os.environ["AWS_SESSION_TOKEN"] = "NotARealSessionToken"
@pytest.fixture
def mock_s3_bucket():
"""
Creates a mock S3 bucket using moto
:return: The name of the mock bucket
"""
import boto3
import moto
with moto.mock_s3():
bucket_name = "mock-bucket"
s3_client = boto3.client("s3")
s3_client.create_bucket(Bucket=bucket_name)
yield bucket_name
class safe_edit_yaml(object):
def __init__(self, root, file_name, edit_func):
self._root = root
self._file_name = file_name
self._edit_func = edit_func
self._original = read_yaml(root, file_name)
def __enter__(self):
new_dict = self._edit_func(self._original.copy())
write_yaml(self._root, self._file_name, new_dict, overwrite=True)
def __exit__(self, *args):
write_yaml(self._root, self._file_name, self._original, overwrite=True)
def create_mock_response(status_code, text):
"""
Create a mock response object with the status_code and text
:param status_code: int HTTP status code
:param text: message from the response
:return: mock HTTP Response
"""
response = mock.MagicMock()
response.status_code = status_code
response.text = text
return response
def _read_yaml(path):
with open(path, "r") as f:
return yaml.safe_load(f)
def _read_lines(path):
with open(path, "r") as f:
return f.read().splitlines()
def _compare_conda_env_requirements(env_path, req_path):
assert os.path.exists(req_path)
custom_env_parsed = _read_yaml(env_path)
requirements = _read_lines(req_path)
assert _get_pip_deps(custom_env_parsed) == requirements
def _assert_pip_requirements(model_uri, requirements, constraints=None):
local_path = _download_artifact_from_uri(model_uri)
txt_reqs = _read_lines(os.path.join(local_path, "requirements.txt"))
conda_reqs = _get_pip_deps(_read_yaml(os.path.join(local_path, "conda.yaml")))
assert txt_reqs == requirements
assert conda_reqs == requirements
if constraints:
assert f"-c {_CONSTRAINTS_FILE_NAME}" in txt_reqs
assert f"-c {_CONSTRAINTS_FILE_NAME}" in conda_reqs
cons = _read_lines(os.path.join(local_path, _CONSTRAINTS_FILE_NAME))
assert cons == constraints
|
[] |
[] |
[
"AWS_SESSION_TOKEN",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
] |
[]
|
["AWS_SESSION_TOKEN", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
|
python
| 3 | 0 | |
src/org/nutz/lang/Lang.java
|
package org.nutz.lang;
import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.PrintStream;
import java.io.Reader;
import java.io.StringReader;
import java.io.Writer;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Array;
import java.lang.reflect.Field;
import java.lang.reflect.GenericArrayType;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.lang.reflect.TypeVariable;
import java.lang.reflect.WildcardType;
import java.nio.charset.Charset;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Queue;
import java.util.Set;
import java.util.regex.Pattern;
import javax.crypto.Mac;
import javax.crypto.SecretKey;
import javax.crypto.spec.SecretKeySpec;
import javax.servlet.http.HttpServletRequest;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.ParserConfigurationException;
import org.nutz.castor.Castors;
import org.nutz.castor.FailToCastObjectException;
import org.nutz.dao.entity.annotation.Column;
import org.nutz.json.Json;
import org.nutz.lang.Encoding;
import org.nutz.lang.reflect.ReflectTool;
import org.nutz.lang.stream.StringInputStream;
import org.nutz.lang.stream.StringOutputStream;
import org.nutz.lang.stream.StringWriter;
import org.nutz.lang.util.Context;
import org.nutz.lang.util.NutMap;
import org.nutz.lang.util.NutType;
import org.nutz.lang.util.Regex;
import org.nutz.lang.util.SimpleContext;
/**
* Helper functions that make some common Java tasks simpler.
*
* @author zozoh([email protected])
* @author wendal([email protected])
* @author bonyfish([email protected])
* @author wizzer([email protected])
*/
public abstract class Lang {
public static int HASH_BUFF_SIZE = 16 * 1024;
private static final Pattern IPV4_PATTERN = Pattern.compile("^(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)(\\.(25[0-5]|2[0-4]\\d|[0-1]?\\d?\\d)){3}$");
private static final Pattern IPV6_STD_PATTERN = Pattern.compile("^(?:[0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$");
private static final Pattern IPV6_HEX_COMPRESSED_PATTERN = Pattern.compile("^((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)::((?:[0-9A-Fa-f]{1,4}(?::[0-9A-Fa-f]{1,4})*)?)$");
public static boolean isIPv4Address(final String input) {
return IPV4_PATTERN.matcher(input).matches();
}
public static boolean isIPv6StdAddress(final String input) {
return IPV6_STD_PATTERN.matcher(input).matches();
}
public static boolean isIPv6HexCompressedAddress(final String input) {
return IPV6_HEX_COMPRESSED_PATTERN.matcher(input).matches();
}
public static boolean isIPv6Address(final String input) {
return isIPv6StdAddress(input) || isIPv6HexCompressedAddress(input);
}
public static ComboException comboThrow(Throwable... es) {
ComboException ce = new ComboException();
for (Throwable e : es)
ce.add(e);
return ce;
}
/**
* 生成一个未实现的运行时异常
*
* @return 一个未实现的运行时异常
*/
public static RuntimeException noImplement() {
return new RuntimeException("Not implement yet!");
}
/**
* 生成一个不可能的运行时异常
*
* @return 一个不可能的运行时异常
*/
public static RuntimeException impossible() {
return new RuntimeException("r u kidding me?! It is impossible!");
}
/**
* 根据格式化字符串,生成运行时异常
*
* @param format
* 格式
* @param args
* 参数
* @return 运行时异常
*/
public static RuntimeException makeThrow(String format, Object... args) {
return new RuntimeException(String.format(format, args));
}
/**
* 根据格式化字符串,生成一个指定的异常。
*
* @param classOfT
* 异常类型, 需要有一个字符串为参数的构造函数
* @param format
* 格式
* @param args
* 参数
* @return 异常对象
*/
@SuppressWarnings("unchecked")
public static <T extends Throwable> T makeThrow(Class<T> classOfT,
String format,
Object... args) {
if (classOfT == RuntimeException.class)
return (T) new RuntimeException(String.format(format, args));
return Mirror.me(classOfT).born(String.format(format, args));
}
/**
* 将抛出对象包裹成运行时异常,并增加自己的描述
*
* @param e
* 抛出对象
* @param fmt
* 格式
* @param args
* 参数
* @return 运行时异常
*/
public static RuntimeException wrapThrow(Throwable e, String fmt, Object... args) {
return new RuntimeException(String.format(fmt, args), e);
}
/**
* 用运行时异常包裹抛出对象,如果抛出对象本身就是运行时异常,则直接返回。
* <p>
* 如果是 InvocationTargetException,那么将其剥离,只包裹其 TargetException
*
* @param e
* 抛出对象
* @return 运行时异常
*/
public static RuntimeException wrapThrow(Throwable e) {
if (e instanceof RuntimeException)
return (RuntimeException) e;
if (e instanceof InvocationTargetException)
return wrapThrow(((InvocationTargetException) e).getTargetException());
return new RuntimeException(e);
}
/**
* 用一个指定可抛出类型来包裹一个抛出对象。这个指定的可抛出类型需要有一个构造函数 接受 Throwable 类型的对象
*
* @param e
* 抛出对象
* @param wrapper
* 包裹类型
* @return 包裹后对象
*/
@SuppressWarnings("unchecked")
public static <T extends Throwable> T wrapThrow(Throwable e, Class<T> wrapper) {
if (wrapper.isAssignableFrom(e.getClass()))
return (T) e;
return Mirror.me(wrapper).born(e);
}
public static Throwable unwrapThrow(Throwable e) {
if (e == null)
return null;
if (e instanceof InvocationTargetException) {
InvocationTargetException itE = (InvocationTargetException) e;
if (itE.getTargetException() != null)
return unwrapThrow(itE.getTargetException());
}
if (e instanceof RuntimeException && e.getCause() != null)
return unwrapThrow(e.getCause());
return e;
}
public static boolean isCauseBy(Throwable e, Class<? extends Throwable> causeType) {
if (e.getClass() == causeType)
return true;
Throwable cause = e.getCause();
if (null == cause)
return false;
return isCauseBy(cause, causeType);
}
/**
* 判断两个对象是否相等。 这个函数用处是:
* <ul>
* <li>可以容忍 null
* <li>可以容忍不同类型的 Number
* <li>对数组,集合, Map 会深层比较
* </ul>
* 当然,如果你重写的 equals 方法会优先
*
* @param a0
* 比较对象1
* @param a1
* 比较对象2
* @return 是否相等
*/
public static boolean equals(Object a0, Object a1) {
if (a0 == a1)
return true;
if (a0 == null && a1 == null)
return true;
if (a0 == null || a1 == null)
return false;
// 简单的判断是否等于
if (a0.equals(a1))
return true;
Mirror<?> mi = Mirror.me(a0);
// 简单类型,变字符串比较,或者正则表达式
if (mi.isSimple() || mi.is(Pattern.class)) {
return a0.toString().equals(a1.toString());
}
// 如果类型就不能互相转换,那么一定是错的
if (!a0.getClass().isAssignableFrom(a1.getClass())
&& !a1.getClass().isAssignableFrom(a0.getClass()))
return false;
// Map
if (a0 instanceof Map && a1 instanceof Map) {
Map<?, ?> m1 = (Map<?, ?>) a0;
Map<?, ?> m2 = (Map<?, ?>) a1;
if (m1.size() != m2.size())
return false;
for (Entry<?, ?> e : m1.entrySet()) {
Object key = e.getKey();
if (!m2.containsKey(key) || !equals(m1.get(key), m2.get(key)))
return false;
}
return true;
}
// 数组
else if (a0.getClass().isArray() && a1.getClass().isArray()) {
int len = Array.getLength(a0);
if (len != Array.getLength(a1))
return false;
for (int i = 0; i < len; i++) {
if (!equals(Array.get(a0, i), Array.get(a1, i)))
return false;
}
return true;
}
// 集合
else if (a0 instanceof Collection && a1 instanceof Collection) {
Collection<?> c0 = (Collection<?>) a0;
Collection<?> c1 = (Collection<?>) a1;
if (c0.size() != c1.size())
return false;
Iterator<?> it0 = c0.iterator();
Iterator<?> it1 = c1.iterator();
while (it0.hasNext()) {
Object o0 = it0.next();
Object o1 = it1.next();
if (!equals(o0, o1))
return false;
}
return true;
}
// 一定不相等
return false;
}
/**
* 判断一个数组内是否包括某一个对象。 它的比较将通过 equals(Object,Object) 方法
*
* @param array
* 数组
* @param ele
* 对象
* @return true 包含 false 不包含
*/
public static <T> boolean contains(T[] array, T ele) {
if (null == array)
return false;
for (T e : array) {
if (equals(e, ele))
return true;
}
return false;
}
/**
* 从一个文本输入流读取所有内容,并将该流关闭
*
* @param reader
* 文本输入流
* @return 输入流所有内容
*/
public static String readAll(Reader reader) {
if (!(reader instanceof BufferedReader))
reader = new BufferedReader(reader);
try {
StringBuilder sb = new StringBuilder();
char[] data = new char[64];
int len;
while (true) {
if ((len = reader.read(data)) == -1)
break;
sb.append(data, 0, len);
}
return sb.toString();
}
catch (IOException e) {
throw Lang.wrapThrow(e);
}
finally {
Streams.safeClose(reader);
}
}
/**
* 将一段字符串写入一个文本输出流,并将该流关闭
*
* @param writer
* 文本输出流
* @param str
* 字符串
*/
public static void writeAll(Writer writer, String str) {
try {
writer.write(str);
writer.flush();
}
catch (IOException e) {
throw Lang.wrapThrow(e);
}
finally {
Streams.safeClose(writer);
}
}
/**
* 根据一段文本模拟出一个输入流对象
*
* @param cs
* 文本
* @return 输出流对象
*/
public static InputStream ins(CharSequence cs) {
return new StringInputStream(cs);
}
/**
* 根据一段文本模拟出一个文本输入流对象
*
* @param cs
* 文本
* @return 文本输出流对象
*/
public static Reader inr(CharSequence cs) {
return new StringReader(cs.toString());
}
/**
* 根据一个 StringBuilder 模拟一个文本输出流对象
*
* @param sb
* StringBuilder 对象
* @return 文本输出流对象
*/
public static Writer opw(StringBuilder sb) {
return new StringWriter(sb);
}
/**
* 根据一个 StringBuilder 模拟一个输出流对象
*
* @param sb
* StringBuilder 对象
* @return 输出流对象
*/
public static StringOutputStream ops(StringBuilder sb) {
return new StringOutputStream(sb);
}
/**
* 较方便的创建一个数组,比如:
*
* <pre>
* String[] strs = Lang.array("A", "B", "A"); => ["A","B","A"]
* </pre>
*
* @param eles
* 可变参数
* @return 数组对象
*/
public static <T> T[] array(T... eles) {
return eles;
}
/**
* 较方便的创建一个没有重复的数组,比如:
*
* <pre>
* String[] strs = Lang.arrayUniq("A","B","A"); => ["A","B"]
* String[] strs = Lang.arrayUniq(); => null
* </pre>
*
* 返回的顺序会遵循输入的顺序
*
* @param eles
* 可变参数
* @return 数组对象
*/
@SuppressWarnings("unchecked")
public static <T> T[] arrayUniq(T... eles) {
if (null == eles || eles.length == 0)
return null;
// 记录重复
HashSet<T> set = new HashSet<T>(eles.length);
for (T ele : eles) {
set.add(ele);
}
// 循环
T[] arr = (T[]) Array.newInstance(eles[0].getClass(), set.size());
int index = 0;
for (T ele : eles) {
if (set.remove(ele))
Array.set(arr, index++, ele);
}
return arr;
}
/**
* 判断一个对象是否为空。它支持如下对象类型:
* <ul>
* <li>null : 一定为空
* <li>数组
* <li>集合
* <li>Map
* <li>其他对象 : 一定不为空
* </ul>
*
* @param obj
* 任意对象
* @return 是否为空
*/
public static boolean isEmpty(Object obj) {
if (obj == null)
return true;
if (obj.getClass().isArray())
return Array.getLength(obj) == 0;
if (obj instanceof Collection<?>)
return ((Collection<?>) obj).isEmpty();
if (obj instanceof Map<?, ?>)
return ((Map<?, ?>) obj).isEmpty();
return false;
}
/**
* 判断一个数组是否是空数组
*
* @param ary
* 数组
* @return null 或者空数组都为 true 否则为 false
*/
public static <T> boolean isEmptyArray(T[] ary) {
return null == ary || ary.length == 0;
}
/**
* 较方便的创建一个列表,比如:
*
* <pre>
* List<Pet> pets = Lang.list(pet1, pet2, pet3);
* </pre>
*
* 注,这里的 List,是 ArrayList 的实例
*
* @param eles
* 可变参数
* @return 列表对象
*/
public static <T> ArrayList<T> list(T... eles) {
ArrayList<T> list = new ArrayList<T>(eles.length);
for (T ele : eles)
list.add(ele);
return list;
}
/**
* 创建一个 Hash 集合
*
* @param eles
* 可变参数
* @return 集合对象
*/
public static <T> Set<T> set(T... eles) {
Set<T> set = new HashSet<T>();
for (T ele : eles)
set.add(ele);
return set;
}
/**
* 将多个数组,合并成一个数组。如果这些数组为空,则返回 null
*
* @param arys
* 数组对象
* @return 合并后的数组对象
*/
@SuppressWarnings("unchecked")
public static <T> T[] merge(T[]... arys) {
Queue<T> list = new LinkedList<T>();
for (T[] ary : arys)
if (null != ary)
for (T e : ary)
if (null != e)
list.add(e);
if (list.isEmpty())
return null;
Class<T> type = (Class<T>) list.peek().getClass();
return list.toArray((T[]) Array.newInstance(type, list.size()));
}
/**
* 将一个对象添加成为一个数组的第一个元素,从而生成一个新的数组
*
* @param e
* 对象
* @param eles
* 数组
* @return 新数组
*/
@SuppressWarnings("unchecked")
public static <T> T[] arrayFirst(T e, T[] eles) {
try {
if (null == eles || eles.length == 0) {
T[] arr = (T[]) Array.newInstance(e.getClass(), 1);
arr[0] = e;
return arr;
}
T[] arr = (T[]) Array.newInstance(eles.getClass().getComponentType(), eles.length + 1);
arr[0] = e;
for (int i = 0; i < eles.length; i++) {
arr[i + 1] = eles[i];
}
return arr;
}
catch (NegativeArraySizeException e1) {
throw Lang.wrapThrow(e1);
}
}
/**
* 将一个对象添加成为一个数组的最后一个元素,从而生成一个新的数组
*
* @param e
* 对象
* @param eles
* 数组
* @return 新数组
*/
@SuppressWarnings("unchecked")
public static <T> T[] arrayLast(T[] eles, T e) {
try {
if (null == eles || eles.length == 0) {
T[] arr = (T[]) Array.newInstance(e.getClass(), 1);
arr[0] = e;
return arr;
}
T[] arr = (T[]) Array.newInstance(eles.getClass().getComponentType(), eles.length + 1);
for (int i = 0; i < eles.length; i++) {
arr[i] = eles[i];
}
arr[eles.length] = e;
return arr;
}
catch (NegativeArraySizeException e1) {
throw Lang.wrapThrow(e1);
}
}
/**
* 将一个数组转换成字符串
* <p>
* 所有的元素都被格式化字符串包裹。 这个格式话字符串只能有一个占位符, %s, %d 等,均可,请视你的数组内容而定
*
* @param fmt
* 格式
* @param objs
* 数组
* @return 拼合后的字符串
*/
public static <T> StringBuilder concatBy(String fmt, T[] objs) {
StringBuilder sb = new StringBuilder();
for (T obj : objs)
sb.append(String.format(fmt, obj));
return sb;
}
/**
* 将一个数组转换成字符串
* <p>
* 所有的元素都被格式化字符串包裹。 这个格式话字符串只能有一个占位符, %s, %d 等,均可,请视你的数组内容而定
* <p>
* 每个元素之间,都会用一个给定的字符分隔
*
* @param ptn
* 格式
* @param c
* 分隔符
* @param objs
* 数组
* @return 拼合后的字符串
*/
public static <T> StringBuilder concatBy(String ptn, Object c, T[] objs) {
StringBuilder sb = new StringBuilder();
for (T obj : objs)
sb.append(String.format(ptn, obj)).append(c);
if (sb.length() > 0)
sb.deleteCharAt(sb.length() - 1);
return sb;
}
/**
* 将一个数组转换成字符串
* <p>
* 每个元素之间,都会用一个给定的字符分隔
*
* @param c
* 分隔符
* @param objs
* 数组
* @return 拼合后的字符串
*/
public static <T> StringBuilder concat(Object c, T[] objs) {
StringBuilder sb = new StringBuilder();
if (null == objs || 0 == objs.length)
return sb;
sb.append(objs[0]);
for (int i = 1; i < objs.length; i++)
sb.append(c).append(objs[i]);
return sb;
}
/**
* 清除数组中的特定值
*
* @param objs
* 数组
* @param val
* 值,可以是 null,如果是对象,则会用 equals 来比较
* @return 新的数组实例
*/
@SuppressWarnings("unchecked")
public static <T> T[] without(T[] objs, T val) {
if (null == objs || objs.length == 0) {
return objs;
}
List<T> list = new ArrayList<T>(objs.length);
Class<?> eleType = null;
for (T obj : objs) {
if (obj == val || (null != obj && null != val && obj.equals(val)))
continue;
if (null == eleType && obj != null)
eleType = obj.getClass();
list.add(obj);
}
if (list.isEmpty()) {
return (T[]) new Object[0];
}
return list.toArray((T[]) Array.newInstance(eleType, list.size()));
}
/**
* 将一个长整型数组转换成字符串
* <p>
* 每个元素之间,都会用一个给定的字符分隔
*
* @param c
* 分隔符
* @param vals
* 数组
* @return 拼合后的字符串
*/
public static StringBuilder concat(Object c, long[] vals) {
StringBuilder sb = new StringBuilder();
if (null == vals || 0 == vals.length)
return sb;
sb.append(vals[0]);
for (int i = 1; i < vals.length; i++)
sb.append(c).append(vals[i]);
return sb;
}
/**
* 将一个整型数组转换成字符串
* <p>
* 每个元素之间,都会用一个给定的字符分隔
*
* @param c
* 分隔符
* @param vals
* 数组
* @return 拼合后的字符串
*/
public static StringBuilder concat(Object c, int[] vals) {
StringBuilder sb = new StringBuilder();
if (null == vals || 0 == vals.length)
return sb;
sb.append(vals[0]);
for (int i = 1; i < vals.length; i++)
sb.append(c).append(vals[i]);
return sb;
}
/**
* 将一个数组的部分元素转换成字符串
* <p>
* 每个元素之间,都会用一个给定的字符分隔
*
* @param offset
* 开始元素的下标
* @param len
* 元素数量
* @param c
* 分隔符
* @param objs
* 数组
* @return 拼合后的字符串
*/
public static <T> StringBuilder concat(int offset, int len, Object c, T[] objs) {
StringBuilder sb = new StringBuilder();
if (null == objs || len < 0 || 0 == objs.length)
return sb;
if (offset < objs.length) {
sb.append(objs[offset]);
for (int i = 1; i < len && i + offset < objs.length; i++) {
sb.append(c).append(objs[i + offset]);
}
}
return sb;
}
/**
* 将一个数组所有元素拼合成一个字符串
*
* @param objs
* 数组
* @return 拼合后的字符串
*/
public static <T> StringBuilder concat(T[] objs) {
StringBuilder sb = new StringBuilder();
for (T e : objs)
sb.append(e.toString());
return sb;
}
/**
* 将一个数组部分元素拼合成一个字符串
*
* @param offset
* 开始元素的下标
* @param len
* 元素数量
* @param array
* 数组
* @return 拼合后的字符串
*/
public static <T> StringBuilder concat(int offset, int len, T[] array) {
StringBuilder sb = new StringBuilder();
for (int i = 0; i < len; i++) {
sb.append(array[i + offset].toString());
}
return sb;
}
/**
* 将一个集合转换成字符串
* <p>
* 每个元素之间,都会用一个给定的字符分隔
*
* @param c
* 分隔符
* @param coll
* 集合
* @return 拼合后的字符串
*/
public static <T> StringBuilder concat(Object c, Collection<T> coll) {
StringBuilder sb = new StringBuilder();
if (null == coll || coll.isEmpty())
return sb;
return concat(c, coll.iterator());
}
/**
* 将一个迭代器转换成字符串
* <p>
* 每个元素之间,都会用一个给定的字符分隔
*
* @param c
* 分隔符
* @param it
* 集合
* @return 拼合后的字符串
*/
public static <T> StringBuilder concat(Object c, Iterator<T> it) {
StringBuilder sb = new StringBuilder();
if (it == null || !it.hasNext())
return sb;
sb.append(it.next());
while (it.hasNext())
sb.append(c).append(it.next());
return sb;
}
/**
* 将一个或者多个数组填入一个集合。
*
* @param <C>
* 集合类型
* @param <T>
* 数组元素类型
* @param coll
* 集合
* @param objss
* 数组 (数目可变)
* @return 集合对象
*/
public static <C extends Collection<T>, T> C fill(C coll, T[]... objss) {
for (T[] objs : objss)
for (T obj : objs)
coll.add(obj);
return coll;
}
/**
* 将一个集合变成 Map。
*
* @param mapClass
* Map 的类型
* @param coll
* 集合对象
* @param keyFieldName
* 采用集合中元素的哪个一个字段为键。
* @return Map 对象
*/
public static <T extends Map<Object, Object>> T collection2map(Class<T> mapClass,
Collection<?> coll,
String keyFieldName) {
if (null == coll)
return null;
T map = createMap(mapClass);
if (coll.size() > 0) {
Iterator<?> it = coll.iterator();
Object obj = it.next();
Mirror<?> mirror = Mirror.me(obj.getClass());
Object key = mirror.getValue(obj, keyFieldName);
map.put(key, obj);
for (; it.hasNext();) {
obj = it.next();
key = mirror.getValue(obj, keyFieldName);
map.put(key, obj);
}
}
return (T) map;
}
/**
* 将集合变成 ArrayList
*
* @param col
* 集合对象
* @return 列表对象
*/
@SuppressWarnings("unchecked")
public static <E> List<E> collection2list(Collection<E> col) {
if (null == col)
return null;
if (col.size() == 0)
return new ArrayList<E>(0);
Class<E> eleType = (Class<E>) col.iterator().next().getClass();
return collection2list(col, eleType);
}
/**
* 将集合编程变成指定类型的列表
*
* @param col
* 集合对象
* @param eleType
* 列表类型
* @return 列表对象
*/
public static <E> List<E> collection2list(Collection<?> col, Class<E> eleType) {
if (null == col)
return null;
List<E> list = new ArrayList<E>(col.size());
for (Object obj : col)
list.add(Castors.me().castTo(obj, eleType));
return list;
}
/**
* 将集合变成数组,数组的类型为集合的第一个元素的类型。如果集合为空,则返回 null
*
* @param coll
* 集合对象
* @return 数组
*/
@SuppressWarnings("unchecked")
public static <E> E[] collection2array(Collection<E> coll) {
if (null == coll)
return null;
if (coll.size() == 0)
return (E[]) new Object[0];
Class<E> eleType = (Class<E>) Lang.first(coll).getClass();
return collection2array(coll, eleType);
}
/**
* 将集合变成指定类型的数组
*
* @param col
* 集合对象
* @param eleType
* 数组元素类型
* @return 数组
*/
@SuppressWarnings("unchecked")
public static <E> E[] collection2array(Collection<?> col, Class<E> eleType) {
if (null == col)
return null;
Object re = Array.newInstance(eleType, col.size());
int i = 0;
for (Iterator<?> it = col.iterator(); it.hasNext();) {
Object obj = it.next();
if (null == obj)
Array.set(re, i++, null);
else
Array.set(re, i++, Castors.me().castTo(obj, eleType));
}
return (E[]) re;
}
/**
* 将一个数组变成 Map
*
* @param mapClass
* Map 的类型
* @param array
* 数组
* @param keyFieldName
* 采用集合中元素的哪个一个字段为键。
* @return Map 对象
*/
public static <T extends Map<Object, Object>> T array2map(Class<T> mapClass,
Object array,
String keyFieldName) {
if (null == array)
return null;
T map = createMap(mapClass);
int len = Array.getLength(array);
if (len > 0) {
Object obj = Array.get(array, 0);
Mirror<?> mirror = Mirror.me(obj.getClass());
for (int i = 0; i < len; i++) {
obj = Array.get(array, i);
Object key = mirror.getValue(obj, keyFieldName);
map.put(key, obj);
}
}
return map;
}
@SuppressWarnings("unchecked")
private static <T extends Map<Object, Object>> T createMap(Class<T> mapClass) {
T map;
try {
map = mapClass.newInstance();
}
catch (Exception e) {
map = (T) new HashMap<Object, Object>();
}
if (!mapClass.isAssignableFrom(map.getClass())) {
throw Lang.makeThrow("Fail to create map [%s]", mapClass.getName());
}
return map;
}
/**
* 将数组转换成一个列表。
*
* @param array
* 原始数组
* @return 新列表
*
* @see org.nutz.castor.Castors
*/
public static <T> List<T> array2list(T[] array) {
if (null == array)
return null;
List<T> re = new ArrayList<T>(array.length);
for (T obj : array)
re.add(obj);
return re;
}
/**
* 将数组转换成一个列表。将会采用 Castor 来深层转换数组元素
*
* @param array
* 原始数组
* @param eleType
* 新列表的元素类型
* @return 新列表
*
* @see org.nutz.castor.Castors
*/
public static <T, E> List<E> array2list(Object array, Class<E> eleType) {
if (null == array)
return null;
int len = Array.getLength(array);
List<E> re = new ArrayList<E>(len);
for (int i = 0; i < len; i++) {
Object obj = Array.get(array, i);
re.add(Castors.me().castTo(obj, eleType));
}
return re;
}
/**
* 将数组转换成另外一种类型的数组。将会采用 Castor 来深层转换数组元素
*
* @param array
* 原始数组
* @param eleType
* 新数组的元素类型
* @return 新数组
* @throws FailToCastObjectException
*
* @see org.nutz.castor.Castors
*/
public static Object array2array(Object array, Class<?> eleType)
throws FailToCastObjectException {
if (null == array)
return null;
int len = Array.getLength(array);
Object re = Array.newInstance(eleType, len);
for (int i = 0; i < len; i++) {
Array.set(re, i, Castors.me().castTo(Array.get(array, i), eleType));
}
return re;
}
/**
* 将数组转换成Object[] 数组。将会采用 Castor 来深层转换数组元素
*
* @param args
* 原始数组
* @param pts
* 新数组的元素类型
* @return 新数组
* @throws FailToCastObjectException
*
* @see org.nutz.castor.Castors
*/
public static <T> Object[] array2ObjectArray(T[] args, Class<?>[] pts)
throws FailToCastObjectException {
if (null == args)
return null;
Object[] newArgs = new Object[args.length];
for (int i = 0; i < args.length; i++) {
newArgs[i] = Castors.me().castTo(args[i], pts[i]);
}
return newArgs;
}
/**
* 根据一个 Map,和给定的对象类型,创建一个新的 JAVA 对象
*
* @param src
* Map 对象
* @param toType
* JAVA 对象类型
* @return JAVA 对象
* @throws FailToCastObjectException
*/
@SuppressWarnings({"unchecked", "rawtypes"})
public static <T> T map2Object(Map<?, ?> src, Class<T> toType)
throws FailToCastObjectException {
if (null == toType)
throw new FailToCastObjectException("target type is Null");
// 类型相同
if (toType == Map.class)
return (T) src;
// 也是一种 Map
if (Map.class.isAssignableFrom(toType)) {
Map map;
try {
map = (Map) toType.newInstance();
map.putAll(src);
return (T) map;
}
catch (Exception e) {
throw new FailToCastObjectException("target type fail to born!", unwrapThrow(e));
}
}
// 数组
if (toType.isArray())
return (T) Lang.collection2array(src.values(), toType.getComponentType());
// List
if (List.class == toType) {
return (T) Lang.collection2list(src.values());
}
// POJO
Mirror<T> mirror = Mirror.me(toType);
T obj = mirror.born();
for (Field field : mirror.getFields()) {
Object v = null;
if (!Lang.isAndroid && field.isAnnotationPresent(Column.class)) {
String cv = field.getAnnotation(Column.class).value();
v = src.get(cv);
}
if (null == v && src.containsKey(field.getName())) {
v = src.get(field.getName());
}
if (null != v) {
//Class<?> ft = field.getType();
//获取泛型基类中的字段真实类型, https://github.com/nutzam/nutz/issues/1288
Class<?> ft = ReflectTool.getGenericFieldType(toType, field);
Object vv = null;
// 集合
if (v instanceof Collection) {
Collection c = (Collection) v;
// 集合到数组
if (ft.isArray()) {
vv = Lang.collection2array(c, ft.getComponentType());
}
// 集合到集合
else {
// 创建
Collection newCol;
//Class eleType = Mirror.getGenericTypes(field, 0);
Class<?> eleType = ReflectTool.getParameterRealGenericClass(toType,
field.getGenericType(),0);
if (ft == List.class) {
newCol = new ArrayList(c.size());
} else if (ft == Set.class) {
newCol = new LinkedHashSet();
} else {
try {
newCol = (Collection) ft.newInstance();
}
catch (Exception e) {
throw Lang.wrapThrow(e);
}
}
// 赋值
for (Object ele : c) {
newCol.add(Castors.me().castTo(ele, eleType));
}
vv = newCol;
}
}
// Map
else if (v instanceof Map && Map.class.isAssignableFrom(ft)) {
// 创建
final Map map;
// Map 接口
if (ft == Map.class) {
map = new HashMap();
}
// 自己特殊的 Map
else {
try {
map = (Map) ft.newInstance();
}
catch (Exception e) {
throw new FailToCastObjectException("target type fail to born!", e);
}
}
// 赋值
//final Class<?> valType = Mirror.getGenericTypes(field, 1);
//map的key和value字段类型
final Class<?> keyType = ReflectTool.getParameterRealGenericClass(toType,
field.getGenericType(),0);
final Class<?> valType =ReflectTool.getParameterRealGenericClass(toType,
field.getGenericType(),1);
each(v, new Each<Entry>() {
public void invoke(int i, Entry en, int length) {
map.put(Castors.me().castTo(en.getKey(), keyType),
Castors.me().castTo(en.getValue(), valType));
}
});
vv = map;
}
// 强制转换
else {
vv = Castors.me().castTo(v, ft);
}
mirror.setValue(obj, field, vv);
}
}
return obj;
}
/**
* Builds a Map object from a string.
*
* @param str
* a JSON-like string; the surrounding braces may be omitted
* @return the Map object
*/
public static NutMap map(String str) {
if (null == str)
return null;
str = Strings.trim(str);
if (!Strings.isEmpty(str)
&& (Strings.isQuoteBy(str, '{', '}') || Strings.isQuoteBy(str, '(', ')'))) {
return Json.fromJson(NutMap.class, str);
}
return Json.fromJson(NutMap.class, "{" + str + "}");
}
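// Illustrative examples (assumption, consistent with the Javadoc above):
// Lang.map("x:1, y:'hello'") and Lang.map("{x:1, y:'hello'}") both yield a
// NutMap containing {x=1, y="hello"}; the surrounding braces are optional.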
/**
* 将一个 Map 所有的键都按照回调进行修改
*
* 本函数遇到数组或者集合,会自动处理每个元素
*
* @param obj
* 要转换的 Map 或者 集合或者数组
*
* @param mkc
* 键值修改的回调
* @param recur
* 遇到 Map 是否递归
*
* @see MapKeyConvertor
*/
@SuppressWarnings("unchecked")
public static void convertMapKey(Object obj, MapKeyConvertor mkc, boolean recur) {
// Map
if (obj instanceof Map<?, ?>) {
Map<String, Object> map = (Map<String, Object>) obj;
NutMap map2 = new NutMap();
for (Map.Entry<String, Object> en : map.entrySet()) {
String key = en.getKey();
Object val = en.getValue();
if (recur)
convertMapKey(val, mkc, recur);
String newKey = mkc.convertKey(key);
map2.put(newKey, val);
}
map.clear();
map.putAll(map2);
}
// Collection
else if (obj instanceof Collection<?>) {
for (Object ele : (Collection<?>) obj) {
convertMapKey(ele, mkc, recur);
}
}
// Array
else if (obj.getClass().isArray()) {
for (Object ele : (Object[]) obj) {
convertMapKey(ele, mkc, recur);
}
}
}
/**
* 创建一个一个键的 Map 对象
*
* @param key
* 键
* @param v
* 值
* @return Map 对象
*/
public static NutMap map(String key, Object v) {
return new NutMap().addv(key, v);
}
/**
* 根据一个格式化字符串,生成 Map 对象
*
* @param fmt
* 格式化字符串
* @param args
* 字符串参数
* @return Map 对象
*/
public static NutMap mapf(String fmt, Object... args) {
return map(String.format(fmt, args));
}
/**
* 创建一个新的上下文对象
*
* @return 一个新创建的上下文对象
*/
public static Context context() {
return new SimpleContext();
}
/**
* 根据一个 Map 包裹成一个上下文对象
*
* @param map
* Map 对象
*
* @return 一个新创建的上下文对象
*/
public static Context context(Map<String, Object> map) {
return new SimpleContext(map);
}
/**
* 根据一段 JSON 字符串,生产一个新的上下文对象
*
* @param fmt
* JSON 字符串模板
* @param args
* 模板参数
*
* @return 一个新创建的上下文对象
*/
public static Context contextf(String fmt, Object... args) {
return context(Lang.mapf(fmt, args));
}
/**
* 根据一段 JSON 字符串,生产一个新的上下文对象
*
* @return 一个新创建的上下文对象
*/
public static Context context(String str) {
return context(map(str));
}
/**
* 根据一段字符串,生成一个List 对象。
*
* @param str
* 参照 JSON 标准的字符串,但是可以没有前后的中括号
* @return List 对象
*/
@SuppressWarnings("unchecked")
public static List<Object> list4(String str) {
if (null == str)
return null;
if ((str.length() > 0 && str.charAt(0) == '[') && str.endsWith("]"))
return (List<Object>) Json.fromJson(str);
return (List<Object>) Json.fromJson("[" + str + "]");
}
/**
* 获得一个对象的长度。它可以接受:
* <ul>
* <li>null : 0
* <li>数组
* <li>集合
* <li>Map
* <li>一般 Java 对象。 返回 1
* </ul>
* 如果你想让你的 Java 对象返回不是 1 , 请在对象中声明 length() 函数
*
* @param obj
* @return 对象长度
* @deprecated 这玩意很脑残,为啥最后要动态调个 "length",导致字符串类很麻烦,以后用 Lang.eleSize 函数代替吧
*/
@Deprecated
public static int length(Object obj) {
if (null == obj)
return 0;
if (obj.getClass().isArray()) {
return Array.getLength(obj);
} else if (obj instanceof Collection<?>) {
return ((Collection<?>) obj).size();
} else if (obj instanceof Map<?, ?>) {
return ((Map<?, ?>) obj).size();
}
try {
return (Integer) Mirror.me(obj.getClass()).invoke(obj, "length");
}
catch (Exception e) {}
return 1;
}
/**
* 获得一个容器(Map/集合/数组)对象包含的元素数量
* <ul>
* <li>null : 0
* <li>数组
* <li>集合
* <li>Map
* <li>一般 Java 对象。 返回 1
* </ul>
*
* @param obj
* @return 对象长度
* @since Nutz 1.r.62
*/
public static int eleSize(Object obj) {
// 空指针,就是 0
if (null == obj)
return 0;
// 数组
if (obj.getClass().isArray()) {
return Array.getLength(obj);
}
// 容器
if (obj instanceof Collection<?>) {
return ((Collection<?>) obj).size();
}
// Map
if (obj instanceof Map<?, ?>) {
return ((Map<?, ?>) obj).size();
}
// 其他的就是 1 咯
return 1;
}
/**
* 如果是数组或集合取得第一个对象。 否则返回自身
*
* @param obj
* 任意对象
* @return 第一个代表对象
*/
public static Object first(Object obj) {
if (null == obj)
return obj;
if (obj instanceof Collection<?>) {
Iterator<?> it = ((Collection<?>) obj).iterator();
return it.hasNext() ? it.next() : null;
}
if (obj.getClass().isArray())
return Array.getLength(obj) > 0 ? Array.get(obj, 0) : null;
return obj;
}
/**
* 获取集合中的第一个元素,如果集合为空,返回 null
*
* @param coll
* 集合
* @return 第一个元素
*/
public static <T> T first(Collection<T> coll) {
if (null == coll || coll.isEmpty())
return null;
return coll.iterator().next();
}
/**
* 获得表中的第一个名值对
*
* @param map
* 表
* @return 第一个名值对
*/
public static <K, V> Entry<K, V> first(Map<K, V> map) {
if (null == map || map.isEmpty())
return null;
return map.entrySet().iterator().next();
}
/**
* 打断 each 循环
*/
public static void Break() throws ExitLoop {
throw new ExitLoop();
}
/**
* 继续 each 循环,如果再递归,则停止递归
*/
public static void Continue() throws ContinueLoop {
throw new ContinueLoop();
}
/**
* 用回调的方式,遍历一个对象,可以支持遍历
* <ul>
* <li>数组
* <li>集合
* <li>Map
* <li>单一元素
* </ul>
*
* @param obj
* 对象
* @param callback
* 回调
*/
public static <T> void each(Object obj, Each<T> callback) {
each(obj, true, callback);
}
/**
* 用回调的方式,遍历一个对象,可以支持遍历
* <ul>
* <li>数组
* <li>集合
* <li>Map
* <li>单一元素
* </ul>
*
* @param obj
* 对象
* @param loopMap
* 是否循环 Map,如果循环 Map 则主要看 callback 的 T,如果是 Map.Entry 则循环 Entry
* 否循环 value。如果本值为 false, 则将 Map 当作一个完整的对象来看待
* @param callback
* 回调
*/
@SuppressWarnings({"rawtypes", "unchecked"})
public static <T> void each(Object obj, boolean loopMap, Each<T> callback) {
if (null == obj || null == callback)
return;
try {
// 循环开始
if (callback instanceof Loop)
if (!((Loop) callback).begin())
return;
// 进行循环
if (obj.getClass().isArray()) {
int len = Array.getLength(obj);
for (int i = 0; i < len; i++)
try {
callback.invoke(i, (T) Array.get(obj, i), len);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
} else if (obj instanceof Collection) {
int len = ((Collection) obj).size();
int i = 0;
for (Iterator<T> it = ((Collection) obj).iterator(); it.hasNext();)
try {
callback.invoke(i++, it.next(), len);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
} else if (loopMap && obj instanceof Map) {
Map map = (Map) obj;
int len = map.size();
int i = 0;
Class<T> eType = Mirror.getTypeParam(callback.getClass(), 0);
if (null != eType && eType != Object.class && eType.isAssignableFrom(Entry.class)) {
for (Object v : map.entrySet())
try {
callback.invoke(i++, (T) v, len);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
} else {
for (Object v : map.entrySet())
try {
callback.invoke(i++, (T) ((Entry) v).getValue(), len);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
}
} else if (obj instanceof Iterator<?>) {
Iterator<?> it = (Iterator<?>) obj;
int i = 0;
while (it.hasNext()) {
try {
callback.invoke(i++, (T) it.next(), -1);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {
break;
}
}
} else
try {
callback.invoke(0, (T) obj, 1);
}
catch (ContinueLoop e) {}
catch (ExitLoop e) {}
// 循环结束
if (callback instanceof Loop)
((Loop) callback).end();
}
catch (LoopException e) {
throw Lang.wrapThrow(e.getCause());
}
}
/**
* 安全的从一个数组获取一个元素,容忍 null 数组,以及支持负数的 index
* <p>
* 如果该下标越界,则返回 null
*
* @param <T>
* @param array
* 数组,如果为 null 则直接返回 null
* @param index
* 下标,-1 表示倒数第一个, -2 表示倒数第二个,以此类推
* @return 数组元素
*/
public static <T> T get(T[] array, int index) {
if (null == array)
return null;
int i = index < 0 ? array.length + index : index;
if (i < 0 || i >= array.length)
return null;
return array[i];
}
/**
* 将一个抛出对象的异常堆栈,显示成一个字符串
*
* @param e
* 抛出对象
* @return 异常堆栈文本
*/
public static String getStackTrace(Throwable e) {
StringBuilder sb = new StringBuilder();
StringOutputStream sbo = new StringOutputStream(sb);
PrintStream ps = new PrintStream(sbo);
e.printStackTrace(ps);
ps.flush();
return sbo.getStringBuilder().toString();
}
/**
* 将字符串解析成 boolean 值,支持更多的字符串
* <ul>
* <li>1 | 0
* <li>yes | no
* <li>on | off
* <li>true | false
* </ul>
*
* @param s
* 字符串
* @return 布尔值
*/
public static boolean parseBoolean(String s) {
if (null == s || s.length() == 0)
return false;
if (s.length() > 5)
return true;
if ("0".equals(s))
return false;
s = s.toLowerCase();
return !"false".equals(s) && !"off".equals(s) && !"no".equals(s);
}
/**
* 帮你快速获得一个 DocumentBuilder,方便 XML 解析。
*
* @return 一个 DocumentBuilder 对象
* @throws ParserConfigurationException
*/
public static DocumentBuilder xmls() throws ParserConfigurationException {
return Xmls.xmls();
}
/**
* 对Thread.sleep(long)的简单封装,不抛出任何异常
*
* @param millisecond
* 休眠时间
*/
public static void quiteSleep(long millisecond) {
try {
if (millisecond > 0)
Thread.sleep(millisecond);
}
catch (Throwable e) {}
}
/**
* Converts a string into a Number object. Supported formats:
* <ul>
* <li>null - integer 0</li>
* <li>23.78 - floating point (Double; an F suffix yields a Float)</li>
* <li>0x45 - hexadecimal Integer</li>
* <li>78L - Long</li>
* <li>69 - plain Integer</li>
* </ul>
*
* @param s
* the input string
* @return the Number object
*/
public static Number str2number(String s) {
// null 值
if (null == s) {
return 0;
}
s = s.toUpperCase();
// 浮点
if (s.indexOf('.') != -1) {
char c = s.charAt(s.length() - 1);
if (c == 'F' || c == 'f') {
return Float.valueOf(s);
}
return Double.valueOf(s);
}
// 16进制整数
if (s.startsWith("0X")) {
return Integer.valueOf(s.substring(2), 16);
}
// 长整数
if (s.charAt(s.length() - 1) == 'L' || s.charAt(s.length() - 1) == 'l') {
return Long.valueOf(s.substring(0, s.length() - 1));
}
// 普通整数
Long re = Long.parseLong(s);
if (Integer.MAX_VALUE >= re && re >= Integer.MIN_VALUE)
return re.intValue();
return re;
}
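// Illustrative examples (assumption, consistent with the code above):
// str2number(null) -> 0 (Integer)
// str2number("23.78") -> 23.78 (Double; "23.78F" -> Float)
// str2number("0x45") -> 69 (Integer, parsed as hexadecimal)
// str2number("78L") -> 78 (Long)
// str2number("69") -> 69 (Integer)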
@SuppressWarnings("unchecked")
private static <T extends Map<String, Object>> void obj2map(Object obj,
T map,
final Map<Object, Object> memo) {
// 已经转换过了,不要递归转换
if (null == obj || memo.containsKey(obj))
return;
memo.put(obj, "");
// Fix issue #497
// 如果是 Map,就直接 putAll 一下咯
if (obj instanceof Map<?, ?>) {
map.putAll(__change_map_to_nutmap((Map<String, Object>) obj, memo));
return;
}
// 下面是普通的 POJO
Mirror<?> mirror = Mirror.me(obj.getClass());
Field[] flds = mirror.getFields();
for (Field fld : flds) {
Object v = mirror.getValue(obj, fld);
if (null == v) {
continue;
}
Mirror<?> mr = Mirror.me(v);
// 普通值
if (mr.isSimple()) {
map.put(fld.getName(), v);
}
// 已经输出过了
else if (memo.containsKey(v)) {
map.put(fld.getName(), null);
}
// 数组或者集合
else if (mr.isColl()) {
final List<Object> list = new ArrayList<Object>(Lang.length(v));
Lang.each(v, new Each<Object>() {
public void invoke(int index, Object ele, int length) {
__join_ele_to_list_as_map(list, ele, memo);
}
});
map.put(fld.getName(), list);
}
// Map
else if (mr.isMap()) {
NutMap map2 = __change_map_to_nutmap((Map<String, Object>) v, memo);
map.put(fld.getName(), map2);
}
// 看来要递归
else {
T sub;
try {
sub = (T) map.getClass().newInstance();
}
catch (Exception e) {
throw Lang.wrapThrow(e);
}
obj2map(v, sub, memo);
map.put(fld.getName(), sub);
}
}
}
@SuppressWarnings("unchecked")
private static NutMap __change_map_to_nutmap(Map<String, Object> map,
final Map<Object, Object> memo) {
NutMap re = new NutMap();
for (Map.Entry<String, Object> en : map.entrySet()) {
Object v = en.getValue();
if (null == v)
continue;
Mirror<?> mr = Mirror.me(v);
// 普通值
if (mr.isSimple()) {
re.put(en.getKey(), v);
}
// 已经输出过了
else if (memo.containsKey(v)) {
continue;
}
// 数组或者集合
else if (mr.isColl()) {
final List<Object> list2 = new ArrayList<Object>(Lang.length(v));
Lang.each(v, new Each<Object>() {
public void invoke(int index, Object ele, int length) {
__join_ele_to_list_as_map(list2, ele, memo);
}
});
re.put(en.getKey(), list2);
}
// Map
else if (mr.isMap()) {
NutMap map2 = __change_map_to_nutmap((Map<String, Object>) v, memo);
re.put(en.getKey(), map2);
}
// 看来要递归
else {
NutMap map2 = obj2nutmap(v);
re.put(en.getKey(), map2);
}
}
return re;
}
@SuppressWarnings("unchecked")
private static void __join_ele_to_list_as_map(List<Object> list,
Object o,
final Map<Object, Object> memo) {
if (null == o) {
return;
}
// 如果是 Map,就直接 putAll 一下咯
if (o instanceof Map<?, ?>) {
NutMap map2 = __change_map_to_nutmap((Map<String, Object>) o, memo);
list.add(map2);
return;
}
Mirror<?> mr = Mirror.me(o);
// 普通值
if (mr.isSimple()) {
list.add(o);
}
// 已经输出过了
else if (memo.containsKey(o)) {
list.add(null);
}
// 数组或者集合
else if (mr.isColl()) {
final List<Object> list2 = new ArrayList<Object>(Lang.length(o));
Lang.each(o, new Each<Object>() {
public void invoke(int index, Object ele, int length) {
__join_ele_to_list_as_map(list2, ele, memo);
}
});
list.add(list2);
}
// Map
else if (mr.isMap()) {
NutMap map2 = __change_map_to_nutmap((Map<String, Object>) o, memo);
list.add(map2);
}
// needs recursion
else {
NutMap map = obj2nutmap(o);
list.add(map);
}
}
/**
* Converts an object into a Map
*
* @param obj
* the POJO object
* @return the resulting Map
*/
@SuppressWarnings("unchecked")
public static Map<String, Object> obj2map(Object obj) {
return obj2map(obj, HashMap.class);
}
/**
* Converts an object into Nutz's standard Map wrapper
*
* @param obj
* the POJO object
* @return the resulting NutMap
*/
public static NutMap obj2nutmap(Object obj) {
return obj2map(obj, NutMap.class);
}
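// Illustrative usage (editor's sketch, not part of the original source; "pet" is a
// hypothetical POJO with "name" and "age" fields):
//   NutMap map = Lang.obj2nutmap(pet);
//   map.get("name"); // -> the value of pet.name
// Cyclic references are cut off via the internal "memo" map, so self-referencing
// objects do not cause infinite recursion.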
/**
* Converts an object into a Map of the given type
*
* @param <T>
* @param obj
* the POJO object
* @param mapType
* the concrete Map type to instantiate
* @return the resulting Map
*/
public static <T extends Map<String, Object>> T obj2map(Object obj, Class<T> mapType) {
try {
T map = mapType.newInstance();
Lang.obj2map(obj, map, new HashMap<Object, Object>());
return map;
}
catch (Exception e) {
throw Lang.wrapThrow(e);
}
}
/**
* Returns an Enumeration over a collection. It is simply a thin wrapper around the Iterator interface
*
* @param col
* the collection
* @return the enumeration
*/
public static <T> Enumeration<T> enumeration(Collection<T> col) {
final Iterator<T> it = col.iterator();
return new Enumeration<T>() {
public boolean hasMoreElements() {
return it.hasNext();
}
public T nextElement() {
return it.next();
}
};
}
/**
* Copies the elements of an Enumeration into a collection
*
* @param enums
* the enumeration
* @param cols
* the target collection
* @return the target collection
*/
public static <T extends Collection<E>, E> T enum2collection(Enumeration<E> enums, T cols) {
while (enums.hasMoreElements())
cols.add(enums.nextElement());
return cols;
}
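// Illustrative usage (editor's sketch, not part of the original source; "en" is a
// hypothetical Enumeration<String>):
//   List<String> names = Lang.enum2collection(en, new ArrayList<String>());
//   Enumeration<String> again = Lang.enumeration(names); // the reverse direction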
/**
* Forcibly casts a char array into a byte array. Information is lost for characters encoded with more than one byte
*
* @param cs
* the char array
* @return the byte array
*/
public static byte[] toBytes(char[] cs) {
byte[] bs = new byte[cs.length];
for (int i = 0; i < cs.length; i++)
bs[i] = (byte) cs[i];
return bs;
}
/**
* Forcibly casts an int array into a byte array. The high-order bits of each integer are lost
*
* @param is
* the int array
* @return the byte array
*/
public static byte[] toBytes(int[] is) {
byte[] bs = new byte[is.length];
for (int i = 0; i < is.length; i++)
bs[i] = (byte) is[i];
return bs;
}
/**
* Checks whether the current operating system is Windows
*
* @return true if the current system is Windows
*/
public static boolean isWin() {
try {
String os = System.getenv("OS");
return os != null && os.indexOf("Windows") > -1;
}
catch (Throwable e) {
return false;
}
}
/**
* Loads a class by name. An earlier version relied solely on the thread context ClassLoader, which caused various problems; this version falls back to Class.forName when that fails.
*/
public static Class<?> loadClass(String className) throws ClassNotFoundException {
try {
return Thread.currentThread().getContextClassLoader().loadClass(className);
}
catch (Throwable e) {
return Class.forName(className);
}
}
/**
* Returns true if the currently running Java VM is JDK 6 or later
*
* @return true if the current Java VM is JDK 6 or later
*/
public static boolean isJDK6() {
return JdkTool.getMajorVersion() >= 6;
}
/**
* Returns the default value of a primitive type
*
* @param pClass
* the primitive type
* @return 0/false, or null if pClass is not a primitive class
*/
public static Object getPrimitiveDefaultValue(Class<?> pClass) {
if (int.class.equals(pClass))
return Integer.valueOf(0);
if (long.class.equals(pClass))
return Long.valueOf(0);
if (short.class.equals(pClass))
return Short.valueOf((short) 0);
if (float.class.equals(pClass))
return Float.valueOf(0f);
if (double.class.equals(pClass))
return Double.valueOf(0);
if (byte.class.equals(pClass))
return Byte.valueOf((byte) 0);
if (char.class.equals(pClass))
return Character.valueOf((char) 0);
if (boolean.class.equals(pClass))
return Boolean.FALSE;
return null;
}
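// Illustrative usage (editor's sketch, not part of the original source):
//   Lang.getPrimitiveDefaultValue(int.class);     // -> Integer 0
//   Lang.getPrimitiveDefaultValue(boolean.class); // -> Boolean.FALSE
//   Lang.getPrimitiveDefaultValue(String.class);  // -> null (not a primitive)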
/**
* When a class declares its generics as <T, K>, this method returns the concrete type of one of its fields.
*
* @param me
* @param field
*/
public static Type getFieldType(Mirror<?> me, String field) throws NoSuchFieldException {
return getFieldType(me, me.getField(field));
}
/**
* When a class declares its generics as <T, K>, this method returns the concrete types of all parameters of one of its methods
*
* @param me
* @param method
*/
public static Type[] getMethodParamTypes(Mirror<?> me, Method method) {
Type[] types = method.getGenericParameterTypes();
List<Type> ts = new ArrayList<Type>();
for (Type type : types) {
ts.add(getGenericsType(me, type));
}
return ts.toArray(new Type[ts.size()]);
}
/**
* When a class declares its generics as <T, K>, this method returns the concrete type of one of its fields.
*
* @param me
* @param field
*/
public static Type getFieldType(Mirror<?> me, Field field) {
Type type = field.getGenericType();
return getGenericsType(me, type);
}
/**
* When a class declares its generics as <T, K>, this method returns the concrete type that the given type resolves to in that class.
*
* @param me
* @param type
*/
public static Type getGenericsType(Mirror<?> me, Type type) {
Type[] types = me.getGenericsTypes();
Type t = type;
if (type instanceof TypeVariable && types != null && types.length > 0) {
Type[] tvs = me.getType().getTypeParameters();
for (int i = 0; i < tvs.length; i++) {
if (type.equals(tvs[i])) {
type = me.getGenericsType(i);
break;
}
}
}
if (!type.equals(t)) {
return type;
}
if (types != null && types.length > 0 && type instanceof ParameterizedType) {
ParameterizedType pt = (ParameterizedType) type;
if (pt.getActualTypeArguments().length >= 0) {
NutType nt = new NutType();
nt.setOwnerType(pt.getOwnerType());
nt.setRawType(pt.getRawType());
Type[] tt = new Type[pt.getActualTypeArguments().length];
for (int i = 0; i < tt.length; i++) {
tt[i] = types[i];
}
nt.setActualTypeArguments(tt);
return nt;
}
}
return type;
}
/**
* Returns the actual Class that a Type corresponds to
*
* @param type
* the type
* @return the Class the given Type actually corresponds to
*/
@SuppressWarnings("rawtypes")
public static Class<?> getTypeClass(Type type) {
Class<?> clazz = null;
if (type instanceof Class<?>) {
clazz = (Class<?>) type;
} else if (type instanceof ParameterizedType) {
ParameterizedType pt = (ParameterizedType) type;
clazz = (Class<?>) pt.getRawType();
} else if (type instanceof GenericArrayType) {
GenericArrayType gat = (GenericArrayType) type;
Class<?> typeClass = getTypeClass(gat.getGenericComponentType());
return Array.newInstance(typeClass, 0).getClass();
} else if (type instanceof TypeVariable) {
TypeVariable tv = (TypeVariable) type;
Type[] ts = tv.getBounds();
if (ts != null && ts.length > 0)
return getTypeClass(ts[0]);
} else if (type instanceof WildcardType) {
WildcardType wt = (WildcardType) type;
Type[] t_low = wt.getLowerBounds(); // take its lower bounds
if (t_low.length > 0)
return getTypeClass(t_low[0]);
Type[] t_up = wt.getUpperBounds(); // no lower bound? take the upper bounds
return getTypeClass(t_up[0]); // Object is always an upper bound at the very least
}
return clazz;
}
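// Illustrative usage (editor's sketch, not part of the original source). For a field
// declared as "List<String> names", field.getGenericType() is a ParameterizedType:
//   Lang.getTypeClass(field.getGenericType()); // -> java.util.List (the raw type)
//   Lang.getTypeClass(String.class);           // -> java.lang.String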
/**
* Returns the generic type arguments of a Type, or null if it has none
*
* @param type
* the type
* @return the generic type arguments of the Type, or null if there are none
*/
public static Type[] getGenericsTypes(Type type) {
if (type instanceof ParameterizedType) {
ParameterizedType pt = (ParameterizedType) type;
return pt.getActualTypeArguments();
}
return null;
}
/**
* Forcibly resolves a Class from its name, wrapping ClassNotFoundException into a RuntimeException
*
* @param <T>
* @param name
* the class name
* @param type
* the upper bound of the type
* @return the class object
*/
@SuppressWarnings("unchecked")
public static <T> Class<T> forName(String name, Class<T> type) {
Class<?> re;
try {
re = Lang.loadClass(name);
return (Class<T>) re;
}
catch (ClassNotFoundException e) {
throw Lang.wrapThrow(e);
}
}
/**
* Computes the MD5 digest of the given file
*
* @param f
* the file
* @return the MD5 digest of the file
* @see #digest(String, File)
*/
public static String md5(File f) {
return digest("MD5", f);
}
/**
* Computes the MD5 digest of the given input stream
*
* @param ins
* the input stream
* @return the MD5 digest of the input stream
* @see #digest(String, InputStream)
*/
public static String md5(InputStream ins) {
return digest("MD5", ins);
}
/**
* Computes the MD5 digest of the given string
*
* @param cs
* the string
* @return the MD5 digest of the string
* @see #digest(String, CharSequence)
*/
public static String md5(CharSequence cs) {
return digest("MD5", cs);
}
/**
* Computes the SHA1 digest of the given file
*
* @param f
* the file
* @return the SHA1 digest of the file
* @see #digest(String, File)
*/
public static String sha1(File f) {
return digest("SHA1", f);
}
/**
* Computes the SHA1 digest of the given input stream
*
* @param ins
* the input stream
* @return the SHA1 digest of the input stream
* @see #digest(String, InputStream)
*/
public static String sha1(InputStream ins) {
return digest("SHA1", ins);
}
/**
* Computes the SHA1 digest of the given string
*
* @param cs
* the string
* @return the SHA1 digest of the string
* @see #digest(String, CharSequence)
*/
public static String sha1(CharSequence cs) {
return digest("SHA1", cs);
}
/**
* Computes the SHA256 digest of the given file
*
* @param f
* the file
* @return the SHA256 digest of the file
* @see #digest(String, File)
*/
public static String sha256(File f) {
return digest("SHA-256", f);
}
/**
* Computes the SHA256 digest of the given input stream
*
* @param ins
* the input stream
* @return the SHA256 digest of the input stream
* @see #digest(String, InputStream)
*/
public static String sha256(InputStream ins) {
return digest("SHA-256", ins);
}
/**
* Computes the SHA256 digest of the given string
*
* @param cs
* the string
* @return the SHA256 digest of the string
* @see #digest(String, CharSequence)
*/
public static String sha256(CharSequence cs) {
return digest("SHA-256", cs);
}
/**
* Computes a message digest from a data file
*
* @param algorithm
* the algorithm, e.g. "SHA1", "SHA-256" or "MD5"
* @param f
* the file
* @return the digest as a hex string
*/
public static String digest(String algorithm, File f) {
return digest(algorithm, Streams.fileIn(f));
}
/**
* Computes a message digest from a stream; the stream is closed once the computation finishes
*
* @param algorithm
* the algorithm, e.g. "SHA1" or "MD5"
* @param ins
* the input stream
* @return the digest as a hex string
*/
public static String digest(String algorithm, InputStream ins) {
try {
MessageDigest md = MessageDigest.getInstance(algorithm);
byte[] bs = new byte[HASH_BUFF_SIZE];
int len = 0;
while ((len = ins.read(bs)) != -1) {
md.update(bs, 0, len);
}
byte[] hashBytes = md.digest();
return fixedHexString(hashBytes);
}
catch (NoSuchAlgorithmException e) {
throw Lang.wrapThrow(e);
}
catch (FileNotFoundException e) {
throw Lang.wrapThrow(e);
}
catch (IOException e) {
throw Lang.wrapThrow(e);
}
finally {
Streams.safeClose(ins);
}
}
/**
* Computes a message digest from a string
*
* @param algorithm
* the algorithm, e.g. "SHA1" or "MD5"
* @param cs
* the string
* @return the digest as a hex string
*/
public static String digest(String algorithm, CharSequence cs) {
return digest(algorithm, Strings.getBytesUTF8(null == cs ? "" : cs), null, 1);
}
/**
* Computes a message digest from a byte array
*
* @param algorithm
* the algorithm, e.g. "SHA1" or "MD5"
* @param bytes
* the byte array
* @param salt
* a random salt
* @param iterations
* the number of iterations
* @return the digest as a hex string
*/
public static String digest(String algorithm, byte[] bytes, byte[] salt, int iterations) {
try {
MessageDigest md = MessageDigest.getInstance(algorithm);
if (salt != null) {
md.update(salt);
}
byte[] hashBytes = md.digest(bytes);
for (int i = 1; i < iterations; i++) {
md.reset();
hashBytes = md.digest(hashBytes);
}
return fixedHexString(hashBytes);
}
catch (NoSuchAlgorithmException e) {
throw Lang.wrapThrow(e);
}
}
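// Illustrative usage (editor's sketch, not part of the original source; the file
// path and the "bytes"/"salt" arrays are hypothetical):
//   Lang.md5("hello");                         // -> "5d41402abc4b2a76b9719d911017c592"
//   Lang.sha256(new File("/tmp/data.bin"));    // digest of the file contents
//   Lang.digest("SHA-256", bytes, salt, 1024); // salted, iterated digest as hex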
/** Whether the currently running Java VM is in an Android environment */
public static final boolean isAndroid;
static {
boolean flag = false;
try {
Class.forName("android.Manifest");
flag = true;
}
catch (Throwable e) {}
isAndroid = flag;
}
/**
* Reverses the contents of the given array in place. Note that this modifies the original array
*
* @param arrays
* the array to reverse
*/
public static <T> void reverse(T[] arrays) {
int size = arrays.length;
for (int i = 0; i < size; i++) {
int ih = i;
int it = size - 1 - i;
if (ih == it || ih > it) {
break;
}
T ah = arrays[ih];
T swap = arrays[it];
arrays[ih] = swap;
arrays[it] = ah;
}
}
@Deprecated
public static String simpleMetodDesc(Method method) {
return simpleMethodDesc(method);
}
public static String simpleMethodDesc(Method method) {
return String.format("%s.%s(...)",
method.getDeclaringClass().getSimpleName(),
method.getName());
}
public static String fixedHexString(byte[] hashBytes) {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < hashBytes.length; i++) {
sb.append(Integer.toString((hashBytes[i] & 0xff) + 0x100, 16).substring(1));
}
return sb.toString();
}
/**
* A convenience method that puts the current thread to sleep for a while
*
* @param ms
* the time to sleep, in milliseconds
*/
public static void sleep(long ms) {
try {
Thread.sleep(ms);
}
catch (InterruptedException e) {
throw Lang.wrapThrow(e);
}
}
/**
* A convenience method that waits on an object, synchronizing on it first
*
* @param lock
* the lock object
* @param ms
* the time to wait, in milliseconds
*/
public static void wait(Object lock, long ms) {
if (null != lock)
synchronized (lock) {
try {
lock.wait(ms);
}
catch (InterruptedException e) {
throw Lang.wrapThrow(e);
}
}
}
/**
* Notifies all threads waiting on the given object's monitor
*
* @param lock
* the lock object
*/
public static void notifyAll(Object lock) {
if (null != lock)
synchronized (lock) {
lock.notifyAll();
}
}
public static void runInAnThread(Runnable runnable) {
new Thread(runnable).start();
}
/**
* Shallow-filters a map and returns a new map
*
* @param source
* the original map
* @param prefix
* keep only keys with this prefix, and strip the prefix
* @param include
* regular expression; keep only matching keys (matched after the prefix, if any, has been stripped)
* @param exclude
* regular expression; drop matching keys (matched after the prefix, if any, has been stripped)
* @param keyMap
* mapping of original key to target key (applied after the prefix, if any, has been stripped)
* @return the filtered map, which is never the same object as the original
*/
public static Map<String, Object> filter(Map<String, Object> source,
String prefix,
String include,
String exclude,
Map<String, String> keyMap) {
LinkedHashMap<String, Object> dst = new LinkedHashMap<String, Object>();
if (source == null || source.isEmpty())
return dst;
Pattern includePattern = include == null ? null : Regex.getPattern(include);
Pattern excludePattern = exclude == null ? null : Regex.getPattern(exclude);
for (Entry<String, Object> en : source.entrySet()) {
String key = en.getKey();
if (prefix != null) {
if (key.startsWith(prefix))
key = key.substring(prefix.length());
else
continue;
}
if (includePattern != null && !includePattern.matcher(key).find())
continue;
if (excludePattern != null && excludePattern.matcher(key).find())
continue;
if (keyMap != null && keyMap.containsKey(key))
dst.put(keyMap.get(key), en.getValue());
else
dst.put(key, en.getValue());
}
return dst;
}
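// Illustrative usage (editor's sketch, not part of the original source). Given a map
// with keys {"db.url", "db.user", "app.name"}:
//   Lang.filter(map, "db.", null, null, null);
// returns a new map with keys {"url", "user"}; "app.name" is dropped because it does
// not carry the "db." prefix.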
/**
* Returns the visitor's IP address; also works for requests that came through a reverse proxy
*
* @param request
* the HTTP request object
* @return the source IP, or an empty string if it cannot be determined
*/
public static String getIP(HttpServletRequest request) {
if (request == null)
return "";
String ip = request.getHeader("X-Forwarded-For");
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("Proxy-Client-IP");
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("WL-Proxy-Client-IP");
}
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("HTTP_CLIENT_IP");
}
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getHeader("HTTP_X_FORWARDED_FOR");
}
if (ip == null || ip.length() == 0 || "unknown".equalsIgnoreCase(ip)) {
ip = request.getRemoteAddr();
}
} else if (ip.length() > 15) {
String[] ips = ip.split(",");
for (int index = 0; index < ips.length; index++) {
String strIp = ips[index];
if (!("unknown".equalsIgnoreCase(strIp))) {
ip = strIp;
break;
}
}
}
if (Strings.isBlank(ip))
return "";
if (isIPv4Address(ip) || isIPv6Address(ip)) {
return ip;
}
return "";
}
/**
* @return the root path the current program is running from
*/
public static String runRootPath() {
String cp = Lang.class.getClassLoader().getResource("").toExternalForm();
if (cp.startsWith("file:")) {
cp = cp.substring("file:".length());
}
return cp;
}
public static <T> T copyProperties(Object origin, T target) {
return copyProperties(origin, target, null, null, false, true);
}
public static <T> T copyProperties(Object origin,
T target,
String active,
String lock,
boolean ignoreNull,
boolean ignoreStatic) {
if (origin == null)
throw new IllegalArgumentException("origin is null");
if (target == null)
throw new IllegalArgumentException("target is null");
Pattern at = active == null ? null : Regex.getPattern(active);
Pattern lo = lock == null ? null : Regex.getPattern(lock);
Mirror<Object> originMirror = Mirror.me(origin);
Mirror<T> targetMirror = Mirror.me(target);
Field[] fields = targetMirror.getFields();
for (Field field : originMirror.getFields()) {
String name = field.getName();
if (at != null && !at.matcher(name).find())
continue;
if (lo != null && lo.matcher(name).find())
continue;
if (ignoreStatic && Modifier.isStatic(field.getModifiers()))
continue;
Object val = originMirror.getValue(origin, field);
if (ignoreNull && val == null)
continue;
for (Field _field : fields) {
if (_field.getName().equals(field.getName())) {
targetMirror.setValue(target, _field, val);
}
}
// TODO support matching via getters/setters as well
}
return target;
}
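// Illustrative usage (editor's sketch, not part of the original source; UserEntity
// and UserVO are hypothetical classes sharing field names):
//   UserVO vo = Lang.copyProperties(entity, new UserVO());
// The longer overload can restrict fields by regex (active/lock) and skip null or
// static fields.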
public static StringBuilder execOutput(String cmd) throws IOException {
return execOutput(Strings.splitIgnoreBlank(cmd, " "), Encoding.CHARSET_UTF8);
}
public static StringBuilder execOutput(String cmd, Charset charset) throws IOException {
return execOutput(Strings.splitIgnoreBlank(cmd, " "), charset);
}
public static StringBuilder execOutput(String cmd[]) throws IOException {
return execOutput(cmd, Encoding.CHARSET_UTF8);
}
public static StringBuilder execOutput(String[] cmd, Charset charset) throws IOException {
Process p = Runtime.getRuntime().exec(cmd);
p.getOutputStream().close();
InputStreamReader r = new InputStreamReader(p.getInputStream(), charset);
StringBuilder sb = new StringBuilder();
Streams.readAndClose(r, sb);
return sb;
}
public static void exec(String cmd, StringBuilder out, StringBuilder err) throws IOException {
exec(Strings.splitIgnoreBlank(cmd, " "), Encoding.CHARSET_UTF8, out, err);
}
public static void exec(String[] cmd, StringBuilder out, StringBuilder err) throws IOException {
exec(cmd, Encoding.CHARSET_UTF8, out, err);
}
public static void exec(String[] cmd, Charset charset, StringBuilder out, StringBuilder err)
throws IOException {
Process p = Runtime.getRuntime().exec(cmd);
p.getOutputStream().close();
InputStreamReader sOut = new InputStreamReader(p.getInputStream(), charset);
Streams.readAndClose(sOut, out);
InputStreamReader sErr = new InputStreamReader(p.getErrorStream(), charset);
Streams.readAndClose(sErr, err);
}
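// Illustrative usage (editor's sketch, not part of the original source; the commands
// are hypothetical and OS-dependent):
//   StringBuilder out = Lang.execOutput("git --version");
//   StringBuilder o = new StringBuilder(), e = new StringBuilder();
//   Lang.exec(new String[]{"git", "status"}, o, e); // stdout into o, stderr into e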
public static Class<?> loadClassQuite(String className) {
try {
return loadClass(className);
}
catch (ClassNotFoundException e) {
return null;
}
}
public static byte[] toBytes(Object obj) {
try {
ByteArrayOutputStream bao = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(bao);
oos.writeObject(obj);
return bao.toByteArray();
}
catch (IOException e) {
return null;
}
}
@SuppressWarnings("unchecked")
public static <T> T fromBytes(byte[] buf, Class<T> klass) {
try {
return (T) new ObjectInputStream(new ByteArrayInputStream(buf)).readObject();
}
catch (ClassNotFoundException e) {
return null;
}
catch (IOException e) {
return null;
}
}
public static class JdkTool {
public static String getVersionLong() {
Properties sys = System.getProperties();
return sys.getProperty("java.version");
}
public static int getMajorVersion() {
String ver = getVersionLong();
if (Strings.isBlank(ver))
return 6;
String[] tmp = ver.split("\\.");
if (tmp.length < 2)
return 6;
int t = Integer.parseInt(tmp[0]);
if (t > 1)
return t;
return Integer.parseInt(tmp[1]);
}
public static boolean isEarlyAccess() {
String ver = getVersionLong();
if (Strings.isBlank(ver))
return false;
return ver.contains("-ea");
}
/**
* Returns the process id of the current JVM
* @param fallback the value to return if the process id cannot be determined
* @return the process id
*/
public static String getProcessId(final String fallback) {
final String jvmName = ManagementFactory.getRuntimeMXBean().getName();
final int index = jvmName.indexOf('@');
if (index < 1) {
return fallback;
}
try {
return Long.toString(Long.parseLong(jvmName.substring(0, index)));
}
catch (NumberFormatException e) {
}
return fallback;
}
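// Illustrative usage (editor's sketch, not part of the original source):
//   JdkTool.getMajorVersion();  // -> 8 for "1.8.0_292", 11 for "11.0.2"
//   JdkTool.getProcessId("-1"); // -> the current JVM's pid, or "-1" if unknown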
}
/**
* Checks whether an object is not empty. The following object types are supported:
* <ul>
* <li>null : always considered empty
* <li>arrays
* <li>collections
* <li>Map
* <li>any other object : never considered empty
* </ul>
*
* @param obj
* any object
* @return whether the object is not empty
*/
public static boolean isNotEmpty(Object obj) {
return !isEmpty(obj);
}
/**
* Computes the HmacMD5 value of the given string
*
* @param data the string
* @param secret the secret key
* @return the HmacMD5 value of the string
*/
public static String hmacmd5(String data, String secret) {
if (isEmpty(data))
throw new NullPointerException("data is null");
if (isEmpty(secret))
throw new NullPointerException("secret is null");
byte[] bytes = null;
try {
SecretKey secretKey = new SecretKeySpec(secret.getBytes(Encoding.UTF8), "HmacMD5");
Mac mac = Mac.getInstance(secretKey.getAlgorithm());
mac.init(secretKey);
bytes = mac.doFinal(data.getBytes(Encoding.UTF8));
} catch (Exception e) {
e.printStackTrace();
throw Lang.wrapThrow(e);
}
return fixedHexString(bytes);
}
/**
* Computes the HmacSHA256 value of the given string
*
* @param data the string
* @param secret the secret key
* @return the HmacSHA256 value of the string
*/
public static String hmacSHA256(String data, String secret) {
if (isEmpty(data))
throw new NullPointerException("data is null");
if (isEmpty(secret))
throw new NullPointerException("secret is null");
byte[] bytes = null;
try {
SecretKey secretKey = new SecretKeySpec(secret.getBytes(Encoding.UTF8), "HmacSHA256");
Mac mac = Mac.getInstance(secretKey.getAlgorithm());
mac.init(secretKey);
bytes = mac.doFinal(data.getBytes(Encoding.UTF8));
} catch (Exception e) {
e.printStackTrace();
throw Lang.wrapThrow(e);
}
return fixedHexString(bytes);
}
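// Illustrative usage (editor's sketch, not part of the original source):
//   Lang.hmacmd5("payload", "secret");    // hex-encoded HmacMD5 of "payload"
//   Lang.hmacSHA256("payload", "secret"); // hex-encoded HmacSHA256 of "payload"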
}
| ["\"OS\""] | [] | ["OS"] | [] | ["OS"] | java | 1 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# django-filter documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 17 11:25:20 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-filter'
copyright = u'2013, Alex Gaynor and others.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-filterdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-filter.tex', u'django-filter Documentation',
u'Alex Gaynor and others.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-filter', u'django-filter Documentation',
[u'Alex Gaynor and others.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-filter', u'django-filter Documentation',
u'Alex Gaynor and others.', 'django-filter', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# see:
# https://github.com/snide/sphinx_rtd_theme#using-this-theme-locally-then-building-on-read-the-docs
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# only import and set the theme if we're building docs locally
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| [] | [] | ["READTHEDOCS"] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
mymap/manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mymap.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 |