filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
sdk "github.com/line/line-bot-sdk-go/v7/linebot"
"github.com/yona3/go-line-bot-sample/linebot"
)
func main() {
handler, err := linebot.NewHandler()
if err != nil {
log.Fatal(err)
return
}
bot, err := linebot.NewBot(handler)
if err != nil {
panic(err)
}
port := os.Getenv("PORT")
if port == "" {
port = "8080"
log.Printf("Defaulting to port %s", port)
}
handler.HandleEvents(func(events []*sdk.Event, r *http.Request) {
linebot.Webhook(bot, events, r)
})
http.Handle("/callback", handler)
log.Printf("Listening on port %s", port)
log.Fatal(http.ListenAndServe(fmt.Sprintf(":%s", port), nil))
}
| [
"\"PORT\""
] | [] | [
"PORT"
] | [] | ["PORT"] | go | 1 | 0 | |
recognition_engine/redis.py | import os
import redis
from threading import Thread
import shared as s
from .utils import id_to_name
class RedisThread(Thread):
def __init__(self, conn, name):
Thread.__init__(self)
self.conn = conn
self.name = name
def run(self):
channel = os.environ.get('FACERECO_REDIS_MODULE_NAME')
self.conn.publish(channel, self.name)
class RedisAdapter:
last_id = -1
def __init__(self):
redis_address = os.environ.get('SURIROBOT_REDIS_ADDRESS', '127.0.0.1')
self.conn = redis.StrictRedis(redis_address)
def process(self, id):
if self.last_id != id:
self.last_id = id
thread = RedisThread(self.conn, str(id) + '.' + id_to_name(id))
thread.start()
| [] | [] | [
"FACERECO_REDIS_MODULE_NAME",
"SURIROBOT_REDIS_ADDRESS"
] | [] | ["FACERECO_REDIS_MODULE_NAME", "SURIROBOT_REDIS_ADDRESS"] | python | 2 | 0 | |
src/pip/_internal/network/session.py | """PipSession and supporting code, containing all pip-specific
network request configuration and behavior.
"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import email.utils
import ipaddress
import json
import logging
import mimetypes
import os
import platform
import sys
import urllib.parse
import warnings
from typing import TYPE_CHECKING
from pip._vendor import requests, urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.urllib3.exceptions import InsecureRequestWarning
from pip import __version__
from pip._internal.metadata import get_default_environment
from pip._internal.network.auth import MultiDomainBasicAuth
from pip._internal.network.cache import SafeFileCache
# Import ssl from compat so the initial import occurs in only one place.
from pip._internal.utils.compat import has_tls
from pip._internal.utils.glibc import libc_ver
from pip._internal.utils.misc import build_url_from_netloc, parse_netloc
from pip._internal.utils.urls import url_to_path
if TYPE_CHECKING:
from typing import Any, Iterator, List, Optional, Sequence, Tuple, Union
from pip._internal.models.link import Link
SecureOrigin = Tuple[str, str, Optional[Union[int, str]]]
logger = logging.getLogger(__name__)
# Ignore warning raised when using --trusted-host.
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
] # type: List[SecureOrigin]
# These are environment variables present when running under various
# CI systems. For each variable, some CI systems that use the variable
# are indicated. The collection was chosen so that for each of a number
# of popular systems, at least one of the environment variables is used.
# This list is used to provide some indication of and lower bound for
# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.
# For more background, see: https://github.com/pypa/pip/issues/5499
CI_ENVIRONMENT_VARIABLES = (
# Azure Pipelines
'BUILD_BUILDID',
# Jenkins
'BUILD_ID',
# AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
'CI',
# Explicit environment variable.
'PIP_IS_CI',
)
def looks_like_ci():
# type: () -> bool
"""
Return whether it looks like pip is running under CI.
"""
# We don't use the method of checking for a tty (e.g. using isatty())
# because some CI systems mimic a tty (e.g. Travis CI). Thus that
# method doesn't provide definitive information in either direction.
return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": __version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
from pip._vendor import distro
distro_infos = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], distro.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], libc_ver()),
))
if libc:
distro_infos["libc"] = libc
if distro_infos:
data["distro"] = distro_infos
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
if has_tls():
import _ssl as ssl
data["openssl_version"] = ssl.OPENSSL_VERSION
setuptools_dist = get_default_environment().get_distribution("setuptools")
if setuptools_dist is not None:
data["setuptools_version"] = str(setuptools_dist.version)
# Use None rather than False so as not to give the impression that
# pip knows it is not being run under CI. Rather, it is a null or
# inconclusive result. Also, we include some value rather than no
# value to make it easier to know that the check has been run.
data["ci"] = True if looks_like_ci() else None
user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
if user_data is not None:
data["user_data"] = user_data
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
class InsecureCacheControlAdapter(CacheControlAdapter):
def cert_verify(self, conn, url, verify, cert):
super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
class PipSession(requests.Session):
timeout = None # type: Optional[int]
def __init__(
self,
*args, # type: Any
retries=0, # type: int
cache=None, # type: Optional[str]
trusted_hosts=(), # type: Sequence[str]
index_urls=None, # type: Optional[List[str]]
**kwargs, # type: Any
):
# type: (...) -> None
"""
:param trusted_hosts: Domains not to emit warnings for when not using
HTTPS.
"""
super().__init__(*args, **kwargs)
# Namespace the attribute with "pip_" just in case to prevent
# possible conflicts with the base class.
self.pip_trusted_origins = [] # type: List[Tuple[str, Optional[int]]]
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth(index_urls=index_urls)
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
# connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
# A 500 may indicate transient error in Amazon S3
# A 520 or 527 - may indicate transient error in CloudFlare
status_forcelist=[500, 503, 520, 527],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching so we'll use it for all http:// URLs.
# If caching is disabled, we will also use it for
# https:// hosts that we've marked as ignoring
# TLS errors for (trusted-hosts).
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
# We want to _only_ cache responses on securely fetched origins or when
# the host is specified as trusted. We do this because
# we can't validate the response of an insecurely/untrusted fetched
# origin, and we don't want someone to be able to poison the cache and
# require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache),
max_retries=retries,
)
self._trusted_host_adapter = InsecureCacheControlAdapter(
cache=SafeFileCache(cache),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
self._trusted_host_adapter = insecure_adapter
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
for host in trusted_hosts:
self.add_trusted_host(host, suppress_logging=True)
def update_index_urls(self, new_index_urls):
# type: (List[str]) -> None
"""
:param new_index_urls: New index urls to update the authentication
handler with.
"""
self.auth.index_urls = new_index_urls
def add_trusted_host(self, host, source=None, suppress_logging=False):
# type: (str, Optional[str], bool) -> None
"""
:param host: It is okay to provide a host that has previously been
added.
:param source: An optional source string, for logging where the host
string came from.
"""
if not suppress_logging:
msg = f'adding trusted host: {host!r}'
if source is not None:
msg += f' (from {source})'
logger.info(msg)
host_port = parse_netloc(host)
if host_port not in self.pip_trusted_origins:
self.pip_trusted_origins.append(host_port)
self.mount(
build_url_from_netloc(host) + '/',
self._trusted_host_adapter
)
if not host_port[1]:
# Mount wildcard ports for the same host.
self.mount(
build_url_from_netloc(host) + ':',
self._trusted_host_adapter
)
def iter_secure_origins(self):
# type: () -> Iterator[SecureOrigin]
yield from SECURE_ORIGINS
for host, port in self.pip_trusted_origins:
yield ('*', host, '*' if port is None else port)
def is_secure_origin(self, location):
# type: (Link) -> bool
# Determine if this url used a secure transport mechanism
parsed = urllib.parse.urlparse(str(location))
origin_protocol, origin_host, origin_port = (
parsed.scheme, parsed.hostname, parsed.port,
)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
origin_protocol = origin_protocol.rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in self.iter_secure_origins():
secure_protocol, secure_host, secure_port = secure_origin
if origin_protocol != secure_protocol and secure_protocol != "*":
continue
try:
addr = ipaddress.ip_address(origin_host)
network = ipaddress.ip_network(secure_host)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (
origin_host and
origin_host.lower() != secure_host.lower() and
secure_host != "*"
):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port matches.
if (
origin_port != secure_port and
secure_port != "*" and
secure_port is not None
):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS we "
"recommend you use HTTPS instead, otherwise you may silence "
"this warning and allow it anyway with '--trusted-host %s'.",
origin_host,
origin_host,
)
return False
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super().request(method, url, *args, **kwargs)
| [] | [] | [
"PIP_USER_AGENT_USER_DATA"
] | [] | ["PIP_USER_AGENT_USER_DATA"] | python | 1 | 0 | |
pbesa/middleware/adapter/GameAdapter.py | from threading import Thread
from abc import ABC, abstractmethod
#from ...kernel.system.Adm import Adm
from ...kernel.adapter.Adapter import Adapter
import os
import random
import pygame
import math
import sys
class GameAdapter(Adapter, Thread):
adm = None
conf = None
clock = None
screen = None
agentList = None
def __init__(self, conf):
self.conf = conf
self.agentList = []
self.adm = None #Adm()
super().__init__()
def setUp(self):
os.environ["SDL_VIDEO_CENTERED"] = "1"
self.screen = pygame.display.set_mode((self.conf['width'], self.conf['height']))
pygame.display.set_caption(self.conf['title'])
self.clock = pygame.time.Clock()
pygame.init()
self.clock = pygame.time.Clock()
def response(self):
pass
def request(self):
return self.data
def finalize(self):
pass
def addAgent(self, ag):
self.agentList.append(ag)
def run(self):
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
self.frame()
pygame.display.update()
self.clock.tick(40)
def updateWorlds(self, event, data):
for ag in self.agentList:
self.adm.sendEvent(ag.id, event, data)
def getPygame(self):
return pygame
def frame(self):
pass | [] | [] | [
"SDL_VIDEO_CENTERED"
] | [] | ["SDL_VIDEO_CENTERED"] | python | 1 | 0 | |
cmd/isosim/isosim.go | package main
import (
"flag"
"fmt"
isov2 "github.com/rkbalgi/libiso/v2/iso8583"
"isosim/internal/iso"
log "github.com/sirupsen/logrus"
"isosim/internal/db"
"isosim/internal/services"
"net/http"
_ "net/http/pprof"
"os"
"strconv"
"strings"
"sync"
)
//v0.1 - Initial version
//v0.2 - ISO server development (08/31/2016)
//v0.5 - Support for embedded/nested fields and logging via sirupsen/logrus
//v0.6 - react front and multiple other changes
//v0.7.0 - deprecated old plain JS frontend and fixed lot of issues
//v0.8.0 - PIN and MAC generation features
func main() {
fmt.Println("======================================================")
fmt.Printf("ISO WebSim v%s commit: %s\n", version, build)
fmt.Println("======================================================")
logLevel := flag.String("log-level", "debug", "Log level - [trace|debug|warn|info|error].")
flag.StringVar(&iso.HTMLDir, "html-dir", "", "Directory that contains any HTML's and js/css files etc.")
specsDir := flag.String("specs-dir", "", "The directory containing the ISO spec definition files.")
httpPort := flag.Int("http-port", 8080, "HTTP/s port to listen on.")
dataDir := flag.String("data-dir", "", "Directory to store messages (data sets). This is a required field.")
flag.Parse()
switch {
case strings.EqualFold("trace", *logLevel):
log.SetLevel(log.TraceLevel)
case strings.EqualFold("debug", *logLevel):
log.SetLevel(log.DebugLevel)
case strings.EqualFold("info", *logLevel):
log.SetLevel(log.InfoLevel)
case strings.EqualFold("warn", *logLevel):
log.SetLevel(log.WarnLevel)
case strings.EqualFold("error", *logLevel):
log.SetLevel(log.ErrorLevel)
default:
log.Warn("Invalid log-level specified, will default to DEBUG")
log.SetLevel(log.DebugLevel)
}
log.SetFormatter(&log.TextFormatter{ForceColors: true, DisableColors: false})
if *dataDir == "" || *specsDir == "" || iso.HTMLDir == "" {
flag.Usage()
os.Exit(1)
}
err := db.Init(*dataDir)
if err != nil {
log.Fatal(err.Error())
}
//read all the specs from the spec file
err = isov2.ReadSpecs(*specsDir)
if err != nil {
log.Fatal(err.Error())
}
//check if all the required HTML files are available
if err = services.Init(); err != nil {
log.Fatal(err.Error())
}
go func() {
tlsEnabled := os.Getenv("TLS_ENABLED")
if tlsEnabled == "true" {
certFile := os.Getenv("TLS_CERT_FILE")
keyFile := os.Getenv("TLS_KEY_FILE")
log.Infof("TLS settings: Using Certificate file : %s, Key file: %s", certFile, keyFile)
if certFile == "" || keyFile == "" {
log.Fatalf("SSL enabled, but certificate/key file unspecified.")
}
log.Fatal(http.ListenAndServeTLS(":"+strconv.Itoa(*httpPort), certFile, keyFile, nil))
} else {
log.Fatal(http.ListenAndServe(":"+strconv.Itoa(*httpPort), nil))
}
}()
wg := sync.WaitGroup{}
wg.Add(1)
log.Infof("ISO WebSim started!")
wg.Wait()
}
| [
"\"TLS_ENABLED\"",
"\"TLS_CERT_FILE\"",
"\"TLS_KEY_FILE\""
] | [] | [
"TLS_ENABLED",
"TLS_KEY_FILE",
"TLS_CERT_FILE"
] | [] | ["TLS_ENABLED", "TLS_KEY_FILE", "TLS_CERT_FILE"] | go | 3 | 0 | |
setup.py | #!/usr/bin/env python3
"""Setup for the reference implementation of the CWL standards."""
import os
import sys
import warnings
import setuptools.command.egg_info as egg_info_cmd
from setuptools import setup
if os.name == "nt":
warnings.warn(
"The CWL reference runner (cwltool) no longer supports running "
"CWL workflows natively on MS Windows as its previous MS Windows "
"support was incomplete and untested. Instead, please see "
"https://pypi.org/project/cwltool/#ms-windows-users "
"for instructions on running cwltool via "
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
"for your CWL document processing needs."
)
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, "README.rst")
try:
import gittaggers
Tagger = gittaggers.EggInfoFromGit
except ImportError:
Tagger = egg_info_cmd.egg_info
NEEDS_PYTEST = {"pytest", "test", "ptr"}.intersection(sys.argv)
PYTEST_RUNNER = ["pytest-runner", "pytest-cov"] if NEEDS_PYTEST else []
USE_MYPYC = False
# To compile with mypyc, a mypyc checkout must be present on the PYTHONPATH
if len(sys.argv) > 1 and sys.argv[1] == "--use-mypyc":
sys.argv.pop(1)
USE_MYPYC = True
if os.getenv("CWLTOOL_USE_MYPYC", None) == "1":
USE_MYPYC = True
if USE_MYPYC:
mypyc_targets = [
"cwltool/argparser.py",
"cwltool/builder.py",
"cwltool/checker.py",
"cwltool/command_line_tool.py",
# "cwltool/context.py", # monkeypatching
"cwltool/cwlrdf.py",
"cwltool/docker_id.py",
"cwltool/docker.py",
"cwltool/udocker.py",
"cwltool/errors.py",
"cwltool/executors.py",
"cwltool/expression.py",
"cwltool/factory.py",
"cwltool/flatten.py",
# "cwltool/__init__.py",
"cwltool/job.py",
"cwltool/load_tool.py",
# "cwltool/loghandler.py", # so we can monkeypatch the logger from tests
# "cwltool/__main__.py",
"cwltool/main.py",
"cwltool/mutation.py",
"cwltool/pack.py",
# "cwltool/pathmapper.py", # class PathMapper needs to be subclassable
"cwltool/process.py",
"cwltool/procgenerator.py",
# "cwltool/provenance.py", # WritableBag is having issues
"cwltool/resolver.py",
# "cwltool/sandboxjs.py", # probably not speed critical, tests need to mock components
"cwltool/secrets.py",
"cwltool/singularity.py",
"cwltool/software_requirements.py",
# "cwltool/stdfsaccess.py", # StdFsAccess needs to be subclassable
"cwltool/subgraph.py",
"cwltool/update.py",
"cwltool/utils.py",
"cwltool/validate_js.py",
"cwltool/workflow.py",
]
from mypyc.build import mypycify
opt_level = os.getenv("MYPYC_OPT_LEVEL", "3")
ext_modules = mypycify(mypyc_targets, opt_level=opt_level)
else:
ext_modules = []
setup(
name="cwltool",
version="3.1",
description="Common workflow language reference implementation",
long_description=open(README).read(),
long_description_content_type="text/x-rst",
author="Common workflow language working group",
author_email="[email protected]",
url="https://github.com/common-workflow-language/cwltool",
download_url="https://github.com/common-workflow-language/cwltool",
ext_modules=ext_modules,
# platforms='', # empty as is conveyed by the classifier below
# license='', # empty as is conveyed by the classifier below
packages=["cwltool", "cwltool.tests"],
package_dir={"cwltool.tests": "tests"},
include_package_data=True,
install_requires=[
"setuptools",
"requests >= 2.6.1", # >= 2.6.1 to workaround
# https://github.com/ionrock/cachecontrol/issues/137
"ruamel.yaml >= 0.15, < 0.17.22",
"rdflib >= 4.2.2, < 6.2.0",
"shellescape >= 3.4.1, < 3.9",
"schema-salad >= 8.2.20211104054942, < 9",
"mypy-extensions",
"psutil >= 5.6.6",
"prov == 1.5.1",
"bagit >= 1.6.4",
"typing-extensions",
"coloredlogs",
"pydot >= 1.4.1",
"pyparsing != 3.0.2", # breaks --print-dot (pydot) https://github.com/pyparsing/pyparsing/issues/319
"argcomplete",
],
extras_require={
"deps": ["galaxy-tool-util >= 21.1.0"],
},
python_requires=">=3.7, <4",
setup_requires=PYTEST_RUNNER,
test_suite="tests",
tests_require=[
"pytest >= 6.2, < 7.1",
"mock >= 2.0.0",
"pytest-mock >= 1.10.0",
"arcp >= 0.2.0",
"rdflib-jsonld >= 0.4.0",
],
entry_points={"console_scripts": ["cwltool=cwltool.main:run"]},
zip_safe=True,
cmdclass={"egg_info": Tagger},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Healthcare Industry",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: Astronomy",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Medical Science Apps.",
"Topic :: System :: Distributed Computing",
"Topic :: Utilities",
],
)
| [] | [] | [
"MYPYC_OPT_LEVEL",
"CWLTOOL_USE_MYPYC"
] | [] | ["MYPYC_OPT_LEVEL", "CWLTOOL_USE_MYPYC"] | python | 2 | 0 | |
backend/test_001_32044/wsgi.py | """
WSGI config for test_001_32044 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_001_32044.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tests/base_test_class.py | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoAlertPresentException
import unittest
import os
import re
# import time
dd_driver = None
dd_driver_options = None
class BaseTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
global dd_driver
if not dd_driver:
# setupModule and tearDownModule are not working in our scenario, so for now we use setupClass and a global variable
# global variables are dirty, but in unit tests scenario's like these they are acceptable
print('launching browser for: ', cls.__name__)
global dd_driver_options
dd_driver_options = Options()
# headless means no UI, if you want to see what is happening remove headless. Adding detach will leave the window open after the test
dd_driver_options.add_argument("--headless")
# dd_driver_options.add_experimental_option("detach", True)
# the next 2 maybe needed in some scenario's for example on WSL or other headless situations
dd_driver_options.add_argument("--no-sandbox")
# dd_driver_options.add_argument("--disable-dev-shm-usage")
dd_driver_options.add_argument("--disable-gpu") # on windows sometimes chrome can't start with certain gpu driver versions, even in headless mode
# start maximized or at least with sufficient width because datatables will hide certain controls when the screen is too narrow
dd_driver_options.add_argument("--window-size=1280,1024")
# dd_driver_options.add_argument("--start-maximized")
dd_driver_options.set_capability("acceptInsecureCerts", True)
# some extra logging can be turned on if you want to query the browser javascripe console in your tests
desired = webdriver.DesiredCapabilities.CHROME
desired['goog:loggingPrefs'] = {'browser': 'ALL'}
# change path of chromedriver according to which directory you have chromedriver.
print('starting chromedriver with options: ', vars(dd_driver_options), desired)
dd_driver = webdriver.Chrome('chromedriver', chrome_options=dd_driver_options, desired_capabilities=desired)
# best practice is only use explicit waits
dd_driver.implicitly_wait(1)
cls.driver = dd_driver
cls.base_url = os.environ['DD_BASE_URL']
def setUp(self):
self.verificationErrors = []
self.accept_next_alert = True
self.accept_javascript_errors = False
self.driver.execute_script("console.clear()")
# clear browser console logs?
def login_page(self):
driver = self.driver
driver.get(self.base_url + "login")
driver.find_element_by_id("id_username").clear()
driver.find_element_by_id("id_username").send_keys(os.environ['DD_ADMIN_USER'])
driver.find_element_by_id("id_password").clear()
driver.find_element_by_id("id_password").send_keys(os.environ['DD_ADMIN_PASSWORD'])
driver.find_element_by_css_selector("button.btn.btn-success").click()
self.assertFalse(self.is_element_by_css_selector_present('.alert-danger', 'Please enter a correct username and password'))
return driver
def goto_product_overview(self, driver):
driver.get(self.base_url + "product")
self.wait_for_datatable_if_content("no_products", "products_wrapper")
def goto_component_overview(self, driver):
driver.get(self.base_url + "components")
def goto_active_engagements_overview(self, driver):
# return self.goto_engagements_internal(driver, 'engagement')
# engagement overview doesn't seem to have the datatables yet modifying the DOM
# https://github.com/DefectDojo/django-DefectDojo/issues/2173
driver.get(self.base_url + 'engagement')
# self.goto_engagements_internal(driver, 'engagement')
return driver
def goto_all_engagements_overview(self, driver):
return self.goto_engagements_internal(driver, 'engagements_all')
def goto_engagements_internal(self, driver, rel_url):
driver.get(self.base_url + rel_url)
self.wait_for_datatable_if_content("no_engagements", "engagements_wrapper")
return driver
def goto_all_findings_list(self, driver):
driver.get(self.base_url + "finding")
self.wait_for_datatable_if_content("no_findings", "open_findings_wrapper")
def wait_for_datatable_if_content(self, no_content_id, wrapper_id):
no_content = None
try:
no_content = self.driver.find_element_by_id(no_content_id)
except:
pass
if no_content is None:
# wait for product_wrapper div as datatables javascript modifies the DOM on page load.
WebDriverWait(self.driver, 30).until(EC.presence_of_element_located((By.ID, wrapper_id)))
def is_element_by_css_selector_present(self, selector, text=None):
elems = self.driver.find_elements_by_css_selector(selector)
if len(elems) == 0:
# print('no elements!')
return False
if text is None:
return True
for elem in elems:
print(elem.text)
if text in elem.text:
# print('contains!')
return True
# print('text mismatch!')
return False
def is_success_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-success', text=text)
def is_error_message_present(self, text=None):
return self.is_element_by_css_selector_present('.alert-danger', text=text)
def is_text_present_on_page(self, text):
# DEBUG: couldn't find: Product type added successfully. path: //*[contains(text(),'Product type added successfully.')]
# can't get this xpath to work
# path = "//*[contains(text(), '" + text + "')]"
# elems = self.driver.find_elements_by_xpath(path)
# if len(elems) == 0:
# print("DEBUG: couldn't find: ", text, "path: ", path)
body = self.driver.find_element_by_tag_name("body")
return re.search(text, body.text)
def element_exists_by_id(self, id):
elems = self.driver.find_elements_by_id(id)
return len(elems) > 0
def change_system_setting(self, id, enable=True):
print("changing system setting " + id + " enable: " + str(enable))
driver = self.login_page()
driver.get(self.base_url + 'system_settings')
is_enabled = driver.find_element_by_id(id).is_selected()
if (enable and not is_enabled) or (not enable and is_enabled):
# driver.find_element_by_xpath('//*[@id=' + id + ']').click()
driver.find_element_by_id(id).click()
# save settings
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# check if it's enabled after reload
is_enabled = driver.find_element_by_id(id).is_selected()
if enable:
self.assertTrue(is_enabled)
if not enable:
self.assertFalse(is_enabled)
return is_enabled
def enable_system_setting(self, id):
return self.change_system_setting(id, enable=True)
def disable_system_setting(self, id):
return self.change_system_setting(id, enable=False)
def enable_jira(self):
return self.enable_system_setting('id_enable_jira')
def disable_jira(self):
return self.disable_system_setting('id_enable_jira')
def disable_github(self):
return self.disable_system_setting('id_enable_github')
def enable_github(self):
return self.enable_system_setting('id_enable_github')
def enable_block_execution(self):
# we set the admin user (ourselves) to have block_execution checked
# this will force dedupe to happen synchronously, among other things like notifications, rules, ...
driver = self.login_page()
driver.get(self.base_url + 'profile')
if not driver.find_element_by_id('id_block_execution').is_selected():
driver.find_element_by_xpath('//*[@id="id_block_execution"]').click()
# save settings
driver.find_element_by_css_selector("input.btn.btn-primary").click()
# check if it's enabled after reload
self.assertTrue(driver.find_element_by_id('id_block_execution').is_selected())
return driver
def is_alert_present(self):
try:
self.driver.switch_to_alert()
except NoAlertPresentException:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def assertNoConsoleErrors(self):
"""
Sample output for levels (i.e. errors are SEVERE)
{'level': 'DEBUG', 'message': 'http://localhost:8080/product/type/4/edit 560:12 "debug"', 'source': 'console-api', 'timestamp': 1583952828410}
{'level': 'INFO', 'message': 'http://localhost:8080/product/type/4/edit 561:16 "info"', 'source': 'console-api', 'timestamp': 1583952828410}
{'level': 'WARNING', 'message': 'http://localhost:8080/product/type/4/edit 562:16 "warning"', 'source': 'console-api', 'timestamp': 1583952828410}
{'level': 'SEVERE', 'message': 'http://localhost:8080/product/type/4/edit 563:16 "error"', 'source': 'console-api', 'timestamp': 1583952828410}
"""
for entry in WebdriverOnlyNewLogFacade(self.driver).get_log('browser'):
"""
images are not working in current docker/travis deployment, so ignore those 404s
see: https://github.com/DefectDojo/django-DefectDojo/issues/2045
examples:
http://localhost:8080/static/dojo/img/zoom-in.cur - Failed to load resource: the server responded with a status of 404 (Not Found)
http://localhost:8080/media/CACHE/images/finding_images/1bf9c0b1-5ed1-4b4e-9551-bcbfd198b90a/7d8d9af058566b8f2fe6548d96c63237.jpg - Failed to load resource: the server responded with a status of 404 (Not Found)
"""
accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)'
# accepted_javascript_messages = r'((zoom\-in\.cur.*)|(images\/finding_images\/.*))404\ \(Not\ Found\)|(bootstrap\-chosen\.css\.map)'
if (entry['level'] == 'SEVERE'):
# print(self.driver.current_url) # TODO actually this seems to be the previous url
# self.driver.save_screenshot("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.png")
# with open("C:\\Data\\django-DefectDojo\\tests\\javascript-errors.html", "w") as f:
# f.write(self.driver.page_source)
print(entry)
print('There was a SEVERE javascript error in the console, please check all steps from the current test to see where it happens')
print('Currently there is no reliable way to find out at which url the error happened, but it could be: .' + self.driver.current_url)
if self.accept_javascript_errors:
print('WARNING: skipping SEVERE javascript error because accept_javascript_errors is True!')
elif re.search(accepted_javascript_messages, entry['message']):
print('WARNING: skipping javascript errors related to finding images, see https://github.com/DefectDojo/django-DefectDojo/issues/2045')
else:
self.assertNotEqual(entry['level'], 'SEVERE')
return True
def tearDown(self):
self.assertNoConsoleErrors()
self.assertEqual([], self.verificationErrors)
@classmethod
def tearDownDriver(cls):
print('tearDownDriver: ', cls.__name__)
global dd_driver
if dd_driver:
if not dd_driver_options.experimental_options or not dd_driver_options.experimental_options['detach']:
print('closing browser')
dd_driver.quit()
class WebdriverOnlyNewLogFacade(object):
last_timestamp = 0
def __init__(self, webdriver):
self._webdriver = webdriver
def get_log(self, log_type):
last_timestamp = self.last_timestamp
entries = self._webdriver.get_log(log_type)
filtered = []
for entry in entries:
# check the logged timestamp against the
# stored timestamp
if entry["timestamp"] > self.last_timestamp:
filtered.append(entry)
# save the last timestamp only if newer
# in this set of logs
if entry["timestamp"] > last_timestamp:
last_timestamp = entry["timestamp"]
# store the very last timestamp
self.last_timestamp = last_timestamp
return filtered
def on_exception_html_source_logger(func):
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except Exception as e:
print("exception occured at url:", self.driver.current_url)
print("page source:", self.driver.page_source)
f = open("selenium_page_source.html", "w", encoding='utf-8')
f.writelines(self.driver.page_source)
# time.sleep(30)
raise(e)
return wrapper
| [] | [] | [
"DD_ADMIN_PASSWORD",
"DD_BASE_URL",
"DD_ADMIN_USER"
] | [] | ["DD_ADMIN_PASSWORD", "DD_BASE_URL", "DD_ADMIN_USER"] | python | 3 | 0 | |
orc8r/gateway/python/scripts/generate_nghttpx_config.py | #!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Pre-run script for services to generate a nghttpx config from a jinja template
and the config/mconfig for the service.
"""
import logging
import os
from magma.common.service_registry import ServiceRegistry
from magma.configuration.environment import is_dev_mode, is_docker_network_mode
from magma.configuration.service_configs import get_service_config_value
from generate_service_config import generate_template_config
CONFIG_OVERRIDE_DIR = '/var/opt/magma/tmp'
def get_context():
"""
Create the context to be used for nghttpx, other than the one provided
by the configs.
"""
context = {}
context['backends'] = []
for service in ServiceRegistry.list_services():
(ip_address, port) = ServiceRegistry.get_service_address(service)
backend = {'service': service, 'ip': ip_address, 'port': port}
context['backends'].append(backend)
# We get the gateway cert after bootstrapping, but we do want nghttpx
# to run before that for communication locally. Update the flag for
# jinja to act upon.
gateway_cert = get_service_config_value('control_proxy',
'gateway_cert', None)
if gateway_cert and os.path.exists(gateway_cert):
context['use_gateway_cert'] = True
else:
context['use_gateway_cert'] = False
context['dev_mode'] = is_dev_mode()
context['docker_network_mode'] = is_docker_network_mode()
context['allow_http_proxy'] = get_service_config_value(
'control_proxy', 'allow_http_proxy', False)
context['http_proxy'] = os.getenv('http_proxy', '')
return context
def main():
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s %(levelname)s %(name)s] %(message)s')
generate_template_config('control_proxy', 'nghttpx',
CONFIG_OVERRIDE_DIR, get_context())
if __name__ == "__main__":
main()
| [] | [] | [
"http_proxy"
] | [] | ["http_proxy"] | python | 1 | 0 | |
ecs-init/vendor/github.com/fsouza/go-dockerclient/client.go | // Copyright 2013 go-dockerclient authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package docker provides a client for the Docker remote API.
//
// See https://goo.gl/o2v3rk for more details on the remote API.
package docker
import (
"bufio"
"bytes"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/homedir"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/docker/docker/pkg/stdcopy"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
userAgent = "go-dockerclient"
unixProtocol = "unix"
namedPipeProtocol = "npipe"
)
var (
// ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
ErrInvalidEndpoint = errors.New("invalid endpoint")
// ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
// ErrInactivityTimeout is returned when a streamable call has been inactive for some time.
ErrInactivityTimeout = errors.New("inactivity time exceeded timeout")
apiVersion112, _ = NewAPIVersion("1.12")
apiVersion119, _ = NewAPIVersion("1.19")
apiVersion124, _ = NewAPIVersion("1.24")
apiVersion125, _ = NewAPIVersion("1.25")
)
// APIVersion is an internal representation of a version of the Remote API.
type APIVersion []int
// NewAPIVersion returns an instance of APIVersion for the given string.
//
// The given string must be in the form <major>.<minor>.<patch>, where <major>,
// <minor> and <patch> are integer numbers.
func NewAPIVersion(input string) (APIVersion, error) {
if !strings.Contains(input, ".") {
return nil, fmt.Errorf("Unable to parse version %q", input)
}
raw := strings.Split(input, "-")
arr := strings.Split(raw[0], ".")
ret := make(APIVersion, len(arr))
var err error
for i, val := range arr {
ret[i], err = strconv.Atoi(val)
if err != nil {
return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val)
}
}
return ret, nil
}
func (version APIVersion) String() string {
var str string
for i, val := range version {
str += strconv.Itoa(val)
if i < len(version)-1 {
str += "."
}
}
return str
}
// LessThan is a function for comparing APIVersion structs
func (version APIVersion) LessThan(other APIVersion) bool {
return version.compare(other) < 0
}
// LessThanOrEqualTo is a function for comparing APIVersion structs
func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
return version.compare(other) <= 0
}
// GreaterThan is a function for comparing APIVersion structs
func (version APIVersion) GreaterThan(other APIVersion) bool {
return version.compare(other) > 0
}
// GreaterThanOrEqualTo is a function for comparing APIVersion structs
func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
return version.compare(other) >= 0
}
func (version APIVersion) compare(other APIVersion) int {
for i, v := range version {
if i <= len(other)-1 {
otherVersion := other[i]
if v < otherVersion {
return -1
} else if v > otherVersion {
return 1
}
}
}
if len(version) > len(other) {
return 1
}
if len(version) < len(other) {
return -1
}
return 0
}
// Client is the basic type of this package. It provides methods for
// interaction with the API.
type Client struct {
SkipServerVersionCheck bool
HTTPClient *http.Client
TLSConfig *tls.Config
Dialer Dialer
endpoint string
endpointURL *url.URL
eventMonitor *eventMonitoringState
requestedAPIVersion APIVersion
serverAPIVersion APIVersion
expectedAPIVersion APIVersion
nativeHTTPClient *http.Client
}
// Dialer is an interface that allows network connections to be dialed
// (net.Dialer fulfills this interface) and named pipes (a shim using
// winio.DialPipe)
type Dialer interface {
Dial(network, address string) (net.Conn, error)
}
// NewClient returns a Client instance ready for communication with the given
// server endpoint. It will use the latest remote API version available in the
// server.
func NewClient(endpoint string) (*Client, error) {
client, err := NewVersionedClient(endpoint, "")
if err != nil {
return nil, err
}
client.SkipServerVersionCheck = true
return client, nil
}
// NewTLSClient returns a Client instance ready for TLS communications with the givens
// server endpoint, key and certificates . It will use the latest remote API version
// available in the server.
func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
if err != nil {
return nil, err
}
client.SkipServerVersionCheck = true
return client, nil
}
// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
// server endpoint, key and certificates (passed inline to the function as opposed to being
// read from a local file). It will use the latest remote API version available in the server.
func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
if err != nil {
return nil, err
}
client.SkipServerVersionCheck = true
return client, nil
}
// NewVersionedClient returns a Client instance ready for communication with
// the given server endpoint, using a specific remote API version.
func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
u, err := parseEndpoint(endpoint, false)
if err != nil {
return nil, err
}
var requestedAPIVersion APIVersion
if strings.Contains(apiVersionString, ".") {
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
if err != nil {
return nil, err
}
}
c := &Client{
HTTPClient: defaultClient(),
Dialer: &net.Dialer{},
endpoint: endpoint,
endpointURL: u,
eventMonitor: new(eventMonitoringState),
requestedAPIVersion: requestedAPIVersion,
}
c.initializeNativeClient()
return c, nil
}
// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
}
// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens
// server endpoint, key and certificates, using a specific remote API version.
func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
var certPEMBlock []byte
var keyPEMBlock []byte
var caPEMCert []byte
if _, err := os.Stat(cert); !os.IsNotExist(err) {
certPEMBlock, err = ioutil.ReadFile(cert)
if err != nil {
return nil, err
}
}
if _, err := os.Stat(key); !os.IsNotExist(err) {
keyPEMBlock, err = ioutil.ReadFile(key)
if err != nil {
return nil, err
}
}
if _, err := os.Stat(ca); !os.IsNotExist(err) {
caPEMCert, err = ioutil.ReadFile(ca)
if err != nil {
return nil, err
}
}
return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
}
// NewClientFromEnv returns a Client instance ready for communication created from
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
//
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
func NewClientFromEnv() (*Client, error) {
client, err := NewVersionedClientFromEnv("")
if err != nil {
return nil, err
}
client.SkipServerVersionCheck = true
return client, nil
}
// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
// and using a specific remote API version.
//
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
dockerEnv, err := getDockerEnv()
if err != nil {
return nil, err
}
dockerHost := dockerEnv.dockerHost
if dockerEnv.dockerTLSVerify {
parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
}
cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
}
return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
}
// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
// server endpoint, key and certificates (passed inline to the function as opposed to being
// read from a local file), using a specific remote API version.
func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
u, err := parseEndpoint(endpoint, true)
if err != nil {
return nil, err
}
var requestedAPIVersion APIVersion
if strings.Contains(apiVersionString, ".") {
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
if err != nil {
return nil, err
}
}
tlsConfig := &tls.Config{}
if certPEMBlock != nil && keyPEMBlock != nil {
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{tlsCert}
}
if caPEMCert == nil {
tlsConfig.InsecureSkipVerify = true
} else {
caPool := x509.NewCertPool()
if !caPool.AppendCertsFromPEM(caPEMCert) {
return nil, errors.New("Could not add RootCA pem")
}
tlsConfig.RootCAs = caPool
}
tr := defaultTransport()
tr.TLSClientConfig = tlsConfig
if err != nil {
return nil, err
}
c := &Client{
HTTPClient: &http.Client{Transport: tr},
TLSConfig: tlsConfig,
Dialer: &net.Dialer{},
endpoint: endpoint,
endpointURL: u,
eventMonitor: new(eventMonitoringState),
requestedAPIVersion: requestedAPIVersion,
}
c.initializeNativeClient()
return c, nil
}
// SetTimeout takes a timeout and applies it to both the HTTPClient and
// nativeHTTPClient. It should not be called concurrently with any other Client
// methods.
func (c *Client) SetTimeout(t time.Duration) {
if c.HTTPClient != nil {
c.HTTPClient.Timeout = t
}
if c.nativeHTTPClient != nil {
c.nativeHTTPClient.Timeout = t
}
}
func (c *Client) checkAPIVersion() error {
serverAPIVersionString, err := c.getServerAPIVersionString()
if err != nil {
return err
}
c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
if err != nil {
return err
}
if c.requestedAPIVersion == nil {
c.expectedAPIVersion = c.serverAPIVersion
} else {
c.expectedAPIVersion = c.requestedAPIVersion
}
return nil
}
// Endpoint returns the current endpoint. It's useful for getting the endpoint
// when using functions that get this data from the environment (like
// NewClientFromEnv).
func (c *Client) Endpoint() string {
return c.endpoint
}
// Ping pings the docker server
//
// See https://goo.gl/wYfgY1 for more details.
func (c *Client) Ping() error {
return c.PingWithContext(nil)
}
// PingWithContext pings the docker server
// The context object can be used to cancel the ping request.
//
// See https://goo.gl/wYfgY1 for more details.
func (c *Client) PingWithContext(ctx context.Context) error {
path := "/_ping"
resp, err := c.do("GET", path, doOptions{context: ctx})
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return newError(resp)
}
resp.Body.Close()
return nil
}
func (c *Client) getServerAPIVersionString() (version string, err error) {
resp, err := c.do("GET", "/version", doOptions{})
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
}
var versionResponse map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
return "", err
}
if version, ok := (versionResponse["ApiVersion"]).(string); ok {
return version, nil
}
return "", nil
}
type doOptions struct {
data interface{}
forceJSON bool
headers map[string]string
context context.Context
}
func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
var params io.Reader
if doOptions.data != nil || doOptions.forceJSON {
buf, err := json.Marshal(doOptions.data)
if err != nil {
return nil, err
}
params = bytes.NewBuffer(buf)
}
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
err := c.checkAPIVersion()
if err != nil {
return nil, err
}
}
httpClient := c.HTTPClient
protocol := c.endpointURL.Scheme
var u string
switch protocol {
case unixProtocol, namedPipeProtocol:
httpClient = c.nativeHTTPClient
u = c.getFakeNativeURL(path)
default:
u = c.getURL(path)
}
req, err := http.NewRequest(method, u, params)
if err != nil {
return nil, err
}
req.Header.Set("User-Agent", userAgent)
if doOptions.data != nil {
req.Header.Set("Content-Type", "application/json")
} else if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
for k, v := range doOptions.headers {
req.Header.Set(k, v)
}
ctx := doOptions.context
if ctx == nil {
ctx = context.Background()
}
resp, err := ctxhttp.Do(ctx, httpClient, req)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, ErrConnectionRefused
}
return nil, chooseError(ctx, err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return nil, newError(resp)
}
return resp, nil
}
type streamOptions struct {
setRawTerminal bool
rawJSONStream bool
useJSONDecoder bool
headers map[string]string
in io.Reader
stdout io.Writer
stderr io.Writer
reqSent chan struct{}
// timeout is the initial connection timeout
timeout time.Duration
// Timeout when no data is received; it's reset every time new data
// arrives
inactivityTimeout time.Duration
context context.Context
}
// if error in context, return that instead of generic http error
func chooseError(ctx context.Context, err error) error {
select {
case <-ctx.Done():
return ctx.Err()
default:
return err
}
}
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
if (method == "POST" || method == "PUT") && streamOptions.in == nil {
streamOptions.in = bytes.NewReader(nil)
}
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
err := c.checkAPIVersion()
if err != nil {
return err
}
}
req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
if err != nil {
return err
}
req.Header.Set("User-Agent", userAgent)
if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
for key, val := range streamOptions.headers {
req.Header.Set(key, val)
}
var resp *http.Response
protocol := c.endpointURL.Scheme
address := c.endpointURL.Path
if streamOptions.stdout == nil {
streamOptions.stdout = ioutil.Discard
}
if streamOptions.stderr == nil {
streamOptions.stderr = ioutil.Discard
}
// make a sub-context so that our active cancellation does not affect parent
ctx := streamOptions.context
if ctx == nil {
ctx = context.Background()
}
subCtx, cancelRequest := context.WithCancel(ctx)
defer cancelRequest()
if protocol == unixProtocol || protocol == namedPipeProtocol {
var dial net.Conn
dial, err = c.Dialer.Dial(protocol, address)
if err != nil {
return err
}
go func() {
<-subCtx.Done()
dial.Close()
}()
breader := bufio.NewReader(dial)
err = req.Write(dial)
if err != nil {
return chooseError(subCtx, err)
}
// ReadResponse may hang if server does not reply
if streamOptions.timeout > 0 {
dial.SetDeadline(time.Now().Add(streamOptions.timeout))
}
if streamOptions.reqSent != nil {
close(streamOptions.reqSent)
}
if resp, err = http.ReadResponse(breader, req); err != nil {
// Cancel timeout for future I/O operations
if streamOptions.timeout > 0 {
dial.SetDeadline(time.Time{})
}
if strings.Contains(err.Error(), "connection refused") {
return ErrConnectionRefused
}
return chooseError(subCtx, err)
}
} else {
if resp, err = ctxhttp.Do(subCtx, c.HTTPClient, req); err != nil {
if strings.Contains(err.Error(), "connection refused") {
return ErrConnectionRefused
}
return chooseError(subCtx, err)
}
if streamOptions.reqSent != nil {
close(streamOptions.reqSent)
}
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return newError(resp)
}
var canceled uint32
if streamOptions.inactivityTimeout > 0 {
var ch chan<- struct{}
resp.Body, ch = handleInactivityTimeout(resp.Body, streamOptions.inactivityTimeout, cancelRequest, &canceled)
defer close(ch)
}
err = handleStreamResponse(resp, &streamOptions)
if err != nil {
if atomic.LoadUint32(&canceled) != 0 {
return ErrInactivityTimeout
}
return chooseError(subCtx, err)
}
return nil
}
func handleStreamResponse(resp *http.Response, streamOptions *streamOptions) error {
var err error
if !streamOptions.useJSONDecoder && resp.Header.Get("Content-Type") != "application/json" {
if streamOptions.setRawTerminal {
_, err = io.Copy(streamOptions.stdout, resp.Body)
} else {
_, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
}
return err
}
// if we want to get raw json stream, just copy it back to output
// without decoding it
if streamOptions.rawJSONStream {
_, err = io.Copy(streamOptions.stdout, resp.Body)
return err
}
if st, ok := streamOptions.stdout.(interface {
io.Writer
FD() uintptr
IsTerminal() bool
}); ok {
err = jsonmessage.DisplayJSONMessagesToStream(resp.Body, st, nil)
} else {
err = jsonmessage.DisplayJSONMessagesStream(resp.Body, streamOptions.stdout, 0, false, nil)
}
return err
}
type proxyReader struct {
io.ReadCloser
calls uint64
}
func (p *proxyReader) callCount() uint64 {
return atomic.LoadUint64(&p.calls)
}
func (p *proxyReader) Read(data []byte) (int, error) {
atomic.AddUint64(&p.calls, 1)
return p.ReadCloser.Read(data)
}
func handleInactivityTimeout(reader io.ReadCloser, timeout time.Duration, cancelRequest func(), canceled *uint32) (io.ReadCloser, chan<- struct{}) {
done := make(chan struct{})
proxyReader := &proxyReader{ReadCloser: reader}
go func() {
var lastCallCount uint64
for {
select {
case <-time.After(timeout):
case <-done:
return
}
curCallCount := proxyReader.callCount()
if curCallCount == lastCallCount {
atomic.AddUint32(canceled, 1)
cancelRequest()
return
}
lastCallCount = curCallCount
}
}()
return proxyReader, done
}
type hijackOptions struct {
success chan struct{}
setRawTerminal bool
in io.Reader
stdout io.Writer
stderr io.Writer
data interface{}
}
// CloseWaiter is an interface with methods for closing the underlying resource
// and then waiting for it to finish processing.
type CloseWaiter interface {
io.Closer
Wait() error
}
type waiterFunc func() error
func (w waiterFunc) Wait() error { return w() }
type closerFunc func() error
func (c closerFunc) Close() error { return c() }
func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
err := c.checkAPIVersion()
if err != nil {
return nil, err
}
}
var params io.Reader
if hijackOptions.data != nil {
buf, err := json.Marshal(hijackOptions.data)
if err != nil {
return nil, err
}
params = bytes.NewBuffer(buf)
}
req, err := http.NewRequest(method, c.getURL(path), params)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", "application/json")
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", "tcp")
protocol := c.endpointURL.Scheme
address := c.endpointURL.Path
if protocol != unixProtocol && protocol != namedPipeProtocol {
protocol = "tcp"
address = c.endpointURL.Host
}
var dial net.Conn
if c.TLSConfig != nil && protocol != unixProtocol && protocol != namedPipeProtocol {
netDialer, ok := c.Dialer.(*net.Dialer)
if !ok {
return nil, ErrTLSNotSupported
}
dial, err = tlsDialWithDialer(netDialer, protocol, address, c.TLSConfig)
if err != nil {
return nil, err
}
} else {
dial, err = c.Dialer.Dial(protocol, address)
if err != nil {
return nil, err
}
}
errs := make(chan error, 1)
quit := make(chan struct{})
go func() {
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
clientconn.Do(req)
if hijackOptions.success != nil {
hijackOptions.success <- struct{}{}
<-hijackOptions.success
}
rwc, br := clientconn.Hijack()
defer rwc.Close()
errChanOut := make(chan error, 1)
errChanIn := make(chan error, 2)
if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
close(errChanOut)
} else {
// Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
// Otherwise, if the only stream you care about is stdin, your attach session
// will "hang" until the container terminates, even though you're not reading
// stdout/stderr.
if hijackOptions.stdout == nil {
hijackOptions.stdout = ioutil.Discard
}
if hijackOptions.stderr == nil {
hijackOptions.stderr = ioutil.Discard
}
go func() {
defer func() {
if hijackOptions.in != nil {
if closer, ok := hijackOptions.in.(io.Closer); ok {
closer.Close()
}
errChanIn <- nil
}
}()
var err error
if hijackOptions.setRawTerminal {
_, err = io.Copy(hijackOptions.stdout, br)
} else {
_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
}
errChanOut <- err
}()
}
go func() {
var err error
if hijackOptions.in != nil {
_, err = io.Copy(rwc, hijackOptions.in)
}
errChanIn <- err
rwc.(interface {
CloseWrite() error
}).CloseWrite()
}()
var errIn error
select {
case errIn = <-errChanIn:
case <-quit:
}
var errOut error
select {
case errOut = <-errChanOut:
case <-quit:
}
if errIn != nil {
errs <- errIn
} else {
errs <- errOut
}
}()
return struct {
closerFunc
waiterFunc
}{
closerFunc(func() error { close(quit); return nil }),
waiterFunc(func() error { return <-errs }),
}, nil
}
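// getURL builds the full request URL for the given API path, prefixing the requested
// API version (e.g. /v1.24) when one was set. For unix-socket and named-pipe endpoints
// the scheme/host part is dropped, since the connection is already bound to the socket.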
func (c *Client) getURL(path string) string {
urlStr := strings.TrimRight(c.endpointURL.String(), "/")
if c.endpointURL.Scheme == unixProtocol || c.endpointURL.Scheme == namedPipeProtocol {
urlStr = ""
}
if c.requestedAPIVersion != nil {
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
}
return fmt.Sprintf("%s%s", urlStr, path)
}
// getFakeNativeURL returns the URL needed to make an HTTP request over a UNIX
// domain socket to the given path.
func (c *Client) getFakeNativeURL(path string) string {
u := *c.endpointURL // Copy.
// Override URL so that net/http will not complain.
u.Scheme = "http"
u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
u.Path = ""
urlStr := strings.TrimRight(u.String(), "/")
if c.requestedAPIVersion != nil {
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
}
return fmt.Sprintf("%s%s", urlStr, path)
}
type jsonMessage struct {
Status string `json:"status,omitempty"`
Progress string `json:"progress,omitempty"`
Error string `json:"error,omitempty"`
Stream string `json:"stream,omitempty"`
}
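// queryString converts an options struct into a URL-encoded query string. Each
// exported field is emitted under the name in its `qs` struct tag (or the lowercased
// field name when no tag is set); fields tagged `qs:"-"` and unset values (false,
// non-positive numbers, empty strings, nil pointers, empty maps) are omitted.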
func queryString(opts interface{}) string {
if opts == nil {
return ""
}
value := reflect.ValueOf(opts)
if value.Kind() == reflect.Ptr {
value = value.Elem()
}
if value.Kind() != reflect.Struct {
return ""
}
items := url.Values(map[string][]string{})
for i := 0; i < value.NumField(); i++ {
field := value.Type().Field(i)
if field.PkgPath != "" {
continue
}
key := field.Tag.Get("qs")
if key == "" {
key = strings.ToLower(field.Name)
} else if key == "-" {
continue
}
addQueryStringValue(items, key, value.Field(i))
}
return items.Encode()
}
func addQueryStringValue(items url.Values, key string, v reflect.Value) {
switch v.Kind() {
case reflect.Bool:
if v.Bool() {
items.Add(key, "1")
}
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if v.Int() > 0 {
items.Add(key, strconv.FormatInt(v.Int(), 10))
}
case reflect.Float32, reflect.Float64:
if v.Float() > 0 {
items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64))
}
case reflect.String:
if v.String() != "" {
items.Add(key, v.String())
}
case reflect.Ptr:
if !v.IsNil() {
if b, err := json.Marshal(v.Interface()); err == nil {
items.Add(key, string(b))
}
}
case reflect.Map:
if len(v.MapKeys()) > 0 {
if b, err := json.Marshal(v.Interface()); err == nil {
items.Add(key, string(b))
}
}
case reflect.Array, reflect.Slice:
vLen := v.Len()
if vLen > 0 {
for i := 0; i < vLen; i++ {
addQueryStringValue(items, key, v.Index(i))
}
}
}
}
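// Illustrative sketch only (not part of the original API): a hypothetical options
// struct showing how queryString/addQueryStringValue encode fields. The struct name,
// fields and values below are assumptions chosen purely for illustration.
//
//	type listOpts struct {
//		All    bool   `qs:"all"`
//		Limit  int    `qs:"limit"`
//		Filter string `qs:"filter"`
//	}
//
//	queryString(listOpts{All: true, Limit: 5, Filter: "running"})
//	// -> "all=1&filter=running&limit=5" (keys sorted by url.Values.Encode)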
// Error represents a failure reported by the API: the HTTP status code and the
// message returned in the response body.
type Error struct {
Status int
Message string
}
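// newError builds an *Error from an HTTP error response, reading and closing the body
// so the returned message carries whatever text the daemon sent back.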
func newError(resp *http.Response) *Error {
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
}
return &Error{Status: resp.StatusCode, Message: string(data)}
}
func (e *Error) Error() string {
return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
}
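// parseEndpoint validates and normalizes a Docker endpoint: bare host:port strings are
// treated as tcp://, tcp endpoints are mapped to http or https depending on the tls
// flag, and unix/named-pipe endpoints are normally returned unchanged. For example,
// "192.168.0.10:2376" with tls=true becomes https://192.168.0.10:2376.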
func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
if endpoint != "" && !strings.Contains(endpoint, "://") {
endpoint = "tcp://" + endpoint
}
u, err := url.Parse(endpoint)
if err != nil {
return nil, ErrInvalidEndpoint
}
if tls && u.Scheme != "unix" {
u.Scheme = "https"
}
switch u.Scheme {
case unixProtocol, namedPipeProtocol:
return u, nil
case "http", "https", "tcp":
_, port, err := net.SplitHostPort(u.Host)
if err != nil {
if e, ok := err.(*net.AddrError); ok {
if e.Err == "missing port in address" {
return u, nil
}
}
return nil, ErrInvalidEndpoint
}
number, err := strconv.ParseInt(port, 10, 64)
if err == nil && number > 0 && number < 65536 {
if u.Scheme == "tcp" {
if tls {
u.Scheme = "https"
} else {
u.Scheme = "http"
}
}
return u, nil
}
return nil, ErrInvalidEndpoint
default:
return nil, ErrInvalidEndpoint
}
}
type dockerEnv struct {
dockerHost string
dockerTLSVerify bool
dockerCertPath string
}
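// getDockerEnv reads the standard DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH
// environment variables, falling back to opts.DefaultHost for the host and to
// ~/.docker for certificates when TLS verification is requested but no cert path is set.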
func getDockerEnv() (*dockerEnv, error) {
dockerHost := os.Getenv("DOCKER_HOST")
var err error
if dockerHost == "" {
dockerHost = opts.DefaultHost
}
dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
var dockerCertPath string
if dockerTLSVerify {
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
if dockerCertPath == "" {
home := homedir.Get()
if home == "" {
return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
}
dockerCertPath = filepath.Join(home, ".docker")
dockerCertPath, err = filepath.Abs(dockerCertPath)
if err != nil {
return nil, err
}
}
}
return &dockerEnv{
dockerHost: dockerHost,
dockerTLSVerify: dockerTLSVerify,
dockerCertPath: dockerCertPath,
}, nil
}
// defaultTransport returns a new http.Transport with similar default values to
// http.DefaultTransport, but with idle connections and keepalives disabled.
func defaultTransport() *http.Transport {
transport := defaultPooledTransport()
transport.DisableKeepAlives = true
transport.MaxIdleConnsPerHost = -1
return transport
}
// defaultPooledTransport returns a new http.Transport with similar default
// values to http.DefaultTransport. Do not use this for transient transports as
// it can leak file descriptors over time. Only use this for transports that
// will be re-used for the same host(s).
func defaultPooledTransport() *http.Transport {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1,
}
return transport
}
// defaultClient returns a new http.Client with similar default values to
// http.Client, but with a non-shared Transport, idle connections disabled, and
// keepalives disabled.
func defaultClient() *http.Client {
return &http.Client{
Transport: defaultTransport(),
}
}
| [
"\"DOCKER_HOST\"",
"\"DOCKER_TLS_VERIFY\"",
"\"DOCKER_CERT_PATH\""
] | [] | [
"DOCKER_HOST",
"DOCKER_CERT_PATH",
"DOCKER_TLS_VERIFY"
] | [] | ["DOCKER_HOST", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY"] | go | 3 | 0 | |
backends/backblaze_b2_backend.go | // Copyright © 2016 Prateek Malhotra ([email protected])
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package backends
import (
"context"
"io"
"net/http"
"os"
"strings"
"sync"
"github.com/kurin/blazer/b2"
"github.com/someone1/zfsbackup-go/helpers"
)
// B2BackendPrefix is the URI prefix used for the B2Backend.
const B2BackendPrefix = "b2"
// B2Backend integrates with Backblaze's B2 storage service.
type B2Backend struct {
conf *BackendConfig
bucketCli *b2.Bucket
mutex sync.Mutex
prefix string
bucketName string
}
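// bufferedRT is an http.RoundTripper that bounds the number of in-flight requests:
// each round trip must acquire a slot on bufChan before running and releases it when
// the request finishes, so the channel's capacity acts as a concurrency limit.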
type bufferedRT struct {
bufChan chan bool
}
func (b bufferedRT) RoundTrip(r *http.Request) (*http.Response, error) {
b.bufChan <- true
defer func() { <-b.bufChan }()
return http.DefaultTransport.RoundTrip(r)
}
// Init will initialize the B2Backend and verify the provided URI is valid/exists.
func (b *B2Backend) Init(ctx context.Context, conf *BackendConfig, opts ...Option) error {
b.conf = conf
cleanPrefix := strings.TrimPrefix(b.conf.TargetURI, B2BackendPrefix+"://")
if cleanPrefix == b.conf.TargetURI {
return ErrInvalidURI
}
accountID := os.Getenv("B2_ACCOUNT_ID")
accountKey := os.Getenv("B2_ACCOUNT_KEY")
uriParts := strings.Split(cleanPrefix, "/")
b.bucketName = uriParts[0]
if len(uriParts) > 1 {
b.prefix = strings.Join(uriParts[1:], "/")
}
for _, opt := range opts {
opt.Apply(b)
}
var cliopts []b2.ClientOption
if conf.MaxParallelUploadBuffer != nil {
cliopts = append(cliopts, b2.Transport(bufferedRT{b.conf.MaxParallelUploadBuffer}))
}
client, err := b2.NewClient(ctx, accountID, accountKey, cliopts...)
if err != nil {
return err
}
b.bucketCli, err = client.Bucket(ctx, b.bucketName)
if err != nil {
return err
}
_, _, err = b.bucketCli.ListCurrentObjects(ctx, 0, nil)
if err == io.EOF {
err = nil
}
return err
}
// Upload will upload the provided volume to this B2Backend's configured bucket+prefix
func (b *B2Backend) Upload(ctx context.Context, vol *helpers.VolumeInfo) error {
// We will be doing multipart uploads, no need to allow multiple calls of Upload to initiate new uploads.
b.mutex.Lock()
defer b.mutex.Unlock()
name := b.prefix + vol.ObjectName
w := b.bucketCli.Object(name).NewWriter(ctx)
w.ConcurrentUploads = b.conf.MaxParallelUploads
w.ChunkSize = b.conf.UploadChunkSize
w.Resume = true
sha1Opt := b2.WithAttrsOption(&b2.Attrs{SHA1: vol.SHA1Sum})
sha1Opt(w)
if _, err := io.Copy(w, vol); err != nil {
w.Close()
helpers.AppLogger.Debugf("b2 backend: Error while uploading volume %s - %v", vol.ObjectName, err)
return err
}
return w.Close()
}
// Delete will delete the object with the given name from the configured bucket
func (b *B2Backend) Delete(ctx context.Context, name string) error {
return b.bucketCli.Object(name).Delete(ctx)
}
// PreDownload will do nothing for this backend.
func (b *B2Backend) PreDownload(ctx context.Context, keys []string) error {
return nil
}
// Download will download the requested object, which can be read from the returned io.ReadCloser
func (b *B2Backend) Download(ctx context.Context, name string) (io.ReadCloser, error) {
return b.bucketCli.Object(name).NewReader(ctx), nil
}
// Close will release any resources used by the B2 backend.
func (b *B2Backend) Close() error {
b.bucketCli = nil
return nil
}
// List will iterate through all objects in the configured B2 bucket and return
// a list of object names, filtering by the provided prefix.
func (b *B2Backend) List(ctx context.Context, prefix string) ([]string, error) {
resp, cursor, err := b.bucketCli.ListCurrentObjects(ctx, 1000, &b2.Cursor{
Prefix: prefix,
})
if err != nil && err != io.EOF {
return nil, err
}
l := make([]string, 0, len(resp))
for {
for _, obj := range resp {
l = append(l, obj.Name())
}
if cursor == nil || err == io.EOF {
break
}
resp, cursor, err = b.bucketCli.ListCurrentObjects(ctx, 1000, cursor)
if err != nil && err != io.EOF {
return nil, err
}
}
return l, nil
}
| [
"\"B2_ACCOUNT_ID\"",
"\"B2_ACCOUNT_KEY\""
] | [] | [
"B2_ACCOUNT_ID",
"B2_ACCOUNT_KEY"
] | [] | ["B2_ACCOUNT_ID", "B2_ACCOUNT_KEY"] | go | 2 | 0 | |
sfjf/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sfjf.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
auth/auth/usecase/auth_usecase.go | package usecase
import (
"fmt"
"github.com/auth/auth"
"github.com/dgrijalva/jwt-go"
"github.com/helper"
"github.com/labstack/echo"
"github.com/models"
"github.com/twinj/uuid"
"net/http"
"os"
"strings"
"time"
)
type authUsecase struct {
}
// NewauthUsecase creates a new authUsecase object implementing the auth.Usecase interface
func NewauthUsecase() auth.Usecase {
return &authUsecase{
}
}
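// CreateToken builds a pair of signed HS256 JWTs for the given user ID: a short-lived
// access token (15 minutes) and a longer-lived refresh token (7 days), each carrying
// its own UUID so the corresponding session entry can be looked up and revoked
// independently.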
func (a authUsecase) CreateToken(userid string) (*models.TokenDetails, error) {
td := &models.TokenDetails{}
td.AtExpires = time.Now().Add(time.Minute * 15).Unix()
td.AccessUuid = uuid.NewV4().String()
td.RtExpires = time.Now().Add(time.Hour * 24 * 7).Unix()
td.RefreshUuid = uuid.NewV4().String()
var err error
//Creating Access Token
os.Setenv("ACCESS_SECRET", "jdnfksdmfksd") //this should be in an env file
atClaims := jwt.MapClaims{}
atClaims["authorized"] = true
atClaims["access_uuid"] = td.AccessUuid
atClaims["user_id"] = userid
atClaims["exp"] = td.AtExpires
at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims)
td.AccessToken, err = at.SignedString([]byte(os.Getenv("ACCESS_SECRET")))
if err != nil {
return nil, err
}
//Creating Refresh Token
os.Setenv("REFRESH_SECRET", "mcmvmkmsdnfsdmfdsjf") //this should be in an env file
rtClaims := jwt.MapClaims{}
rtClaims["refresh_uuid"] = td.RefreshUuid
rtClaims["user_id"] = userid
rtClaims["exp"] = td.RtExpires
rt := jwt.NewWithClaims(jwt.SigningMethodHS256, rtClaims)
td.RefreshToken, err = rt.SignedString([]byte(os.Getenv("REFRESH_SECRET")))
if err != nil {
return nil, err
}
return td, nil
}
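// CreateAuth records the user ID in the cache under both the access and refresh UUIDs,
// so that a presented token can later be mapped back to its user (see FetchAuth).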
func (a authUsecase) CreateAuth(userid string, td *models.TokenDetails) error {
//at := time.Unix(td.AtExpires, 0) //converting Unix to UTC(to Time object)
//rt := time.Unix(td.RtExpires, 0)
//now := time.Now()
//errAccess := client.Set(td.AccessUuid, strconv.Itoa(int(userid)), at.Sub(now)).Err()
//if errAccess != nil {
// return errAccess
//}
//errRefresh := client.Set(td.RefreshUuid, strconv.Itoa(int(userid)), rt.Sub(now)).Err()
//if errRefresh != nil {
// return errRefresh
//}
helper.SetCache(td.AccessUuid,userid)
helper.SetCache(td.RefreshUuid,userid)
return nil
}
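// ExtractToken pulls the bearer token out of the Authorization header. It only checks
// for the two-part "<scheme> <token>" shape and returns the second part, or an empty
// string when the header is missing or malformed.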
func (a authUsecase) ExtractToken(r *http.Request) string {
bearToken := r.Header.Get("Authorization")
// The header is normally of the form "Authorization: Bearer the_token_xxx"
strArr := strings.Split(bearToken, " ")
if len(strArr) == 2 {
return strArr[1]
}
return ""
}
func (a authUsecase) VerifyToken(r *http.Request) (*jwt.Token, error) {
tokenString := a.ExtractToken(r)
token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
//Make sure that the token method conform to "SigningMethodHMAC"
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
}
return []byte(os.Getenv("ACCESS_SECRET")), nil
})
if err != nil {
return nil, err
}
return token, nil
}
func (a authUsecase) TokenValid(r *http.Request) error {
token, err := a.VerifyToken(r)
if err != nil {
return err
}
if _, ok := token.Claims.(jwt.Claims); !ok && !token.Valid {
return err
}
return nil
}
func (a authUsecase) ExtractTokenMetadata(r *http.Request) (*models.AccessDetails, error) {
token, err := a.VerifyToken(r)
if err != nil {
return nil, err
}
claims, ok := token.Claims.(jwt.MapClaims)
if ok && token.Valid {
accessUuid, ok := claims["access_uuid"].(string)
if !ok {
return nil, err
}
userId := fmt.Sprintf("%v", claims["user_id"])
return &models.AccessDetails{
AccessUuid: accessUuid,
UserId: userId,
}, nil
}
return nil, err
}
func (a authUsecase) FetchAuth(authD *models.AccessDetails) (string, error) {
userid, _ := helper.GetCache(authD.AccessUuid)
//if err != nil {
// return 0, err
//}
//userID, _ := strconv.ParseUint(userid, 10, 64)
return userid, nil
}
func (a authUsecase) DeleteAuth(givenUuid string) (int64, error) {
helper.Cache.Delete(givenUuid)
//if err != nil {
// return 0, err
//}
return 1, nil
}
func (a authUsecase) TokenAuthMiddleware() echo.HandlerFunc {
return func(c echo.Context) error {
err := a.TokenValid(c.Request())
if err != nil {
return c.JSON(http.StatusUnauthorized, err.Error())
//c.Abort()
}
return nil
//c.Next()
}
}
| [
"\"ACCESS_SECRET\"",
"\"REFRESH_SECRET\"",
"\"ACCESS_SECRET\""
] | [] | [
"ACCESS_SECRET",
"REFRESH_SECRET"
] | [] | ["ACCESS_SECRET", "REFRESH_SECRET"] | go | 2 | 0 | |
commands/fetch-oracle.go | package commands
import (
"context"
"encoding/xml"
"flag"
"os"
"strings"
"time"
"github.com/google/subcommands"
"github.com/inconshreveable/log15"
c "github.com/kotakanbe/goval-dictionary/config"
"github.com/kotakanbe/goval-dictionary/db"
"github.com/kotakanbe/goval-dictionary/fetcher"
"github.com/kotakanbe/goval-dictionary/models"
"github.com/kotakanbe/goval-dictionary/util"
"github.com/ymomoi/goval-parser/oval"
)
// FetchOracleCmd is Subcommand for fetch Oracle OVAL
type FetchOracleCmd struct {
Debug bool
DebugSQL bool
Quiet bool
NoDetails bool
LogDir string
LogJSON bool
DBPath string
DBType string
HTTPProxy string
}
// Name returns the subcommand name
func (*FetchOracleCmd) Name() string { return "fetch-oracle" }
// Synopsis return synopsis
func (*FetchOracleCmd) Synopsis() string { return "Fetch Vulnerability dictionary from Oracle" }
// Usage return usage
func (*FetchOracleCmd) Usage() string {
return `fetch-oracle:
fetch-oracle
[-dbtype=sqlite3|mysql|postgres|redis]
[-dbpath=$PWD/oval.sqlite3 or connection string]
[-http-proxy=http://192.168.0.1:8080]
[-debug]
[-debug-sql]
[-quiet]
[-no-details]
[-log-dir=/path/to/log]
[-log-json]
For details, see https://github.com/kotakanbe/goval-dictionary#usage-fetch-oval-data-from-oracle
$ goval-dictionary fetch-oracle
`
}
// SetFlags registers the command-line flags for the subcommand
func (p *FetchOracleCmd) SetFlags(f *flag.FlagSet) {
f.BoolVar(&p.Debug, "debug", false, "debug mode")
f.BoolVar(&p.DebugSQL, "debug-sql", false, "SQL debug mode")
f.BoolVar(&p.Quiet, "quiet", false, "quiet mode (no output)")
f.BoolVar(&p.NoDetails, "no-details", false, "without vulnerability details")
defaultLogDir := util.GetDefaultLogDir()
f.StringVar(&p.LogDir, "log-dir", defaultLogDir, "/path/to/log")
f.BoolVar(&p.LogJSON, "log-json", false, "output log as JSON")
pwd := os.Getenv("PWD")
f.StringVar(&p.DBPath, "dbpath", pwd+"/oval.sqlite3",
"/path/to/sqlite3 or SQL connection string")
f.StringVar(&p.DBType, "dbtype", "sqlite3",
"Database type to store data in (sqlite3, mysql, postgres or redis supported)")
f.StringVar(
&p.HTTPProxy,
"http-proxy",
"",
"http://proxy-url:port (default: empty)",
)
}
// Execute runs the fetch-oracle subcommand
func (p *FetchOracleCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
c.Conf.Quiet = p.Quiet
c.Conf.DebugSQL = p.DebugSQL
c.Conf.Debug = p.Debug
c.Conf.DBPath = p.DBPath
c.Conf.DBType = p.DBType
c.Conf.HTTPProxy = p.HTTPProxy
c.Conf.NoDetails = p.NoDetails
util.SetLogger(p.LogDir, c.Conf.Quiet, c.Conf.Debug, p.LogJSON)
if !c.Conf.Validate() {
return subcommands.ExitUsageError
}
driver, locked, err := db.NewDB(c.Oracle, c.Conf.DBType, c.Conf.DBPath, c.Conf.DebugSQL)
if err != nil {
if locked {
log15.Error("Failed to open DB. Close DB connection before fetching", "err", err)
return subcommands.ExitFailure
}
log15.Error("Failed to open DB", "err", err)
return subcommands.ExitFailure
}
defer func() {
_ = driver.CloseDB()
}()
results, err := fetcher.FetchOracleFiles()
if err != nil {
log15.Error("Failed to fetch files", "err", err)
return subcommands.ExitFailure
}
for _, r := range results {
ovalroot := oval.Root{}
if err = xml.Unmarshal(r.Body, &ovalroot); err != nil {
log15.Error("Failed to unmarshal", "url", r.URL, "err", err)
return subcommands.ExitUsageError
}
log15.Info("Fetched", "URL", r.URL, "OVAL definitions", len(ovalroot.Definitions.Definitions))
// var timeformat = "2006-01-02T15:04:05.999-07:00"
var timeformat = "2006-01-02T15:04:05"
t, err := time.Parse(timeformat, strings.Split(ovalroot.Generator.Timestamp, ".")[0])
if err != nil {
log15.Error("Failed to parse time", "err", err)
return subcommands.ExitFailure
}
ss := strings.Split(r.URL, "/")
fmeta := models.FetchMeta{
Timestamp: t,
FileName: ss[len(ss)-1],
}
roots := models.ConvertOracleToModel(&ovalroot)
for _, root := range roots {
root.Timestamp = time.Now()
if err := driver.InsertOval(c.Oracle, &root, fmeta); err != nil {
log15.Error("Failed to insert oval", "err", err)
return subcommands.ExitFailure
}
}
if err := driver.InsertFetchMeta(fmeta); err != nil {
log15.Error("Failed to insert meta", "err", err)
return subcommands.ExitFailure
}
}
return subcommands.ExitSuccess
}
| [
"\"PWD\""
] | [] | [
"PWD"
] | [] | ["PWD"] | go | 1 | 0 | |
submissions-api/app/main/xml_utils.py | # SPDX-FileCopyrightText: 2021 Genome Research Ltd.
#
# SPDX-License-Identifier: MIT
import shutil
import xml.etree.ElementTree as ET
import tempfile
import os
def build_bundle_sample_xml(manifest):
'''Build the sample bundle XML structure and save it to a temporary bundle_<manifest_id>.xml file'''
dir = tempfile.TemporaryDirectory()
filename = dir.name + "bundle_" + str(manifest.manifest_id) + ".xml"
shutil.copy("main/templates/sample.xml", filename)
update_bundle_sample_xml(manifest, filename)
return filename
def update_bundle_sample_xml(manifest, bundlefile):
'''Append a SAMPLE element (title, taxon ID and ENA attributes) for each sample in the manifest to the bundle XML'''
# print("adding sample to bundle sample xml")
tree = ET.parse(bundlefile)
root = tree.getroot()
for sample in manifest.samples:
sample_alias = ET.SubElement(root, 'SAMPLE')
sample_alias.set('alias', str(sample.sample_id))
sample_alias.set('center_name', 'SangerInstitute')
title = str(sample.sample_id) + "-tol"
title_block = ET.SubElement(sample_alias, 'TITLE')
title_block.text = title
sample_name = ET.SubElement(sample_alias, 'SAMPLE_NAME')
taxon_id = ET.SubElement(sample_name, 'TAXON_ID')
taxon_id.text = str(sample.taxonomy_id)
sample_attributes = ET.SubElement(sample_alias, 'SAMPLE_ATTRIBUTES')
ena_fields = sample.to_ena_dict()
for item in ena_fields:
sample_attribute = ET.SubElement(sample_attributes, 'SAMPLE_ATTRIBUTE')
tag = ET.SubElement(sample_attribute, 'TAG')
tag.text = item
value = ET.SubElement(sample_attribute, 'VALUE')
value.text = str(ena_fields[item]["value"])
# add ena units where necessary
if "units" in ena_fields[item]:
unit = ET.SubElement(sample_attribute, 'UNITS')
unit.text = ena_fields[item]["units"]
ET.dump(tree)
tree.write(open(bundlefile, 'w'),
encoding='unicode')
def build_submission_xml(manifest):
# build submission XML
tree = ET.parse("main/templates/submission.xml")
root = tree.getroot()
# set submission attributes
# root.set("submission_date", datetime.utcnow()
# .replace(tzinfo=d_utils.simple_utc()).isoformat())
# set SRA contacts
contacts = root.find('CONTACTS')
# set copo sra contacts
copo_contact = ET.SubElement(contacts, 'CONTACT')
copo_contact.set("name", os.getenv("ENA_CONTACT_NAME"))
copo_contact.set("inform_on_error", os.getenv("ENA_CONTACT_EMAIL"))
copo_contact.set("inform_on_status", os.getenv("ENA_CONTACT_EMAIL"))
ET.dump(tree)
dir = tempfile.TemporaryDirectory()
submissionfile = dir.name + "submission_" + str(manifest.manifest_id) + ".xml"
tree.write(open(submissionfile, 'w'),
encoding='unicode')
return submissionfile
| [] | [] | [
"ENA_CONTACT_NAME",
"ENA_CONTACT_EMAIL"
] | [] | ["ENA_CONTACT_NAME", "ENA_CONTACT_EMAIL"] | python | 2 | 0 | |
gex_test.go | /*
Copyright 2016 Jamie Moore
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"os"
"testing"
)
func TestGetGexEnv(t *testing.T) {
expected := os.Getenv("GEXENV")
n := expected
if expected == "" {
expected = "unknown"
}
actual := getGexEnv()
if actual != expected {
t.Errorf("GetGexEnv(%s): expected %s, actual %s", n, expected, actual)
}
}
func TestGetMessage(t *testing.T) {
var messageTests = []struct {
n string // input
expected string // expected result
}{
{"dev", "Hello from dev! - version unknown"},
{"test", "Hello from test! - version unknown"},
{"prod", "Hello from prod! - version unknown"},
{"unknown", "Hello from unknown! - version unknown"},
}
for _, tt := range messageTests {
actual := getMessage(tt.n)
if actual != tt.expected {
t.Errorf("GetGexEnv(%s): expected %s, actual %s", tt.n, tt.expected, actual)
}
}
}
| [
"\"GEXENV\""
] | [] | [
"GEXENV"
] | [] | ["GEXENV"] | go | 1 | 0 | |
venv/Lib/site-packages/setuptools/_distutils/command/build_ext.py | """distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
import contextlib
import os
import re
import sys
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.sysconfig import get_config_h_filename
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
from . import py37compat
from site import USER_BASE
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
class build_ext(Command):
description = "build C/C++ extensions (compile/link to build directory)"
# XXX thoughts on how to deal with complex command-line options like
# these, i.e. how to make it so fancy_getopt can suck them off the
# command line and make it look like setup.py defined the appropriate
# lists of tuples of what-have-you.
# - each command needs a callback to process its command-line options
# - Command.__init__() needs access to its share of the whole
# command line (must ultimately come from
# Distribution.parse_command_line())
# - it then calls the current command class' option-parsing
# callback to deal with weird options like -D, which have to
# parse the option text and churn out some custom data
# structure
# - that data structure (in this case, a list of 2-tuples)
# will then be present in the command object by the time
# we get to finalize_options() (i.e. the constructor
# takes care of both command-line and client options
# in between initialize_options() and finalize_options())
sep_by = " (separated by '%s')" % os.pathsep
user_options = [
('build-lib=', 'b',
"directory for compiled extension modules"),
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('plat-name=', 'p',
"platform name to cross-compile for, if supported "
"(default: %s)" % get_platform()),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
('include-dirs=', 'I',
"list of directories to search for header files" + sep_by),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries" + sep_by),
('rpath=', 'R',
"directories to search for shared C libraries at runtime"),
('link-objects=', 'O',
"extra explicit link objects to include in the link"),
('debug', 'g',
"compile/link with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
('parallel=', 'j',
"number of parallel build jobs"),
('swig-cpp', None,
"make SWIG create C++ files (default is C)"),
('swig-opts=', None,
"list of SWIG command line options"),
('swig=', None,
"path to the SWIG executable"),
('user', None,
"add user include, library and rpath")
]
boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.extensions = None
self.build_lib = None
self.plat_name = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
self.user = None
self.parallel = None
def finalize_options(self):
from distutils import sysconfig
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('parallel', 'parallel'),
('plat_name', 'plat_name'),
)
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
py_include = sysconfig.get_python_inc()
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# If in a virtualenv, add its include directory
# Issue 16116
if sys.exec_prefix != sys.base_exec_prefix:
self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))
# Put the Python "system" include dir at the end, so that
# any local include dirs take precedence.
self.include_dirs.extend(py_include.split(os.path.pathsep))
if plat_py_include != py_include:
self.include_dirs.extend(
plat_py_include.split(os.path.pathsep))
self.ensure_string_list('libraries')
self.ensure_string_list('link_objects')
# Life is easier if we're not forever checking for None, so
# simplify these options to empty lists if unset
if self.libraries is None:
self.libraries = []
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
if self.rpath is None:
self.rpath = []
elif isinstance(self.rpath, str):
self.rpath = self.rpath.split(os.pathsep)
# for extensions under windows use different directories
# for Release and Debug builds.
# also Python's library directory must be appended to library_dirs
if os.name == 'nt':
# the 'libs' directory is for binary installs - we assume that
# must be the *native* platform. But we don't really support
# cross-compiling via a binary install anyway, so we let it go.
self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
if sys.base_exec_prefix != sys.prefix: # Issue 16116
self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
if self.debug:
self.build_temp = os.path.join(self.build_temp, "Debug")
else:
self.build_temp = os.path.join(self.build_temp, "Release")
# Append the source distribution include and library directories,
# this allows distutils on windows to work in the source tree
self.include_dirs.append(os.path.dirname(get_config_h_filename()))
_sys_home = getattr(sys, '_home', None)
if _sys_home:
self.library_dirs.append(_sys_home)
# Use the .lib files for the correct architecture
if self.plat_name == 'win32':
suffix = 'win32'
else:
# win-amd64
suffix = self.plat_name[4:]
new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
if suffix:
new_lib = os.path.join(new_lib, suffix)
self.library_dirs.append(new_lib)
# For extensions under Cygwin, Python's library directory must be
# appended to library_dirs
if sys.platform[:6] == 'cygwin':
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(os.path.join(sys.prefix, "lib",
"python" + get_python_version(),
"config"))
else:
# building python standard extensions
self.library_dirs.append('.')
# For building extensions with a shared Python library,
# Python's library directory must be appended to library_dirs
# See Issues: #1600860, #4366
if (sysconfig.get_config_var('Py_ENABLE_SHARED')):
if not sysconfig.python_build:
# building third party extensions
self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
else:
# building python standard extensions
self.library_dirs.append('.')
# The argument parsing will result in self.define being a string, but
# it has to be a list of 2-tuples. All the preprocessor symbols
# specified by the 'define' option will be set to '1'. Multiple
# symbols can be separated with commas.
if self.define:
defines = self.define.split(',')
self.define = [(symbol, '1') for symbol in defines]
# The option for macros to undefine is also a string from the
# option parsing, but has to be a list. Multiple symbols can also
# be separated with commas here.
if self.undef:
self.undef = self.undef.split(',')
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = self.swig_opts.split(' ')
# Finally add the user include and library directories if requested
if self.user:
user_include = os.path.join(USER_BASE, "include")
user_lib = os.path.join(USER_BASE, "lib")
if os.path.isdir(user_include):
self.include_dirs.append(user_include)
if os.path.isdir(user_lib):
self.library_dirs.append(user_lib)
self.rpath.append(user_lib)
if isinstance(self.parallel, str):
try:
self.parallel = int(self.parallel)
except ValueError:
raise DistutilsOptionError("parallel should be an integer")
def run(self):
from distutils.ccompiler import new_compiler
# 'self.extensions', as supplied by setup.py, is a list of
# Extension instances. See the documentation for Extension (in
# distutils.extension) for details.
#
# For backwards compatibility with Distutils 0.8.2 and earlier, we
# also allow the 'extensions' list to be a list of tuples:
# (ext_name, build_info)
# where build_info is a dictionary containing everything that
# Extension instances do except the name, with a few things being
# differently named. We convert these 2-tuples to Extension
# instances as needed.
if not self.extensions:
return
# If we were asked to build any C/C++ libraries, make sure that the
# directory where we put them is in the library search path for
# linking extensions.
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.libraries.extend(build_clib.get_library_names() or [])
self.library_dirs.append(build_clib.build_clib)
# Setup the CCompiler object that we'll use to do all the
# compiling and linking
self.compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
# If we are cross-compiling, init the compiler now (if we are not
# cross-compiling, init would not hurt, but people may rely on
# late initialization of compiler even if they shouldn't...)
if os.name == 'nt' and self.plat_name != get_platform():
self.compiler.initialize(self.plat_name)
# And make sure that any compile/link-related options (which might
# come from the command-line or from the setup script) are set in
# that CCompiler object -- that way, they automatically apply to
# all compiling and linking done here.
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
if self.libraries is not None:
self.compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
self.compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
self.compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
self.compiler.set_link_objects(self.link_objects)
# Now actually compile and link everything.
self.build_extensions()
def check_extensions_list(self, extensions):
"""Ensure that the list of extensions (presumably provided as a
command option 'extensions') is valid, i.e. it is a list of
Extension objects. We also support the old-style list of 2-tuples,
where the tuples are (ext_name, build_info), which are converted to
Extension instances here.
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(extensions, list):
raise DistutilsSetupError(
"'ext_modules' option must be a list of Extension instances")
for i, ext in enumerate(extensions):
if isinstance(ext, Extension):
continue # OK! (assume type-checking done
# by Extension constructor)
if not isinstance(ext, tuple) or len(ext) != 2:
raise DistutilsSetupError(
"each element of 'ext_modules' option must be an "
"Extension instance or 2-tuple")
ext_name, build_info = ext
log.warn("old-style (ext_name, build_info) tuple found in "
"ext_modules for extension '%s' "
"-- please convert to Extension instance", ext_name)
if not (isinstance(ext_name, str) and
extension_name_re.match(ext_name)):
raise DistutilsSetupError(
"first element of each tuple in 'ext_modules' "
"must be the extension name (a string)")
if not isinstance(build_info, dict):
raise DistutilsSetupError(
"second element of each tuple in 'ext_modules' "
"must be a dictionary (build info)")
# OK, the (ext_name, build_info) dict is type-safe: convert it
# to an Extension instance.
ext = Extension(ext_name, build_info['sources'])
# Easy stuff: one-to-one mapping from dict elements to
# instance attributes.
for key in ('include_dirs', 'library_dirs', 'libraries',
'extra_objects', 'extra_compile_args',
'extra_link_args'):
val = build_info.get(key)
if val is not None:
setattr(ext, key, val)
# Medium-easy stuff: same syntax/semantics, different names.
ext.runtime_library_dirs = build_info.get('rpath')
if 'def_file' in build_info:
log.warn("'def_file' element of build info dict "
"no longer supported")
# Non-trivial stuff: 'macros' split into 'define_macros'
# and 'undef_macros'.
macros = build_info.get('macros')
if macros:
ext.define_macros = []
ext.undef_macros = []
for macro in macros:
if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
raise DistutilsSetupError(
"'macros' element of build info dict "
"must be 1- or 2-tuple")
if len(macro) == 1:
ext.undef_macros.append(macro[0])
elif len(macro) == 2:
ext.define_macros.append(macro)
extensions[i] = ext
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs(self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
outputs.append(self.get_ext_fullpath(ext.name))
return outputs
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
if self.parallel:
self._build_extensions_parallel()
else:
self._build_extensions_serial()
def _build_extensions_parallel(self):
workers = self.parallel
if self.parallel is True:
workers = os.cpu_count() # may return None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
workers = None
if workers is None:
self._build_extensions_serial()
return
with ThreadPoolExecutor(max_workers=workers) as executor:
futures = [executor.submit(self.build_extension, ext)
for ext in self.extensions]
for ext, fut in zip(self.extensions, futures):
with self._filter_build_errors(ext):
fut.result()
def _build_extensions_serial(self):
for ext in self.extensions:
with self._filter_build_errors(ext):
self.build_extension(ext)
@contextlib.contextmanager
def _filter_build_errors(self, ext):
try:
yield
except (CCompilerError, DistutilsError, CompileError) as e:
if not ext.optional:
raise
self.warn('building extension "%s" failed: %s' %
(ext.name, e))
def build_extension(self, ext):
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'ext_modules' option (extension '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % ext.name)
# sort to make the resulting .so file build reproducible
sources = sorted(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
# XXX outdated variable, kept here in case third-part code
# needs it.
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_path,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
def swig_sources(self, sources, extension):
"""Walk the list of source files in 'sources', looking for SWIG
interface (.i) files. Run SWIG on all that are found, and
return a modified 'sources' list with SWIG source files replaced
by the generated C (or C++) files.
"""
new_sources = []
swig_sources = []
swig_targets = {}
# XXX this drops generated C/C++ files into the source tree, which
# is fine for developers who want to distribute the generated
# source -- but there should be an option to put SWIG output in
# the temp dir.
if self.swig_cpp:
log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
if self.swig_cpp or ('-c++' in self.swig_opts) or \
('-c++' in extension.swig_opts):
target_ext = '.cpp'
else:
target_ext = '.c'
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == ".i": # SWIG interface file
new_sources.append(base + '_wrap' + target_ext)
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"]
swig_cmd.extend(self.swig_opts)
if self.swig_cpp:
swig_cmd.append("-c++")
# Do not override commandline arguments
if not self.swig_opts:
for o in extension.swig_opts:
swig_cmd.append(o)
for source in swig_sources:
target = swig_targets[source]
log.info("swigging %s to %s", source, target)
self.spawn(swig_cmd + ["-o", target, source])
return new_sources
def find_swig(self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
else:
raise DistutilsPlatformError(
"I don't know how to find (much less run) SWIG "
"on platform '%s'" % os.name)
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullpath(self, ext_name):
"""Returns the path of the filename for a given extension.
The file is located in `build_lib` or directly in the package
(inplace option).
"""
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
# no further work needed
# returning :
# build_dir/package/path/filename
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
# the inplace option requires to find the package directory
# using the build_py command for that
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
# returning
# package_dir/filename
return os.path.join(package_dir, filename)
def get_ext_fullname(self, ext_name):
"""Returns the fullname of a given extension name.
Adds the `package.` prefix"""
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = ext_name.split('.')
ext_suffix = get_config_var('EXT_SUFFIX')
return os.path.join(*ext_path) + ext_suffix
def get_export_symbols(self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "PyInit_" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "PyInit_" function.
"""
name = ext.name.split('.')[-1]
try:
# Unicode module name support as defined in PEP-489
# https://www.python.org/dev/peps/pep-0489/#export-hook-name
name.encode('ascii')
except UnicodeEncodeError:
suffix = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii')
else:
suffix = "_" + name
initfunc_name = "PyInit" + suffix
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
def get_libraries(self, ext):
"""Return the list of libraries to link against when building a
shared extension. On most platforms, this is just 'ext.libraries';
on Windows, we add the Python library (eg. python20.dll).
"""
# The python library is always needed on Windows. For MSVC, this
# is redundant, since the library is mentioned in a pragma in
# pyconfig.h that MSVC groks. The other Windows compilers all seem
# to need it mentioned explicitly, though, so that's what we do.
# Append '_d' to the python import library on debug builds.
if sys.platform == "win32":
from distutils._msvccompiler import MSVCCompiler
if not isinstance(self.compiler, MSVCCompiler):
template = "python%d%d"
if self.debug:
template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
else:
# On Android only the main executable and LD_PRELOADs are considered
# to be RTLD_GLOBAL, all the dependencies of the main executable
# remain RTLD_LOCAL and so the shared libraries must be linked with
# libpython when python is built with a shared python library (issue
# bpo-21536).
# On Cygwin (and if required, other POSIX-like platforms based on
# Windows like MinGW) it is simply necessary that all symbols in
# shared libraries are resolved at link time.
from distutils.sysconfig import get_config_var
link_libpython = False
if get_config_var('Py_ENABLE_SHARED'):
# A native build on an Android device or on Cygwin
if hasattr(sys, 'getandroidapilevel'):
link_libpython = True
elif sys.platform == 'cygwin':
link_libpython = True
elif '_PYTHON_HOST_PLATFORM' in os.environ:
# We are cross-compiling for one of the relevant platforms
if get_config_var('ANDROID_API_LEVEL') != 0:
link_libpython = True
elif get_config_var('MACHDEP') == 'cygwin':
link_libpython = True
if link_libpython:
ldversion = get_config_var('LDVERSION')
return ext.libraries + ['python' + ldversion]
return ext.libraries + py37compat.pythonlib()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
multiprune_plusone/multiprune_plusone.py | import os
import numpy as np
import pandas as pd
os.environ['MKL_THREADING_LAYER'] = 'GNU'
# df = pd.DataFrame(columns=['multiprune', 'headstr', 'pluslayer', 'plushead', 'acc1'])
# df.to_csv("multiprune_plusone.csv",index=False)
prevheadlist = [set([7]),set([11]),set([0]),set([7]),set([9]),set([9])]
plusheadlist = [set(range(12))-{7},set(range(12))-{11},set(range(12))-{0},set(range(12))-{7},set(range(12))-{9},set(range(12))-{9}]
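# Greedy per-layer head-pruning search (sketch of what the loop below does): in each
# round, for every layer, try pruning each remaining candidate head on top of the heads
# already selected (prevheadlist), evaluate top-1 accuracy via main.py --eval, then
# permanently move the best-scoring candidate into the selected set for that layer.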
for multiprune in range(1,12):
headstr = []
for oneset in prevheadlist:
setstr = [str(int(s)) for s in oneset]
setstr = '+'.join(setstr)
headstr.append(setstr)
headstr = '.'.join(headstr)
for pluslayer in range(6):
for plushead in plusheadlist[pluslayer]:
os.system(f'python -m torch.distributed.launch --nproc_per_node 1 --master_port 12345 main.py --eval --cfg configs/swin_tiny_patch4_window7_224.yaml --resume swin_tiny_patch4_window7_224.pth --data-path data/imagenet/ --prune {multiprune}_{headstr}_{pluslayer}_{plushead}')
df = pd.read_csv("multiprune_plusone.csv")
df = df[(df.multiprune == multiprune) & (df.pluslayer == pluslayer)]
df = df.apply(pd.to_numeric, errors = 'coerce')
max_acc1_idx = df.idxmax().acc1
plusheadlist[pluslayer].remove(df.loc[max_acc1_idx].plushead)
prevheadlist[pluslayer].add(df.loc[max_acc1_idx].plushead)
| [] | [] | [
"MKL_THREADING_LAYER"
] | [] | ["MKL_THREADING_LAYER"] | python | 1 | 0 | |
pvoutput/pv.go | // Handles upload of data to pvoutput.org
// Set the following environment variables:
//
// PVSTATUSURL - Pvoutput API status endpoint
// PVAPIKEY - api key
// PVSYSTEMID - unique system identifier
//
// Refer to the pvoutput.org API documentation for more information on the above values.
package pvoutput
import (
"bytes"
"fmt"
// "io/ioutil"
// "log"
"net/http"
"net/url"
"os"
"strconv"
"time"
"github.com/porjo/gosuntwins/serial"
)
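// pvoutput holds the pvoutput.org status endpoint URL, API key and system ID, read
// from the environment when the package is initialized.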
type pvoutput struct {
statusURL string
apiKey string
systemID string
}
var (
lastUpload time.Time = time.Now()
addCount float32 = 1
totalReading serial.Reading = serial.Reading{}
pv pvoutput = pvoutput{}
client *http.Client
)
// Interval is the number of seconds between uploads to pvoutput.org.
const Interval int = 300
var NotInitialized = fmt.Errorf("Not initialized. Environment variables not set")
func init() {
client = &http.Client{}
pv.statusURL = os.Getenv("PVSTATUSURL")
pv.apiKey = os.Getenv("PVAPIKEY")
pv.systemID = os.Getenv("PVSYSTEMID")
}
// Upload uploads accumulated data every Interval seconds. It returns the NotInitialized error if the required environment variables are not set.
func Upload(r *serial.Reading) error {
if pv.statusURL == "" {
return NotInitialized
}
addReading(r)
if time.Now().Sub(lastUpload) >= (time.Second * time.Duration(Interval)) {
avg := avgReading(addCount, totalReading)
data := url.Values{}
data.Set("d", time.Now().Format("20060102"))
data.Set("t", time.Now().Format("15:04"))
data.Set("v1", strconv.FormatFloat(float64(avg.TodayE)*1000, 'f', 3, 32)) //Convert to watt hours
data.Set("v2", strconv.FormatFloat(float64(avg.PAC), 'f', 3, 32))
data.Set("v5", strconv.FormatFloat(float64(avg.Temp), 'f', 3, 32))
data.Set("v6", strconv.FormatFloat(float64(avg.VDC), 'f', 3, 32))
req, err := http.NewRequest("POST", pv.statusURL, bytes.NewBufferString(data.Encode()))
if err != nil {
return err
}
req.Header.Set("X-Pvoutput-Apikey", pv.apiKey)
req.Header.Set("X-Pvoutput-SystemId", pv.systemID)
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Content-Length", strconv.Itoa(len(data.Encode())))
//log.Println("POSTing to URL: " + pv.statusURL)
//log.Printf("POSTing data: %v\n", data)
res, err := client.Do(req)
if err != nil {
return err
}
/*
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return
}
log.Printf("Body %s\n", body)
*/
if res.StatusCode != 200 {
return fmt.Errorf("Server responded with code %d\n", res.StatusCode)
}
lastUpload = time.Now()
totalReading = serial.Reading{}
addCount = 1
}
return nil
}
// Add new reading to running total.
func addReading(r *serial.Reading) {
totalReading.Temp += r.Temp
totalReading.VDC += r.VDC
totalReading.NowE += r.NowE
totalReading.TodayE = r.TodayE //this one isn't summed as it's already cumulative
totalReading.I += r.I
totalReading.VAC += r.VAC
totalReading.Freq += r.Freq
totalReading.PAC += r.PAC
addCount++
}
// Calculate average of supplied reading
func avgReading(count float32, r serial.Reading) serial.Reading {
avg := serial.Reading{}
avg.Temp = r.Temp / count
avg.VDC = r.VDC / count
avg.NowE = r.NowE / count
avg.TodayE = r.TodayE
avg.I = r.I / count
avg.VAC = r.VAC / count
avg.Freq = r.Freq / count
avg.PAC = r.PAC / count
return avg
}
| [
"\"PVSTATUSURL\"",
"\"PVAPIKEY\"",
"\"PVSYSTEMID\""
] | [] | [
"PVAPIKEY",
"PVSTATUSURL",
"PVSYSTEMID"
] | [] | ["PVAPIKEY", "PVSTATUSURL", "PVSYSTEMID"] | go | 3 | 0 | |
addon-resizer/nanny/main/pod_nanny.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
goflag "flag"
"os"
"time"
log "github.com/golang/glog"
flag "github.com/spf13/pflag"
"k8s.io/autoscaler/addon-resizer/nanny"
resource "k8s.io/kubernetes/pkg/api/resource"
client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
"k8s.io/kubernetes/pkg/client/restclient"
)
const noValue = "MISSING"
var (
// Flags to define the resource requirements.
baseCPU = flag.String("cpu", noValue, "The base CPU resource requirement.")
cpuPerNode = flag.String("extra-cpu", "0", "The amount of CPU to add per node.")
baseMemory = flag.String("memory", noValue, "The base memory resource requirement.")
memoryPerNode = flag.String("extra-memory", "0Mi", "The amount of memory to add per node.")
baseStorage = flag.String("storage", noValue, "The base storage resource requirement.")
storagePerNode = flag.String("extra-storage", "0Gi", "The amount of storage to add per node.")
recommendationOffset = flag.Int("recommendation-offset", 10, "A number from range 0-100. When the dependent's resources are rewritten, they are set to the closer end of the range defined by this percentage threshold.")
acceptanceOffset = flag.Int("acceptance-offset", 20, "A number from range 0-100. The dependent's resources are rewritten when they deviate from expected by a percentage that is higher than this threshold. Can't be lower than recommendation-offset.")
// Flags to identify the container to nanny.
podNamespace = flag.String("namespace", os.Getenv("MY_POD_NAMESPACE"), "The namespace of the ward. This defaults to the nanny pod's own namespace.")
deployment = flag.String("deployment", "", "The name of the deployment being monitored. This is required.")
podName = flag.String("pod", os.Getenv("MY_POD_NAME"), "The name of the pod to watch. This defaults to the nanny's own pod.")
containerName = flag.String("container", "pod-nanny", "The name of the container to watch. This defaults to the nanny itself.")
// Flags to control runtime behavior.
pollPeriodMillis = flag.Int("poll-period", 10000, "The time, in milliseconds, to poll the dependent container.")
)
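// Example invocation (illustrative flag values only, not taken from any real deployment):
//
//	pod_nanny --deployment=metrics-server --container=metrics-server \
//	  --cpu=40m --extra-cpu=0.5m --memory=40Mi --extra-memory=4Mi \
//	  --acceptance-offset=20 --recommendation-offset=10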
func checkPercentageFlagBounds(flagName string, flagValue int) {
if flagValue < 0 || flagValue > 100 {
log.Fatalf("%s flag must be between 0 and 100 inclusively, was %d.", flagName, flagValue)
}
}
func main() {
// First log our starting config, and then set up.
log.Infof("Invoked by %v", os.Args)
// Add standard go flags to the flag set, to enable e.g. setting glog flags.
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
flag.Parse()
// Perform further validation of flags.
if *deployment == "" {
log.Fatal("Must specify a deployment.")
}
checkPercentageFlagBounds("recommendation-offset", *recommendationOffset)
checkPercentageFlagBounds("acceptance-offset", *acceptanceOffset)
pollPeriod := time.Duration(int64(*pollPeriodMillis) * int64(time.Millisecond))
log.Infof("Poll period: %+v", pollPeriod)
log.Infof("Watching namespace: %s, pod: %s, container: %s.", *podNamespace, *podName, *containerName)
log.Infof("cpu: %s, extra_cpu: %s, memory: %s, extra_memory: %s, storage: %s, extra_storage: %s", *baseCPU, *cpuPerNode, *baseMemory, *memoryPerNode, *baseStorage, *storagePerNode)
log.Infof("Accepted range +/-%d%%", *acceptanceOffset)
log.Infof("Recommended range +/-%d%%", *recommendationOffset)
// Set up work objects.
config, err := restclient.InClusterConfig()
if err != nil {
log.Fatal(err)
}
clientset, err := client.NewForConfig(config)
if err != nil {
log.Fatal(err)
}
k8s := nanny.NewKubernetesClient(*podNamespace, *deployment, *podName, *containerName, clientset)
var resources []nanny.Resource
// Monitor only the resources specified.
if *baseCPU != noValue {
resources = append(resources, nanny.Resource{
Base: resource.MustParse(*baseCPU),
ExtraPerNode: resource.MustParse(*cpuPerNode),
Name: "cpu",
})
}
if *baseMemory != noValue {
resources = append(resources, nanny.Resource{
Base: resource.MustParse(*baseMemory),
ExtraPerNode: resource.MustParse(*memoryPerNode),
Name: "memory",
})
}
if *baseStorage != noValue {
resources = append(resources, nanny.Resource{
Base: resource.MustParse(*baseStorage),
ExtraPerNode: resource.MustParse(*storagePerNode),
Name: "storage",
})
}
log.Infof("Resources: %+v", resources)
// Begin nannying.
nanny.PollAPIServer(
k8s,
nanny.Estimator{
AcceptanceOffset: int64(*acceptanceOffset),
RecommendationOffset: int64(*recommendationOffset),
Resources: resources,
},
pollPeriod)
}
| [
"\"MY_POD_NAMESPACE\"",
"\"MY_POD_NAME\""
] | [] | [
"MY_POD_NAME",
"MY_POD_NAMESPACE"
] | [] | ["MY_POD_NAME", "MY_POD_NAMESPACE"] | go | 2 | 0 | |
fargaterunner/main.go | package main
import (
"context"
"fmt"
"log"
"os"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecs"
)
var taskArn, clusterArn, subnetId string
func init() {
taskArn = os.Getenv("TASK_ARN")
clusterArn = os.Getenv("CLUSTER_ARN")
subnetId = os.Getenv("SUBNET_ID")
if taskArn == "" {
log.Fatal("TASK_ARN was not set as an env var.")
os.Exit(1)
}
if clusterArn == "" {
log.Fatal("CLUSTER_ARN was not set as an env var.")
os.Exit(1)
}
if subnetId == "" {
log.Fatal("VPC_ID was not set as an env var.")
os.Exit(1)
}
}
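// Example environment for the Lambda (hypothetical ARNs and subnet ID, shown only to
// illustrate the expected formats):
//
//	TASK_ARN=arn:aws:ecs:us-east-1:123456789012:task-definition/example-task:1
//	CLUSTER_ARN=arn:aws:ecs:us-east-1:123456789012:cluster/example-cluster
//	SUBNET_ID=subnet-0123456789abcdef0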
func main() {
lambda.Start(HandleRequest)
}
func HandleRequest(ctx context.Context) (string, error) {
launchType := "FARGATE"
assignPublicIP := "ENABLED"
sess := session.Must(session.NewSession())
ecsClient := ecs.New(sess)
vpcConfig := ecs.AwsVpcConfiguration{
Subnets: []*string{&subnetId},
AssignPublicIp: &assignPublicIP,
}
networkConfig := ecs.NetworkConfiguration{
AwsvpcConfiguration: &vpcConfig,
}
runTask := ecs.RunTaskInput{
Cluster: &clusterArn,
TaskDefinition: &taskArn,
NetworkConfiguration: &networkConfig,
LaunchType: &launchType,
}
_, err := ecsClient.RunTask(&runTask)
if err != nil {
return fmt.Sprintln("An error occured " + err.Error()), nil
}
return fmt.Sprintf("ECS Task Ran"), nil
}
| [
"\"TASK_ARN\"",
"\"CLUSTER_ARN\"",
"\"SUBNET_ID\""
] | [] | [
"SUBNET_ID",
"TASK_ARN",
"CLUSTER_ARN"
] | [] | ["SUBNET_ID", "TASK_ARN", "CLUSTER_ARN"] | go | 3 | 0 | |
vendor/github.com/sendgrid/sendgrid-go/examples/categories/categories.go | package main
import (
"fmt"
"github.com/sendgrid/sendgrid-go"
"log"
"os"
)
///////////////////////////////////////////////////
// Retrieve all categories
// GET /categories
func Retrieveallcategories() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/categories", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["category"] = "test_string"
queryParams["limit"] = "1"
queryParams["offset"] = "1"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
///////////////////////////////////////////////////
// Retrieve Email Statistics for Categories
// GET /categories/stats
func RetrieveEmailStatisticsforCategories() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/categories/stats", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["end_date"] = "2016-04-01"
queryParams["aggregated_by"] = "day"
queryParams["limit"] = "1"
queryParams["offset"] = "1"
queryParams["start_date"] = "2016-01-01"
queryParams["categories"] = "test_string"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
///////////////////////////////////////////////////
// Retrieve sums of email stats for each category [Needs: Stats object defined, has category ID?]
// GET /categories/stats/sums
func Retrievesumsofemailstatsforeachcategory() {
apiKey := os.Getenv("YOUR_SENDGRID_APIKEY")
host := "https://api.sendgrid.com"
request := sendgrid.GetRequest(apiKey, "/v3/categories/stats/sums", host)
request.Method = "GET"
queryParams := make(map[string]string)
queryParams["end_date"] = "2016-04-01"
queryParams["aggregated_by"] = "day"
queryParams["limit"] = "1"
queryParams["sort_by_metric"] = "test_string"
queryParams["offset"] = "1"
queryParams["start_date"] = "2016-01-01"
queryParams["sort_by_direction"] = "asc"
request.QueryParams = queryParams
response, err := sendgrid.API(request)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
}
func main() {
// add your function calls here
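// For example (commented out; each call needs a valid YOUR_SENDGRID_APIKEY in the
// environment and will hit the live API):
// Retrieveallcategories()
// RetrieveEmailStatisticsforCategories()
// Retrievesumsofemailstatsforeachcategory()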
}
| [
"\"YOUR_SENDGRID_APIKEY\"",
"\"YOUR_SENDGRID_APIKEY\"",
"\"YOUR_SENDGRID_APIKEY\""
] | [] | [
"YOUR_SENDGRID_APIKEY"
] | [] | ["YOUR_SENDGRID_APIKEY"] | go | 1 | 0 | |
tests/utils/test_distributed.py | import logging
import os
import unittest
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.optim as optim
from torch.nn.parallel import DistributedDataParallel as DDP
from pytorch_metric_learning import losses, miners
from pytorch_metric_learning.utils import common_functions as c_f
from pytorch_metric_learning.utils import distributed
from .. import TEST_DEVICE, TEST_DTYPES
# https://discuss.pytorch.org/t/check-if-models-have-same-weights/4351
def parameters_are_equal(model1, model2):
for p1, p2 in zip(model1.parameters(), model2.parameters()):
num_elements = float(torch.numel(p2.data))
if torch.sum(torch.isclose(p1.data, p2.data, rtol=1e-2)) < (num_elements * 0.8):
return False
return True
### from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html ###
def setup(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
dist_type = "gloo" if TEST_DEVICE == torch.device("cpu") else "nccl"
# initialize the process group
dist.init_process_group(dist_type, rank=rank, world_size=world_size)
def cleanup():
dist.destroy_process_group()
class ToyMpModel(torch.nn.Module):
def __init__(self):
super(ToyMpModel, self).__init__()
self.net1 = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
self.net2 = torch.nn.Linear(10, 5)
def forward(self, x):
x = self.relu(self.net1(x))
return self.net2(x)
### from https://pytorch.org/tutorials/intermediate/ddp_tutorial.html ###
def single_process_function(
rank,
world_size,
lr,
model,
inputs,
labels,
loss_fn,
miner_fn,
original_model,
original_loss_fn,
original_miner_fn,
correct_loss,
correct_indices_tuple,
is_tuple_loss,
ref_outputs,
ref_labels,
):
setup(rank, world_size)
if TEST_DEVICE == torch.device("cpu"):
device = TEST_DEVICE
device_ids = None
output_device = None
else:
device = torch.device("cuda:{}".format(rank))
device_ids = [rank]
output_device = rank
ddp_mp_model = DDP(
model.to(device), device_ids=device_ids, output_device=output_device
)
if is_tuple_loss:
loss_fn = distributed.DistributedLossWrapper(loss=loss_fn)
else:
loss_fn = distributed.DistributedLossWrapper(
loss=loss_fn.to(device), device_ids=device_ids, output_device=output_device
)
loss_optimizer = optim.SGD(loss_fn.parameters(), lr=lr)
loss_optimizer.zero_grad()
miner_fn = distributed.DistributedMinerWrapper(miner=miner_fn)
optimizer = optim.SGD(ddp_mp_model.parameters(), lr=lr)
optimizer.zero_grad()
outputs = ddp_mp_model(inputs[rank].to(device))
if ref_outputs is not None:
ref_outputs[rank] = ref_outputs[rank].to(device)
indices_tuple = miner_fn(
outputs, labels[rank], ref_outputs[rank], ref_labels[rank]
)
indices_tuple = c_f.shift_indices_tuple(
indices_tuple, len(outputs) * world_size
)
loss = loss_fn(
[outputs, ref_outputs[rank]],
[labels[rank], ref_labels[rank]],
indices_tuple,
)
else:
indices_tuple = miner_fn(outputs, labels[rank])
loss = loss_fn(outputs, labels[rank], indices_tuple)
if is_tuple_loss:
pos_loss_size = loss_fn.loss.reducer.reducers["pos_loss"].losses_size
neg_loss_size = loss_fn.loss.reducer.reducers["neg_loss"].losses_size
correct_pos_loss_size = original_loss_fn.reducer.reducers[
"pos_loss"
].losses_size
correct_neg_loss_size = original_loss_fn.reducer.reducers[
"neg_loss"
].losses_size
assert pos_loss_size == correct_pos_loss_size
assert neg_loss_size == correct_neg_loss_size
else:
loss_size = loss_fn.loss.module.reducer.losses_size
correct_loss_size = original_loss_fn.reducer.losses_size
assert loss_size == correct_loss_size
assert torch.isclose(loss, torch.from_numpy(correct_loss).to(device))
assert miner_fn.miner.num_pos_pairs == original_miner_fn.num_pos_pairs
assert miner_fn.miner.num_neg_pairs == original_miner_fn.num_neg_pairs
for i in range(len(correct_indices_tuple)):
assert torch.all(
indices_tuple[i] == (torch.from_numpy(correct_indices_tuple[i]).to(device))
)
dist.barrier()
loss.backward()
original_model = original_model.to(device)
assert not parameters_are_equal(original_model, ddp_mp_model.module)
dist.barrier()
optimizer.step()
dist.barrier()
assert parameters_are_equal(original_model, ddp_mp_model.module)
if not is_tuple_loss:
original_loss_fn = original_loss_fn.to(device)
assert not parameters_are_equal(original_loss_fn, loss_fn.loss.module)
dist.barrier()
loss_optimizer.step()
dist.barrier()
assert parameters_are_equal(original_loss_fn, loss_fn.loss.module)
dist.barrier()
cleanup()
class TestDistributedLossWrapper(unittest.TestCase):
def create_loss(self, loss_class, is_tuple_loss, dtype):
if is_tuple_loss:
return loss_class()
else:
return loss_class(num_classes=2, embedding_size=5).type(dtype)
def loss_and_miner_tester(
self, loss_class, miner_class, is_tuple_loss, test_ref_emb=False
):
torch.manual_seed(75210)
if TEST_DEVICE != torch.device("cpu"):
max_world_size = min(4, torch.cuda.device_count())
if max_world_size < 1:
logging.warning(
"In GPU mode but no GPUs available. Skipping distributed test"
)
return
else:
max_world_size = 2
for dtype in TEST_DTYPES:
for world_size in range(1, max_world_size + 1):
batch_size = 20
lr = 1
inputs = [
torch.randn(batch_size, 10).type(dtype) for _ in range(world_size)
]
labels = [
torch.randint(low=0, high=2, size=(batch_size,))
for _ in range(world_size)
]
original_model = ToyMpModel().type(dtype)
model = ToyMpModel().type(dtype)
model.load_state_dict(original_model.state_dict())
original_model = original_model.to(TEST_DEVICE)
original_loss_fn = self.create_loss(loss_class, is_tuple_loss, dtype)
loss_fn = self.create_loss(loss_class, is_tuple_loss, dtype)
if not is_tuple_loss:
loss_fn.load_state_dict(original_loss_fn.state_dict())
assert parameters_are_equal(original_loss_fn, loss_fn)
original_loss_fn = original_loss_fn.to(TEST_DEVICE)
loss_optimizer = optim.SGD(original_loss_fn.parameters(), lr=lr)
loss_optimizer.zero_grad()
original_miner_fn = miner_class()
miner_fn = miner_class()
optimizer = optim.SGD(original_model.parameters(), lr=lr)
optimizer.zero_grad()
all_inputs = torch.cat(inputs, dim=0).to(TEST_DEVICE)
all_labels = torch.cat(labels, dim=0).to(TEST_DEVICE)
all_outputs = original_model(all_inputs)
if test_ref_emb:
ref_outputs = [
torch.randn(batch_size, 5).type(dtype).detach()
for _ in range(world_size)
]
ref_labels = [
torch.randint(low=0, high=2, size=(batch_size,))
for _ in range(world_size)
]
all_ref_outputs = torch.cat(ref_outputs, dim=0).to(TEST_DEVICE)
all_ref_labels = torch.cat(ref_labels, dim=0).to(TEST_DEVICE)
correct_indices_tuple = original_miner_fn(
all_outputs, all_labels, all_ref_outputs, all_ref_labels
)
correct_indices_tuple = c_f.shift_indices_tuple(
correct_indices_tuple, len(all_outputs)
)
all_outputs = torch.cat([all_outputs, all_ref_outputs], dim=0).to(
TEST_DEVICE
)
all_labels = torch.cat([all_labels, all_ref_labels], dim=0).to(
TEST_DEVICE
)
else:
ref_outputs, ref_labels = None, None
correct_indices_tuple = original_miner_fn(all_outputs, all_labels)
correct_loss = original_loss_fn(
all_outputs, all_labels, correct_indices_tuple
)
if TEST_DEVICE == torch.device("cpu"):
correct_loss.backward(retain_graph=True)
else:
(correct_loss / world_size).backward(retain_graph=True)
optimizer.step()
if not is_tuple_loss:
for p in original_loss_fn.parameters():
# Each replica loss function sees gradients from the global batch
p.grad *= world_size
loss_optimizer.step()
mp.spawn(
single_process_function,
args=(
world_size,
lr,
model,
inputs,
labels,
loss_fn,
miner_fn,
original_model,
original_loss_fn,
original_miner_fn,
correct_loss.detach().cpu().numpy(),
tuple([x.cpu().numpy() for x in correct_indices_tuple]),
is_tuple_loss,
ref_outputs,
ref_labels,
),
nprocs=world_size,
join=True,
)
def test_distributed_tuple_loss_and_miner(self):
self.loss_and_miner_tester(
losses.ContrastiveLoss, miners.MultiSimilarityMiner, True
)
def test_distributed_classifier_loss_and_miner(self):
self.loss_and_miner_tester(
losses.ArcFaceLoss, miners.MultiSimilarityMiner, False
)
def test_distributed_tuple_miner_with_ref_emb(self):
self.loss_and_miner_tester(
losses.ContrastiveLoss, miners.MultiSimilarityMiner, True, test_ref_emb=True
)
if __name__ == "__main__":
unittest.main()
| [] | [] | [
"MASTER_ADDR",
"MASTER_PORT"
] | [] | ["MASTER_ADDR", "MASTER_PORT"] | python | 2 | 0 | |
superset/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""Package's main module!"""
import json
import logging
from logging.handlers import TimedRotatingFileHandler
import os
from flask import Flask, redirect
from flask_appbuilder import AppBuilder, IndexView, SQLA
from flask_appbuilder.baseviews import expose
from flask_compress import Compress
from flask_migrate import Migrate
from flask_wtf.csrf import CSRFProtect
from werkzeug.contrib.fixers import ProxyFix
from superset import config
from superset.connectors.connector_registry import ConnectorRegistry
from superset.security import SupersetSecurityManager
from superset.utils.core import (
get_update_perms_flag, pessimistic_connection_handling, setup_cache)
APP_DIR = os.path.dirname(__file__)
CONFIG_MODULE = os.environ.get('SUPERSET_CONFIG', 'superset.config')
if not os.path.exists(config.DATA_DIR):
os.makedirs(config.DATA_DIR)
with open(APP_DIR + '/static/assets/backendSync.json', 'r') as f:
frontend_config = json.load(f)
app = Flask(__name__)
app.config.from_object(CONFIG_MODULE)
conf = app.config
#################################################################
# Handling manifest file logic at app start
#################################################################
MANIFEST_FILE = APP_DIR + '/static/assets/dist/manifest.json'
manifest = {}
def parse_manifest_json():
global manifest
try:
with open(MANIFEST_FILE, 'r') as f:
# the manifest includes non-entry files
# we only need entries in templates
full_manifest = json.load(f)
manifest = full_manifest.get('entrypoints', {})
except Exception:
pass
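# For reference, an assumed (illustrative) manifest shape that the lookups below expect:
# {"entrypoints": {"theme": {"js": ["theme.abc123.js"], "css": ["theme.abc123.css"]}}}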
def get_js_manifest_files(filename):
if app.debug:
parse_manifest_json()
entry_files = manifest.get(filename, {})
return entry_files.get('js', [])
def get_css_manifest_files(filename):
if app.debug:
parse_manifest_json()
entry_files = manifest.get(filename, {})
return entry_files.get('css', [])
def get_unloaded_chunks(files, loaded_chunks):
filtered_files = [f for f in files if f not in loaded_chunks]
for f in filtered_files:
loaded_chunks.add(f)
return filtered_files
parse_manifest_json()
@app.context_processor
def get_manifest():
return dict(
loaded_chunks=set(),
get_unloaded_chunks=get_unloaded_chunks,
js_manifest=get_js_manifest_files,
css_manifest=get_css_manifest_files,
)
#################################################################
for bp in conf.get('BLUEPRINTS'):
try:
print("Registering blueprint: '{}'".format(bp.name))
app.register_blueprint(bp)
except Exception as e:
print('blueprint registration failed')
logging.exception(e)
if conf.get('SILENCE_FAB'):
logging.getLogger('flask_appbuilder').setLevel(logging.ERROR)
if app.debug:
app.logger.setLevel(logging.DEBUG) # pylint: disable=no-member
else:
# In production mode, add log handler to sys.stderr.
app.logger.addHandler(logging.StreamHandler()) # pylint: disable=no-member
app.logger.setLevel(logging.INFO) # pylint: disable=no-member
logging.getLogger('pyhive.presto').setLevel(logging.INFO)
db = SQLA(app)
if conf.get('WTF_CSRF_ENABLED'):
csrf = CSRFProtect(app)
csrf_exempt_list = conf.get('WTF_CSRF_EXEMPT_LIST', [])
for ex in csrf_exempt_list:
csrf.exempt(ex)
pessimistic_connection_handling(db.engine)
cache = setup_cache(app, conf.get('CACHE_CONFIG'))
tables_cache = setup_cache(app, conf.get('TABLE_NAMES_CACHE_CONFIG'))
migrate = Migrate(app, db, directory=APP_DIR + '/migrations')
# Logging configuration
logging.basicConfig(format=app.config.get('LOG_FORMAT'))
logging.getLogger().setLevel(app.config.get('LOG_LEVEL'))
if app.config.get('ENABLE_TIME_ROTATE'):
logging.getLogger().setLevel(app.config.get('TIME_ROTATE_LOG_LEVEL'))
handler = TimedRotatingFileHandler(
app.config.get('FILENAME'),
when=app.config.get('ROLLOVER'),
interval=app.config.get('INTERVAL'),
backupCount=app.config.get('BACKUP_COUNT'))
logging.getLogger().addHandler(handler)
if app.config.get('ENABLE_CORS'):
from flask_cors import CORS
CORS(app, **app.config.get('CORS_OPTIONS'))
if app.config.get('ENABLE_PROXY_FIX'):
app.wsgi_app = ProxyFix(app.wsgi_app)
if app.config.get('ENABLE_CHUNK_ENCODING'):
class ChunkedEncodingFix(object):
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
# Setting wsgi.input_terminated tells werkzeug.wsgi to ignore
# content-length and read the stream till the end.
if environ.get('HTTP_TRANSFER_ENCODING', '').lower() == u'chunked':
environ['wsgi.input_terminated'] = True
return self.app(environ, start_response)
app.wsgi_app = ChunkedEncodingFix(app.wsgi_app)
if app.config.get('UPLOAD_FOLDER'):
try:
os.makedirs(app.config.get('UPLOAD_FOLDER'))
except OSError:
pass
for middleware in app.config.get('ADDITIONAL_MIDDLEWARE'):
app.wsgi_app = middleware(app.wsgi_app)
class MyIndexView(IndexView):
@expose('/')
def index(self):
return redirect('/superset/welcome')
custom_sm = app.config.get('CUSTOM_SECURITY_MANAGER') or SupersetSecurityManager
if not issubclass(custom_sm, SupersetSecurityManager):
raise Exception(
"""Your CUSTOM_SECURITY_MANAGER must now extend SupersetSecurityManager,
not FAB's security manager.
See [4565] in UPDATING.md""")
appbuilder = AppBuilder(
app,
db.session,
base_template='superset/base.html',
indexview=MyIndexView,
security_manager_class=custom_sm,
update_perms=get_update_perms_flag(),
)
security_manager = appbuilder.sm
results_backend = app.config.get('RESULTS_BACKEND')
# Registering sources
module_datasource_map = app.config.get('DEFAULT_MODULE_DS_MAP')
module_datasource_map.update(app.config.get('ADDITIONAL_MODULE_DS_MAP'))
ConnectorRegistry.register_sources(module_datasource_map)
# Flask-Compress
if conf.get('ENABLE_FLASK_COMPRESS'):
Compress(app)
# Hook that provides administrators a handle on the Flask APP
# after initialization
flask_app_mutator = app.config.get('FLASK_APP_MUTATOR')
if flask_app_mutator:
flask_app_mutator(app)
from superset import views # noqa
| [] | [] | [
"SUPERSET_CONFIG"
] | [] | ["SUPERSET_CONFIG"] | python | 1 | 0 | |
engage/api/utils.py | from engage import settings
from datetime import datetime, timedelta
import logging
import requests
import os
import bcrypt
import base64
import pytz
import boto3
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
log = logging.Logger(__name__)
if not settings.TEST:
ses_client = boto3.client('ses', aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
region_name=os.environ["AWS_REGION"])
if settings.DEBUG:
log.error("{}\n {}\n {}".format(os.getenv("AWS_ACCESS_KEY_ID"),
os.getenv("AWS_SECRET_ACCESS_KEY"), os.getenv("AWS_REGION")))
def verify_recaptcha(token):
r = requests.post('https://www.google.com/recaptcha/api/siteverify', data={
'secret': os.environ["RECAPTCHAKEY"], 'response': token})
response = r.json()
return response['success']
def array_of_ordereddict_to_list_of_names(tags_ordereddict_array):
"""
Serializers have a funny organization that isn't helpful in making further queries
Here we take the list of ordered dictionaries (id: x, name: y) and pull out the name only
and put that in a names list to return
"""
names = []
length = len(list(tags_ordereddict_array))
for i in range(length):
names.append(tags_ordereddict_array[i]["name"])
return names
def check_auth_code(plain_code, hashed):
dec = bcrypt.hashpw(plain_code.encode('utf-8'),
hashed.encode('utf-8')).decode('utf-8')
if dec == hashed:
return True
return False
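# Note: hashing the candidate code with the stored hash as the salt and comparing,
# as above, is the standard bcrypt verification pattern; bcrypt.checkpw(plain, hashed)
# performs an equivalent check with a constant-time comparison.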
def calculateTallies(messages_qs):
pro = 0
con = 0
more_info = 0
home_owner = 0
business_owner = 0
resident = 0
works = 0
school = 0
child_school = 0
total = 0
for message in messages_qs:
if message.authcode != None:
continue
if message.pro == 0:
con += 1
elif message.pro == 1:
pro += 1
else:
more_info += 1
if message.home_owner:
home_owner += 1
if message.business_owner:
business_owner += 1
if message.resident:
resident += 1
if message.works:
works += 1
if message.school:
school += 1
if message.child_school:
child_school += 1
total += 1
return {"home_owner": home_owner, "business_owner": business_owner,
"resident": resident, "works": works, "school": school,
"child_school": child_school, "pro": pro, "con": con, "more_info": more_info, "total": total}
def isCommentAllowed(timestamp):
dt = datetime.now().timestamp()
if dt > timestamp:
return False
return True
def send_mail(mail_message):
if type(mail_message["user"]) is dict:
to_email = mail_message["user"]["email"]
else:
to_email = mail_message["user"].email
multipart_content_subtype = 'mixed'
msg = MIMEMultipart(multipart_content_subtype)
msg['Subject'] = mail_message["subject"]
msg['To'] = to_email
msg['From'] = '[email protected]'
part = MIMEText(mail_message['content'], 'html')
msg.attach(part)
if "attachment_file_path" in mail_message:
with open(mail_message["attachment_file_path"], 'rb') as f:
part = MIMEApplication(f.read(), _subtype='pdf')
part.add_header('Content-Disposition', 'attachment',
filename=mail_message['attachment_file_name'])
msg.attach(part)
try:
if settings.DEBUG:
log.error(("YYY", msg.as_string()))
if not settings.TEST:
response = ses_client.send_raw_email(
Source="engage team <[email protected]>",
Destinations=[to_email],
RawMessage={'Data': msg.as_string()})
if response['MessageId'] is not None:
return True
else:
log.error("Could not send an email from {} to {} about {}".format("[email protected]",
to_email, mail_message['subject']))
return False
else:
return True
except Exception as exc:
log.error("Could not send email and threw error")
log.error(exc)
return False
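# Example payload for send_mail (hypothetical values, shown only to document the
# expected keys):
# send_mail({"user": {"email": "[email protected]"}, "subject": "Receipt",
# "content": "<p>Thanks!</p>"})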
| [] | [] | [
"RECAPTCHAKEY",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION"
] | [] | ["RECAPTCHAKEY", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_REGION"] | python | 4 | 0 | |
django_auth_example/wsgi.py | """
WSGI config for django_auth_example project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_auth_example.settings")
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tools/docker-builder/types.go | // Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"os"
"path/filepath"
"regexp"
"strings"
"k8s.io/utils/env"
"istio.io/istio/pilot/pkg/util/sets"
testenv "istio.io/istio/pkg/test/env"
"istio.io/pkg/log"
)
// Types mirrored from https://github.com/docker/buildx/blob/master/bake/bake.go
type Group struct {
Targets []string `json:"targets" hcl:"targets"`
}
type BakeFile struct {
Target map[string]Target `json:"target,omitempty"`
Group map[string]Group `json:"group,omitempty"`
}
type Target struct {
Context *string `json:"context,omitempty" hcl:"context,optional"`
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
Args map[string]string `json:"args,omitempty" hcl:"args,optional"`
Labels map[string]string `json:"labels,omitempty" hcl:"labels,optional"`
Tags []string `json:"tags,omitempty" hcl:"tags,optional"`
CacheFrom []string `json:"cache-from,omitempty" hcl:"cache-from,optional"`
CacheTo []string `json:"cache-to,omitempty" hcl:"cache-to,optional"`
Target *string `json:"target,omitempty" hcl:"target,optional"`
Secrets []string `json:"secret,omitempty" hcl:"secret,optional"`
SSH []string `json:"ssh,omitempty" hcl:"ssh,optional"`
Platforms []string `json:"platforms,omitempty" hcl:"platforms,optional"`
Outputs []string `json:"output,omitempty" hcl:"output,optional"`
Pull *bool `json:"pull,omitempty" hcl:"pull,optional"`
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional"`
}
type Args struct {
Push bool
Save bool
BuildxEnabled bool
NoClobber bool
NoCache bool
Targets []string
Variants []string
Architectures []string
BaseVersion string
ProxyVersion string
IstioVersion string
Tag string
Hubs []string
}
// Define variants, which control the base image of an image.
// Tags will have the variant appended (like 1.0-distroless).
// The DefaultVariant is a special variant that has no explicit tag (like 1.0); it
// is not a unique variant though. Currently, it represents DebugVariant.
// If both DebugVariant and DefaultVariant are built, there will be a single build but multiple tags
const (
// PrimaryVariant is the variant that DefaultVariant actually builds
PrimaryVariant = DebugVariant
DefaultVariant = "default"
DebugVariant = "debug"
DistrolessVariant = "distroless"
)
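// For example, with TAG=1.0, building both the default and debug variants produces a
// single build tagged both "1.0" and "1.0-debug", while a distroless build is tagged
// "1.0-distroless" (an illustration of the scheme described above).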
func DefaultArgs() Args {
// By default, we build all targets
targets := []string{
"pilot",
"proxyv2",
"app",
"istioctl",
"operator",
"install-cni",
"app_sidecar_ubuntu_xenial",
"app_sidecar_ubuntu_bionic",
"app_sidecar_ubuntu_focal",
"app_sidecar_debian_9",
"app_sidecar_debian_10",
"app_sidecar_centos_8",
"app_sidecar_centos_7",
}
if legacy, f := os.LookupEnv("DOCKER_TARGETS"); f {
// Allow env var config. It is a string separated list like "docker.pilot docker.proxy"
targets = []string{}
for _, v := range strings.Split(legacy, " ") {
if v == "" {
continue
}
targets = append(targets, strings.TrimPrefix(v, "docker."))
}
}
pv, err := testenv.ReadProxySHA()
if err != nil {
log.Warnf("failed to read proxy sha")
pv = "unknown"
}
variants := []string{DefaultVariant}
if legacy, f := os.LookupEnv("DOCKER_BUILD_VARIANTS"); f {
variants = strings.Split(legacy, " ")
}
if os.Getenv("INCLUDE_UNTAGGED_DEFAULT") == "true" {
// This legacy env var was to workaround the old build logic not being very smart
// In the new builder, we automagically detect this. So just insert the 'default' variant
cur := sets.NewSet(variants...)
cur.Insert(DefaultVariant)
variants = cur.SortedList()
}
arch := []string{"linux/amd64"}
if legacy, f := os.LookupEnv("DOCKER_ARCHITECTURES"); f {
arch = strings.Split(legacy, ",")
}
hub := []string{env.GetString("HUB", "localhost:5000")}
if hubs, f := os.LookupEnv("HUBS"); f {
hub = strings.Split(hubs, " ")
}
return Args{
Push: false,
Save: false,
NoCache: false,
BuildxEnabled: true,
Hubs: hub,
Tag: env.GetString("TAG", "latest"),
BaseVersion: fetchBaseVersion(),
IstioVersion: fetchIstioVersion(),
ProxyVersion: pv,
Architectures: arch,
Targets: targets,
Variants: variants,
}
}
var (
args = DefaultArgs()
version = false
)
var baseVersionRegexp = regexp.MustCompile(`BASE_VERSION \?= (.*)`)
func fetchBaseVersion() string {
if b, f := os.LookupEnv("BASE_VERSION"); f {
return b
}
b, err := os.ReadFile(filepath.Join(testenv.IstioSrc, "Makefile.core.mk"))
if err != nil {
log.Fatalf("failed to read file: %v", err)
return "unknown"
}
match := baseVersionRegexp.FindSubmatch(b)
if len(match) < 2 {
log.Fatalf("failed to find match")
return "unknown"
}
return string(match[1])
}
var istioVersionRegexp = regexp.MustCompile(`VERSION \?= (.*)`)
func fetchIstioVersion() string {
if b, f := os.LookupEnv("VERSION"); f {
return b
}
b, err := os.ReadFile(filepath.Join(testenv.IstioSrc, "Makefile.core.mk"))
if err != nil {
log.Fatalf("failed to read file: %v", err)
return "unknown"
}
match := istioVersionRegexp.FindSubmatch(b)
if len(match) < 2 {
log.Fatalf("failed to find match")
return "unknown"
}
return string(match[1])
}
| [
"\"INCLUDE_UNTAGGED_DEFAULT\""
] | [] | [
"INCLUDE_UNTAGGED_DEFAULT"
] | [] | ["INCLUDE_UNTAGGED_DEFAULT"] | go | 1 | 0 | |
temp_from_zip.go | package temp_from_zip
import "os"
import "fmt"
import "strconv"
import "time"
import "sync"
import "container/list"
import "net/http"
import "io/ioutil"
import "encoding/json"
type AreaWeatherData struct {
// strings hold whatever comes over the wire - this code stays agnostic
temp string
humidity string
windSpeed string
// as measured from the return of the API call
// (hence 0 in the case of a direct pull)
dataAgeSeconds string
// convention: if error is not empty, the values above are undefined.
// valid returns will include an empty error string.
error string
}
type CacheEntry struct {
temp string
humidity string
windSpeed string
timestamp int64 // epoch seconds
}
// capitals due to golangs exceedingly arbitrary rule around exports needing that
type OWMMain struct {
Temp float32
Humidity int // visual inspection - never saw fractional percent
}
type OWMWind struct {
Speed float32
}
type OWMResponse struct {
Main OWMMain
Wind OWMWind
}
// primitive ratelimiting scheme
type RateLimiter struct {
mu sync.Mutex
pullTimes list.List
}
var CACHE map[string]CacheEntry = make(map[string]CacheEntry)
// bounce the server to update, uses default 809f761fd91b3990cdc45262b01aa174 if not present
var API_KEY string = ""
var PULL_URL string = "https://api.openweathermap.org/data/2.5/weather?zip=%s,us&appid=%s"
// obviously I could put these in env vars too, the above was to make a point.
// avoid "but your data is 31 minutes old, I round up so that's
// one hour and you said it was 0 hours old" complaints
// by pulling anytime data is 29 minutes old or older,
// we can always try to report "this hour's" data
// (barring errors)
var CACHE_MAX_VALID_SECONDS int64 = 60 * 29
// openweathermap rate limits based on pricing tier
// For the moment let's assume we are using free tier.
// their policy: https://openweathermap.org/price
// 60 calls/min, 1M/month
// to keep it simple and provide a reasonable SLA,
// let's further limit so we don't have to keep track.
// 60 * 24 * 31 = 44,640 minutes in a month max (roughly 2.7m seconds)
// if we limit ourselves to 20 calls/min, we end up at 892.8k
// calls in a month, max, so no chance of blowing the per month limit.
// so we'll do that.
// nb this only affects outgoing calls, no limit is placed
// on the number of calls to this library's entry point.
// limiting at that level is up to the calling code.
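// worked check of the numbers above: 20 calls/min * 44,640 min/month = 892,800
// calls/month, comfortably under the 1M/month cap, and 20/min is under the 60/min cap.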
var RATE_LIMIT_MAX_PULLS_MINUTE int64 = 20
var RATE_LIMIT_TRACKING_MAX_SECS int64 = 61
// see func _WaitIfNecessary for usage
var RATE_LIMITER RateLimiter
// suggest versioning the api at a higher level (website level)
// other options are to embed versioning in the API name (LatestV1)
// or in the parameters/return payload. I prefer versioning at
// perimeter to keep low level code free of those considerations.
func Latest(zip string) AreaWeatherData {
// Update environment information (hot config update)
if (API_KEY == "") {
API_KEY = os.Getenv("OPENWEATHERMAP_API_KEY")
}
// log at debug/trace in prod
//fmt.Printf("enter Latest(%s), api key: %s\n", zip, API_KEY)
// Validate input
validatedZip, valid := _Validate(zip)
if(!valid) {
ret := AreaWeatherData{}
ret.error = fmt.Sprintf("Invalid zip(%s), accepted values are 00000 to 99999", zip)
fmt.Printf("exit Latest(%s) invalid entry\n", zip)
return ret
}
//fmt.Printf("zip %s accepted\n", validatedZip)
// Look for good-enough cache entries
cachedData, ok := CACHE[validatedZip]
errorString := ""
if (ok) {
cachedDataAge := _CalcAge(cachedData)
//fmt.Printf("cache hit, data age: %v vs max: %v\n", cachedDataAge, CACHE_MAX_VALID_SECONDS)
if(cachedDataAge > CACHE_MAX_VALID_SECONDS) {
ok = false
}
} else {
cachedData = CacheEntry{}
errorString = fmt.Sprintf("No data for zip %s\n", validatedZip)
}
// Remote pull data if no good cache entry
if(!ok) {
// every time we're going to pull, track the fact that we did so,
// and wait long enough to be certain we're respecting ratelimits
_WaitIfNecessary()
updatedData, ok := _Pull(validatedZip)
if (!ok) {
// _Pull has already logged failure
// return what we've got, could final log here.
// valid return since we've got no SLA
// and we do have a cached result (no error log in ret)
ret := _PopulateReturn(cachedData, "")
fmt.Printf("_Pull failed, returning cached entry: %v\n", ret)
return ret
}
fmt.Printf("pull succeeded (outer), new cache value: %v\n", updatedData)
errorString = ""
// update cache and our local variable.
cachedData = updatedData
CACHE[validatedZip] = updatedData
} else {
}
// however we got here, provide what we have.
ret := _PopulateReturn(cachedData, errorString)
//fmt.Printf("main return case, returning: %v\n", ret)
return ret
}
// given a cache entry, compute data age
func _CalcAge(entry CacheEntry) int64 {
secs := time.Now().Unix()
return secs - entry.timestamp // age = now minus the stored epoch timestamp
}
func _Validate(zip string) (string, bool) {
// only accept 5 digits. could be fancy and try to determine if a valid
// zip, I'd keep a list of zipcodes around, initialized dynamically at
// init time, rather than doing a hot lookup. the failure mode there is
// a new zipcode is published on a Friday and the server stays up over the
// weekend.. i dont really see the problem with some bogus lookups
asInt, err := strconv.Atoi(zip)
if err == nil {
return zip, (asInt >= 0) && (asInt <= 99999)
}
return "ERROR", false
}
func _Pull(validatedZip string) (CacheEntry, bool) {
ret := CacheEntry{}
// retry would be desirable in prod - not in a code test like this
// sample api call (if you want to look at JSON for updating the code):
// api.openweathermap.org/data/2.5/weather?zip=90210,us&appid=809f761fd91b3990cdc45262b01aa174
// no real fault tolerance here, idk golang well enough
// to do it idiomatically. a bad url (eg nohttps://) panics.
reqString := fmt.Sprintf(PULL_URL, validatedZip, API_KEY)
fmt.Printf("https requesting: %s\n", reqString)
resp, err := http.Get(reqString)
respBody, err2 := ioutil.ReadAll(resp.Body)
if (err != nil || err2 != nil) {
fmt.Printf("ERROR: response: \n%s\n\n err1: %s err2: %s\n", respBody, err, err2)
return ret, false
}
// The receiving structs are set up to pull the fields we need.
var result OWMResponse
jsonErr := json.Unmarshal([]byte(respBody), &result)
if jsonErr != nil {
fmt.Printf("pull failed to parse response: %v\n", result)
return ret, false
}
ret.temp = fmt.Sprintf("%f", result.Main.Temp)
ret.humidity = strconv.Itoa(result.Main.Humidity)
ret.windSpeed = fmt.Sprintf("%f", result.Wind.Speed)
nowEpochSecs := time.Now().Unix()
ret.timestamp = nowEpochSecs
fmt.Printf("pull succeeded, new data for %s: %v\n", validatedZip, ret)
return ret, true
}
// Type conversion: remove private data and add error verbosity
func _PopulateReturn(entry CacheEntry, errorString string) AreaWeatherData {
ret := AreaWeatherData{temp:entry.temp, humidity:entry.humidity, windSpeed:entry.windSpeed}
ret.dataAgeSeconds = strconv.Itoa(int(time.Now().Unix() - entry.timestamp))
ret.error = errorString
return ret
}
// see func _WaitIfNecessary for implementation
func _WaitIfNecessary() {
// plan: lock the mutex, figure out if we've called too much lately,
// if so hold in place till we have time, and then return.
// This will have the effect of causing other threads to not
// even be able to obtain the mutex till we're done,
// which is the desired behavior (the first waiting thread
// will get our clean slate +1 call, etc)
RATE_LIMITER.mu.Lock()
defer RATE_LIMITER.mu.Unlock()
nowEpochSecs := time.Now().Unix()
L := RATE_LIMITER.pullTimes.Len()
fmt.Printf("_WaitIfNecessary stats: recent pull count: %v\n", L)
// cleanup rate limit tracking data structure
for RATE_LIMITER.pullTimes.Len() > 0 {
oldest := RATE_LIMITER.pullTimes.Front().Value.(int64)
delta := nowEpochSecs - oldest
if(delta > RATE_LIMIT_TRACKING_MAX_SECS) {
fmt.Printf("_WaitIfNecessary: delta: %v removing oldest pull: %v\n", delta, oldest)
RATE_LIMITER.pullTimes.Remove(RATE_LIMITER.pullTimes.Front())
} else {
// there were no pulls older than our limit, done cleaning up.
break
}
}
// if we're under the rate limit, just go for it.
recentPullCount := int64(RATE_LIMITER.pullTimes.Len())
if (recentPullCount < RATE_LIMIT_MAX_PULLS_MINUTE) {
RATE_LIMITER.pullTimes.PushBack(nowEpochSecs)
L = RATE_LIMITER.pullTimes.Len()
fmt.Printf("_WaitIfNecessary early return, not enough calls to worry about. list length: %v\n", L)
return
}
// because of the mutex and the algo,
// we know the pullTimes list holds exactly 20 entries at this point.
// so all we have to do to be compliant is wait
// till the oldest call is outside the window
oldestSecs := RATE_LIMITER.pullTimes.Front().Value.(int64)
// e.g. 61 - (500021 - 500005) -> wait 45s
waitSecs := RATE_LIMIT_TRACKING_MAX_SECS - (nowEpochSecs - oldestSecs)
waitDuration := time.Duration(waitSecs * 1e9) //nanosecond conversion, sigh
fmt.Printf("_WaitIfNecessary sleeping to avoid ratelimit: %vs\n", waitSecs)
time.Sleep(waitDuration)
//next iteration will clean up oldest call
}
| [
"\"OPENWEATHERMAP_API_KEY\""
] | [] | [
"OPENWEATHERMAP_API_KEY"
] | [] | ["OPENWEATHERMAP_API_KEY"] | go | 1 | 0 | |
config.go | package memongo
import (
"errors"
"fmt"
"log"
"net"
"os"
"path"
"runtime"
"strconv"
"time"
"github.com/warphq/memongo/mongobin"
"github.com/warphq/memongo/memongolog"
)
// Options is the configuration options for a launched MongoDB binary
type Options struct {
// ShouldUseReplica indicates whether a replica should be used. If this is not specified,
// no replica will be used and mongo server will be run as standalone.
ShouldUseReplica bool
// Port to run MongoDB on. If this is not specified, a random (OS-assigned)
// port will be used
Port int
// Path to the cache for downloaded mongod binaries. Defaults to the
// system cache location.
CachePath string
// If DownloadURL and MongodBin are not given, this version of MongoDB will
// be downloaded
MongoVersion string
// If given, mongod will be downloaded from this URL instead of the
// auto-detected URL based on the current platform and MongoVersion
DownloadURL string
// If given, this binary will be run instead of downloading a mongod binary
MongodBin string
// Logger for printing messages. Defaults to printing to stdout.
Logger *log.Logger
// A LogLevel to log at. Defaults to LogLevelInfo.
LogLevel memongolog.LogLevel
// How long to wait for mongod to start up and report a port number. Does
// not include download time, only startup time. Defaults to 10 seconds.
StartupTimeout time.Duration
}
func (opts *Options) fillDefaults() error {
if opts.MongodBin == "" {
opts.MongodBin = os.Getenv("MEMONGO_MONGOD_BIN")
}
if opts.MongodBin == "" {
// The user didn't give us a local path to a binary. That means we need
// a download URL and a cache path.
// Determine the cache path
if opts.CachePath == "" {
opts.CachePath = os.Getenv("MEMONGO_CACHE_PATH")
}
if opts.CachePath == "" && os.Getenv("XDG_CACHE_HOME") != "" {
opts.CachePath = path.Join(os.Getenv("XDG_CACHE_HOME"), "memongo")
}
if opts.CachePath == "" {
if runtime.GOOS == "darwin" {
opts.CachePath = path.Join(os.Getenv("HOME"), "Library", "Caches", "memongo")
} else {
opts.CachePath = path.Join(os.Getenv("HOME"), ".cache", "memongo")
}
}
// Determine the download URL
if opts.DownloadURL == "" {
opts.DownloadURL = os.Getenv("MEMONGO_DOWNLOAD_URL")
}
if opts.DownloadURL == "" {
if opts.MongoVersion == "" {
return errors.New("one of MongoVersion, DownloadURL, or MongodBin must be given")
}
spec, err := mongobin.MakeDownloadSpec(opts.MongoVersion)
if err != nil {
return err
}
opts.DownloadURL = spec.GetDownloadURL()
}
}
// Determine the port number
if opts.Port == 0 {
mongoVersionEnv := os.Getenv("MEMONGO_MONGOD_PORT")
if mongoVersionEnv != "" {
port, err := strconv.Atoi(mongoVersionEnv)
if err != nil {
return fmt.Errorf("error parsing MEMONGO_MONGOD_PORT: %s", err)
}
opts.Port = port
}
}
if opts.Port == 0 {
port, err := getFreePort()
if err != nil {
return fmt.Errorf("error finding a free port: %s", err)
}
opts.Port = port
}
// Default the startup timeout whether or not a port was auto-assigned.
if opts.StartupTimeout == 0 {
opts.StartupTimeout = 10 * time.Second
}
return nil
}
func (opts *Options) getLogger() *memongolog.Logger {
return memongolog.New(opts.Logger, opts.LogLevel)
}
func (opts *Options) getOrDownloadBinPath() (string, error) {
if opts.MongodBin != "" {
return opts.MongodBin, nil
}
// Download or fetch from cache
binPath, err := mongobin.GetOrDownloadMongod(opts.DownloadURL, opts.CachePath, opts.getLogger())
if err != nil {
return "", err
}
return binPath, nil
}
func getFreePort() (int, error) {
// Based on: https://github.com/phayes/freeport/blob/master/freeport.go
addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
if err != nil {
return 0, err
}
l, err := net.ListenTCP("tcp", addr)
if err != nil {
return 0, err
}
defer l.Close()
return l.Addr().(*net.TCPAddr).Port, nil
}
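// Sketch of typical usage (the StartWithOptions, URI and Stop names are assumed from the
// rest of the package; they are not defined in this file):
//
//	server, err := memongo.StartWithOptions(&memongo.Options{MongoVersion: "4.0.5"})
//	if err != nil {
//		// handle error
//	}
//	defer server.Stop()
//	uri := server.URI()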
| [
"\"MEMONGO_MONGOD_BIN\"",
"\"MEMONGO_CACHE_PATH\"",
"\"XDG_CACHE_HOME\"",
"\"XDG_CACHE_HOME\"",
"\"HOME\"",
"\"HOME\"",
"\"MEMONGO_DOWNLOAD_URL\"",
"\"MEMONGO_MONGOD_PORT\""
] | [] | [
"MEMONGO_MONGOD_PORT",
"XDG_CACHE_HOME",
"MEMONGO_DOWNLOAD_URL",
"MEMONGO_MONGOD_BIN",
"MEMONGO_CACHE_PATH",
"HOME"
] | [] | ["MEMONGO_MONGOD_PORT", "XDG_CACHE_HOME", "MEMONGO_DOWNLOAD_URL", "MEMONGO_MONGOD_BIN", "MEMONGO_CACHE_PATH", "HOME"] | go | 6 | 0 | |
openface/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "openface.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
models_AC.py | import os
# Restrict the script to run on CPU
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Import Keras TensorFlow Backend
# from keras import backend as K
import tensorflow as tf
# Configure it to use only specific CPU Cores
config = tf.ConfigProto(intra_op_parallelism_threads=4,
inter_op_parallelism_threads=4,
device_count={"CPU": 1, "GPU": 0},
allow_soft_placement=True)
tf.Session(config=config)
# import tensorflow as tf
# Formulation of the baseline emotion recognition model
# The input is a set of sentences padded on the word level
def SentenceModel(vocab_size, embedding_size, first_rnn_size, num_classes, dropout, embedding, num_speakers):
# Sanity check
tf.reset_default_graph()
####################################################################################################################
# Placeholders and other needed variables
####################################################################################################################
x = tf.placeholder(tf.int32, [None, None])
speaker = tf.placeholder(tf.int32, [None, 2])
seqlen = tf.placeholder(tf.int32, [None])
y = tf.placeholder(tf.int32, [None])
batch_size = tf.shape(x)[0]
keep_prob = tf.constant(dropout)
########################################### MODEL STRUCTURE ########################################################
####################################################################################################################
# Embedding layer
####################################################################################################################
if embedding == 'glove':
filename = '../glove.6B.' + str(embedding_size) + 'd.txt'
def loadGloVe(filename):
embd = []
file = open(filename, 'r', encoding="utf-8")
for line in file.readlines():
row = line.strip().split(' ')
embd.append([float(i) for i in row[1:]])
print('Loaded GloVe Weights!')
file.close()
return embd
glove_embd = loadGloVe(filename)
glove_weights_initializer = tf.constant_initializer(glove_embd)
embeddings = tf.get_variable(
name='embeddings',
shape=(len(glove_embd), embedding_size),
initializer=glove_weights_initializer,
trainable=False)
if embedding == 'random':
embeddings = tf.get_variable('embedding_matrix', [vocab_size, embedding_size])
rnn_inputs = tf.nn.embedding_lookup(embeddings, x)
rnn_inputs = tf.nn.dropout(rnn_inputs, keep_prob)
####################################################################################################################
# Bidirectional RNN
####################################################################################################################
# They say forget bias helps if it's 1.0
lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(first_rnn_size, forget_bias=1.0)
lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(first_rnn_size, forget_bias=1.0)
(fw, bw), final_state = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, rnn_inputs, dtype=tf.float32)
rnn_outputs = tf.concat([fw, bw], axis=2)
# Get the last output of the variable length sequence
last_rnn_output = tf.gather_nd(rnn_outputs, tf.stack([tf.range(batch_size), seqlen - 1], axis=1))
if num_speakers:
last_rnn_output = tf.concat([tf.cast(speaker, tf.float32), last_rnn_output], 1)
####################################################################################################################
# Final Dense layer to produce outputs
####################################################################################################################
last_rnn_output = tf.nn.dropout(last_rnn_output, keep_prob)
logits = tf.layers.dense(last_rnn_output, num_classes)
##################################### END OF MODEL STRUCTURE #######################################################
####################################################################################################################
# Training Function
####################################################################################################################
preds = tf.nn.softmax(logits)
predictions = tf.cast(tf.argmax(preds, 1), tf.int32)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer(5e-3).minimize(loss)
return {
'x': x,
'seqlen': seqlen,
'speaker': speaker,
'y': y,
'ts': train_step,
'preds': predictions
}
# Formulation of the sentence dependent emotion recognition model
# The input is a set of sentences padded on word and sentence level
def DialogModel(vocab_size, embedding_size, first_rnn_size, second_rnn_size, num_classes, dropout, score_type, model_type,
window_size, embedding, num_speakers):
# Sanity check
tf.reset_default_graph()
####################################################################################################################
# Placeholders and other needed variables
####################################################################################################################
x = tf.placeholder(tf.int32, [None, None])
speaker = tf.placeholder(tf.int32, [None, 2])
seqlen = tf.placeholder(tf.int32, [None])
num_dialogs = tf.placeholder(tf.int32, [])
y = tf.placeholder(tf.int32, [None, None])
batch_size = tf.shape(x)[0]
keep_prob = tf.constant(dropout)
###################################### MODEL STRUCTURE #############################################################
####################################################################################################################
# Embedding layer
####################################################################################################################
if embedding == 'glove':
filename = '../glove.6B.' + str(embedding_size) + 'd.txt'
def loadGloVe(filename):
embd = []
file = open(filename, 'r', encoding="utf-8")
for line in file.readlines():
row = line.strip().split(' ')
embd.append([float(i) for i in row[1:]])
print('Loaded GloVe Weights!')
file.close()
return embd
glove_embd = loadGloVe(filename)
glove_weights_initializer = tf.constant_initializer(glove_embd)
embeddings = tf.get_variable(
name='embeddings',
shape=(len(glove_embd), embedding_size),
initializer=glove_weights_initializer,
trainable=False)
if embedding == 'random':
embeddings = tf.get_variable('embedding_matrix', [vocab_size, embedding_size])
rnn_inputs = tf.nn.embedding_lookup(embeddings, x)
rnn_inputs = tf.nn.dropout(rnn_inputs, keep_prob)
####################################################################################################################
# Bidirectional RNN
####################################################################################################################
# They say forget bias helps if it's 1.0
lstm_fw_cell = tf.nn.rnn_cell.LSTMCell(first_rnn_size / 2, forget_bias=1.0)
lstm_bw_cell = tf.nn.rnn_cell.LSTMCell(first_rnn_size / 2, forget_bias=1.0)
(fw, bw), final_state = tf.nn.bidirectional_dynamic_rnn(lstm_fw_cell, lstm_bw_cell, rnn_inputs, dtype=tf.float32)
rnn_outputs = tf.concat([fw, bw], axis=2)
# Get the last output of the variable length sequence
last_rnn_output = tf.gather_nd(rnn_outputs, tf.stack([tf.range(batch_size), seqlen - 1], axis=1))
if num_speakers:
last_rnn_output = tf.concat([tf.cast(speaker, tf.float32), last_rnn_output], 1)
first_rnn_size += 2
# Restructure the dialog format
last_rnn_output = tf.reshape(last_rnn_output, (num_dialogs, -1, first_rnn_size))
####################################################################################################################
# Sentence level RNN
####################################################################################################################
cell = tf.nn.rnn_cell.GRUCell(second_rnn_size)
# trainable initial state of RNN
init_state = tf.Variable(tf.zeros([1, second_rnn_size]))
init_state = tf.tile(init_state, [num_dialogs, 1])
rnn_outputs2, final_state = tf.nn.dynamic_rnn(cell, last_rnn_output,
initial_state=init_state) # sequence_length=seqlen
####################################################################################################################
# Custom differential memory component for the incremental sentence length
####################################################################################################################
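# For reference: the three score functions below follow the usual attention scorings
# (assumed correspondence) - dot: score(h_j, h_t) = h_j . h_t; general: h_j^T W h_t;
# concat: u^T tanh(W h_j + b) - with a softmax over the scores giving the weights used
# to build the context vector.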
# Dot score function
if score_type == 'dot':
window_size = int(window_size)
def score_func(j, context, rnn_outputs2):
if window_size != -1:
start = tf.cond(j > window_size, lambda: j-window_size, lambda: 0)
att = tf.einsum('ijm,im->ij', rnn_outputs2[:, start:j + 1, :], rnn_outputs2[:, j, :])
att = tf.nn.softmax(att)
context_temp = tf.einsum('ij,ijk->ik', att, rnn_outputs2[:, start:j + 1, :])
else:
att = tf.einsum('ijm,im->ij', rnn_outputs2[:, :j + 1, :], rnn_outputs2[:, j, :])
att = tf.nn.softmax(att)
context_temp = tf.einsum('ij,ijk->ik', att, rnn_outputs2[:, :j + 1, :])
context_temp = tf.expand_dims(context_temp, axis=1)
context = tf.cond(j > 0, lambda: tf.concat([context, context_temp], axis=1), lambda: context_temp)
j = tf.add(j, tf.constant(1))
return j, context, rnn_outputs2
# General score function
if score_type == 'general':
W = tf.get_variable('score_W', [second_rnn_size, second_rnn_size])
def score_func(j, context, rnn_outputs2):
att = tf.einsum('ijm,im->ij', tf.tanh(tf.einsum('ijk,km->ijm', rnn_outputs2[:, :j + 1, :], W)),
rnn_outputs2[:, j, :])
att = tf.nn.softmax(att)
# att = tf.Print(att, [att], summarize=25)
context_temp = tf.einsum('ij,ijk->ik', att, rnn_outputs2[:, :j + 1, :])
context_temp = tf.expand_dims(context_temp, axis=1)
context = tf.cond(j > 0, lambda: tf.concat([context, context_temp], axis=1), lambda: context_temp)
j = tf.add(j, tf.constant(1))
return j, context, rnn_outputs2
# Concat score function
if score_type == 'concat':
W = tf.get_variable('score_W', [second_rnn_size, second_rnn_size / 2])
B = tf.get_variable('score_B', [second_rnn_size / 2])
U = tf.get_variable('score_U', [second_rnn_size / 2])
def score_func(j, context, rnn_outputs2):
att = tf.tanh(tf.add(tf.einsum('ijk,km->ijm', rnn_outputs2[:, :j + 1, :], W), B))
att = tf.einsum('ijm,m->ij', att, U)
att = tf.nn.softmax(att)
#att = tf.Print(att, [att], summarize=25)
context_temp = tf.einsum('ij,ijk->ik', att, rnn_outputs2[:, :j + 1, :])
context_temp = tf.expand_dims(context_temp, axis=1)
context = tf.cond(j > 0, lambda: tf.concat([context, context_temp], axis=1), lambda: context_temp)
j = tf.add(j, tf.constant(1))
return j, context, rnn_outputs2
# Differential memory loop
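# tf.while_loop applies score_func once per utterance, growing the context tensor by one
# step each iteration; the TensorShape([None, None, ...]) shape invariant allows the time
# dimension to change between iterations, and context0 is only a placeholder that is
# replaced on the first iteration by the tf.cond inside score_func.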
if model_type == 'Double_RNN_with_memory':
j0 = tf.constant(0)
context0 = tf.zeros([10, 10, second_rnn_size])
cond = lambda j, m, rnn_outputs2: j < tf.shape(rnn_outputs2)[1]
_, context, _ = tf.while_loop(cond, score_func, [j0, context0, rnn_outputs2],
shape_invariants=[j0.get_shape(),
tf.TensorShape([None, None, second_rnn_size]),
rnn_outputs2.get_shape()],
parallel_iterations=1)
if model_type == 'RNN_with_memory':
j0 = tf.constant(0)
context0 = tf.zeros([10, 10, first_rnn_size])
cond = lambda j, m, last_rnn_output: j < tf.shape(last_rnn_output)[1]
_, context, _ = tf.while_loop(cond, score_func, [j0, context0, last_rnn_output],
shape_invariants=[j0.get_shape(),
tf.TensorShape([None, None, first_rnn_size]),
last_rnn_output.get_shape()],
parallel_iterations=1)
####################################################################################################################
# Final Dense layer to produce outputs
####################################################################################################################
if model_type == 'Double_RNN_with_memory' or model_type == 'RNN_with_memory':
# TODO dropout option
# Dropout before final layer
context = tf.nn.dropout(context, keep_prob)
logits = tf.layers.dense(context, num_classes)
##################################### END OF MODEL STRUCTURE #######################################################
####################################################################################################################
# Training Function
####################################################################################################################
preds = tf.nn.softmax(logits)
predictions = tf.cast(tf.argmax(preds, 2), tf.int32)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.AdamOptimizer(5e-3).minimize(loss)
return {
'x': x,
'seqlen': seqlen,
'speaker': speaker,
'y': y,
'ts': train_step,
'preds': predictions,
'preds_': preds,
'num_dialogs': num_dialogs
}
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pkg/ingress/controller.go | // Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to You under the Apache License, Version 2.0
// (the "License"); you may not use this file except in compliance with
// the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ingress
import (
"context"
"fmt"
"os"
"strings"
"sync"
"time"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
listerscorev1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/tools/record"
gatewaylistersv1alpha2 "sigs.k8s.io/gateway-api/pkg/client/listers/gateway/apis/v1alpha2"
"github.com/apache/apisix-ingress-controller/pkg/api"
"github.com/apache/apisix-ingress-controller/pkg/api/validation"
"github.com/apache/apisix-ingress-controller/pkg/apisix"
apisixcache "github.com/apache/apisix-ingress-controller/pkg/apisix/cache"
"github.com/apache/apisix-ingress-controller/pkg/config"
"github.com/apache/apisix-ingress-controller/pkg/kube"
configv2beta3 "github.com/apache/apisix-ingress-controller/pkg/kube/apisix/apis/config/v2beta3"
apisixscheme "github.com/apache/apisix-ingress-controller/pkg/kube/apisix/client/clientset/versioned/scheme"
listersv2beta3 "github.com/apache/apisix-ingress-controller/pkg/kube/apisix/client/listers/config/v2beta3"
"github.com/apache/apisix-ingress-controller/pkg/kube/translation"
"github.com/apache/apisix-ingress-controller/pkg/log"
"github.com/apache/apisix-ingress-controller/pkg/metrics"
"github.com/apache/apisix-ingress-controller/pkg/types"
apisixv1 "github.com/apache/apisix-ingress-controller/pkg/types/apisix/v1"
)
const (
// _component is used for event component
_component = "ApisixIngress"
// _resourceSynced is used when a resource is synced successfully
_resourceSynced = "ResourcesSynced"
// _messageResourceSynced is the message template used when a resource is synced successfully
_messageResourceSynced = "%s synced successfully"
// _resourceSyncAborted is used when a resource synced failed
_resourceSyncAborted = "ResourceSyncAborted"
// _messageResourceFailed is used to report error
_messageResourceFailed = "%s synced failed, with error: %s"
)
// Controller is the ingress apisix controller object.
type Controller struct {
name string
namespace string
cfg *config.Config
wg sync.WaitGroup
watchingNamespaces *sync.Map
watchingLabels types.Labels
apisix apisix.APISIX
podCache types.PodCache
translator translation.Translator
apiServer *api.Server
MetricsCollector metrics.Collector
kubeClient *kube.KubeClient
// recorder event
recorder record.EventRecorder
// this map enrolls which ApisixTls objects refer to a Kubernetes
// Secret object.
// type: Map<SecretKey, Map<ApisixTlsKey, ApisixTls>>
// SecretKey is `namespace_name`, ApisixTlsKey is kube style meta key: `namespace/name`
secretSSLMap *sync.Map
// leaderContextCancelFunc will be called when apisix-ingress-controller
// decides to give up its leader role.
leaderContextCancelFunc context.CancelFunc
// common informers and listers
namespaceInformer cache.SharedIndexInformer
namespaceLister listerscorev1.NamespaceLister
podInformer cache.SharedIndexInformer
podLister listerscorev1.PodLister
epInformer cache.SharedIndexInformer
epLister kube.EndpointLister
svcInformer cache.SharedIndexInformer
svcLister listerscorev1.ServiceLister
ingressLister kube.IngressLister
ingressInformer cache.SharedIndexInformer
secretInformer cache.SharedIndexInformer
secretLister listerscorev1.SecretLister
apisixUpstreamInformer cache.SharedIndexInformer
apisixUpstreamLister listersv2beta3.ApisixUpstreamLister
apisixRouteLister kube.ApisixRouteLister
apisixRouteInformer cache.SharedIndexInformer
apisixTlsLister kube.ApisixTlsLister
apisixTlsInformer cache.SharedIndexInformer
apisixClusterConfigLister kube.ApisixClusterConfigLister
apisixClusterConfigInformer cache.SharedIndexInformer
apisixConsumerInformer cache.SharedIndexInformer
apisixConsumerLister kube.ApisixConsumerLister
apisixPluginConfigInformer cache.SharedIndexInformer
apisixPluginConfigLister kube.ApisixPluginConfigLister
gatewayInformer cache.SharedIndexInformer
gatewayLister gatewaylistersv1alpha2.GatewayLister
// resource controllers
namespaceController *namespaceController
podController *podController
endpointsController *endpointsController
endpointSliceController *endpointSliceController
ingressController *ingressController
secretController *secretController
gatewayController *gatewayController
apisixUpstreamController *apisixUpstreamController
apisixRouteController *apisixRouteController
apisixTlsController *apisixTlsController
apisixClusterConfigController *apisixClusterConfigController
apisixConsumerController *apisixConsumerController
apisixPluginConfigController *apisixPluginConfigController
}
// NewController creates an ingress apisix controller object.
func NewController(cfg *config.Config) (*Controller, error) {
podName := os.Getenv("POD_NAME")
podNamespace := os.Getenv("POD_NAMESPACE")
if podNamespace == "" {
podNamespace = "default"
}
client, err := apisix.NewClient()
if err != nil {
return nil, err
}
kubeClient, err := kube.NewKubeClient(cfg)
if err != nil {
return nil, err
}
apiSrv, err := api.NewServer(cfg)
if err != nil {
return nil, err
}
var (
watchingNamespace = new(sync.Map)
watchingLabels = make(map[string]string)
)
if len(cfg.Kubernetes.AppNamespaces) > 1 || cfg.Kubernetes.AppNamespaces[0] != v1.NamespaceAll {
for _, ns := range cfg.Kubernetes.AppNamespaces {
watchingNamespace.Store(ns, struct{}{})
}
}
// support namespace label-selector
for _, labels := range cfg.Kubernetes.NamespaceSelector {
labelSlice := strings.Split(labels, "=")
watchingLabels[labelSlice[0]] = labelSlice[1]
}
// recorder
utilruntime.Must(apisixscheme.AddToScheme(scheme.Scheme))
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.Client.CoreV1().Events("")})
c := &Controller{
name: podName,
namespace: podNamespace,
cfg: cfg,
apiServer: apiSrv,
apisix: client,
MetricsCollector: metrics.NewPrometheusCollector(),
kubeClient: kubeClient,
watchingNamespaces: watchingNamespace,
watchingLabels: watchingLabels,
secretSSLMap: new(sync.Map),
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: _component}),
podCache: types.NewPodCache(),
}
return c, nil
}
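// initWhenStartLeading builds the shared informer factories, listers, the translator and all
// resource controllers; it is called only after this instance wins the leader election, so
// watch state is created on the active leader alone.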
func (c *Controller) initWhenStartLeading() {
var (
ingressInformer cache.SharedIndexInformer
apisixRouteInformer cache.SharedIndexInformer
apisixTlsInformer cache.SharedIndexInformer
apisixClusterConfigInformer cache.SharedIndexInformer
apisixConsumerInformer cache.SharedIndexInformer
)
kubeFactory := c.kubeClient.NewSharedIndexInformerFactory()
apisixFactory := c.kubeClient.NewAPISIXSharedIndexInformerFactory()
gatewayFactory := c.kubeClient.NewGatewaySharedIndexInformerFactory()
c.namespaceLister = kubeFactory.Core().V1().Namespaces().Lister()
c.podLister = kubeFactory.Core().V1().Pods().Lister()
c.epLister, c.epInformer = kube.NewEndpointListerAndInformer(kubeFactory, c.cfg.Kubernetes.WatchEndpointSlices)
c.svcLister = kubeFactory.Core().V1().Services().Lister()
c.ingressLister = kube.NewIngressLister(
kubeFactory.Networking().V1().Ingresses().Lister(),
kubeFactory.Networking().V1beta1().Ingresses().Lister(),
kubeFactory.Extensions().V1beta1().Ingresses().Lister(),
)
c.secretLister = kubeFactory.Core().V1().Secrets().Lister()
c.apisixRouteLister = kube.NewApisixRouteLister(
apisixFactory.Apisix().V2beta2().ApisixRoutes().Lister(),
apisixFactory.Apisix().V2beta3().ApisixRoutes().Lister(),
apisixFactory.Apisix().V2().ApisixRoutes().Lister(),
)
c.apisixUpstreamLister = apisixFactory.Apisix().V2beta3().ApisixUpstreams().Lister()
c.apisixTlsLister = kube.NewApisixTlsLister(
apisixFactory.Apisix().V2beta3().ApisixTlses().Lister(),
apisixFactory.Apisix().V2().ApisixTlses().Lister(),
)
c.apisixClusterConfigLister = kube.NewApisixClusterConfigLister(
apisixFactory.Apisix().V2beta3().ApisixClusterConfigs().Lister(),
apisixFactory.Apisix().V2().ApisixClusterConfigs().Lister(),
)
c.apisixConsumerLister = kube.NewApisixConsumerLister(
apisixFactory.Apisix().V2beta3().ApisixConsumers().Lister(),
apisixFactory.Apisix().V2().ApisixConsumers().Lister(),
)
c.apisixPluginConfigLister = kube.NewApisixPluginConfigLister(
apisixFactory.Apisix().V2beta3().ApisixPluginConfigs().Lister(),
)
c.translator = translation.NewTranslator(&translation.TranslatorOptions{
PodCache: c.podCache,
PodLister: c.podLister,
EndpointLister: c.epLister,
ServiceLister: c.svcLister,
ApisixUpstreamLister: c.apisixUpstreamLister,
SecretLister: c.secretLister,
UseEndpointSlices: c.cfg.Kubernetes.WatchEndpointSlices,
})
if c.cfg.Kubernetes.IngressVersion == config.IngressNetworkingV1 {
ingressInformer = kubeFactory.Networking().V1().Ingresses().Informer()
} else if c.cfg.Kubernetes.IngressVersion == config.IngressNetworkingV1beta1 {
ingressInformer = kubeFactory.Networking().V1beta1().Ingresses().Informer()
} else {
ingressInformer = kubeFactory.Extensions().V1beta1().Ingresses().Informer()
}
c.gatewayLister = gatewayFactory.Gateway().V1alpha2().Gateways().Lister()
c.gatewayInformer = gatewayFactory.Gateway().V1alpha2().Gateways().Informer()
switch c.cfg.Kubernetes.ApisixRouteVersion {
case config.ApisixRouteV2beta2:
apisixRouteInformer = apisixFactory.Apisix().V2beta2().ApisixRoutes().Informer()
case config.ApisixRouteV2beta3:
apisixRouteInformer = apisixFactory.Apisix().V2beta3().ApisixRoutes().Informer()
case config.ApisixRouteV2:
apisixRouteInformer = apisixFactory.Apisix().V2().ApisixRoutes().Informer()
default:
panic(fmt.Errorf("unsupported ApisixRoute version %s", c.cfg.Kubernetes.ApisixRouteVersion))
}
switch c.cfg.Kubernetes.ApisixTlsVersion {
case config.ApisixV2beta3:
apisixTlsInformer = apisixFactory.Apisix().V2beta3().ApisixTlses().Informer()
case config.ApisixV2:
apisixTlsInformer = apisixFactory.Apisix().V2().ApisixTlses().Informer()
default:
panic(fmt.Errorf("unsupported ApisixTls version %s", c.cfg.Kubernetes.ApisixTlsVersion))
}
switch c.cfg.Kubernetes.ApisixClusterConfigVersion {
case config.ApisixV2beta3:
apisixClusterConfigInformer = apisixFactory.Apisix().V2beta3().ApisixClusterConfigs().Informer()
case config.ApisixV2:
apisixClusterConfigInformer = apisixFactory.Apisix().V2().ApisixClusterConfigs().Informer()
default:
panic(fmt.Errorf("unsupported ApisixClusterConfig version %v", c.cfg.Kubernetes.ApisixClusterConfigVersion))
}
switch c.cfg.Kubernetes.ApisixConsumerVersion {
case config.ApisixRouteV2beta3:
apisixConsumerInformer = apisixFactory.Apisix().V2beta3().ApisixConsumers().Informer()
case config.ApisixRouteV2:
apisixConsumerInformer = apisixFactory.Apisix().V2().ApisixConsumers().Informer()
}
c.namespaceInformer = kubeFactory.Core().V1().Namespaces().Informer()
c.podInformer = kubeFactory.Core().V1().Pods().Informer()
c.svcInformer = kubeFactory.Core().V1().Services().Informer()
c.ingressInformer = ingressInformer
c.apisixRouteInformer = apisixRouteInformer
c.apisixUpstreamInformer = apisixFactory.Apisix().V2beta3().ApisixUpstreams().Informer()
c.apisixClusterConfigInformer = apisixClusterConfigInformer
c.secretInformer = kubeFactory.Core().V1().Secrets().Informer()
c.apisixTlsInformer = apisixTlsInformer
c.apisixConsumerInformer = apisixConsumerInformer
c.apisixPluginConfigInformer = apisixFactory.Apisix().V2beta3().ApisixPluginConfigs().Informer()
if c.cfg.Kubernetes.WatchEndpointSlices {
c.endpointSliceController = c.newEndpointSliceController()
} else {
c.endpointsController = c.newEndpointsController()
}
c.namespaceController = c.newNamespaceController()
c.podController = c.newPodController()
c.apisixUpstreamController = c.newApisixUpstreamController()
c.ingressController = c.newIngressController()
c.apisixRouteController = c.newApisixRouteController()
c.apisixClusterConfigController = c.newApisixClusterConfigController()
c.apisixTlsController = c.newApisixTlsController()
c.secretController = c.newSecretController()
c.apisixConsumerController = c.newApisixConsumerController()
c.apisixPluginConfigController = c.newApisixPluginConfigController()
c.gatewayController = c.newGatewayController()
}
// recorderEvent records events for resources
func (c *Controller) recorderEvent(object runtime.Object, eventtype, reason string, err error) {
if err != nil {
message := fmt.Sprintf(_messageResourceFailed, _component, err.Error())
c.recorder.Event(object, eventtype, reason, message)
} else {
message := fmt.Sprintf(_messageResourceSynced, _component)
c.recorder.Event(object, eventtype, reason, message)
}
}
// recorderEventS records events for resources with a caller-supplied message
func (c *Controller) recorderEventS(object runtime.Object, eventtype, reason string, msg string) {
c.recorder.Event(object, eventtype, reason, msg)
}
func (c *Controller) goAttach(handler func()) {
c.wg.Add(1)
go func() {
defer c.wg.Done()
handler()
}()
}
// Eventf implements the resourcelock.EventRecorder interface.
func (c *Controller) Eventf(_ runtime.Object, eventType string, reason string, message string, _ ...interface{}) {
log.Infow(reason, zap.String("message", message), zap.String("event_type", eventType))
}
// Run launches the controller.
func (c *Controller) Run(stop chan struct{}) error {
rootCtx, rootCancel := context.WithCancel(context.Background())
defer rootCancel()
go func() {
<-stop
rootCancel()
}()
c.MetricsCollector.ResetLeader(false)
go func() {
if err := c.apiServer.Run(rootCtx.Done()); err != nil {
log.Errorf("failed to launch API Server: %s", err)
}
}()
lock := &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Namespace: c.namespace,
Name: c.cfg.Kubernetes.ElectionID,
},
Client: c.kubeClient.Client.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: c.name,
EventRecorder: c,
},
}
cfg := leaderelection.LeaderElectionConfig{
Lock: lock,
LeaseDuration: 15 * time.Second,
RenewDeadline: 5 * time.Second,
RetryPeriod: 2 * time.Second,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: c.run,
OnNewLeader: func(identity string) {
log.Warnf("found a new leader %s", identity)
if identity != c.name {
log.Infow("controller now is running as a candidate",
zap.String("namespace", c.namespace),
zap.String("pod", c.name),
)
c.MetricsCollector.ResetLeader(false)
// delete the old APISIX cluster, so that the cached state
// like synchronization won't be used next time the candidate
// becomes the leader again.
c.apisix.DeleteCluster(c.cfg.APISIX.DefaultClusterName)
}
},
OnStoppedLeading: func() {
log.Infow("controller now is running as a candidate",
zap.String("namespace", c.namespace),
zap.String("pod", c.name),
)
c.MetricsCollector.ResetLeader(false)
// delete the old APISIX cluster, so that the cached state
// like synchronization won't be used next time the candidate
// becomes the leader again.
c.apisix.DeleteCluster(c.cfg.APISIX.DefaultClusterName)
},
},
ReleaseOnCancel: true,
Name: "ingress-apisix",
}
elector, err := leaderelection.NewLeaderElector(cfg)
if err != nil {
log.Errorf("failed to create leader elector: %s", err.Error())
return err
}
election:
curCtx, cancel := context.WithCancel(rootCtx)
c.leaderContextCancelFunc = cancel
elector.Run(curCtx)
select {
case <-rootCtx.Done():
return nil
default:
goto election
}
}
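// run is the OnStartedLeading callback: it registers the default APISIX cluster, waits for it
// to sync, compares Kubernetes resources against APISIX objects, and then starts every
// informer and resource controller until the leader context is cancelled.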
func (c *Controller) run(ctx context.Context) {
log.Infow("controller tries to leading ...",
zap.String("namespace", c.namespace),
zap.String("pod", c.name),
)
var cancelFunc context.CancelFunc
ctx, cancelFunc = context.WithCancel(ctx)
defer cancelFunc()
// give up leader
defer c.leaderContextCancelFunc()
clusterOpts := &apisix.ClusterOptions{
Name: c.cfg.APISIX.DefaultClusterName,
AdminKey: c.cfg.APISIX.DefaultClusterAdminKey,
BaseURL: c.cfg.APISIX.DefaultClusterBaseURL,
MetricsCollector: c.MetricsCollector,
}
err := c.apisix.AddCluster(ctx, clusterOpts)
if err != nil && err != apisix.ErrDuplicatedCluster {
// TODO give up the leader role
log.Errorf("failed to add default cluster: %s", err)
return
}
if err := c.apisix.Cluster(c.cfg.APISIX.DefaultClusterName).HasSynced(ctx); err != nil {
// TODO give up the leader role
log.Errorf("failed to wait the default cluster to be ready: %s", err)
// re-create apisix cluster, used in next c.run
if err = c.apisix.UpdateCluster(ctx, clusterOpts); err != nil {
log.Errorf("failed to update default cluster: %s", err)
return
}
return
}
c.initWhenStartLeading()
// list namespaces and init watchingNamespaces
if err := c.initWatchingNamespacesByLabels(ctx); err != nil {
ctx.Done()
return
}
// compare resources of k8s with objects of APISIX
if err = c.CompareResources(ctx); err != nil {
ctx.Done()
return
}
c.goAttach(func() {
c.checkClusterHealth(ctx, cancelFunc)
})
c.goAttach(func() {
c.namespaceInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.podInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.epInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.svcInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.ingressInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.apisixRouteInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.apisixUpstreamInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.apisixClusterConfigInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.secretInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.apisixTlsInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.apisixConsumerInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.apisixPluginConfigInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.namespaceController.run(ctx)
})
c.goAttach(func() {
c.podController.run(ctx)
})
c.goAttach(func() {
if c.cfg.Kubernetes.WatchEndpointSlices {
c.endpointSliceController.run(ctx)
} else {
c.endpointsController.run(ctx)
}
})
if c.cfg.Kubernetes.EnableGatewayAPI {
c.goAttach(func() {
c.gatewayInformer.Run(ctx.Done())
})
c.goAttach(func() {
c.gatewayController.run(ctx)
})
}
c.goAttach(func() {
c.apisixUpstreamController.run(ctx)
})
c.goAttach(func() {
c.ingressController.run(ctx)
})
c.goAttach(func() {
c.apisixRouteController.run(ctx)
})
c.goAttach(func() {
c.apisixClusterConfigController.run(ctx)
})
c.goAttach(func() {
c.apisixTlsController.run(ctx)
})
c.goAttach(func() {
c.secretController.run(ctx)
})
c.goAttach(func() {
c.apisixConsumerController.run(ctx)
})
c.goAttach(func() {
c.apisixPluginConfigController.run(ctx)
})
c.MetricsCollector.ResetLeader(true)
log.Infow("controller now is running as leader",
zap.String("namespace", c.namespace),
zap.String("pod", c.name),
)
<-ctx.Done()
c.wg.Wait()
}
// isWatchingNamespace accepts a resource key, getting the namespace part
// and checking whether the namespace is being watched.
func (c *Controller) isWatchingNamespace(key string) (ok bool) {
if !validation.HasValueInSyncMap(c.watchingNamespaces) {
ok = true
return
}
ns, _, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
// Ignore resource with invalid key.
ok = false
log.Warnf("resource %s was ignored since: %s", key, err)
return
}
_, ok = c.watchingNamespaces.Load(ns)
return
}
func (c *Controller) syncSSL(ctx context.Context, ssl *apisixv1.Ssl, event types.EventType) error {
var (
err error
)
clusterName := c.cfg.APISIX.DefaultClusterName
if event == types.EventDelete {
err = c.apisix.Cluster(clusterName).SSL().Delete(ctx, ssl)
} else if event == types.EventUpdate {
_, err = c.apisix.Cluster(clusterName).SSL().Update(ctx, ssl)
} else {
_, err = c.apisix.Cluster(clusterName).SSL().Create(ctx, ssl)
}
return err
}
func (c *Controller) syncConsumer(ctx context.Context, consumer *apisixv1.Consumer, event types.EventType) (err error) {
clusterName := c.cfg.APISIX.DefaultClusterName
if event == types.EventDelete {
err = c.apisix.Cluster(clusterName).Consumer().Delete(ctx, consumer)
} else if event == types.EventUpdate {
_, err = c.apisix.Cluster(clusterName).Consumer().Update(ctx, consumer)
} else {
_, err = c.apisix.Cluster(clusterName).Consumer().Create(ctx, consumer)
}
return
}
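// syncEndpoint translates a Kubernetes endpoint object into APISIX upstream nodes, once per
// service port and per ApisixUpstream subset, and pushes the resulting node lists to every
// known APISIX cluster via syncUpstreamNodesChangeToCluster.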
func (c *Controller) syncEndpoint(ctx context.Context, ep kube.Endpoint) error {
namespace := ep.Namespace()
svcName := ep.ServiceName()
svc, err := c.svcLister.Services(ep.Namespace()).Get(svcName)
if err != nil {
if k8serrors.IsNotFound(err) {
log.Infof("service %s/%s not found", ep.Namespace(), svcName)
return nil
}
log.Errorf("failed to get service %s/%s: %s", ep.Namespace(), svcName, err)
return err
}
var subsets []configv2beta3.ApisixUpstreamSubset
subsets = append(subsets, configv2beta3.ApisixUpstreamSubset{})
au, err := c.apisixUpstreamLister.ApisixUpstreams(namespace).Get(svcName)
if err != nil {
if !k8serrors.IsNotFound(err) {
log.Errorf("failed to get ApisixUpstream %s/%s: %s", ep.Namespace(), svcName, err)
return err
}
} else if au.Spec != nil && len(au.Spec.Subsets) > 0 {
subsets = append(subsets, au.Spec.Subsets...)
}
clusters := c.apisix.ListClusters()
for _, port := range svc.Spec.Ports {
for _, subset := range subsets {
nodes, err := c.translator.TranslateUpstreamNodes(ep, port.Port, subset.Labels)
if err != nil {
log.Errorw("failed to translate upstream nodes",
zap.Error(err),
zap.Any("endpoints", ep),
zap.Int32("port", port.Port),
)
}
name := apisixv1.ComposeUpstreamName(namespace, svcName, subset.Name, port.Port)
for _, cluster := range clusters {
if err := c.syncUpstreamNodesChangeToCluster(ctx, cluster, nodes, name); err != nil {
return err
}
}
}
}
return nil
}
func (c *Controller) syncUpstreamNodesChangeToCluster(ctx context.Context, cluster apisix.Cluster, nodes apisixv1.UpstreamNodes, upsName string) error {
upstream, err := cluster.Upstream().Get(ctx, upsName)
if err != nil {
if err == apisixcache.ErrNotFound {
log.Warnw("upstream is not referenced",
zap.String("cluster", cluster.String()),
zap.String("upstream", upsName),
)
return nil
} else {
log.Errorw("failed to get upstream",
zap.String("upstream", upsName),
zap.String("cluster", cluster.String()),
zap.Error(err),
)
return err
}
}
upstream.Nodes = nodes
log.Debugw("upstream binds new nodes",
zap.Any("upstream", upstream),
zap.String("cluster", cluster.String()),
)
updated := &manifest{
upstreams: []*apisixv1.Upstream{upstream},
}
return c.syncManifests(ctx, nil, updated, nil)
}
func (c *Controller) checkClusterHealth(ctx context.Context, cancelFunc context.CancelFunc) {
defer cancelFunc()
t := time.NewTicker(5 * time.Second)
defer t.Stop()
for {
select {
case <-ctx.Done():
return
case <-t.C:
}
err := c.apisix.Cluster(c.cfg.APISIX.DefaultClusterName).HealthCheck(ctx)
if err != nil {
// Finally failed health check, then give up leader.
log.Warnf("failed to check health for default cluster: %s, give up leader", err)
c.apiServer.HealthState.Lock()
defer c.apiServer.HealthState.Unlock()
c.apiServer.HealthState.Err = err
return
}
log.Debugf("success check health for default cluster")
c.MetricsCollector.IncrCheckClusterHealth(c.name)
}
}
| [
"\"POD_NAME\"",
"\"POD_NAMESPACE\""
] | [] | [
"POD_NAMESPACE",
"POD_NAME"
] | [] | ["POD_NAMESPACE", "POD_NAME"] | go | 2 | 0 | |
src/main/resources/scripts/managewait.py | #
# The MIT License
#
# Copyright 2016 Vector Software, East Greenwich, Rhode Island USA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import subprocess
import os
import sys
import argparse
import shutil
import re
import time
class ManageWait():
def __init__(self, verbose, command_line, wait_time, wait_loops):
self.wait_time = wait_time
self.wait_loops = wait_loops
self.verbose = verbose
self.command_line = command_line
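# exec_manage shells out to VectorCAST's `manage` tool with the supplied command line and
# retries when the output reports that all FLEXlm licenses are in use, sleeping wait_time
# seconds between attempts for up to wait_loops tries before giving up with sys.exit(-1).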
def exec_manage(self, silent = False):
# Versions of VectorCAST prior to 2019 relied on the environment variable VECTORCAST_DIR.
# We will use that variable as a fall back if the VectorCAST executables aren't on the system path.
exe_env = os.environ.copy()
if 'VECTORCAST_DIR' in os.environ:
exe_env['PATH'] = os.pathsep.join([os.environ.get('PATH', ''), exe_env['VECTORCAST_DIR']])
callStr = "manage " + self.command_line
output = ''
if self.verbose:
output += "\nVerbose: %s" % callStr
# capture the output of the manage call
loop_count = 0
while 1:
loop_count += 1
p = subprocess.Popen(callStr,stdout=subprocess.PIPE,stderr=subprocess.STDOUT, shell=True, env=exe_env)
(out_mgt, out_mgt2) = p.communicate()
output += "\n" + out_mgt.rstrip()
# No point checking error code - errors are not propagated from
# clicast to manage
if "Licensed number of users already reached" in out_mgt:
if loop_count < self.wait_loops:
# Change FLEXlm Error to FLEXlm Err.. to prevent the Groovy script from
# marking retry attempts as an overall job failure
out_mgt = out_mgt.replace("FLEXlm Error", "FLEXlm Err..")
print out_mgt
msg = "Warning: Failed to obtain a license, sleeping %ds and then re-trying, attempt %d of %d" % (self.wait_time, loop_count+1, self.wait_loops)
print msg
output += "\n" + msg
time.sleep(self.wait_time)
else:
print out_mgt
msg = "ERROR: Failed to obtain a license after %d attempts, terminating" % self.wait_loops
print msg
output += "\n" + msg
sys.exit(-1)
else:
if not silent:
print out_mgt
break
return output
## main
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', help='Enable verbose output', action="store_true")
parser.add_argument('--command_line', help='Command line to pass to Manage', required=True)
parser.add_argument('--wait_time', help='Time (in seconds) to wait between execution attempts', type=int, default=30)
parser.add_argument('--wait_loops', help='Number of times to retry execution', type=int, default=1)
args = parser.parse_args()
manageWait = ManageWait(args.verbose, args.command_line, args.wait_time, args.wait_loops)
manageWait.exec_manage()
| [] | [] | [
"PATH"
] | [] | ["PATH"] | python | 1 | 0 | |
bentoml/_internal/frameworks/tensorflow_v2.py | import os
import re
import uuid
import typing as t
import logging
import pathlib
import functools
from typing import TYPE_CHECKING
from distutils.dir_util import copy_tree
from simple_di import inject
from simple_di import Provide
import bentoml
from bentoml import Tag
from bentoml.exceptions import BentoMLException
from bentoml.exceptions import MissingDependencyException
from ..types import LazyType
from ..runner.utils import Params
from ..utils.tensorflow import get_tf_version
from ..utils.tensorflow import is_gpu_available
from ..utils.tensorflow import hook_loaded_model
from .common.model_runner import BaseModelRunner
from ..configuration.containers import BentoMLContainer
logger = logging.getLogger(__name__)
try:
import tensorflow as tf # type: ignore
except ImportError: # pragma: no cover
raise MissingDependencyException(
"""\
`tensorflow` is required in order to use `bentoml.tensorflow`.
Instruction: `pip install tensorflow`
"""
)
try:
import tensorflow_hub as hub # type: ignore
from tensorflow_hub import resolve # type: ignore
from tensorflow_hub import native_module # type: ignore
except ImportError: # pragma: no cover
logger.warning(
"""\
If you want to use `bentoml.tensorflow.import_from_tfhub()`,
make sure to `pip install --upgrade tensorflow_hub` before using.
"""
)
hub = None
try:
import importlib.metadata as importlib_metadata
except ImportError:
import importlib_metadata
if TYPE_CHECKING:
from tensorflow_hub import Module as HubModule # type: ignore
from tensorflow_hub import KerasLayer # type: ignore
from .. import external_typing as ext
from ..types import PathType
from ..models import ModelStore
from ..external_typing import tensorflow as tf_ext
TFArgType = t.Union[t.List[t.Union[int, float]], ext.NpNDArray, tf_ext.Tensor]
MODULE_NAME = "bentoml.tensorflow_v2"
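# _clean_name derives a model name from a TF Hub identifier: for URLs it drops the scheme and
# host, for plain handles it keeps the last path component, and in both cases non-word
# characters are replaced with underscores so the result is a valid BentoML model name.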
def _clean_name(name: str) -> str: # pragma: no cover
if name.startswith(("http://", "https://")):
name = name.split("/", maxsplit=3)[-1]
else:
name = name.split("/")[-1]
return re.sub(r"\W|^(?=\d)-", "_", name)
@inject
def load(
bento_tag: t.Union[str, Tag],
tags: t.Optional[t.List[str]] = None,
options: t.Optional["tf_ext.SaveOptions"] = None,
load_as_hub_module: t.Optional[bool] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> t.Union["tf_ext.AutoTrackable", "tf_ext.Module", "HubModule", "KerasLayer"]:
"""
Load a model from BentoML local modelstore with given name.
Args:
bento_tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
tags (:code:`str`, `optional`, defaults to `None`):
A set of strings specifying the graph variant to use, if loading from a v1 module.
options (:code:`tensorflow.saved_model.SaveOptions`, `optional`, default to :code:`None`):
:code:`tensorflow.saved_model.LoadOptions` object that specifies options for loading. This
argument can only be used from TensorFlow 2.3 onwards.
load_as_hub_module (`bool`, `optional`, default to :code:`True`):
Load the given weight that is saved from tfhub as either `hub.KerasLayer` or `hub.Module`.
The latter only applies for TF1.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`SavedModel`: an instance of :obj:`SavedModel` format from BentoML modelstore.
Examples:
.. code-block:: python
import bentoml
# load a model back into memory
model = bentoml.tensorflow.load("my_tensorflow_model")
""" # noqa: LN001
model = model_store.get(bento_tag)
if model.info.module not in (MODULE_NAME, __name__):
raise BentoMLException(
f"Model {bento_tag} was saved with module {model.info.module}, failed loading with {MODULE_NAME}."
)
if model.info.context["import_from_tfhub"]:
assert load_as_hub_module is not None, (
"You have to specified `load_as_hub_module=True | False`"
" to load a `tensorflow_hub` module. If True is chosen,"
" then BentoML will return either an instance of `hub.KerasLayer`"
" or `hub.Module` depending on your TF version. For most usecase,"
" we recommend to keep `load_as_hub_module=True`. If you wish to extend"
" the functionalities of the given model, set `load_as_hub_module=False`"
" will return a SavedModel object."
)
if hub is None:
raise MissingDependencyException(
"""\
`tensorflow_hub` does not exist.
Make sure to `pip install --upgrade tensorflow_hub` before using.
"""
)
module_path = model.path_of(model.info.options["local_path"])
if load_as_hub_module:
return (
hub.Module(module_path)
if get_tf_version().startswith("1")
else hub.KerasLayer(module_path)
)
# In case users want to load as a SavedModel file object.
# https://github.com/tensorflow/hub/blob/master/tensorflow_hub/module_v2.py#L93
is_hub_module_v1: bool = tf.io.gfile.exists( # type: ignore
native_module.get_module_proto_path(module_path)
)
if tags is None and is_hub_module_v1:
tags = []
if options is not None:
if not LazyType(
"tensorflow.python.saved_model.save_options.SaveOptions"
).isinstance(options):
raise BentoMLException(
f"`options` has to be of type `tf.saved_model.SaveOptions`, got {type(options)} instead."
)
if not hasattr(getattr(tf, "saved_model", None), "LoadOptions"):
raise NotImplementedError(
"options are not supported for TF < 2.3.x,"
f" Current version: {get_tf_version()}"
)
tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2( # type: ignore
module_path,
tags=tags,
options=options, # type: ignore
)
else:
tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2( # type: ignore
module_path,
tags=tags,
)
tf_model._is_hub_module_v1 = (
is_hub_module_v1 # pylint: disable=protected-access # noqa
)
return tf_model
else:
tf_model: "tf_ext.AutoTrackable" = tf.compat.v1.saved_model.load_v2(model.path) # type: ignore
return hook_loaded_model(tf_model, MODULE_NAME)
@inject
def import_from_tfhub(
identifier: t.Union[str, "HubModule", "KerasLayer"],
name: t.Optional[str] = None,
labels: t.Optional[t.Dict[str, str]] = None,
custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
metadata: t.Optional[t.Dict[str, t.Any]] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
"""
Import a model from `Tensorflow Hub <https://tfhub.dev/>`_ to BentoML modelstore.
Args:
identifier (:code:`Union[str, tensorflow_hub.Module, tensorflow_hub.KerasLayer]`): Identifier accepts
two type of inputs:
- if :code:`identifier` is either of type :code:`tensorflow_hub.Module` (**legacy** `tensorflow_hub`) or :code:`tensorflow_hub.KerasLayer` (`tensorflow_hub`), then we will save the given model to a :code:`SavedModel` format.
- if :code:`identifier` is a :obj:`str`, we assume that it is a URI retrieved from Tensorflow Hub. We then clean the given URI and fetch a local copy of the given model into the BentoML modelstore.
name (:code:`str`, `optional`, default to `None`):
Optional name for the saved model. If None, then name will be generated from :code:`identifier`.
labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
user-defined labels for managing models, e.g. team=nlp, stage=dev
custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
user-defined additional python objects to be saved alongside the model,
e.g. a tokenizer instance, preprocessor function, model configuration json
metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Custom metadata for given model.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
Returns:
:obj:`~bentoml.Tag`: A :obj:`~bentoml.Tag` object that can be used to retrieve the model with :func:`bentoml.tensorflow.load`:
Example for importing a model from Tensorflow Hub:
.. code-block:: python
import tensorflow_text as text # noqa # pylint: disable
import bentoml
tag = bentoml.tensorflow.import_from_tfhub("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
# load model back with `load`:
model = bentoml.tensorflow.load(tag, load_as_hub_module=True)
Example for importing a custom Tensorflow Hub model:
.. code-block:: python
import tensorflow as tf
import tensorflow_hub as hub
import bentoml
def _plus_one_model_tf2():
obj = tf.train.Checkpoint()
@tf.function(input_signature=[tf.TensorSpec(None, dtype=tf.float32)])
def plus_one(x):
return x + 1
obj.__call__ = plus_one
return obj
# then save the given model to BentoML modelstore:
model = _plus_one_model_tf2()
tag = bentoml.tensorflow.import_from_tfhub(model)
""" # noqa
if hub is None:
raise MissingDependencyException(
"""\
`tensorflow_hub` does not exist.
Make sure to `pip install --upgrade tensorflow_hub` before using.
"""
)
context: t.Dict[str, t.Any] = {
"framework_name": "tensorflow",
"pip_dependencies": [
f"tensorflow=={get_tf_version()}",
f"tensorflow_hub=={importlib_metadata.version('tensorflow_hub')}",
],
"import_from_tfhub": True,
}
if name is None:
if isinstance(identifier, str):
name = _clean_name(identifier)
else:
name = f"{identifier.__class__.__name__}_{uuid.uuid4().hex[:5].upper()}"
with bentoml.models.create(
name,
module=MODULE_NAME,
options=None,
context=context,
metadata=metadata,
labels=labels,
custom_objects=custom_objects,
) as _model:
if isinstance(identifier, str):
current_cache_dir = os.environ.get("TFHUB_CACHE_DIR")
os.environ["TFHUB_CACHE_DIR"] = _model.path
fpath: str = resolve(identifier)
folder = fpath.split("/")[-1]
_model.info.options = {"model": identifier, "local_path": folder}
if current_cache_dir is not None:
os.environ["TFHUB_CACHE_DIR"] = current_cache_dir
else:
if hasattr(identifier, "export"):
# hub.Module.export()
with tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph()) as sess: # type: ignore
sess.run(tf.compat.v1.global_variables_initializer()) # type: ignore
identifier.export(_model.path, sess) # type: ignore
else:
tf.saved_model.save(identifier, _model.path)
_model.info.options = {
"model": identifier.__class__.__name__,
"local_path": ".",
}
return _model.tag
@inject
def save(
name: str,
model: t.Union["PathType", "tf_ext.KerasModel", "tf_ext.Module"],
*,
signatures: t.Optional["tf_ext.ConcreteFunction"] = None,
options: t.Optional["tf_ext.SaveOptions"] = None,
labels: t.Optional[t.Dict[str, str]] = None,
custom_objects: t.Optional[t.Dict[str, t.Any]] = None,
metadata: t.Optional[t.Dict[str, t.Any]] = None,
model_store: "ModelStore" = Provide[BentoMLContainer.model_store],
) -> Tag:
"""
Save a model instance to BentoML modelstore.
Args:
name (:code:`str`):
Name for given model instance. This should pass Python identifier check.
model (:code:`Union[keras.Model, tf.Module, path-like objects]`):
Instance of model to be saved
labels (:code:`Dict[str, str]`, `optional`, default to :code:`None`):
user-defined labels for managing models, e.g. team=nlp, stage=dev
custom_objects (:code:`Dict[str, Any]]`, `optional`, default to :code:`None`):
user-defined additional python objects to be saved alongside the model,
e.g. a tokenizer instance, preprocessor function, model configuration json
metadata (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Custom metadata for given model.
model_store (:mod:`~bentoml._internal.models.store.ModelStore`, default to :mod:`BentoMLContainer.model_store`):
BentoML modelstore, provided by DI Container.
signatures (:code:`Union[Callable[..., Any], dict]`, `optional`, default to :code:`None`):
Refers to `Signatures explanation <https://www.tensorflow.org/api_docs/python/tf/saved_model/save>`_
from Tensorflow documentation for more information.
options (`tf.saved_model.SaveOptions`, `optional`, default to :code:`None`):
:obj:`tf.saved_model.SaveOptions` object that specifies options for saving.
Raises:
ValueError: If :obj:`model` is not trackable.
Returns:
:obj:`~bentoml.Tag`: A :obj:`tag` with a format `name:version` where `name` is the user-defined model's name, and a generated `version` by BentoML.
Examples:
.. code-block:: python
import tensorflow as tf
import numpy as np
import bentoml
class NativeModel(tf.Module):
def __init__(self):
super().__init__()
self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])
self.dense = lambda inputs: tf.matmul(inputs, self.weights)
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 5], dtype=tf.float64, name="inputs")]
)
def __call__(self, inputs):
return self.dense(inputs)
# then save the given model to BentoML modelstore:
model = NativeModel()
tag = bentoml.tensorflow.save("native_toy", model)
.. note::
:code:`bentoml.tensorflow.save` API also supports saving a `RaggedTensor <https://www.tensorflow.org/guide/ragged_tensor>`_ model and a Keras model. If you choose to save a Keras model
with :code:`bentoml.tensorflow.save`, then the model will be saved under a :obj:`SavedModel` format instead of :obj:`.h5`.
""" # noqa
context: t.Dict[str, t.Any] = {
"framework_name": "tensorflow",
"pip_dependencies": [f"tensorflow=={get_tf_version()}"],
"import_from_tfhub": False,
}
with bentoml.models.create(
name,
module=MODULE_NAME,
options=None,
context=context,
labels=labels,
custom_objects=custom_objects,
metadata=metadata,
) as _model:
if isinstance(model, (str, bytes, os.PathLike, pathlib.Path)): # type: ignore[reportUnknownMemberType]
assert os.path.isdir(model)
copy_tree(str(model), _model.path)
else:
if options:
logger.warning(
f"Parameter 'options: {str(options)}' is ignored when "
f"using tensorflow {get_tf_version()}"
)
tf.saved_model.save(
model, _model.path, signatures=signatures, options=options
)
return _model.tag
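# _TensorflowRunner adapts a saved TensorFlow model to BentoML's runner interface: it pins
# execution to the requested device, binds the configured predict function (with any partial
# kwargs), converts incoming arguments to tensors, and returns results as NumPy arrays.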
class _TensorflowRunner(BaseModelRunner):
def __init__(
self,
tag: t.Union[str, Tag],
predict_fn_name: str,
device_id: str,
partial_kwargs: t.Optional[t.Dict[str, t.Any]],
name: t.Optional[str] = None,
):
super().__init__(tag, name=name)
self._device_id = device_id
self._configure(device_id)
self._predict_fn_name = predict_fn_name
self._partial_kwargs: t.Dict[str, t.Any] = (
partial_kwargs if partial_kwargs is not None else dict()
)
def _configure(self, device_id: str) -> None:
if "GPU" in device_id:
tf.config.set_visible_devices(device_id, "GPU")
self._config_proto = dict(
allow_soft_placement=True,
log_device_placement=False,
intra_op_parallelism_threads=self._num_threads,
inter_op_parallelism_threads=self._num_threads,
)
@property
def _num_threads(self) -> int:
if is_gpu_available() and self.resource_quota.on_gpu:
return 1
return int(round(self.resource_quota.cpu))
@property
def num_replica(self) -> int:
if is_gpu_available() and self.resource_quota.on_gpu:
return len(self.resource_quota.gpus)
return 1
def _setup(self) -> None:
self._model = load(self._tag, model_store=self.model_store)
raw_predict_fn = getattr(self._model, self._predict_fn_name) # type: ignore
self._predict_fn = functools.partial(raw_predict_fn, **self._partial_kwargs)
def _run_batch(self, *args: "TFArgType", **kwargs: "TFArgType") -> "ext.NpNDArray":
params = Params["TFArgType"](*args, **kwargs)
with tf.device(self._device_id): # type: ignore
def _mapping(item: "TFArgType") -> "tf_ext.TensorLike":
if not LazyType["tf_ext.TensorLike"]("tf.Tensor").isinstance(item):
return t.cast("tf_ext.TensorLike", tf.convert_to_tensor(item))
else:
return item
params = params.map(_mapping)
tf.compat.v1.global_variables_initializer() # type: ignore
res = self._predict_fn(*params.args, **params.kwargs)
return t.cast("ext.NpNDArray", res.numpy())
def load_runner(
tag: t.Union[str, Tag],
*,
predict_fn_name: str = "__call__",
device_id: str = "CPU:0",
name: t.Optional[str] = None,
partial_kwargs: t.Optional[t.Dict[str, t.Any]] = None,
) -> "_TensorflowRunner":
"""
Runner represents a unit of serving logic that can be scaled horizontally to
maximize throughput. `bentoml.tensorflow.load_runner` implements a Runner class that
wrap around a Tensorflow model, which optimize it for the BentoML runtime.
Args:
tag (:code:`Union[str, Tag]`):
Tag of a saved model in BentoML local modelstore.
predict_fn_name (:code:`str`, default to :code:`__call__`):
Inference function to be used.
partial_kwargs (:code:`Dict[str, Any]`, `optional`, default to :code:`None`):
Dictionary of partial kwargs that can be shared across different model.
device_id (:code:`str`, `optional`, default to the first CPU):
Optional devices to put the given model on. Refers to `Logical Devices <https://www.tensorflow.org/api_docs/python/tf/config/list_logical_devices>`_ from TF documentation.
Returns:
:obj:`~bentoml._internal.runner.Runner`: Runner instances for :mod:`bentoml.tensorflow` model
Examples:
.. code-block:: python
import bentoml
# load a runner from a given flag
runner = bentoml.tensorflow.load_runner(tag)
# load a runner on GPU:0
runner = bentoml.tensorflow.load_runner(tag, resource_quota=dict(gpus=0), device_id="GPU:0")
"""
return _TensorflowRunner(
tag=tag,
predict_fn_name=predict_fn_name,
device_id=device_id,
partial_kwargs=partial_kwargs,
name=name,
)
| [] | [] | [
"TFHUB_CACHE_DIR"
] | [] | ["TFHUB_CACHE_DIR"] | python | 1 | 0 | |
seal/wsgi.py | """
WSGI config for seal project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'seal.settings')
application = get_wsgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
pkg/k8s/client.go | package k8s
import (
"fmt"
"os"
accessclient "github.com/servicemeshinterface/smi-sdk-go/pkg/gen/client/access/clientset/versioned"
specsclient "github.com/servicemeshinterface/smi-sdk-go/pkg/gen/client/specs/clientset/versioned"
splitclient "github.com/servicemeshinterface/smi-sdk-go/pkg/gen/client/split/clientset/versioned"
"github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// Client is an interface for the various resource controllers.
type Client interface {
KubernetesClient() kubernetes.Interface
AccessClient() accessclient.Interface
SpecsClient() specsclient.Interface
SplitClient() splitclient.Interface
}
// Ensure the client wrapper fits the Client interface.
var _ Client = (*ClientWrapper)(nil)
// ClientWrapper holds the clients for the various resource controllers.
type ClientWrapper struct {
kubeClient *kubernetes.Clientset
accessClient *accessclient.Clientset
specsClient *specsclient.Clientset
splitClient *splitclient.Clientset
}
// NewClient creates and returns a ClientWrapper that satisfies the Client interface.
func NewClient(logger logrus.FieldLogger, masterURL, kubeConfig string) (Client, error) {
config, err := buildConfig(logger, masterURL, kubeConfig)
if err != nil {
return nil, err
}
kubeClient, err := buildKubernetesClient(logger, config)
if err != nil {
return nil, err
}
accessClient, err := buildSmiAccessClient(logger, config)
if err != nil {
return nil, err
}
specsClient, err := buildSmiSpecsClient(logger, config)
if err != nil {
return nil, err
}
splitClient, err := buildSmiSplitClient(logger, config)
if err != nil {
return nil, err
}
return &ClientWrapper{
kubeClient: kubeClient,
accessClient: accessClient,
specsClient: specsClient,
splitClient: splitClient,
}, nil
}
// buildConfig takes the master URL and kubeconfig, and returns an external or internal config.
func buildConfig(logger logrus.FieldLogger, masterURL, kubeConfig string) (*rest.Config, error) {
if os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "" {
// If these env vars are set, we can build an in-cluster config.
logger.Debug("Creating in-cluster client")
return rest.InClusterConfig()
}
if masterURL != "" || kubeConfig != "" {
logger.Debug("Creating cluster-external client from provided masterURL or kubeconfig")
return clientcmd.BuildConfigFromFlags(masterURL, kubeConfig)
}
return nil, fmt.Errorf("could not create client: missing masterURL or kubeConfig")
}
// KubernetesClient is used to get the kubernetes clientset.
func (w *ClientWrapper) KubernetesClient() kubernetes.Interface {
return w.kubeClient
}
// AccessClient is used to get the SMI Access clientset.
func (w *ClientWrapper) AccessClient() accessclient.Interface {
return w.accessClient
}
// SpecsClient is used to get the SMI Specs clientset.
func (w *ClientWrapper) SpecsClient() specsclient.Interface {
return w.specsClient
}
// SplitClient is used to get the SMI Split clientset.
func (w *ClientWrapper) SplitClient() splitclient.Interface {
return w.splitClient
}
// buildKubernetesClient returns a usable Kubernetes client.
func buildKubernetesClient(logger logrus.FieldLogger, config *rest.Config) (*kubernetes.Clientset, error) {
logger.Debug("Building Kubernetes Client...")
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("unable to create kubernetes client: %v", err)
}
return client, nil
}
// buildSmiAccessClient returns a client to manage SMI Access objects.
func buildSmiAccessClient(logger logrus.FieldLogger, config *rest.Config) (*accessclient.Clientset, error) {
logger.Debug("Building SMI Access Client...")
client, err := accessclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("unable to create SMI Access Client: %v", err)
}
return client, nil
}
// buildSmiSpecsClient returns a client to manage SMI Specs objects.
func buildSmiSpecsClient(logger logrus.FieldLogger, config *rest.Config) (*specsclient.Clientset, error) {
logger.Debug("Building SMI Specs Client...")
client, err := specsclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("unable to create SMI Specs Client: %v", err)
}
return client, nil
}
// buildSmiSplitClient returns a client to manage SMI Split objects.
func buildSmiSplitClient(logger logrus.FieldLogger, config *rest.Config) (*splitclient.Clientset, error) {
logger.Debug("Building SMI Split Client...")
client, err := splitclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("unable to create SMI Split Client: %v", err)
}
return client, nil
}
| [
"\"KUBERNETES_SERVICE_HOST\"",
"\"KUBERNETES_SERVICE_PORT\""
] | [] | [
"KUBERNETES_SERVICE_HOST",
"KUBERNETES_SERVICE_PORT"
] | [] | ["KUBERNETES_SERVICE_HOST", "KUBERNETES_SERVICE_PORT"] | go | 2 | 0 | |
gslb/ingestion/gslb.go | /*
* Copyright 2019-2020 VMware, Inc.
* All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ingestion
import (
"context"
"errors"
"flag"
"fmt"
"os"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/vmware/alb-sdk/go/models"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/apiserver"
avictrl "github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/cache"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/gslbutils"
"github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/nodes"
gouuid "github.com/google/uuid"
oshiftclient "github.com/openshift/client-go/route/clientset/versioned"
"github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/pkg/utils"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
gslbalphav1 "github.com/vmware/global-load-balancing-services-for-kubernetes/pkg/apis/amko/v1alpha1"
gslbcs "github.com/vmware/global-load-balancing-services-for-kubernetes/pkg/client/v1alpha1/clientset/versioned"
gslbinformers "github.com/vmware/global-load-balancing-services-for-kubernetes/pkg/client/v1alpha1/informers/externalversions"
gslblisters "github.com/vmware/global-load-balancing-services-for-kubernetes/pkg/client/v1alpha1/listers/amko/v1alpha1"
gdpcs "github.com/vmware/global-load-balancing-services-for-kubernetes/pkg/client/v1alpha2/clientset/versioned"
gdpinformers "github.com/vmware/global-load-balancing-services-for-kubernetes/pkg/client/v1alpha2/informers/externalversions"
corev1 "k8s.io/api/core/v1"
avicache "github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/cache"
avirest "github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/rest"
aviretry "github.com/vmware/global-load-balancing-services-for-kubernetes/gslb/retry"
crd "github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/pkg/client/v1alpha1/clientset/versioned"
akoinformer "github.com/vmware/load-balancer-and-ingress-services-for-kubernetes/pkg/client/v1alpha1/informers/externalversions"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
var pendingClusters map[KubeClusterDetails]struct{}
const (
BootupMsg = "starting up amko"
BootupSyncMsg = "syncing all objects"
BootupSyncEndMsg = "synced all objects"
AcceptedMsg = "success: gslb config accepted"
ControllerNotLeaderMsg = "error: controller not a leader"
InvalidConfigMsg = "error: invalid gslb config"
EditRestartMsg = "gslb config edited, amko needs a restart"
AlreadySetMsg = "error: can't add another gslbconfig"
NoSecretMsg = "error: secret object doesn't exist"
KubeConfigErr = "error: provided kubeconfig has an error"
ControllerAPIErr = "error: issue in connecting to the controller API"
ClusterHealthCheckErr = "error: cluster healthcheck failed, "
)
type KubeClusterDetails struct {
clusterName string
kubeconfig string
kubeapi string
informers *utils.Informers
}
func GetNewKubeClusterDetails(clusterName, kubeConfig, kubeapi string, informers *utils.Informers) KubeClusterDetails {
return KubeClusterDetails{
clusterName: clusterName,
kubeconfig: kubeConfig,
kubeapi: kubeapi,
informers: informers,
}
}
func (kc KubeClusterDetails) GetClusterContextName() string {
return kc.clusterName
}
type K8SInformers struct {
Cs kubernetes.Interface
}
type ClusterCache struct {
clusterName string
}
type InitializeGSLBMemberClustersFn func(string, []gslbalphav1.MemberCluster) ([]*GSLBMemberController, error)
type GSLBConfigAddfn func(obj interface{}, f InitializeGSLBMemberClustersFn) error
var (
masterURL string
kubeConfig string
insideCluster bool
membersKubeConfig string
stopCh <-chan struct{}
cacheOnce sync.Once
informerTimeout int64
)
func GetStopChannel() <-chan struct{} {
return stopCh
}
func SetInformerListTimeout(val int64) {
informerTimeout = val
}
type GSLBConfigController struct {
kubeclientset kubernetes.Interface
gslbclientset gslbcs.Interface
gslbLister gslblisters.GSLBConfigLister
gslbSynced cache.InformerSynced
workqueue workqueue.RateLimitingInterface
recorder record.EventRecorder
}
func (gslbController *GSLBConfigController) Cleanup() {
gslbutils.Logf("object: GSLBConfigController, msg: %s", "cleaning up the entire GSLB configuration")
// unset GSLBConfig and be prepared to take in the next GSLB config object
gslbutils.SetGSLBConfig(false)
}
func (gslbController *GSLBConfigController) Run(stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
gslbutils.Logf("object: GSLBConfigController, msg: %s", "starting the workers")
<-stopCh
gslbutils.Logf("object: GSLBConfigController, msg: %s", "shutting down the workers")
return nil
}
func initFlags() {
gslbutils.Logf("object: main, msg: %s", "initializing the flags")
defKubeConfig := os.Getenv("HOME") + "/.kube/config"
flag.StringVar(&kubeConfig, "kubeconfigpath", defKubeConfig, "Path to kubeconfig. Only required if out-of-cluster.")
	flag.StringVar(&masterURL, "master", "", "The address of the kubernetes API server. Overrides any value in kubeconfig, only required if out-of-cluster.")
gslbutils.Logf("master: %s, kubeconfig: %s, msg: %s", masterURL, kubeConfig, "fetched from cmd")
}
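// getGSLBConfigChecksum hashes the GSLB leader details (IP, version, credentials), the sorted member
// cluster contexts and the refresh interval of a GSLBConfig spec, so that meaningful spec edits can be
// detected by comparing the checksums of the old and new objects.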
func getGSLBConfigChecksum(gc *gslbalphav1.GSLBConfig) uint32 {
var cksum uint32
gcSpec := gc.Spec.DeepCopy()
if gcSpec == nil {
gslbutils.Errf("gslb config %s in namespace %s has no spec, can't calculate checksum", gc.GetObjectMeta().GetName(),
gc.GetObjectMeta().GetNamespace())
return cksum
}
cksum += utils.Hash(gcSpec.GSLBLeader.ControllerIP) + utils.Hash(gcSpec.GSLBLeader.ControllerVersion) +
utils.Hash(gcSpec.GSLBLeader.Credentials)
memberClusters := []string{}
for _, c := range gcSpec.MemberClusters {
memberClusters = append(memberClusters, c.ClusterContext)
}
sort.Strings(memberClusters)
cksum += utils.Hash(utils.Stringify(memberClusters)) + utils.Hash(strconv.Itoa(gcSpec.RefreshInterval))
return cksum
}
// GetNewController builds the GSLB Controller which has an informer for GSLB Config object
func GetNewController(kubeclientset kubernetes.Interface, gslbclientset gslbcs.Interface,
gslbInformerFactory gslbinformers.SharedInformerFactory,
AddGSLBConfigFunc GSLBConfigAddfn,
initializeMemberClusters InitializeGSLBMemberClustersFn) *GSLBConfigController {
gslbInformer := gslbInformerFactory.Amko().V1alpha1().GSLBConfigs()
gslbController := &GSLBConfigController{
kubeclientset: kubeclientset,
gslbclientset: gslbclientset,
gslbLister: gslbInformer.Lister(),
gslbSynced: gslbInformer.Informer().HasSynced,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "gslb-configs"),
}
gslbutils.Logf("object: GSLBConfigController, msg: %s", "setting up event handlers")
	// Event handlers for changes to GSLBConfig objects
gslbInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
err := AddGSLBConfigFunc(obj, initializeMemberClusters)
if err != nil {
gslbutils.Errf("error in adding GSLBConfig object: %v", err)
}
},
// Update not allowed for the GSLB Cluster Config object
DeleteFunc: func(obj interface{}) {
gcObj := obj.(*gslbalphav1.GSLBConfig)
// Cleanup everything
gcName, gcNS := gslbutils.GetGSLBConfigNameAndNS()
if gcName != gcObj.GetObjectMeta().GetName() || gcNS != gcObj.GetObjectMeta().GetNamespace() {
// not the GSLBConfig object which was accepted
return
}
gslbController.Cleanup()
},
UpdateFunc: func(oldObj interface{}, newObj interface{}) {
oldGc := oldObj.(*gslbalphav1.GSLBConfig)
newGc := newObj.(*gslbalphav1.GSLBConfig)
if oldGc.ResourceVersion == newGc.ResourceVersion {
return
}
existingGCName, existingGCNamespace := gslbutils.GetGSLBConfigNameAndNS()
if existingGCName != oldGc.GetObjectMeta().GetName() || existingGCNamespace != oldGc.GetObjectMeta().GetNamespace() {
gslbutils.Warnf("a GSLBConfig %s already exists in namespace %s, ignoring the updates to this object", existingGCName,
existingGCNamespace)
return
}
if oldGc.Spec.LogLevel != newGc.Spec.LogLevel {
gslbutils.Logf("log level changed")
if gslbutils.IsLogLevelValid(newGc.Spec.LogLevel) {
utils.AviLog.SetLevel(newGc.Spec.LogLevel)
gslbutils.Logf("setting the new log level as %s", newGc.Spec.LogLevel)
} else {
gslbutils.Errf("log level %s unrecognized", newGc.Spec.LogLevel)
}
}
if oldGc.Spec.GSLBLeader.ControllerIP != newGc.Spec.GSLBLeader.ControllerIP {
gslbutils.Warnf("GSLB Leader IP has changed, will restart")
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeWarning, gslbutils.AMKOShutdown, "GSLB Leader IP changed")
apiserver.GetAmkoAPIServer().ShutDown()
return
}
if getGSLBConfigChecksum(oldGc) == getGSLBConfigChecksum(newGc) {
return
}
gslbutils.Warnf("an update has been made to the GSLBConfig object, AMKO needs a reboot to register the changes")
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeWarning, gslbutils.AMKOShutdown, EditRestartMsg)
gslbutils.UpdateGSLBConfigStatus(EditRestartMsg)
gslbutils.SetGSLBConfig(true)
},
})
return gslbController
}
// CheckAcceptedGSLBConfigAndInitialize checks whether there's already an accepted GSLBConfig object that
// exists. If yes, we take that and set it as our GSLB configuration.
func CheckAcceptedGSLBConfigAndInitialize(gcList *gslbalphav1.GSLBConfigList) (bool, error) {
gcObjs := gcList.Items
var acceptedGC *gslbalphav1.GSLBConfig
for _, gcObj := range gcObjs {
if gcObj.Status.State == AcceptedMsg {
if acceptedGC == nil {
acceptedGC = &gcObj
} else {
				// there is more than one accepted GSLBConfig object, which is an undefined state
gslbutils.Errf("ns: %s, msg: more than one GSLBConfig objects which were accepted, undefined state, can't do a full sync",
gslbutils.AVISystem)
return false, errors.New("more than one GSLBConfig objects in accepted state")
}
}
}
if acceptedGC != nil {
err := AddGSLBConfigObject(acceptedGC, InitializeGSLBMemberClusters)
if err != nil {
return false, err
}
return true, nil
}
return false, nil
}
// CheckGSLBConfigsAndInitialize iterates through all the GSLBConfig objects in the system and does:
// 1. add a GSLBConfig object if only one GSLBConfig object exists with accepted state.
// 2. add a GSLBConfig object if only one GSLBConfig object (in non-accepted state).
// 3. returns false if there was an error in either of the above two cases, else returns true.
func CheckGSLBConfigsAndInitialize() bool {
gcList, err := gslbutils.AMKOControlConfig().GSLBClientset().AmkoV1alpha1().GSLBConfigs(gslbutils.AVISystem).List(context.TODO(), metav1.ListOptions{TimeoutSeconds: &informerTimeout})
if err != nil {
gslbutils.Errf("ns: %s, error in listing the GSLBConfig objects, %s, %s", gslbutils.AVISystem,
err.Error(), "can't do a full sync")
return false
}
if len(gcList.Items) == 0 {
gslbutils.Logf("ns: %s, no GSLBConfig objects found during bootup, will skip fullsync", gslbutils.AVISystem)
return false
}
	added, err := CheckAcceptedGSLBConfigAndInitialize(gcList)
if err != nil {
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeWarning, gslbutils.GSLBConfigError, err.Error())
return false
}
if added {
return true
}
if len(gcList.Items) > 1 {
// more than one GC objects exist and none of them were already accepted, we panic
gslbutils.LogAndPanic("more than one GSLBConfig objects in " + gslbutils.AVISystem + " exist, please add only one")
}
gslbutils.Logf("ns: %s, msg: found a GSLBConfig object", gslbutils.AVISystem)
if err := AddGSLBConfigObject(&gcList.Items[0], InitializeGSLBMemberClusters); err != nil {
gslbutils.Warnf(err.Error())
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeWarning, gslbutils.GSLBConfigError, err.Error())
return false
}
return true
}
// IsGSLBConfigValid returns the GSLBConfig object if it was created
// in the "avi-system" namespace, and an error otherwise.
// TODO: Validate the controllers inside the config object
func IsGSLBConfigValid(obj interface{}) (*gslbalphav1.GSLBConfig, error) {
	config := obj.(*gslbalphav1.GSLBConfig)
	if config.ObjectMeta.Namespace == gslbutils.AVISystem {
		return config, nil
	}
	return nil, errors.New("invalid gslb config, namespace can only be avi-system")
}
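// PublishChangeToRestLayer pushes a GS cache key (tenant/name) onto the graph layer workqueue so that
// the REST layer reconciles the corresponding GslbService.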
func PublishChangeToRestLayer(gsKey interface{}, sharedQ *utils.WorkerQueue) {
aviCacheKey, ok := gsKey.(avicache.TenantName)
if !ok {
gslbutils.Errf("CacheKey: %v, msg: cache key malformed, not publishing to rest layer", gsKey)
return
}
nodes.PublishKeyToRestLayer(aviCacheKey.Tenant, aviCacheKey.Name, aviCacheKey.Name+"/"+aviCacheKey.Tenant, sharedQ)
}
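// CheckAndSetGslbLeader queries the Avi site for GSLB leadership and records the result in gslbutils.
// It returns an error when the query fails (also marking a resync as required) or when the site is not
// the GSLB leader.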
func CheckAndSetGslbLeader() error {
var leader bool
leader, err := avicache.IsAviSiteLeader()
if err != nil {
gslbutils.SetResyncRequired(true)
return err
}
if leader {
gslbutils.SetControllerAsLeader()
return nil
}
gslbutils.SetControllerAsFollower()
return errors.New("AVI site is not the GSLB leader")
}
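// ResyncNodesToRestLayer re-evaluates GSLB leadership and, when the controller has just become the
// leader or a resync was already pending, republishes all graph keys to the REST layer.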
func ResyncNodesToRestLayer() {
prevStateCtrl := gslbutils.IsControllerLeader()
err := CheckAndSetGslbLeader()
if err != nil {
// controller details can't be fetched due to some error, so return
gslbutils.Errf("error fetching Gslb leader details, %s", err.Error())
gslbutils.SetResyncRequired(true)
return
}
newStateCtrl := gslbutils.IsControllerLeader()
	if !newStateCtrl {
// controller is a follower, set resync and return
gslbutils.Errf("controller is a follower, can't re-sync")
// will try to re-sync next time
gslbutils.SetResyncRequired(true)
return
}
// controller is the leader
if prevStateCtrl != newStateCtrl {
gslbutils.Logf("Gslb controller state has changed from follower to leader")
gslbutils.SetResyncRequired(true)
}
if !gslbutils.IsResyncRequired() {
gslbutils.Logf("resync not required")
return
}
// re-sync is required anyway
gslbutils.Logf("Gslb leader controller re-sync required, will perform re-sync now")
nodes.PublishAllGraphKeys()
// once syncing is done, no further resync required
gslbutils.SetResyncRequired(false)
}
// CacheRefreshRoutine fetches the objects in the AVI controller and finds out
// the delta between the existing and the new objects.
func CacheRefreshRoutine() {
gslbutils.Logf("starting AVI cache refresh...\ncreating a new AVI cache")
// Check if the controller is leader or not, return if not.
err := CheckAndSetGslbLeader()
if err != nil {
gslbutils.Errf("error in verifying site as GSLB leader: %s", err.Error())
return
}
newAviCache := avicache.PopulateGSCache(false)
existingAviCache := avicache.GetAviCache()
sharedQ := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
// The refresh cycle builds a new set of AVI objects in `newAviCache` and compares them with
// the existing avi cache. If a discrepancy is found, we just write the key to layer 3.
for key, obj := range existingAviCache.Cache {
existingGSObj, ok := obj.(*avicache.AviGSCache)
if !ok {
gslbutils.Errf("CacheKey: %v, CacheObj: %v, msg: existing GSLB Object in avi cache malformed", key, existingGSObj)
continue
}
newGS, found := newAviCache.AviCacheGet(key)
if !found {
existingAviCache.AviCacheAdd(key, nil)
PublishChangeToRestLayer(key, sharedQ)
continue
}
newGSObj, ok := newGS.(*avicache.AviGSCache)
if !ok {
gslbutils.Warnf("CacheKey: %v, CacheObj: %v, msg: new GSLB object in avi cache malformed, will update", key,
newGSObj)
continue
}
if existingGSObj.CloudConfigCksum != newGSObj.CloudConfigCksum {
gslbutils.Logf("CacheKey: %v, CacheObj: %v, msg: GSLB Service has changed in AVI, will update", key, obj)
// First update the newly fetched avi cache in the existing avi cache key
existingAviCache.AviCacheAdd(key, newGSObj)
PublishChangeToRestLayer(key, sharedQ)
}
}
gslbutils.Logf("AVI Cache refresh done")
}
// GenerateKubeConfig reads the members' kubeconfig from the GSLB_CONFIG environment variable
// and writes it to the file at gslbutils.GSLBKubePath.
func GenerateKubeConfig() error {
membersKubeConfig = os.Getenv("GSLB_CONFIG")
if membersKubeConfig == "" {
utils.AviLog.Fatal("GSLB_CONFIG environment variable not set, exiting...")
return errors.New("GSLB_CONFIG environment variable not set, exiting")
}
	f, err := os.Create(gslbutils.GSLBKubePath)
	if err != nil {
		return errors.New("Error in creating file: " + err.Error())
	}
	defer f.Close()
_, err = f.WriteString(membersKubeConfig)
if err != nil {
return errors.New("Error in writing to config file: " + err.Error())
}
return nil
}
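// parseControllerDetails reads the GSLB leader's IP, version and credentials secret from the GSLBConfig
// spec, fetches the secret from the avi-system namespace and initializes the Avi controller configuration.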
func parseControllerDetails(gc *gslbalphav1.GSLBConfig) error {
// Read the gslb leader's credentials
leaderIP := gc.Spec.GSLBLeader.ControllerIP
leaderVersion := gc.Spec.GSLBLeader.ControllerVersion
leaderSecret := gc.Spec.GSLBLeader.Credentials
if leaderIP == "" {
gslbutils.Errf("controllerIP: %s, msg: Invalid controller IP for the leader", leaderIP)
gslbutils.UpdateGSLBConfigStatus(InvalidConfigMsg + " with controller IP " + leaderIP)
return errors.New("invalid leader IP")
}
if leaderSecret == "" {
gslbutils.Errf("credentials: %s, msg: Invalid controller secret for leader", leaderSecret)
gslbutils.UpdateGSLBConfigStatus(InvalidConfigMsg + " with leaderSecret " + leaderSecret)
return errors.New("invalid leader secret")
}
secretObj, err := gslbutils.AMKOControlConfig().Clientset().CoreV1().Secrets(gslbutils.AVISystem).Get(context.TODO(), leaderSecret, metav1.GetOptions{})
if err != nil || secretObj == nil {
gslbutils.Errf("Error in fetching leader controller secret %s in namespace %s, can't initialize controller",
leaderSecret, gslbutils.AVISystem)
gslbutils.UpdateGSLBConfigStatus(NoSecretMsg + " " + leaderSecret)
return errors.New("error in fetching leader secret")
}
ctrlUsername := secretObj.Data["username"]
ctrlPassword := secretObj.Data["password"]
gslbutils.NewAviControllerConfig(string(ctrlUsername), string(ctrlPassword), leaderIP, leaderVersion)
return nil
}
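// GetUUIDFromGSLBConfig sets the "created_by" identity of this AMKO instance: it reuses the UUID from the
// GSLBConfig annotation when present, otherwise it generates a new UUID and writes it back to the object.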
func GetUUIDFromGSLBConfig(gcObj *gslbalphav1.GSLBConfig) error {
annotation := gcObj.GetAnnotations()
// if a valid UUID is present in the GSLBConfig object, we set it for the current AMKO instance
if v, ok := annotation[gslbutils.AmkoUuid]; ok {
parsedUUID, err := gouuid.Parse(v)
if err != nil {
return fmt.Errorf("error in parsing annotation for UUID %s: %v", v, err)
}
gslbutils.AMKOControlConfig().SetCreatedByField("amko-" + parsedUUID.String())
return nil
}
gslbutils.Warnf("no annotation present in GSLBConfig object for %s, will generate a new one", gslbutils.AmkoUuid)
uuidVal, err := gouuid.NewUUID()
if err != nil {
return fmt.Errorf("error in generating new UUID for this AMKO instance: %v", err)
}
uuidStr := uuidVal.String()
gslbutils.AMKOControlConfig().SetCreatedByField("amko-" + uuidStr)
if err := gslbutils.UpdateAmkoUuidGSLBConfig(gcObj, uuidStr); err != nil {
return fmt.Errorf("error in updating GSLBConfig object: %v", err)
}
return nil
}
// AddGSLBConfigObject parses the gslb config object and starts informers
// for the member clusters.
func AddGSLBConfigObject(obj interface{}, initializeGSLBMemberClusters InitializeGSLBMemberClustersFn) error {
gslbObj := obj.(*gslbalphav1.GSLBConfig)
existingName, existingNS := gslbutils.GetGSLBConfigNameAndNS()
if existingName == "" && existingNS == "" {
gslbutils.SetGSLBConfigObj(gslbObj)
}
if gslbutils.IsGSLBConfigSet() {
// first check, if we have the same GSLB config which is set, if yes, no need to do anything
if existingName == gslbObj.GetObjectMeta().GetName() && existingNS == gslbObj.GetObjectMeta().GetNamespace() {
gslbutils.Logf("GSLB object set during bootup, ignoring this")
return nil
}
// else, populate the status field with an error message
gslbObj.Status.State = AlreadySetMsg
_, updateErr := gslbutils.AMKOControlConfig().GSLBClientset().AmkoV1alpha1().GSLBConfigs(gslbObj.Namespace).Update(context.TODO(), gslbObj, metav1.UpdateOptions{})
if updateErr != nil {
return fmt.Errorf("error in updating the status field of GSLB Config object %s in %s namespace",
gslbObj.GetObjectMeta().GetName(), gslbObj.GetObjectMeta().GetNamespace())
}
return fmt.Errorf("GSLB configuration is set already, can't change it. Delete and re-create the GSLB config object.")
}
gc, err := IsGSLBConfigValid(obj)
if err != nil {
gslbutils.UpdateGSLBConfigStatus(InvalidConfigMsg + err.Error())
return err
}
// check the AMKO UUID annotation and set it as "created_by" for this instance
if err := GetUUIDFromGSLBConfig(gslbObj); err != nil {
return fmt.Errorf("error in setting a new UUID for this AMKO instance: %v", err)
}
utils.AviLog.SetLevel(gc.Spec.LogLevel)
gslbutils.SetCustomFqdnMode(gc.Spec.UseCustomGlobalFqdn)
gslbutils.Debugf("ns: %s, gslbConfig: %s, msg: %s", gc.ObjectMeta.Namespace, gc.ObjectMeta.Name,
"got an add event")
// parse and set the controller configuration
err = parseControllerDetails(gc)
if err != nil {
return fmt.Errorf("error while parsing controller details: %s", err.Error())
}
err = avicache.VerifyVersion()
if err != nil {
gslbutils.UpdateGSLBConfigStatus(ControllerAPIErr + ", " + err.Error())
return err
}
// check if the controller details provided are for a leader site
isLeader, err := avicache.IsAviSiteLeader()
if err != nil {
errMsg := fmt.Sprintf("error fetching Gslb leader site details, %s", err.Error())
gslbutils.UpdateGSLBConfigStatus(errMsg)
gslbutils.LogAndPanic(errMsg)
}
	if !isLeader {
		gslbutils.Errf("Controller details provided are not for a leader, returning")
		gslbutils.UpdateGSLBConfigStatus(ControllerNotLeaderMsg)
		gslbutils.SetControllerAsFollower()
		return errors.New(ControllerNotLeaderMsg)
	}
gslbutils.SetControllerAsLeader()
cacheRefreshInterval := gc.Spec.RefreshInterval
if cacheRefreshInterval <= 0 {
gslbutils.Warnf("Invalid refresh interval provided, will set it to default %d seconds", gslbutils.DefaultRefreshInterval)
cacheRefreshInterval = gslbutils.DefaultRefreshInterval
}
gslbutils.Debugf("Cache refresh interval: %d seconds", cacheRefreshInterval)
// Secret created with name: "gslb-config-secret" and environment variable to set is
// GSLB_CONFIG.
err = GenerateKubeConfig()
if err != nil {
gslbutils.UpdateGSLBConfigStatus(KubeConfigErr + " " + err.Error())
gslbutils.LogAndPanic(fmt.Sprintf("Error in generating the kubeconfig file: %s", err.Error()))
}
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeNormal, gslbutils.MemberClusterValidation, "AMKO Cluster kubeconfig generated.")
aviCtrlList, err := initializeGSLBMemberClusters(gslbutils.GSLBKubePath, gc.Spec.MemberClusters)
if err != nil {
gslbutils.Errf("couldn't initialize the kubernetes/openshift clusters: %s, returning", err.Error())
gslbutils.UpdateGSLBConfigStatus(ClusterHealthCheckErr + err.Error())
// shutdown the api server to let k8s/openshift restart the pod back up
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeWarning, gslbutils.AMKOShutdown, "Couldn't initialize the Clusters: %s", err.Error())
apiserver.GetAmkoAPIServer().ShutDown()
}
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeNormal, gslbutils.MemberClusterValidation, "GSLB Member clusters validated.")
gslbutils.UpdateGSLBConfigStatus(BootupSyncMsg)
// TODO: Change the GSLBConfig CRD to take full sync interval as an input and fetch that
// value before going into full sync
// boot up time cache population
gslbutils.Logf("will populate avi cache now...")
avicache.PopulateHMCache(true)
avicache.PopulateSPCache()
newCache := avicache.PopulateGSCache(true)
bootupSync(aviCtrlList, newCache)
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeNormal, gslbutils.GSLBConfigValidation, "Initial bootup sync completed.")
gslbutils.UpdateGSLBConfigStatus(BootupSyncEndMsg)
// Initialize a periodic worker running full sync
resyncNodesWorker := gslbutils.NewFullSyncThread(time.Duration(cacheRefreshInterval))
resyncNodesWorker.SyncFunction = ResyncNodesToRestLayer
go resyncNodesWorker.Run()
// Initialize a periodic worker to sync member clusters which failed to connect during initial bootup
	// TODO: make this customisable through a field in the gslb config
resyncMemberWorker := gslbutils.NewFullSyncThread(time.Duration(gslbutils.DefaultClusterConnectInterval))
resyncMemberWorker.SyncFunction = resyncMemberCluster
go resyncMemberWorker.Run()
gcChan := gslbutils.GetGSLBConfigObjectChan()
*gcChan <- true
// GSLB Configuration successfully done
gslbutils.SetGSLBConfig(true)
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeNormal, gslbutils.GSLBConfigValidation, "GSLB Configuration validated and accepted.")
gslbutils.UpdateGSLBConfigStatus(AcceptedMsg)
// Start the informers for the member controllers
for _, aviCtrl := range aviCtrlList {
aviCtrl.Start(stopCh)
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeNormal, gslbutils.AMKOClusterReady, "Started listening on object updates in cluster %s", aviCtrl.GetName())
}
// Set the workers for the node/graph layer
// During test mode, the graph layer workers are already initialized
if !gslbutils.InTestMode() {
StartGraphLayerWorkers()
}
return nil
}
var graphOnce sync.Once
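// StartGraphLayerWorkers starts, exactly once, the workers that drain the object ingestion workqueue and
// build the node/graph layer.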
func StartGraphLayerWorkers() {
graphOnce.Do(func() {
ingestionSharedQueue := utils.SharedWorkQueue().GetQueueByName(utils.ObjectIngestionLayer)
ingestionSharedQueue.SyncFunc = nodes.SyncFromIngestionLayer
ingestionSharedQueue.Run(stopCh, gslbutils.GetWaitGroupFromMap(gslbutils.WGIngestion))
})
}
// Initialize initializes the first controller which looks for GSLB Config
func Initialize() {
initFlags()
flag.Parse()
if logfilepath := os.Getenv("LOG_FILE_PATH"); logfilepath != "" {
flag.Lookup("log_dir").Value.Set(logfilepath)
} else {
flag.Lookup("logtostderr").Value.Set("true")
}
stopCh = utils.SetupSignalHandler()
// Check if we are running inside kubernetes
cfg, err := rest.InClusterConfig()
if err != nil {
gslbutils.Warnf("object: main, msg: %s, %s", "not running inside kubernetes cluster", err)
} else {
gslbutils.Logf("object: main, msg: %s", "running inside kubernetes cluster, won't use config files")
insideCluster = true
}
	if !insideCluster {
cfg, err = clientcmd.BuildConfigFromFlags(masterURL, kubeConfig)
gslbutils.Logf("masterURL: %s, kubeconfigPath: %s, msg: %s", masterURL, kubeConfig,
"built from flags")
if err != nil {
gslbutils.LogAndPanic("object: main, msg: " + err.Error() + ", error building kubeconfig")
}
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
gslbutils.LogAndPanic("error building kubernetes clientset: " + err.Error())
}
amkoControlConfig := gslbutils.AMKOControlConfig()
amkoControlConfig.SetClientset(kubeClient)
if insideCluster {
// No need to save the Pod metadata, if running AMKO locally.
pod, err := kubeClient.CoreV1().Pods(gslbutils.AVISystem).Get(context.TODO(), os.Getenv("POD_NAME"), metav1.GetOptions{})
if err != nil {
gslbutils.LogAndPanic("error getting AMKO pod details: " + err.Error())
}
amkoControlConfig.SaveAMKOPodObjectMeta(pod.DeepCopy())
}
amkoControlConfig.SetEventRecorder(gslbutils.AMKOEventComponent, kubeClient)
	// HandleBootup checks the AMKOCluster object, validates it and then starts a reconciler to process updates.
isLeader, err := HandleBootup(cfg)
if err != nil {
gslbutils.LogAndPanic("error during boot up: " + err.Error())
}
CreateController()
// If the current cluster is not the leader then don't progress and wait forever
if !isLeader {
<-stopCh
return
}
gslbutils.SetWaitGroupMap()
gslbClient, err := gslbcs.NewForConfig(cfg)
if err != nil {
gslbutils.LogAndPanic("error building gslb config clientset: " + err.Error())
}
amkoControlConfig.SetGSLBClientset(gslbClient)
gdpClient, err := gdpcs.NewForConfig(cfg)
if err != nil {
gslbutils.LogAndPanic("error building gdp clientset: " + err.Error())
}
amkoControlConfig.SetGDPClientset(gdpClient)
	// Required to publish the GSLB and GDP statuses. During unit tests we don't traverse this path, so
	// GlobalGslbClient is never initialized and the status of the GDP object can't be updated. Always
	// check these flags before updating the status.
amkoControlConfig.SetPublishGSLBStatus(true)
amkoControlConfig.SetPublishGDPStatus(true)
SetInformerListTimeout(120)
numIngestionWorkers := utils.NumWorkersIngestion
ingestionQueueParams := utils.WorkerQueue{NumWorkers: numIngestionWorkers, WorkqueueName: utils.ObjectIngestionLayer}
graphQueueParams := utils.WorkerQueue{NumWorkers: gslbutils.NumRestWorkers, WorkqueueName: utils.GraphLayer}
slowRetryQParams := utils.WorkerQueue{NumWorkers: 1, WorkqueueName: gslbutils.SlowRetryQueue, SlowSyncTime: gslbutils.SlowSyncTime}
fastRetryQParams := utils.WorkerQueue{NumWorkers: 1, WorkqueueName: gslbutils.FastRetryQueue}
utils.SharedWorkQueue(&ingestionQueueParams, &graphQueueParams, &slowRetryQParams, &fastRetryQParams)
// Set workers for layer 3 (REST layer)
graphSharedQueue := utils.SharedWorkQueue().GetQueueByName(utils.GraphLayer)
graphSharedQueue.SyncFunc = avirest.SyncFromNodesLayer
graphSharedQueue.Run(stopCh, gslbutils.GetWaitGroupFromMap(gslbutils.WGGraph))
// Set up retry Queue
slowRetryQueue := utils.SharedWorkQueue().GetQueueByName(gslbutils.SlowRetryQueue)
slowRetryQueue.SyncFunc = aviretry.SyncFromRetryLayer
slowRetryQueue.Run(stopCh, gslbutils.GetWaitGroupFromMap(gslbutils.WGSlowRetry))
fastRetryQueue := utils.SharedWorkQueue().GetQueueByName(gslbutils.FastRetryQueue)
fastRetryQueue.SyncFunc = aviretry.SyncFromRetryLayer
fastRetryQueue.Run(stopCh, gslbutils.GetWaitGroupFromMap(gslbutils.WGFastRetry))
gslbInformerFactory := gslbinformers.NewSharedInformerFactory(gslbClient, time.Second*30)
gslbController := GetNewController(kubeClient, gslbClient, gslbInformerFactory,
AddGSLBConfigObject, InitializeGSLBMemberClusters)
// check whether we already have a GSLBConfig object created which was previously accepted
// this is to make sure that after a reboot, we don't pick a different GSLBConfig object which
// wasn't accepted.
if alreadyConfigured := CheckGSLBConfigsAndInitialize(); alreadyConfigured {
gslbutils.AMKOControlConfig().PodEventf(corev1.EventTypeNormal, gslbutils.GSLBConfigValidation, "GSLB Config already validated and configured.")
}
	// Start the informer for the GSLBConfig controller
gslbInformer := gslbInformerFactory.Amko().V1alpha1().GSLBConfigs()
go gslbInformer.Informer().Run(stopCh)
gslbutils.Logf("waiting for a GSLB config object to be added")
// Wait till a GSLB config object is added
gcChan := gslbutils.GetGSLBConfigObjectChan()
<-*gcChan
gdpInformerFactory := gdpinformers.NewSharedInformerFactory(gdpClient, time.Second*30)
gdpCtrl := InitializeGDPController(kubeClient, gdpClient, gdpInformerFactory, AddGDPObj,
UpdateGDPObj, DeleteGDPObj)
// Start the informer for the GDP controller
gdpInformer := gdpInformerFactory.Amko().V1alpha2().GlobalDeploymentPolicies()
go gdpInformer.Informer().Run(stopCh)
gslbhrCtrl := InitializeGSLBHostRuleController(kubeClient, gslbClient, gslbInformerFactory,
AddGSLBHostRuleObj, UpdateGSLBHostRuleObj, DeleteGSLBHostRuleObj)
gslbhrInformer := gslbInformerFactory.Amko().V1alpha1().GSLBHostRules()
go gslbhrInformer.Informer().Run(stopCh)
go RunControllers(gslbController, gdpCtrl, gslbhrCtrl, stopCh)
<-stopCh
gslbutils.WaitForWorkersToExit()
}
func RunControllers(gslbController *GSLBConfigController, gdpController *GDPController, gslbhrCtrl *GSLBHostRuleController, stopCh <-chan struct{}) {
if err := gslbController.Run(stopCh); err != nil {
gslbutils.LogAndPanic("error running GSLB Controller: " + err.Error())
}
if err := gdpController.Run(stopCh); err != nil {
gslbutils.LogAndPanic("error running GDP Controller: " + err.Error())
}
if err := gslbhrCtrl.Run(stopCh); err != nil {
gslbutils.LogAndPanic("error running GSLBHostRule Controller: " + err.Error())
}
}
// BuildContextConfig builds the kubernetes/openshift context config
func BuildContextConfig(kubeconfigPath, context string) (*restclient.Config, error) {
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},
&clientcmd.ConfigOverrides{
CurrentContext: context,
}).ClientConfig()
}
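// InformersToRegister probes a member cluster and decides which informers to start: the route informer
// for OpenShift clusters or the ingress informer for Kubernetes clusters, plus the service and namespace
// informers (and the multi-cluster ingress informer when enabled). It returns an error if the cluster's
// services API is unreachable.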
func InformersToRegister(oclient *oshiftclient.Clientset, kclient *kubernetes.Clientset, cname string) ([]string, error) {
allInformers := []string{}
_, err := kclient.CoreV1().Services("").List(context.TODO(), metav1.ListOptions{TimeoutSeconds: &informerTimeout})
if err != nil {
gslbutils.Errf("can't access the services api for cluster %s, error : %v", cname, err)
return allInformers, errors.New("cluster " + cname + " health check failed, can't access the services api")
}
_, err = oclient.RouteV1().Routes("").List(context.TODO(), metav1.ListOptions{TimeoutSeconds: &informerTimeout})
gslbutils.Debugf("cluster: %s, msg: checking if cluster has a route informer %v", cname, err)
if err == nil {
		// Openshift cluster with route support, add the route informer
allInformers = append(allInformers, utils.RouteInformer)
} else {
// Kubernetes cluster
allInformers = append(allInformers, utils.IngressInformer)
}
if utils.IsMultiClusterIngressEnabled() {
allInformers = append(allInformers, utils.MultiClusterIngressInformer)
}
allInformers = append(allInformers, utils.ServiceInformer)
allInformers = append(allInformers, utils.NSInformer)
return allInformers, nil
}
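// InitializeMemberCluster builds the kubernetes, openshift and AKO CRD clientsets for a member cluster,
// registers its informers and returns a GSLBMemberController. It fails if the HostRule API is not
// available on the cluster.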
func InitializeMemberCluster(cfg *restclient.Config, cluster KubeClusterDetails,
clients map[string]*kubernetes.Clientset) (*GSLBMemberController, error) {
informersArg := make(map[string]interface{})
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
return nil, fmt.Errorf("error in creating kubernetes clientset: %v", err)
}
oshiftClient, err := oshiftclient.NewForConfig(cfg)
if err != nil {
return nil, fmt.Errorf("error in creating openshift clientset: %v", err)
}
crdClient, err := crd.NewForConfig(cfg)
if err != nil {
return nil, fmt.Errorf("couldn't initialize ako clientset: %v", err)
}
informersArg[utils.INFORMERS_OPENSHIFT_CLIENT] = oshiftClient
informersArg[utils.INFORMERS_INSTANTIATE_ONCE] = false
informersArg[utils.INFORMERS_AKO_CLIENT] = crdClient
registeredInformers, err := InformersToRegister(oshiftClient, kubeClient, cluster.clusterName)
if err != nil {
return nil, fmt.Errorf("error in initializing informers: %v", err)
}
if len(registeredInformers) == 0 {
return nil, fmt.Errorf("no informers available for this cluster")
}
gslbutils.Logf("Informers for cluster %s: %v", cluster.clusterName, registeredInformers)
informerInstance := utils.NewInformers(utils.KubeClientIntf{
ClientSet: kubeClient},
registeredInformers,
informersArg)
clients[cluster.clusterName] = kubeClient
var aviCtrl GSLBMemberController
akoInformerFactory := akoinformer.NewSharedInformerFactory(crdClient, time.Second*30)
hostRuleInformer := akoInformerFactory.Ako().V1alpha1().HostRules()
aviCtrl = GetGSLBMemberController(cluster.clusterName, informerInstance, &hostRuleInformer)
_, err = crdClient.AkoV1alpha1().HostRules("").List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("HostRule API not available for cluster: %v", err)
}
aviCtrl.hrClientSet = crdClient
aviCtrl.SetupEventHandlers(K8SInformers{Cs: clients[cluster.clusterName]})
return &aviCtrl, nil
}
// InitializeGSLBMemberClusters initializes the GSLB member clusters
func InitializeGSLBMemberClusters(membersKubeConfig string, memberClusters []gslbalphav1.MemberCluster) ([]*GSLBMemberController, error) {
clusterDetails := loadClusterAccess(membersKubeConfig, memberClusters)
clients := make(map[string]*kubernetes.Clientset)
aviCtrlList := make([]*GSLBMemberController, 0)
pendingClusters = make(map[KubeClusterDetails]struct{})
for _, cluster := range clusterDetails {
gslbutils.Logf("cluster: %s, msg: %s", cluster.clusterName, "initializing")
gslbutils.AddClusterContext(cluster.clusterName)
cfg, err := BuildContextConfig(cluster.kubeconfig, cluster.clusterName)
if err != nil {
gslbutils.Warnf("cluster: %s, msg: %s, %s", cluster.clusterName, "error in connecting to kubernetes API",
err)
continue
}
gslbutils.Logf("cluster: %s, msg: %s", cluster.clusterName, "successfully connected to kubernetes API")
aviCtrl, err := InitializeMemberCluster(cfg, cluster, clients)
if err != nil {
gslbutils.Warnf("error initializing member cluster %s: %s", cluster.clusterName, err)
pendingClusters[cluster] = struct{}{}
continue
}
if aviCtrl != nil {
aviCtrlList = append(aviCtrlList, aviCtrl)
}
}
return aviCtrlList, nil
}
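// loadClusterAccess builds the KubeClusterDetails list for the member clusters, pointing each cluster
// context at the shared members' kubeconfig file.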
func loadClusterAccess(membersKubeConfig string, memberClusters []gslbalphav1.MemberCluster) []KubeClusterDetails {
var clusterDetails []KubeClusterDetails
for _, memberCluster := range memberClusters {
clusterDetails = append(clusterDetails, KubeClusterDetails{memberCluster.ClusterContext,
membersKubeConfig, "", nil})
gslbutils.Logf("cluster: %s, msg: %s", memberCluster.ClusterContext, "loaded cluster access")
}
return clusterDetails
}
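// isHealthMonitorTemplatePresentInCache returns true if the health monitor template is already present
// in the HM cache with its custom settings populated.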
func isHealthMonitorTemplatePresentInCache(hmName string) bool {
aviHmCache := avictrl.GetAviHmCache()
obj, present := aviHmCache.AviHmCacheGet(avictrl.TenantName{Tenant: utils.ADMIN_NS, Name: hmName})
if !present {
return false
}
hmObj, ok := obj.(*avictrl.AviHmObj)
return ok && hmObj.CustomHmSettings != nil
}
// validateAndAddHmTemplateToCache checks whether the template is present in the controller. If present, it adds the
// contents of the template to the cache, which will be used for the creation of health monitors. If the template is
// not present in the controller, the GDP/GSLBHostRule will be rejected.
func validateAndAddHmTemplateToCache(hmTemplate string, gdp bool, fullSync bool) error {
if fullSync && isHealthMonitorTemplatePresentInCache(hmTemplate) {
gslbutils.Debugf("health monitor template %s present in hm cache", hmTemplate)
return nil
}
hm, err := avictrl.GetHMFromName(hmTemplate, gdp)
if err != nil {
gslbutils.Errf("Health Monitor Template %s not found", hmTemplate)
return fmt.Errorf("health monitor template %s not found", hmTemplate)
}
if hm.IsFederated != nil &&
!(*hm.IsFederated) {
gslbutils.Errf("Health Monitor Template %s not federated", hmTemplate)
return fmt.Errorf("health monitor template %s not federated", hmTemplate)
}
// Get the response code, request header based on the protocol type of HM.
var hmHTTP *models.HealthMonitorHTTP
switch *hm.Type {
case gslbutils.SystemGslbHealthMonitorHTTP:
hmHTTP = hm.HTTPMonitor
case gslbutils.SystemGslbHealthMonitorHTTPS:
hmHTTP = hm.HTTPSMonitor
default:
return fmt.Errorf("health monitor template is not supported for non-path based health monitors")
}
// client request header must be in the format <GET/HEAD> /<path> <HTTP/version>
if len(strings.Split(*hmHTTP.HTTPRequest, gslbutils.RequestHeaderStringSeparator)) != gslbutils.NoOfRequestHeaderParams {
gslbutils.Errf("Client request header in Health Monitor Template %s is not correct", hmTemplate)
return fmt.Errorf("client request header in health monitor template %s is invalid", hmTemplate)
}
key := avictrl.TenantName{Tenant: utils.ADMIN_NS, Name: hmTemplate}
hmCacheObj := avictrl.AviHmObj{
Name: hmTemplate,
Tenant: utils.ADMIN_NS,
UUID: *hm.UUID,
CustomHmSettings: &avictrl.CustomHmSettings{
RequestHeader: *hmHTTP.HTTPRequest,
ResponseCode: hmHTTP.HTTPResponseCode,
},
Description: gslbutils.CreatedByUser,
CreatedBy: gslbutils.CreatedByUser,
}
aviHmCache := avictrl.GetAviHmCache()
aviHmCache.AviHmCacheAdd(key, &hmCacheObj)
return nil
}
| [
"\"HOME\"",
"\"GSLB_CONFIG\"",
"\"LOG_FILE_PATH\"",
"\"POD_NAME\""
] | [] | [
"GSLB_CONFIG",
"LOG_FILE_PATH",
"HOME",
"POD_NAME"
] | [] | ["GSLB_CONFIG", "LOG_FILE_PATH", "HOME", "POD_NAME"] | go | 4 | 0 | |
java/meterpreter/stdapi/src/main/java/com/metasploit/meterpreter/stdapi/stdapi_fs_file_expand_path_V1_5.java | package com.metasploit.meterpreter.stdapi;
import java.io.File;
public class stdapi_fs_file_expand_path_V1_5 extends stdapi_fs_file_expand_path {
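	// Resolve the platform shell: on Windows (path separator ';') use %COMSPEC%, otherwise use $SHELL,
	// falling back to the superclass default when the variable is unset or empty.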
protected String getShellPath() {
String result;
if (File.pathSeparatorChar == ';')
result = System.getenv("COMSPEC");
else
result = System.getenv("SHELL");
if (result == null || result.length() == 0)
result = super.getShellPath();
return result;
}
}
| [
"\"COMSPEC\"",
"\"SHELL\""
] | [] | [
"SHELL",
"COMSPEC"
] | [] | ["SHELL", "COMSPEC"] | java | 2 | 0 | |
debug.go | //go:build debug
// +build debug
package gojq
import (
"fmt"
"io"
"os"
"strconv"
"strings"
)
var (
debug bool
debugOut io.Writer
)
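// init enables debug tracing when the GOJQ_DEBUG environment variable is set; the value "stdout" sends
// the trace to os.Stdout, any other non-empty value sends it to os.Stderr.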
func init() {
if out := os.Getenv("GOJQ_DEBUG"); out != "" {
debug = true
if out == "stdout" {
debugOut = os.Stdout
} else {
debugOut = os.Stderr
}
}
}
type codeinfo struct {
name string
pc int
}
func (c *compiler) appendCodeInfo(x interface{}) {
if !debug {
return
}
var name string
switch x := x.(type) {
case string:
name = x
default:
name = fmt.Sprint(x)
}
var diff int
if c.codes[len(c.codes)-1] != nil && c.codes[len(c.codes)-1].op == opret && strings.HasPrefix(name, "end of ") {
diff = -1
}
c.codeinfos = append(c.codeinfos, codeinfo{name, c.pc() + diff})
}
func (c *compiler) deleteCodeInfo(name string) {
for i := 0; i < len(c.codeinfos); i++ {
if strings.HasSuffix(c.codeinfos[i].name, name) {
copy(c.codeinfos[i:], c.codeinfos[i+1:])
c.codeinfos = c.codeinfos[:len(c.codeinfos)-1]
i--
}
}
}
func (env *env) lookupInfoName(pc int) string {
var name string
for _, ci := range env.codeinfos {
if ci.pc == pc {
if name != "" {
name += ", "
}
name += ci.name
}
}
return name
}
func (env *env) debugCodes() {
if !debug {
return
}
for i, c := range env.codes {
pc := i
switch c.op {
case opcall, opcallrec:
if x, ok := c.v.(int); ok {
pc = x
}
case opjump:
x := c.v.(int)
if x > 0 && env.codes[x-1].op == opscope {
pc = x - 1
}
}
var s string
if name := env.lookupInfoName(pc); name != "" {
switch c.op {
case opcall, opcallrec, opjump:
if !strings.HasPrefix(name, "module ") {
s = "\t## call " + name
break
}
fallthrough
default:
s = "\t## " + name
}
}
fmt.Fprintf(debugOut, "\t%d\t%s%s%s\n", i, formatOp(c.op, false), debugOperand(c), s)
}
fmt.Fprintln(debugOut, "\t"+strings.Repeat("-", 40)+"+")
}
func (env *env) debugState(pc int, backtrack bool) {
if !debug {
return
}
var sb strings.Builder
c := env.codes[pc]
fmt.Fprintf(&sb, "\t%d\t%s%s\t|", pc, formatOp(c.op, backtrack), debugOperand(c))
var xs []int
for i := env.stack.index; i >= 0; i = env.stack.data[i].next {
xs = append(xs, i)
}
for i := len(xs) - 1; i >= 0; i-- {
sb.WriteString("\t")
sb.WriteString(debugValue(env.stack.data[xs[i]].value))
}
switch c.op {
case opcall, opcallrec:
if x, ok := c.v.(int); ok {
pc = x
}
case opjump:
x := c.v.(int)
if x > 0 && env.codes[x-1].op == opscope {
pc = x - 1
}
}
if name := env.lookupInfoName(pc); name != "" {
switch c.op {
case opcall, opcallrec, opjump:
if !strings.HasPrefix(name, "module ") {
sb.WriteString("\t\t\t## call " + name)
break
}
fallthrough
default:
sb.WriteString("\t\t\t## " + name)
}
}
fmt.Fprintln(debugOut, sb.String())
}
func formatOp(c opcode, backtrack bool) string {
if backtrack {
return c.String() + " <backtrack>" + strings.Repeat(" ", 13-len(c.String()))
}
return c.String() + strings.Repeat(" ", 25-len(c.String()))
}
func (env *env) debugForks(pc int, op string) {
if !debug {
return
}
var sb strings.Builder
for i, v := range env.forks {
if i > 0 {
sb.WriteByte('\t')
}
if i == len(env.forks)-1 {
sb.WriteByte('<')
}
fmt.Fprintf(&sb, "%d, %s", v.pc, debugValue(env.stack.data[v.stackindex].value))
if i == len(env.forks)-1 {
sb.WriteByte('>')
}
}
fmt.Fprintf(debugOut, "\t-\t%s%s%d\t|\t%s\n", op, strings.Repeat(" ", 22), pc, sb.String())
}
func debugOperand(c *code) string {
switch c.op {
case opcall, opcallrec:
switch v := c.v.(type) {
case int:
return strconv.Itoa(v)
case [3]interface{}:
return fmt.Sprintf("%s/%d", v[2], v[1])
default:
panic(c)
}
default:
return debugValue(c.v)
}
}
func debugValue(v interface{}) string {
switch v := v.(type) {
case Iter:
return fmt.Sprintf("gojq.Iter(%#v)", v)
case [2]int:
return fmt.Sprintf("[%d,%d]", v[0], v[1])
case [3]int:
return fmt.Sprintf("[%d,%d,%d]", v[0], v[1], v[2])
case [3]interface{}:
return fmt.Sprintf("[%v,%v,%v]", v[0], v[1], v[2])
default:
return previewValue(v)
}
}
| [
"\"GOJQ_DEBUG\""
] | [] | [
"GOJQ_DEBUG"
] | [] | ["GOJQ_DEBUG"] | go | 1 | 0 | |
test/test_fx.py | # Owner(s): ["oncall: fx"]
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import typing
import types
import warnings
import unittest
from math import sqrt
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH, CodeGen
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from collections import namedtuple
from torch.fx.proxy import TraceError
from torch.fx._compatibility import _BACK_COMPAT_OBJECTS, _MARKED_WITH_COMATIBLITY
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from fx.test_fx_param_shape_control_flow import TestConstParamShapeInControlFlow # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import AnnotationsTest # noqa: F401
if sys.version_info >= (3, 7):
from fx.test_gradual_type import TypeCheckerTest # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Set, Tuple, Union
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_WINDOWS,
TEST_WITH_ROCM,
find_library_location,
run_tests,
)
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
try:
from torchvision import models as torchvision_models
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
def forward(self, x):
return torch.relu(x + 3.0)
def a_non_torch_leaf(a, b):
return a + b
# Used for test_autowrap_function. Autowrapped functions need to be global
def fx_int(x: float) -> int:
return int(x)
def fx_int_x2(x: float) -> int:
return int(x) * 2
# used in test_pytree. It's all the way out here because pickling a GraphModule
# that uses Point errors out if Point is local to the function
Point = namedtuple('Point', ['x', 'y'])
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
return a[0] + a[1] + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
return a[0] + a[1] + b
wrap(a_lifted_leaf2)
wrap('len')
wrap('getattr')
@wrap
def wrapped_via_decorator(a):
return a + 1
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
return batchnorm1d(x)
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
return torch.foo(x)
class Pair(NamedTuple):
x : torch.Tensor
y : torch.Tensor
# for testing pytrees
class Foo(object): # noqa: B209
def __init__(self, a, b):
self.a = a
self.b = b
class TestFX(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
        # Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
if not (TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS):
lib_file_path = find_library_location('libtorchbind_test.so')
torch.ops.load_library(str(lib_file_path))
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
"""Check that an nn.Module's results match the GraphModule version
for a given set of args/kwargs.
"""
kwargs = kwargs if kwargs else {}
ref_outs = m(*args, **kwargs)
gm = symbolic_trace(m)
gm.graph.lint()
test_outs = gm(*args, **kwargs)
self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
class MySub(torch.nn.Module):
def __init__(self):
super().__init__()
self.w = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.w + x
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.lin = torch.nn.Linear(4, 3)
self.sub_mod = MySub()
self.w = torch.nn.Parameter(torch.rand(3))
def forward(self, A, B, c):
t = torch.sigmoid(A) + self.lin(c)
return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))
m = MyModule()
gm = symbolic_trace(m)
ms = torch.jit.script(gm)
class M2(torch.nn.Module):
def forward(self, A):
m, idx = torch.max(A, 0)
return m + 1, idx + 1
m2 = M2()
gm2 = symbolic_trace(m2)
class T(torch.nn.Module):
def forward(self, A, b=4, *args, c=5, **kwargs):
x = A + 1 + args[0] + kwargs['3']
return x
t = T()
symbolic_trace(t)
# test for issue described at https://github.com/pytorch/pytorch/issues/63883
class M3(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
m3 = M3()
gm3 = symbolic_trace(m3)
new_instance = gm3.__new__(type(gm3))
new_instance.__init__(gm3, gm3.graph)
x = torch.randn(5, 3)
torch.testing.assert_allclose(new_instance(x), torch.relu(x))
def test_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(torch.sin(x + y), gm(x, y))
def test_args_kwargs(self):
class T(torch.nn.Module):
def forward(self, *args, **kwargs):
x = args[0] + kwargs['foo']
return x
t = T()
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_args_kwargs_no_self(self):
class T(torch.nn.Module):
def forward(*args, **kwargs): # noqa: B902
self = args[0]
return torch.relu(args[1])
t = T()
with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})
def test_fx_shifts(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x << 3, x >> 3
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_fx_and_or(self):
class MyModule(torch.nn.Module):
def forward(self, x):
return x & x, x | x
input = torch.LongTensor(10).random_(0, 1024)
m = MyModule()
self.checkGraphModule(m, (input,))
def test_dict(self):
class MyDictMod(torch.nn.Module):
def forward(self, d):
return d['3'].relu(), {'4' : d['3'].neg()}
input_dict = {'3': torch.rand(3, 4)}
m = MyDictMod()
self.checkGraphModule(m, (input_dict,))
def test_matmul_tracing(self):
const = torch.randn(3)
def matmul_f(x):
return x @ const
mod = symbolic_trace(matmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), matmul_f(inp))
def rmatmul_f(x):
return const @ x
mod = symbolic_trace(rmatmul_f)
inp = torch.randn(3)
self.assertEqual(mod(inp), rmatmul_f(inp))
def test_disallow_override(self):
# Custom delegate to disallow in-place tensor operations
class NoMutableCallTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
name = target if isinstance(target, str) else torch.typename(target)
if name[-1] == '_':
raise RuntimeError('In-place operations are not supported')
return super().create_node(kind, target, args, kwargs, name)
# Test method
class MyInplaceMod(torch.nn.Module):
def forward(self, x):
x.add_(3.0)
return x
m = MyInplaceMod()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m)
# Test free function
class MyInplaceMod2(torch.nn.Module):
def forward(self, x):
torch.log_(x)
return x
m2 = MyInplaceMod2()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m2)
# Test symbolic node as an arg
class MyInplaceMod3(torch.nn.Module):
def forward(self, x):
y = torch.ones(3, 4)
y.add_(x)
return x
m3 = MyInplaceMod3()
with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
NoMutableCallTracer().trace(m3)
def test_leaf_module(self):
# Custom delegate to make it so that there are no leaf modules, everything
# should get traced through
class NoLeafModulesTracer(Tracer):
def is_leaf_module(self, m, qualname):
return False
class MyReluMod(torch.nn.Module):
def __init__(self):
super().__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
mrm = MyReluMod()
sym = NoLeafModulesTracer().trace(mrm)
for node in sym.nodes:
self.assertNotEqual(node.op, 'call_module')
sym.lint()
def test_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
m = symbolic_trace(to_trace)
self.assertIn('a_lifted_leaf2', m.code)
self.assertEqual(27, m(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_wrapped_via_decorator(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(m).transform()
self.assertIn('wrapped_via_decorator', transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
m = symbolic_trace(M())
self.assertIn("wrapped_with_submodule", m.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), m(input))
def test_wrapped_retrace(self):
def to_trace(y):
return wrapped_via_decorator(y)
m = symbolic_trace(to_trace)
self.assertIn('wrapped_via_decorator', m.code)
self.assertEqual(m(0), 1)
retraced = symbolic_trace(m)
self.assertIn('wrapped_via_decorator', retraced.code)
self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
gm.graph.lint()
self.assertEqual(gm(3, 4), 14)
def test_graph_unique_names(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = symbolic_trace(m).graph
new_g = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_val = new_g.graph_copy(g, val_map)
t = Proxy(output_val)
# test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
new_g.output((t + t).node)
gm = GraphModule(m, new_g)
seen_names : Set[str] = set()
for node in gm.graph.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_stack_traces(self):
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
tracer = torch.fx.Tracer()
tracer.record_stack_traces = True
graph = tracer.trace(M())
# saving the original list because we will insert new nodes as a part of a test
orig_graph_nodes = list(graph.nodes)
for node in orig_graph_nodes:
if node.op == 'output':
continue
self.assertTrue(node.stack_trace is not None)
assert 'test_fx.py' in node.stack_trace
# verify that copying the node does not lose the stack trace
new_node = graph.node_copy(node)
self.assertTrue(new_node.stack_trace is not None)
assert 'test_fx.py' in new_node.stack_trace
def test_graph_unique_names_manual(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
graph2 = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
graph2.graph_copy(graph, val_map)
seen_names : Set[str] = set()
for node in graph2.nodes:
assert node.name not in seen_names
seen_names.add(node.name)
def test_unpack(self):
class M(torch.nn.Module):
def forward(self, a, b):
c, d = a
return c + d + b
a = (torch.rand(1), torch.rand(1))
b = torch.rand(1)
m = M()
self.checkGraphModule(m, (a, b))
def test_native_callable(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
raise unittest.SkipTest("non-portable load_library call used in test")
# This test exercises the case where we use FX to translate from Python
# code to some native callable object
#
# For the purposes of testing, we use ElementwiseInterpreter defined
# in test_custom_class.cpp.
#
# We test that we can
# 1) Construct a native callable from FX IR
# 2) Construct a drop-in replacement module that delegates to the
# native callable rather than the original code
# 3) Run both the original code and native callable wrapper with
# equivalent results
# 4) TorchScript compile the native callable wrapper and confirm
# equivalent results with the reference
# 5) TorchScript serialize and deserialize the native callable
# and confirm equivalent results with the reference
# We use this simple Module as a reference computation
class MySimpleMod(torch.nn.Module):
def forward(self, x):
return 3.0 * x + x
msm = MySimpleMod()
# This is what a lowering pass might look like: a function that takes
# a valid nn.Module, symbolically traces it, lowers the Module to some
# representation, and wraps that representation up into another
# nn.Module instance that handles dispatch to the compiled/lowered code.
def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
# ===== Stage 1: Symbolic trace the module =====
mod = symbolic_trace(orig_mod)
# ===== Stage 2: Lower GraphModule representation to the C++
# interpreter's instruction format ======
instructions = []
constant_idx = 0
constants = {}
fn_input_names = []
target_to_name = {
operator.add : "add",
operator.mul : "mul"
}
output_node : Optional[Node] = None
# For each instruction, create a triple
# (instruction_name : str, inputs : List[str], output : str)
# to feed into the C++ interpreter
for n in mod.graph.nodes:
target, args, out_name = n.target, n.args, n.name
assert len(n.kwargs) == 0, "kwargs currently not supported"
if n.op == 'placeholder':
# Placeholders specify function argument names. Save these
# for later when we generate the wrapper GraphModule
fn_input_names.append(target)
elif n.op == 'call_function':
                    assert target in target_to_name, f"Unsupported call target {target}"
arg_names = []
for arg in args:
if not isinstance(arg, Node):
# Pull out constants. These constants will later be
# fed to the interpreter C++ object via add_constant()
arg_name = f'constant_{constant_idx}'
constants[arg_name] = torch.tensor(
[arg] if isinstance(arg, numbers.Number) else arg)
arg_names.append(arg_name)
constant_idx += 1
else:
arg_names.append(arg.name)
instructions.append((target_to_name[target], arg_names, out_name))
elif n.op == 'output':
if output_node is not None:
raise RuntimeError('Multiple output nodes!')
output_node = n
else:
raise RuntimeError('Unsupported opcode ' + n.op)
interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
# Load constants
for k, v in constants.items():
interpreter.add_constant(k, v)
# Specify names for positional input arguments
interpreter.set_input_names(fn_input_names)
# Load instructions
interpreter.set_instructions(instructions)
# Specify name for single output
assert isinstance(output_node.args[0], torch.fx.Node)
interpreter.set_output_name(output_node.args[0].name)
# ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
class WrapperModule(torch.nn.Module):
def __init__(self, interpreter):
super().__init__()
self.interpreter = interpreter
wrapper = WrapperModule(interpreter)
# Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
            # 3) Returns the specified return value
# FIXME: The following code could be greatly simplified by symbolic_trace'ing
# the wrapper with a Tracer that considers the Wrapper instance a root
# module, however, I can't get `__call__` exposed on TorchBind classes
# without it messing up Python `hasattr` for some reason. More digging
# into CPython's implementation of hasattr is probably in order...
graph = torch.fx.Graph()
# Add placeholders for fn inputs
placeholder_nodes = []
for name in fn_input_names:
placeholder_nodes.append(graph.create_node('placeholder', name))
# Get the interpreter object
interpreter_node = graph.create_node('get_attr', 'interpreter')
# Add a node to call the interpreter instance
output_node = graph.create_node(
op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))
# Register output
graph.output(output_node)
graph.lint()
# Return final GraphModule!!!
return GraphModule(wrapper, graph)
# Lower GraphModule to C++ interpreter
lowered = lower_to_elementwise_interpreter(msm)
# Compare correctness with original module
x = torch.rand(3, 4)
ref_out = msm(x)
test_out = lowered(x)
torch.testing.assert_close(test_out, ref_out)
# Test TorchScript compilation
scripted_lowered = torch.jit.script(lowered)
script_out = scripted_lowered(x)
torch.testing.assert_close(script_out, ref_out)
# Test TorchScript ser/de
import_copy = self.getExportImportCopy(scripted_lowered)
imported_out = import_copy(x)
torch.testing.assert_close(imported_out, ref_out)
def test_reserved_getattr(self):
"""Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
class M(torch.nn.Module):
def forward(self, a):
return a.foo.bar.baz
m = M()
m_g = symbolic_trace(m)
m_g.graph.lint()
for node in m_g.graph.nodes:
self.assertTrue(node.name != "getattr")
def test_node_tagging(self):
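        # A custom Tracer can attach extra metadata (here a 'tag' attribute) to every node it creates.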
class TaggingTracer(Tracer):
def create_node(self, kind : str, target : Union[str, Callable],
args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
type_expr : Optional[Any] = None) -> Node:
n = super().create_node(kind, target, args, kwargs, name)
n.tag = 'foo'
return n
class M(torch.nn.Module):
def forward(self, a, b):
return a + b
m = M()
g = TaggingTracer().trace(m)
g.lint()
for n in g.nodes:
self.assertTrue(hasattr(n, 'tag'))
self.assertEqual(n.tag, 'foo')
def test_tensor_attribute(self):
class TensorAttribute(torch.nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand(3, 4)
def forward(self, x):
return torch.nn.functional.linear(x, self.tensor)
ta = TensorAttribute()
traced = symbolic_trace(ta)
traced(torch.rand(4, 4))
class WrapperForQualname(torch.nn.Module):
def __init__(self):
super().__init__()
self.ta = TensorAttribute()
def forward(self, x):
return torch.nn.functional.linear(x, self.ta.tensor)
wfq = WrapperForQualname()
traced2 = symbolic_trace(wfq)
traced2.graph.lint()
traced2(torch.rand(4, 4))
    def test_tensor_attribute_coalesced(self):
def count_attrs(fx_module):
targets = set()
for node in traced.graph.nodes:
if node.op == 'get_attr':
targets.add(node.target)
return len(targets)
val = torch.tensor(5)
def f(x):
return x + val + val
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 1)
val2 = torch.tensor(5)
def f(x):
val = torch.tensor(5)
return x + val + val2
traced = symbolic_trace(f)
traced.graph.lint()
self.assertEqual(count_attrs(traced), 2)
def test_symbolic_trace_sequential(self):
class Simple(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
seq = torch.nn.Sequential(
Simple(),
Simple(),
Simple()
)
traced = symbolic_trace(seq)
traced.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
def test_pickle_graphmodule(self):
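        # A traced GraphModule should round-trip through pickle and keep producing the same outputs.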
class Nested(torch.nn.Module):
def __init__(self):
super().__init__()
self.st = torch.nn.Linear(4, 4)
def forward(self, x):
return self.st(x)
n = Nested()
traced = symbolic_trace(n)
traced.graph.lint()
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x = torch.rand(3, 4)
self.assertEqual(loaded(x), traced(x))
def test_pickle_custom_import(self):
graph = torch.fx.Graph()
a = graph.placeholder('x')
b = graph.placeholder('y')
c = graph.call_function(a_non_torch_leaf, (a, b))
d = graph.call_function(torch.sin, (c,))
graph.output(d)
gm = GraphModule(torch.nn.Module(), graph)
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
loaded.graph.lint()
x, y = torch.rand(1), torch.rand(1)
self.assertEqual(loaded(x, y), gm(x, y))
def test_all_input_nodes(self):
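        # Node.all_input_nodes should report exactly the Nodes consumed via args/kwargs.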
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.placeholder('x')
b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
c : torch.fx.Node = graph.get_attr('y_attr')
d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
graph.output(e)
graph.lint()
self.assertEqual(b.all_input_nodes, [a])
self.assertEqual(c.all_input_nodes, [])
self.assertEqual(d.all_input_nodes, [b, c])
self.assertEqual(e.all_input_nodes, [d])
def test_deepcopy_graphmodule_with_transform(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
def transform(traced):
new_graph = torch.fx.Graph()
val_map : Dict[Node, Node] = {}
output_value = new_graph.graph_copy(traced.graph, val_map)
relu_out = new_graph.create_node(
op='call_method', target='neg', args=(output_value,), kwargs={})
new_graph.output(relu_out)
return GraphModule(traced, new_graph)
transformed = transform(traced)
transformed.graph.lint()
copied = copy.deepcopy(transformed)
self.assertNotEqual(id(type(transformed)), id(type(copied)))
x = torch.randn(3, 4)
self.assertEqual(copied(x), transformed(x))
def test_deepcopy_with_submods_params(self):
class Bar(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
def forward(self, x):
return torch.relu(x) + self.param
class Baz(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.bar = Bar()
def forward(self, x):
return self.bar(x) - self.param
baz = Baz()
traced = symbolic_trace(baz)
traced.graph.lint()
copied = copy.deepcopy(traced)
copied.graph.lint()
def test_deepcopy_graph_with_tracer_cls(self):
class TestTracer(Tracer):
def is_leaf_module(self, module, name):
return True
g = Graph(tracer_cls=TestTracer)
x = g.placeholder("x")
g.output(x)
h = copy.deepcopy(g)
self.assertIsNotNone(h._tracer_cls)
self.assertTrue(g._tracer_cls == h._tracer_cls)
def test_unpack_list_better_error(self):
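        # Star-unpacking a Proxy (e.g. self.sa(*x)) is not traceable and should raise a clear TraceError.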
class SomeArgs(torch.nn.Module):
def forward(self, a, b):
return torch.rand(3, 4)
class UnpacksList(torch.nn.Module):
def __init__(self):
super().__init__()
self.sa = SomeArgs()
def forward(self, x : list):
return self.sa(*x)
ul = UnpacksList()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ul)
def test_unpack_dict_better_error(self):
class SomeKwargs(torch.nn.Module):
def forward(self, x=3, y=4):
return torch.rand(3, 4)
class UnpacksDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.sk = SomeKwargs()
def forward(self, x : dict):
return self.sk(**x)
ud = UnpacksDict()
with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
symbolic_trace(ud)
def test_pretty_print_targets(self):
# Test that Graph pretty-print prints friendly name for targets
# in `operator` and `builtins`
class SomeMod(torch.nn.Module):
def forward(self, x):
return torch.add(x.foo + x.bar, 3.0)
traced = symbolic_trace(SomeMod())
graph_str = str(traced.graph)
self.assertIn('builtins.getattr', graph_str)
self.assertIn('operator.add', graph_str)
self.assertIn('torch.add', graph_str)
def test_pretty_print_node(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.param: torch.nn.Parameter = torch.nn.Parameter(
torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x: torch.Tensor, y: int = 2):
return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
traced = symbolic_trace(M())
all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
FileCheck().check("x").check("placeholder") \
.check("y").check("placeholder") \
.check("getitem").check("call_function") \
.check("param").check("get_attr") \
.check("add").check("call_function") \
.check("linear").check("call_module") \
.check("clamp").check("call_method") \
.run(all_formatted)
def test_script_tensor_constant(self):
# TorchScript seems to ignore attributes that start with `__`.
# We used to call anonymous Tensor values `__tensor_constant*`, but
# they were getting ignored by script. Now they're called
# `_tensor_constant*`
class IHaveATensorConstant(torch.nn.Module):
def forward(self, x):
return x + torch.rand(3, 4)
traced = torch.fx.symbolic_trace(IHaveATensorConstant())
torch.jit.script(traced)
def test_autowrap_functions(self):
class AutowrapFnTest(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2)
class AutowrapFnTest2(torch.nn.Module):
def forward(self, x):
return fx_int(x.shape[0] / 2) + fx_int_x2(x.shape[0] / 2)
# Check function(s) are wrapped
# `int` would normally throw a TypeError as argument can't be `Proxy`
tracer = Tracer(autowrap_functions=(fx_int,))
graph = tracer.trace(AutowrapFnTest())
traced = GraphModule(tracer.root, graph, 'test')
tracer_2 = Tracer(autowrap_functions=(fx_int, fx_int_x2))
tracer_2.trace(AutowrapFnTest2())
# Test scriptability
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(4)), 2)
def test_torch_fx_len(self):
class FXLenTest(torch.nn.Module):
def forward(self, x):
return len(x)
traced = symbolic_trace(FXLenTest())
self.assertEqual(traced(torch.rand(3, 4)), 3)
# Test scriptability
scripted = torch.jit.script(FXLenTest())
self.assertEqual(scripted(torch.rand(3)), 3)
traced_scripted = torch.jit.script(traced)
self.assertEqual(traced_scripted(torch.rand(3)), 3)
# Test non-proxy len
class FXLenTest2(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = [3, 4, 5]
def forward(self, x):
return x + len(self.l)
traced2 = symbolic_trace(FXLenTest2())
inp = torch.rand(3, 4)
self.assertEqual(traced2(inp), inp + 3.0)
self.assertIs(len, builtins.len)
def test_torch_fx_getattr(self):
class FXGetattrTest(torch.nn.Module):
def forward(self, x):
return getattr(x, 'nonexistent_attr', torch.Tensor([2, 3]))
traced = symbolic_trace(FXGetattrTest())
self.assertEqual(traced(torch.rand(3, 4)), torch.Tensor([2, 3]))
def test_sqrt(self):
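        # Both the module-level sqrt alias and math.sqrt are patched during tracing and restored afterwards.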
class Sqrt1(torch.nn.Module):
def forward(self, x):
return sqrt(x.size(0))
class Sqrt2(torch.nn.Module):
def forward(self, x):
return math.sqrt(x.size(0))
class Sqrt3(torch.nn.Module):
def forward(self, x):
return x + math.sqrt(2) + sqrt(2)
self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
self.assertIs(sqrt, _sqrt)
self.assertIs(math.sqrt, _sqrt)
def test_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
out = gm(input)
self.assertEqual(out, ref_out)
def test_pickle_torch_custom_ops(self):
class M(torch.nn.Module):
def forward(self, a):
b = torch.ops.aten.sigmoid(a)
c = torch.ops.aten.cat([a, b])
return torch.ops.aten.cat((c, c))
m = M()
input = torch.randn(3)
ref_out = m(input)
gm = symbolic_trace(m)
gm.graph.lint()
pickled = pickle.dumps(gm)
loaded = pickle.loads(pickled)
self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
def test_custom_proxy_type(self):
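        # A plain user-defined class can flow through tracing as long as its methods only combine traceable values.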
class TensorPair:
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair(x : TensorPair, y : TensorPair):
s = x.add(y)
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair(x, y)
traced = symbolic_trace(use_tensor_pair)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_type_literal(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_literal(x : TensorPair):
s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
ref_out = use_tensor_pair_literal(x)
traced = symbolic_trace(use_tensor_pair_literal)
traced_out = traced(x)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_dynamic_value(self):
class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, left, right):
self.left, self.right = left, right
def add(self, other):
l = self.left + other.left
r = self.right + other.right
return TensorPair(l, r)
def mul(self, other):
l = self.left * other.left
r = self.right * other.right
return TensorPair(l, r)
def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
s = x.add(TensorPair(y, y))
return s.mul(x)
x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
y = torch.randn(5, 3)
ref_out = use_tensor_pair_ctor(x, y)
traced = symbolic_trace(use_tensor_pair_ctor)
traced_out = traced(x, y)
self.assertEqual(traced_out.left, ref_out.left)
self.assertEqual(traced_out.right, ref_out.right)
def test_custom_proxy_input_dependent_control_flow(self):
class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
def __init__(self, inp):
if inp.sum() == 0:
self.is_zero = True
self.tensor = torch.tensor([])
else:
self.is_zero = False
self.tensor = inp
def add(self, other):
if self.is_zero:
return ZeroTensor(other.tensor)
elif other.is_zero:
return self
def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
return ZeroTensor(x + y)
x, y = torch.randn(5, 3), torch.randn(5, 3)
ref_out = use_zero_tensor(x, y)
traced = symbolic_trace(use_zero_tensor)
traced_out = traced(x, y)
self.assertEqual(traced_out.is_zero, ref_out.is_zero)
self.assertEqual(traced_out.tensor, ref_out.tensor)
def test_graph_fns(self):
g = Graph()
a = g.placeholder('a')
b = g.call_module('linear', (a,))
c = g.get_attr('bias')
d = g.call_method('add', (b, c))
e = g.call_function(torch.sin, (d,))
g.output(e)
mod = torch.nn.Module()
mod.linear = torch.nn.Linear(3, 4)
mod.bias = torch.rand(4)
gm = GraphModule(mod, g)
gm.graph.lint()
input = torch.rand(3)
r = gm(input)
ref = torch.sin(mod.linear(input) + mod.bias)
self.assertEqual(r, ref)
def test_remove_uses(self):
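        # After replace_all_uses_with + erase_node, the erased node must not linger in its input's users.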
g : torch.fx.Graph = Graph()
x : torch.fx.Node = g.placeholder('x')
relu : torch.fx.Node = g.call_function(torch.relu, (x,))
neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
g.output(neg)
neg.replace_all_uses_with(relu)
g.erase_node(neg)
self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
def test_pickle_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
traced = symbolic_trace(eb)
pickled = pickle.dumps(traced)
loaded = pickle.loads(pickled)
loaded.graph.lint()
input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.LongTensor([0, 4])
self.assertEqual(loaded(input, offsets), traced(input, offsets))
def test_return_tuple(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return (x, x + x)
original = M()
traced = symbolic_trace(original)
self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
def test_construct_root_dict(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
add_param : torch.Tensor = torch.rand(3, 4)
gm : torch.fx.GraphModule = torch.fx.GraphModule(
{'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
gm.graph.lint()
assert 'self.foo.bar.baz' in gm.code
x : torch.Tensor = torch.rand(3, 3)
out : torch.Tensor = gm(x)
ref_out : torch.Tensor = linear_mod(x) + add_param
self.assertEqual(out, ref_out)
def test_symbolic_trace_assert(self):
class AssertsTensorShape(torch.nn.Module):
def forward(self, x):
torch._assert(x.shape[1] > 4, "assert_foobar")
return x
m = AssertsTensorShape()
# verify traceability
traced = symbolic_trace(m)
# verify assertion on traced model works correctly at runtime
traced(torch.rand(4, 5))
with self.assertRaisesRegex(AssertionError, "assert_foobar"):
traced(torch.rand(4, 3))
        # verify the assertion also fires when the module is compiled with TorchScript
ms = torch.jit.script(m)
with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
ms(torch.rand(4, 3))
def test_fx_create_arg(self):
class CustomArgObject:
def __init__(self, x, y):
self.x = x
self.y = y
def __fx_create_arg__(self, tracer: torch.fx.Tracer):
return tracer.create_node(
"call_function",
CustomArgObject,
args=(
tracer.create_arg(self.x),
tracer.create_arg(self.y),
),
kwargs={},
)
class HasCustomArgObjectWhenLeaf(torch.nn.Module):
def forward(self, o: CustomArgObject):
# Not normally traceable; good reason to make
# this module a leaf.
for x in o.x:
o.y += x
return o.y
class Root(torch.nn.Module):
def __init__(self):
super().__init__()
self.inner = HasCustomArgObjectWhenLeaf()
def forward(self, x, y):
o = CustomArgObject(x, y)
return self.inner(o)
class CreateArgTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is HasCustomArgObjectWhenLeaf
m = Root()
graph = CreateArgTracer().trace(m)
gm = torch.fx.GraphModule(m, graph)
assert "CustomArgObject(" in gm.code
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
def test_copy_no_remap(self):
traced = symbolic_trace(SimpleTest())
g = traced.graph
copied = torch.fx.Graph()
for node in g.nodes:
copied.node_copy(node)
with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
copied.lint()
def test_wrong_topo(self):
graph : torch.fx.Graph = torch.fx.Graph()
a : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
graph.output(d)
nodes = list(graph.nodes)
nodes[3].append(nodes[2])
with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
graph.lint()
def test_wrong_target_type(self):
graph : torch.fx.Graph = torch.fx.Graph()
with self.assertRaises(ValueError):
n = torch.fx.Node(graph=graph, name='foo', op='call_function', target='foo',
args=(), kwargs={})
def test_example_shape_prop(self):
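        # ShapeProp should annotate every node with tensor_meta; the output's shape and stride must match eager execution.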
class TestCase(torch.nn.Module):
def __init__(self):
super().__init__()
self.attr = torch.randn(3, 4)
self.submod = torch.nn.Linear(4, 4)
def forward(self, x):
return torch.neg(self.submod(x.relu() + self.attr))
tc = TestCase()
tc_traced = symbolic_trace(tc)
ref_out = tc_traced(torch.rand(3, 4))
shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
# Make sure we're testing all opcodes
opcodes = set()
        output_shape : Optional[torch.Size] = None
        output_stride : Optional[Tuple[int, ...]] = None
for node in tc_traced.graph.nodes:
opcodes.add(node.op)
if node.op == 'output':
output_shape = node.args[0].meta['tensor_meta'].shape
output_stride = node.args[0].meta['tensor_meta'].stride
self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
'call_module', 'output']))
        # Test shape propagation and make sure results match the actual output
self.assertEqual(output_shape, ref_out.shape)
self.assertEqual(output_stride, ref_out.stride())
def test_shape_prop_layout(self):
class ConvTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv2d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
# contiguous layout
test_mod = ConvTest()
traced = symbolic_trace(test_mod)
x = torch.randn(5, 5, 224, 224)
shape_prop.ShapeProp(traced).propagate(x)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced.graph.nodes))
x_channels_last = x.contiguous(memory_format=torch.channels_last)
traced.to(memory_format=torch.channels_last)
shape_prop.ShapeProp(traced).propagate(x_channels_last)
for node in traced.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
def test_shape_prop_aggregate(self):
class ReturnTwo(torch.nn.Module):
def forward(self, x):
return (3, torch.sum(x))
class UnderTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.rt = ReturnTwo()
def forward(self, x):
return self.rt(x)
ut = UnderTest()
class RTTracer(torch.fx.Tracer):
def is_leaf_module(self, m, module_qualified_name):
return type(m) is ReturnTwo
graph = RTTracer().trace(ut)
mod = torch.fx.GraphModule(ut, graph)
shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
for node in mod.graph.nodes:
if node.op == 'call_module':
assert 'tensor_meta' in node.meta
tensor_meta = node.meta['tensor_meta']
assert tensor_meta[0] == 3
assert tensor_meta[1].shape == torch.Size([])
def test_shape_prop_layout_3d(self):
class ConvTest3d(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_mod = torch.nn.Conv3d(5, 5, 3)
def forward(self, x):
return self.conv_mod(x)
test_mod_3d = ConvTest3d()
traced_3d = symbolic_trace(test_mod_3d)
x_3d = torch.randn(5, 5, 224, 224, 15)
shape_prop.ShapeProp(traced_3d).propagate(x_3d)
assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
for node in traced_3d.graph.nodes))
x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
traced_3d.to(memory_format=torch.channels_last_3d)
shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
for node in traced_3d.graph.nodes:
# NB: the implementation of conv may not preserve the memory format,
# unfortunately. The best we can do is just check that the placeholder
# node is channels-last
if node.op in {'placeholder'}:
self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
def test_interpreter(self):
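        # Interpreter.run should agree with both the traced GraphModule and the original eager module.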
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
interpreter = Interpreter(gm)
input = torch.randn(3, 4)
self.assertEqual(interpreter.run(input), gm(input))
self.assertEqual(interpreter.run(input), m(input))
def test_interpreter_run_node_override(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
class RunNodeInterpreter(Interpreter):
def __init__(self, module):
super().__init__(module)
def run_node(self, n : Node) -> Any:
result = super().run_node(n)
n.cached_value = result
return result
input = torch.randn(3, 4)
RunNodeInterpreter(gm).run(input)
for node in gm.graph.nodes:
assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
def test_interpreter_partial_eval(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
gm = torch.fx.symbolic_trace(MyModule())
interp = Interpreter(gm)
env = {}
for node in gm.graph.nodes:
if node.op == 'call_module' and node.target == 'linear':
env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
break
assert len(env) == 1
x = torch.randn(3, 4)
result = interp.run(x, initial_env=env)
self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
def test_interpreter_star_args(self):
def with_star_args(x, *args):
return x + args[0]
gm = torch.fx.symbolic_trace(with_star_args)
interp = Interpreter(gm)
result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
self.assertEqual(result, torch.ones(3, 4) * 2.0)
@skipIfNoTorchVision
def test_interpreter_noop_resnet18(self):
rn18 = torchvision_models.resnet18()
transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
inp = torch.randn(5, 3, 224, 224)
self.assertEqual(transformed(inp), rn18(inp))
@skipIfNoTorchVision
def test_interpreter_gc_values(self):
rn18 = torchvision_models.resnet18()
interp = Interpreter(symbolic_trace(rn18))
inp = torch.rand(5, 3, 224, 224)
out = interp.run(inp)
env_key_names = set(n.name for n in interp.env.keys())
self.assertEqual(env_key_names, set(['output']))
def test_interpreter_default_args(self):
class Model(torch.nn.Module):
def forward(self, x, y=3.14159):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
out = interp.run(x)
torch.testing.assert_allclose(out, x + 3.14159)
def test_interpreter_not_enough_args(self):
class Model(torch.nn.Module):
def forward(self, x, y):
return x + y
model = Model()
gm = torch.fx.symbolic_trace(model)
interp = Interpreter(gm)
x = torch.randn(5, 3)
with self.assertRaisesRegex(RuntimeError,
'Expected positional argument for parameter y, but one was not passed in'):
out = interp.run(x)
def test_transformer_noop(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
return self.linear(x + self.param).clamp(min=0.0, max=1.0)
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
                return super().call_function(target, args, kwargs)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
                return super().call_method(target, args, kwargs)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
def test_transformer_multi_outputs(self):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(3, 4))
self.linear = torch.nn.Linear(4, 5)
def forward(self, x):
x = x + self.param
out = self.linear(x)
return x, out
m = MyModule()
gm = torch.fx.symbolic_trace(m)
new_gm = Transformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(new_gm(input), gm(input))
def test_fn_type_annotations(self):
class Foo(torch.nn.Module):
def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
return {'a': p.x + p.y + z + i}
foo_scripted = torch.jit.script(Foo())
foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
fxed = symbolic_trace(Foo())
fxed_scripted = torch.jit.script(fxed)
fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
def test_fn_type_annotation_empty(self):
def forward(a : List[torch.Tensor]):
return a[0]
torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
def test_ctx_mgr(self):
@contextlib.contextmanager
def do_nothing():
yield
class M(torch.nn.Module):
def __init__(self):
super().__init__()
@do_nothing()
def forward(self, x):
return torch.relu(x)
m = M()
self.checkGraphModule(m, (torch.rand(3, 4),))
def test_typename_print(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
type_expr=List[float])
output : torch.fx.Node = graph.output(b)
self.assertTrue('typing.List[float]' in str(graph))
def test_layout(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.empty_like(x, layout=torch.strided, pin_memory=False).fill_(0)
traced = symbolic_trace(M())
x = torch.rand(5, 9, 3, 4)
self.assertEqual(traced(x), torch.zeros_like(x))
def test_ellipsis(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y[:, 1:10, ...]
traced = symbolic_trace(M())
x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
def test_inf_nan_kwds(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
graph.output((b, c))
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
x = torch.rand(3, 4)
self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
def test_deepcopy_recursion_depth(self):
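        # Deep-copying a graph deeper than the recursion limit must not overflow, and user edges must be preserved.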
depth = sys.getrecursionlimit() + 20
g = torch.fx.Graph()
x = g.placeholder('x')
for i in range(depth):
x = g.call_function(torch.relu, (x,))
g.output(x)
copied_graph = copy.deepcopy(g)
val_map = {}
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
val_map[orig_node] = new_node
for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
orig_users = set(orig_node.users.keys())
orig_users_equiv = set(val_map[u] for u in orig_users)
new_users = set(new_node.users.keys())
self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
                        the_function=torch.neg, args=node.args, kwargs=kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
def test_replace_input(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.replace_input_with(x, y)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input_x = torch.randn(33, 44)
input_y = torch.randn(11, 22)
self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
def test_insertion_point(self):
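        # inserting_before() places new nodes ahead of an existing one; rewire relu to consume the inserted neg.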
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
with graph.inserting_before(b):
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_update_args_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_arg(0, y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_update_kwargs_api(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
y : torch.fx.Node = graph.create_node('placeholder', 'y')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
output : torch.fx.Node = graph.output(b)
orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
b.update_kwarg('input', y)
new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
def test_move_before(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
_, *relu_args = b.args
b.args = (neg, *relu_args)
b.prepend(neg)
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
input = torch.randn(33, 44)
self.assertEqual(gm(input), torch.relu(torch.neg(input)))
def test_prepend_self(self):
graph : torch.fx.Graph = torch.fx.Graph()
x : torch.fx.Node = graph.create_node('placeholder', 'x')
b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
output : torch.fx.Node = graph.output(b)
b.prepend(b)
x.append(b)
self.assertEqual(len(graph.nodes), 3)
def test_erase_node_error(self):
st = SimpleTest()
traced = symbolic_trace(st)
for node in traced.graph.nodes:
# Test deleting with uses both in another Node and at the output
if node.target in [operator.add, torch.relu]:
with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
def test_get_torch_func_signature(self):
for key in dir(torch):
obj = getattr(torch, key)
if callable(obj):
schemas = get_signature_for_torch_op(obj)
def test_find_uses(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
y = torch.relu(x)
z = x + x
u = torch.neg(x)
graph.output((y + z + u).node)
graph.lint()
users_of_x = x.node.users
self.assertEqual(len(users_of_x), 3)
expected_ops = set(['relu', 'add', 'neg'])
for use in users_of_x:
assert any(use.name.startswith(prefix) for prefix in expected_ops)
def test_inline_graph(self):
class InlineInto(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class ToInline(torch.nn.Module):
def forward(self, x):
return torch.neg(x)
inline_into = symbolic_trace(InlineInto())
to_inline = symbolic_trace(ToInline())
combined_graph = torch.fx.Graph()
output_node = combined_graph.graph_copy(inline_into.graph, {})
input_node = list(to_inline.graph.nodes)[0]
assert input_node and input_node.op == 'placeholder'
val_map = {input_node : output_node}
output = combined_graph.graph_copy(to_inline.graph, val_map)
combined_graph.output(output)
combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
input = torch.rand(3, 4)
self.assertEqual(combined_module(input), input.relu().neg())
def test_multi_insert_point(self):
graph = torch.fx.Graph()
x = torch.fx.Proxy(graph.placeholder('x'))
relu = torch.relu(x)
with graph.inserting_before(relu.node):
y = torch.neg(x)
z = torch.tanh(y)
graph.output((relu.node, z.node))
graph.lint()
expected_ops = ['x', 'neg', 'tanh', 'relu']
for node, expected in zip(graph.nodes, expected_ops):
assert expected in node.name
def test_reassign_args_kwargs_uses(self):
graph = torch.fx.Graph()
x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
z = x + y
zed = z + z + z
graph.output(zed.node)
graph.lint()
# zed = z + z + z -> zed = z + z + x
zed.node.args = (zed.node.args[0], x.node)
self.assertEqual(list(x.node.users.keys()), [z.node, zed.node])
# z = x + y -> z = y + y
z.node.args = (y.node, y.node)
self.assertEqual(list(x.node.users.keys()), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
def test_trace_dict_int_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[int, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({42: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
class ModWithDictArg(torch.nn.Module):
def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
return d[42]
class CallsModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.m = ModWithDictArg()
def forward(self, x):
return self.m({x: x})
class MyTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return isinstance(m, ModWithDictArg)
with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
traced_graph = MyTracer().trace(CallsModWithDict())
def test_module_deepcopy_edit_nodes(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
traced1 = symbolic_trace(Foo())
copied = copy.deepcopy(traced1)
for node in copied.graph.nodes:
if node.target == torch.relu:
node.target = torch.neg
copied.recompile()
traced1.recompile()
x = torch.randn(15, 15)
torch.testing.assert_allclose(traced1(x), torch.relu(x))
torch.testing.assert_allclose(copied(x), torch.neg(x))
def test_direct_param_use(self):
class TransposeTest(torch.nn.Module):
def __init__(self):
super().__init__()
self.b = torch.nn.Parameter(torch.rand(4, 3))
def forward(self, x):
return self.b
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = TransposeTest()
def forward(self, x):
return self.a.b, self.a.b.t(), self.a.b.view(12)
traced = torch.fx.symbolic_trace(Foo())
assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1):
return y
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
def test_multiple_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, y=1, z=2):
return y + z
m = M()
self.checkGraphModule(m, ())
self.checkGraphModule(m, (3,))
self.checkGraphModule(m, (3, 4))
def test_regular_and_default_args(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y=1):
return x + y
m = M()
self.checkGraphModule(m, (2,))
self.checkGraphModule(m, (2, 3))
def test_string_literal_return(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self):
return "foo"
m = M()
self.checkGraphModule(m, ())
def test_namedtuple_return_qualname(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return MyNamedTup(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
symtraced = symbolic_trace(SimpleTest())
node = next(iter(symtraced.graph.nodes))
with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")
class FooBar1234(torch.nn.Module):
def __init__(self):
super(FooBar1234, self).__init__()
self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])
def forward(self):
return self.f.top()
m = FooBar1234()
self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
if TEST_WITH_ROCM or IS_FBCODE or IS_WINDOWS or IS_MACOS:
self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")
class FooBar2341(torch.nn.Module):
def __init__(self):
super(FooBar2341, self).__init__()
self.f = torch.classes._TorchScriptTesting._ReLUClass()
def forward(self, x):
return self.f.run(x)
m = FooBar2341()
traced = symbolic_trace(m)
input = torch.randn(3, 4)
self.assertEqual(traced(input), m(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
class Scripted(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
class Holder(torch.nn.Module):
def __init__(self):
super().__init__()
self.s = torch.jit.script(Scripted())
def forward(self, x):
return self.s(x)
h = Holder()
traced = symbolic_trace(h)
input = torch.randn(3, 4)
self.assertEqual(traced(input), h(input))
self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
class NamedTupReturn(torch.nn.Module):
def forward(self, x):
return Pair(x, x)
traced = symbolic_trace(NamedTupReturn())
input = torch.rand(3, 4)
self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
class ReturnTypeModule(torch.nn.Module):
def other(self, x: List[str]) -> List[str]:
return x
def forward(self, x: List[str]) -> List[str]:
return self.other(x)
traced = symbolic_trace(ReturnTypeModule())
self.assertIn("-> typing_List[str]", traced._code)
scripted = torch.jit.script(traced)
self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
class GetItemBase(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer('pe', torch.randn(8, 8))
class GetItem1(GetItemBase):
def forward(self, x):
return self.pe[:, :x.size(0)]
class GetItem2(GetItemBase):
def forward(self, x):
return self.pe[x.size(0)]
class GetItem3(GetItemBase):
def forward(self, x):
return self.pe[4] # fx creates `self._tensor_constant0` here
self.checkGraphModule(GetItem1(), [torch.zeros(4)])
self.checkGraphModule(GetItem2(), [torch.zeros(4)])
self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
"Will be checked in test_getitem_subproc")
def test_getitem(self):
self.getitem_inner()
def test_getitem_subproc(self):
# need to run this test in a subproc to work around:
# https://github.com/pytorch/pytorch/issues/50710
proc = Process(target=run_getitem_target)
proc.start()
proc.join()
self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
def fn(x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(fn)
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'fn.forward'"):
scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
class M(torch.nn.Module):
def forward(self, x):
return wrapper_fn(x)
traced = torch.fx.symbolic_trace(M())
with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
"being compiled since it was called"
" from 'M.forward'"):
scripted = torch.jit.script(traced)
def test_snake_case(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.activations = torch.nn.ModuleDict([
["snake_case", torch.nn.ReLU()],
["PascalCase", torch.nn.LeakyReLU()],
["ALL_CAPS", torch.nn.PReLU()]
])
def forward(self, x):
a = self.activations["snake_case"](x)
b = self.activations["PascalCase"](x)
c = self.activations["ALL_CAPS"](x)
return a, b, c
traced = symbolic_trace(M())
check = [
("activations_snake_case", "activations.snake_case"),
("activations_pascal_case", "activations.PascalCase"),
("activations_all_caps", "activations.ALL_CAPS")
]
i = 0
for node in traced.graph.nodes:
if node.op == "placeholder" or node.op == "output":
continue
name = check[i][0]
target = check[i][1]
self.assertEqual(name, node.name)
self.assertEqual(target, node.target)
i += 1
self.assertEqual(i, 3)
def test_no_mutation(self):
from torch.fx.immutable_collections import immutable_list
x = immutable_list([3, 4])
with self.assertRaisesRegex(NotImplementedError, "new_args"):
x[0] = 4
def test_partial_trace(self):
class Foo(torch.nn.Module):
def forward(self, x, y):
if y:
return 2 * x
else:
return x
mod = Foo()
mod_true = symbolic_trace(mod, concrete_args={'y': True})
mod_false = symbolic_trace(mod, concrete_args={'y': False})
self.assertEqual(mod_true(3, True), 6)
print(mod_true.code)
assert(any([i.target == torch._assert for i in mod_true.graph.nodes]))
with self.assertRaises(AssertionError):
mod_true(3, False)
self.assertEqual(mod_false(3, False), 3)
with self.assertRaises(AssertionError):
mod_false(3, True)
def f_higher(a, f):
return f(a)
nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.W = torch.nn.Parameter(torch.randn(5))
def forward(self, x):
return torch.dot(self.W, x)
traced = torch.fx.symbolic_trace(M())
out = [n for n in traced.graph.nodes if n.op == "output"][-1]
with traced.graph.inserting_before(out):
relu_out = traced.graph.call_method(method_name='relu',
args=(out.args[0],))
out.args = (relu_out,)
traced.recompile()
with self.capture_stderr() as captured:
with self.assertRaises(TypeError):
traced(5)
self.assertRegex(captured[0],
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(3, 4)
def forward(self, x):
return self.linear(x)
traced = torch.fx.symbolic_trace(M())
# Do not change this to `capture_stderr` or another context
# manager without ensuring that the output is as expected
try:
traced(torch.rand(5, 5))
except RuntimeError:
captured = traceback.format_exc()
self.assertNotRegex(captured,
r"Call using an FX-traced Module, line .* of the "
r"traced Module's generated forward function:")
def test_graph_module_replicate_for_dp(self):
class Foo(torch.nn.Module):
def forward(self, x):
return torch.relu(x)
gm = torch.fx.symbolic_trace(Foo())
x = torch.randn(5, 3)
out = gm(x)
replica = gm._replicate_for_data_parallel()
out_replica = replica(x)
torch.testing.assert_allclose(out_replica, out)
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_throw_out_variant(self):
def foo(x):
y = torch.rand_like(x)
torch.sigmoid(x, out=y)
return y
class MyTracer(torch.fx.Tracer):
check_mutable_operations = True
tracer = MyTracer()
with self.assertRaisesRegex(RuntimeError, 'mutable operation aten::sigmoid.out'):
traced_graph = tracer.trace(foo)
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_wrap(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf((4, y), 3)
+ a_lifted_leaf((3, 4), 5)
+ a_lifted_leaf((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf, real_a_lifed_leaf)
def test_ast_rewriter_wrap_fn_directly(self):
self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))
def to_trace(y):
return (
a_lifted_leaf2((4, y), 3)
+ a_lifted_leaf2((3, 4), 5)
+ a_lifted_leaf2((y, y), y)
)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("a_lifted_leaf2", traced.code)
self.assertEqual(27, traced(2))
self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)
def test_profiler_ranges_side_effect(self):
g = torch.fx.Graph()
handle = g.call_function(torch.ops.profiler._record_function_enter, ('test_range',))
g.call_function(torch.ops.profiler._record_function_exit, (handle,))
g.output(None)
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
g.eliminate_dead_code()
found_targets = {}
for node in g.nodes:
if node.op == 'call_function':
found_targets.setdefault(node.target)
self.assertEqual(
list(found_targets.keys()),
[torch.ops.profiler._record_function_enter, torch.ops.profiler._record_function_exit]
)
def test_ast_rewriter_wrapped_via_decorator(self):
class F(torch.nn.Module):
def forward(self, x):
return wrapped_via_decorator(x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(F())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrapped_via_decorator_and_transformed(self):
self.assertEqual(wrapped_via_decorator(0), 1)
def to_trace(y):
return wrapped_via_decorator(y)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(to_trace)
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_via_decorator", traced.code)
self.assertEqual(traced(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
transformed = torch.fx.Transformer(traced).transform()
self.assertIn("wrapped_via_decorator", transformed.code)
self.assertEqual(transformed(0), 1)
self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))
def test_ast_rewriter_wrap_with_submodule(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
def forward(self, x: torch.Tensor):
return wrapped_with_submodule(x, self.batchnorm1d)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
self.assertIn("wrapped_with_submodule", traced.code)
input = torch.rand(3, 2)
ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
self.assertEqual(ref_batchnorm1d(input), traced(input))
def test_submodule_manipulation_API(self):
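        # End-to-end exercise of add_submodule / get_submodule / delete_submodule,
        # get_parameter / get_buffer and delete_all_unused_submodules.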
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.conv(torch.cat([self.param, x]))
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.linear = torch.nn.Linear(100, 200)
self.register_buffer("buf", torch.randn(2, 3))
self.net_c = C()
def forward(self, x):
return self.linear(torch.cat([self.buf, self.net_c(x)]))
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.net_b = B()
self.param = torch.nn.Parameter(torch.rand(2, 3))
def forward(self, x):
return self.net_b(x) + self.param
a = symbolic_trace(A())
a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
with a.graph.inserting_before(conv):
with warnings.catch_warnings(record=True) as w:
dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
args=conv.args)
self.assertEqual(len(w), 0)
conv.replace_all_uses_with(dropout)
a.graph.erase_node(conv)
a.recompile()
def module_exists(gm: GraphModule, path: str) -> bool:
return any(path == name for name, _ in gm.named_modules())
def parameter_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_parameters())
and any(path == name for name in gm.state_dict().keys()))
def buffer_exists(gm: GraphModule, path: str) -> bool:
return (any(path == name for name, _ in gm.named_buffers())
and any(path == name for name in gm.state_dict().keys()))
# Test that we added the "dropout" submodule
self.assertTrue(module_exists(a, "net_b.net_c.dropout"))
# Test `get_submodule` with an added submodule
self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))
# Test that the "conv" submodule is still there
self.assertTrue(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with an original module
self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))
# Test that the "conv" node is NOT still there
conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
self.assertEqual(conv, [])
a.delete_submodule("net_b.net_c.conv")
# Test that the "conv" submodule is now gone
self.assertFalse(module_exists(a, "net_b.net_c.conv"))
# Test `get_submodule` with a deleted submodule
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`conv`"):
self.assertIsNone(a.get_submodule("net_b.net_c.conv"))
# Test `get_attr` warnings
cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]
with a.graph.inserting_before(cat):
with warnings.catch_warnings(record=True) as w:
param = a.graph.get_attr(qualified_name="net_b.net_c.param")
self.assertEqual(len(w), 0)
with self.assertWarnsRegex(UserWarning, "Attempted to "
"insert a get_attr Node with no "
"underlying reference in the "
"owning GraphModule"):
bad_param = a.graph.get_attr(qualified_name="net_b.param")
a.graph.erase_node(bad_param)
cat.args = (*cat.args, param)
a.recompile()
a.graph.lint()
# Test `get_parameter`
a.get_parameter("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "is not an "
"nn.Parameter"):
a.get_parameter("net_b.buf")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`param`"):
a.get_parameter("net_b.param")
# Test `get_buffer`
a.get_buffer("net_b.buf")
with self.assertRaisesRegex(AttributeError, "is not a "
"buffer"):
a.get_buffer("net_b.net_c.param")
with self.assertRaisesRegex(AttributeError, "has no attribute "
"`buf`"):
a.get_buffer("net_b.net_c.buf")
# Test non-nested attributes
a.get_submodule("")
a.get_parameter("param")
# Insert some unused submodules
a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))
# Garbage collection
a.delete_all_unused_submodules()
# Test that all the unused submodules are gone
self.assertFalse(module_exists(a, "net_b.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
self.assertFalse(module_exists(a, "batch_norm_2d"))
# Test that we didn't delete any unused Parameters or buffers
self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
self.assertTrue(buffer_exists(a, "net_b.buf"))
a.graph.lint()
def test_delete_unused_submodules_leaf(self):
class SubModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
def forward(self, x):
x = self.linear(x)
x = self.relu(x)
return x
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = SubModule()
def forward(self, x):
x = self.submod(x)
return x
model = Model()
class MyCustomTracer(torch.fx.Tracer):
def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
return module_qualified_name == "submod"
inputs = torch.randn(1, 10)
traced_graph = MyCustomTracer().trace(model)
gm2 = torch.fx.GraphModule(model, traced_graph)
gm2.delete_all_unused_submodules()
torch.testing.assert_allclose(gm2(inputs), model(inputs))
def test_tracing_graphmodules_as_leaf_submodules(self):
class A(torch.nn.Module):
def forward(self, t):
return t + t
class B(torch.nn.Module):
def __init__(self):
super(type(self), self).__init__()
self.calling = False
self.called = False
def forward(self, t):
if self.calling:
return t - t
else:
return t + t
def __call__(self, *args):
self.called = True
self.calling = True
return super(type(self), self).__call__(*args)
self.calling = False
class M(torch.nn.Module):
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, t):
x = self.a(t)
y = self.b(t)
return x + y
class LeafTracer(Tracer):
def is_leaf_module(self, module, name):
return True
class LeafTracerNotB(Tracer):
def is_leaf_module(self, module, name):
return False if "b" in name else True
# Recompile calls added "for fun", since they
# chain __call__ wrappers.
#
# Test: B as a regular, non-leaf module
#
a = symbolic_trace(A())
a.recompile()
m = M(a, B())
graph = LeafTracerNotB().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is not treated as leaf.
self.assertFalse(hasattr(gm, "b"))
# Test assert custom __call__ on submodule b was honored.
match = [
n
for n in gm.graph.nodes
if n.op == "call_function" and n.target == operator.sub
]
self.assertTrue(len(match) == 1)
#
# Test: B as a regular, leaf module
# symbolic_trace should only patch torch.nn.Module.__call__,
# which means B.__call__ should still execute
#
a = symbolic_trace(A())
a.recompile()
b = B()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
# Test graphmodule/submodule a is not inlined.
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
# Test submodule b is leaf:
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
# Test b.__call__ was run
self.assertTrue(b.called)
self.assertTrue(gm.get_submodule("b").called)
#
# Test: B as GraphModule leaf
# __call__ not honored since symbolic_trace directly invokes forward()
#
a = symbolic_trace(A())
a.recompile()
b = symbolic_trace(B())
b.recompile()
m = M(a, b)
graph = LeafTracer().trace(m)
gm = GraphModule(m, graph)
gm.recompile()
self.assertTrue(isinstance(gm.get_submodule("a"), GraphModule))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "a"]
self.assertTrue(len(match) == 1)
self.assertTrue(isinstance(gm.get_submodule("b"), torch.nn.Module))
match = [n for n in gm.graph.nodes if n.op == "call_module" and n.target == "b"]
self.assertTrue(len(match) == 1)
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
class MyModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.register_buffer("my_buff", torch.rand(3, 4))
self.register_parameter(
"my_param", torch.nn.Parameter(torch.rand(3, 4))
)
def forward(self, x):
return x + self.my_buff + self.my_param
mod = MyModule()
mod_traced = symbolic_trace(mod)
# Create new GraphModule based on original, either w/ dict or root module.
orig_buff = mod_traced.get_buffer("my_buff")
orig_param = mod_traced.get_parameter("my_param")
mod_traced_new = GraphModule(
{"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
mod_traced.graph,
)
# Check that both my_buff and my_param are found and the same.
try:
new_buff = mod_traced_new.get_buffer("my_buff")
except Exception:
self.fail("Did not find my_buff")
self.assertEqual(orig_buff, new_buff)
try:
new_param = mod_traced_new.get_parameter("my_param")
except Exception:
self.fail("Did not find my_param")
self.assertEqual(orig_param, new_param)
x = torch.rand(3, 4)
orig_out = mod_traced(x)
submodules_out = mod_traced_new(x)
self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
return a(x)
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
class A:
def __call__(self, x: torch.Tensor):
return torch.add(x, x)
class M(torch.nn.Module):
def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
return a(x)[0]
self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
"`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
try:
import fx.test_future # noqa: F401
finally:
del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
class Foo(torch.nn.Module):
def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
return "foo"
traced = torch.fx.symbolic_trace(Foo())
x = ()
y = ("bar", ())
traced(x, y)
FileCheck().check("_Tuple[()]") \
.check("typing_Tuple[str,typing_Tuple[()]]") \
.run(traced.code)
scripted = torch.jit.script(traced)
scripted(x, y)
FileCheck().check("Tuple[()]") \
.check("Tuple[str, Tuple[()]]") \
.run(scripted.code)
@unittest.skipIf(IS_WINDOWS, "Python Windows bug? https://bugs.python.org/issue45108")
def test_assert(self):
def f(x):
assert x > 1
return x + 1
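        # Assert tracing is opt-in: enable it temporarily so the `assert`
        # statement is captured as a node in the traced graph.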
try:
torch.fx.proxy.TracerBase.trace_asserts = True
traced = symbolic_trace(f)
finally:
torch.fx.proxy.TracerBase.trace_asserts = False
self.assertEqual(f(2), traced(2))
with self.assertRaises(AssertionError):
traced(0)
def test_pytree(self):
def f_sum(x):
return sum(x)
def f_sum_dict(x):
out = 0
for k, v in x.items():
out += v
return out
def f_dict_list_map(x):
new_dict = {}
for k, v in x.items():
new_dict[k] = [i + 1 for i in v]
return new_dict
def f_dict_add(x):
return x['a'] + sum(x['z'])
def f_namedtuple_add(x):
return x.x + x.y
pytree._register_pytree_node(
Foo,
lambda x: ([x.a, x.b], None),
lambda x, _: Foo(x[0], x[1]),
)
fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])
def f_custom(x):
return x.a + x.b
def f_custom_dict(x):
return f_sum_dict(x.a) + x.b
def f_return_custom(x):
return Foo(x.b, x.a)
tests = [
(f_sum, [PH, PH, PH]),
(f_sum, []),
(f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
(f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
(f_dict_list_map, {5: (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
(f_dict_add, {'a': PH, 'z': []}),
(f_custom, Foo(PH, PH)),
(f_custom, Foo(PH, 3)),
(f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
# (f_return_custom, Foo(PH, PH)), # Don't currently support output pytrees
(f_namedtuple_add, Point(PH, PH)),
]
def verify_pytree(f, inp):
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
orig_out = f(val)
nf = symbolic_trace(f, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(val))), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
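            # Retracing without concrete_args drops the pytree flattening, so the
            # structured input is captured as a single placeholder again.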
nf = symbolic_trace(nf)
self.assertEqual(nf(val), orig_out)
assert "tree_flatten_spec" not in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)
nf = symbolic_trace(nf, concrete_args={'x': inp})
self.assertEqual(nf(val), orig_out)
assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)
pickled = pickle.dumps(nf)
nf = pickle.loads(pickled)
self.assertEqual(nf(val), orig_out)
for f, inp in tests:
verify_pytree(f, inp)
def test_pytree_concrete(self):
def f(b, a):
if b:
return a['a']
else:
return a['z']
inp = {'a': {'a': PH, 'z': PH}, 'b': True}
nf = symbolic_trace(f, concrete_args=inp)
val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
self.assertEqual(nf(**val), f(**val))
nf = symbolic_trace(nf)
self.assertEqual(nf(**val), f(**val))
def test_custom_codegen(self):
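        # Custom CodeGen whose generated forward() accepts a single list of
        # tensors and unpacks it, instead of taking separate positional arguments.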
class ListCodeGen(CodeGen):
def gen_fn_def(self, free_vars, maybe_return_annotation):
lst_unpack = f"""
def forward(self, args_list: List[torch.Tensor]){maybe_return_annotation}:
{', '.join(free_vars)} = args_list"""
return lst_unpack
def additional_globals(self):
return [('List', typing.List)]
def process_inputs(self, *inputs):
assert(len(inputs) == 1)
return inputs[0]
def f(a, b):
return a + b
nf = symbolic_trace(f)
vals = [torch.randn(3), torch.randn(3)]
self.assertEqual(nf(*vals), f(*vals))
nf.graph.set_codegen(ListCodeGen())
nf.recompile()
bare_fx = GraphModule({}, copy.deepcopy(nf.graph))
bare_fx.graph.set_codegen(CodeGen())
bare_fx.recompile()
self.assertEqual(nf(vals), f(*vals))
self.assertEqual(nf.graph.process_outputs(bare_fx(*nf.graph.process_inputs(vals))), f(*vals))
ts_f = torch.jit.script(nf)
self.assertEqual(nf(vals), ts_f(vals))
def test_imul_code_print(self):
graph = torch.fx.Graph()
a = graph.placeholder("a")
b = graph.placeholder("b")
graph.call_function(operator.imul, (a, b), {})
graph.output(a)
gm = torch.fx.GraphModule({}, graph)
gm.recompile()
self.assertEqual(gm(2, 3), 6)
self.assertIn("a *= b", gm.code)
def run_getitem_target():
from torch.fx._symbolic_trace import _wrapped_methods_to_patch
_wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
try:
TestFX().getitem_inner()
finally:
_wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
@onlyCPU
@ops(op_db, allowed_dtypes=(torch.float,))
def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
if not isinstance(op.op, types.BuiltinFunctionType):
raise unittest.SkipTest("This path doesn't work on Python functions")
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
schemas = get_signature_for_torch_op(op.op)
if not schemas:
raise RuntimeError('No Schemas Returned')
for sample_input in sample_inputs_itr:
# Iterate through overloads until we hit a match. If we exit this
# loop via `else`, we haven't found a match
for schema in schemas:
try:
bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
bound_args.apply_defaults()
op(*bound_args.args, **bound_args.kwargs)
break
except TypeError as e:
pass
else:
raise RuntimeError(f'Did not match any schemas for op {op.name}!')
class TestFXAPIBackwardCompatibility(JitTestCase):
def setUp(self):
self.maxDiff = None
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
def _fn_to_stable_annotation_str(self, obj):
"""
Unfortunately we have to serialize function signatures manually since
serialization for `inspect.Signature` objects is not stable across
python versions
"""
fn_name = torch.typename(obj)
signature = inspect.signature(obj)
sig_str = f'{fn_name}{signature}'
arg_strs = []
for k, v in signature.parameters.items():
maybe_type_annotation = f': {self._annotation_type_to_stable_str(v.annotation, sig_str)}'\
if v.annotation is not inspect.Signature.empty else ''
def default_val_str(val):
if isinstance(val, (tuple, list)):
str_pieces = ['(' if isinstance(val, tuple) else '[']
str_pieces.append(', '.join(default_val_str(v) for v in val))
if isinstance(val, tuple) and len(str_pieces) == 2:
str_pieces.append(',')
str_pieces.append(')' if isinstance(val, tuple) else ']')
return ''.join(str_pieces)
# Need to fix up some default value strings.
# First case: modules. Default module `repr` contains the FS path of the module.
# Don't leak that
if isinstance(val, types.ModuleType):
return f'<module {val.__name__}>'
# Second case: callables. Callables (such as lambdas) encode their address in
# their string repr. Don't do that
if callable(val):
return f'<function {val.__name__}>'
return str(val)
if v.default is not inspect.Signature.empty:
default_val_str = default_val_str(v.default) if not isinstance(v.default, str) else f"'{v.default}'"
maybe_default = f' = {default_val_str}'
else:
maybe_default = ''
maybe_stars = ''
if v.kind == inspect.Parameter.VAR_POSITIONAL:
maybe_stars = '*'
elif v.kind == inspect.Parameter.VAR_KEYWORD:
maybe_stars = '**'
arg_strs.append(f'{maybe_stars}{k}{maybe_type_annotation}{maybe_default}')
return_annot = f' -> {self._annotation_type_to_stable_str(signature.return_annotation, sig_str)}'\
if signature.return_annotation is not inspect.Signature.empty else ''
return f'{fn_name}({", ".join(arg_strs)}){return_annot}'
def _annotation_type_to_stable_str(self, t, sig_str):
if t is inspect.Signature.empty:
return ''
# Forward ref
if isinstance(t, str):
return f"'{t}'"
if hasattr(typing, 'ForwardRef') and isinstance(t, typing.ForwardRef):
return t.__forward_arg__
if hasattr(typing, '_ForwardRef') and isinstance(t, typing._ForwardRef):
return t.__forward_arg__
trivial_mappings = {
str : 'str',
int : 'int',
float: 'float',
bool: 'bool',
torch.dtype: 'torch.dtype',
torch.Tensor: 'torch.Tensor',
torch.device: 'torch.device',
torch.memory_format: 'torch.memory_format',
slice: 'slice',
torch.nn.Module: 'torch.nn.modules.module.Module',
torch.fx.Graph : 'torch.fx.graph.Graph',
torch.fx.Node : 'torch.fx.node.Node',
torch.fx.Proxy : 'torch.fx.proxy.Proxy',
torch.fx.node.Target : 'torch.fx.node.Target',
torch.fx.node.Argument : 'torch.fx.node.Argument',
torch.fx.graph.PythonCode : 'torch.fx.graph.PythonCode',
torch.fx.graph_module.GraphModule: 'torch.fx.graph_module.GraphModule',
torch.fx.subgraph_rewriter.Match: 'torch.fx.subgraph_rewriter.Match',
Ellipsis : '...',
typing.Any: 'Any',
type(None): 'NoneType',
None: 'None',
typing.Iterator: 'Iterator',
}
mapping = trivial_mappings.get(t, None)
if mapping:
return mapping
# Handle types with contained types
contained = getattr(t, '__args__', None) or []
# Callables contain a bare List for arguments
contained = t if isinstance(t, list) else contained
# Python 3.8 puts type vars into __args__ for unbound types such as Dict
if all(isinstance(ct, typing.TypeVar) for ct in contained):
contained = []
contained_type_annots = [self._annotation_type_to_stable_str(ct, sig_str) for ct in contained]
contained_type_str = f'[{", ".join(contained_type_annots)}]' if len(contained_type_annots) > 0 else ''
origin = getattr(t, '__origin__', None)
if origin is None:
# Unbound types don't have `__origin__` in some Python versions, so fix that up here.
origin = t if t in {typing.Tuple, typing.Union, typing.Dict, typing.List, typing.Type, typing.Callable} else origin
if origin in {tuple, typing.Tuple}:
return f'Tuple{contained_type_str}'
if origin in {typing.Union}:
# Annoying hack to detect Optional
if len(contained) == 2 and (contained[0] is type(None)) ^ (contained[1] is type(None)):
not_none_param = contained[0] if contained[0] is not type(None) else contained[1]
return f'Optional[{self._annotation_type_to_stable_str(not_none_param, sig_str)}]'
return f'Union{contained_type_str}'
if origin in {dict, typing.Dict}:
return f'Dict{contained_type_str}'
if origin in {list, typing.List}:
return f'List{contained_type_str}'
if origin in {type, typing.Type}:
return f'Type{contained_type_str}'
if isinstance(t, typing.Callable):
if len(contained) > 0 and contained[0] is not Ellipsis:
return f'Callable[[{", ".join(contained_type_annots[:-1])}], {contained_type_annots[-1]}]'
else:
return f'Callable{contained_type_str}'
raise RuntimeError(f'Unrecognized type {t} used in BC-compatible type signature {sig_str}.'
f'Please add support for this type and confirm with the '
f'FX team that your signature change is valid.')
def test_function_back_compat(self):
"""
Test backward compatibility for function signatures with
@compatibility(is_backward_compatible=True). Currently this checks for
exact signature matches, which may lead to false positives. If this
becomes too annoying, we can refine this check to actually parse out
the saved schema strings and check if the change is truly backward-
incompatible.
"""
signature_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if not isinstance(obj, type):
signature_strs.append(self._fn_to_stable_annotation_str(obj))
signature_strs.sort()
try:
self.assertExpected('\n'.join(signature_strs), 'fx_backcompat_function_signatures')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX function that has been marked " \
f"as backwards-compatible has experienced a signature change. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_class_member_back_compat(self):
"""
Test backward compatibility for members of classes with
@compatibility(is_backward_compatible=True). Currently this checks for
exact matches on the publicly visible members of the class.
"""
class_method_strs = []
for obj in _BACK_COMPAT_OBJECTS:
if isinstance(obj, type):
public_members = [name for name in obj.__dict__ if not name.startswith('_')]
class_method_strs.append(f'{torch.typename(obj)} {sorted(public_members)}')
class_method_strs.sort()
try:
self.assertExpected('\n'.join(class_method_strs), 'fx_backcompat_class_members')
except AssertionError as e:
msg = f"{e}\n****** ERROR ******\nAn FX class that has been marked " \
f"as backwards-compatible has experienced change in its public members. See the " \
f"above exception context for more information. If this change was " \
f"unintended, please revert it. If it was intended, check with the FX " \
f"team to ensure that the proper deprecation protocols have been followed " \
f"and subsequently --accept the change."
raise AssertionError(msg)
def test_public_api_surface(self):
non_back_compat_objects = {}
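        # Recursively walk the public torch.fx namespaces and collect every
        # class/function that lacks a backwards-compatibility designation.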
def check_symbols_have_bc_designation(m, prefix):
if not m.__name__.startswith('torch.fx'):
return
if m.__name__.startswith('torch.fx.experimental'):
return
for k, v in m.__dict__.items():
if v is m:
continue
if k.startswith('_'):
continue
if isinstance(v, types.ModuleType):
check_symbols_have_bc_designation(v, prefix + [k])
elif isinstance(v, type) or isinstance(v, types.FunctionType):
if v not in _MARKED_WITH_COMATIBLITY:
non_back_compat_objects.setdefault(v)
check_symbols_have_bc_designation(torch.fx, ['torch', 'fx'])
check_symbols_have_bc_designation(torch.fx.passes, ['torch', 'fx', 'passes'])
non_back_compat_strs = [torch.typename(obj) for obj in non_back_compat_objects.keys()]
# Only want objects in torch.fx
non_back_compat_strs = [
s for s in non_back_compat_strs if s.startswith('torch.fx') and not s.startswith('torch.fx.experimental')]
# Only want objects in public namespaces
non_back_compat_strs = [
s for s in non_back_compat_strs if all(not atom.startswith('_') for atom in s.split('.'))]
non_back_compat_strs.sort()
if len(non_back_compat_strs) != 0:
raise AssertionError(f"Public FX API(s) {non_back_compat_strs} introduced but not given a "
f"backwards-compatibility classification! Please decorate these "
f"API(s) with `@torch.fx._compatibility.compatibility` to specify "
f"BC guarantees.")
class TestFunctionalTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
"has_torch_function_variadic", "handle_torch_function",
"boolean_dispatch")
TO_PATCH = {"has_torch_function": None,
"has_torch_function_unary": None,
"has_torch_function_variadic": None}
BUILT_IN_FUNC = (AssertionError, "")
PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")
MUTABLE = (RuntimeError, r"Tried to trace mutable operation")
UNTRACEABLE_FUNCTIONALS = {
"adaptive_avg_pool1d": BUILT_IN_FUNC,
"avg_pool1d": BUILT_IN_FUNC,
"avg_pool2d": BUILT_IN_FUNC,
"avg_pool3d": BUILT_IN_FUNC,
"bilinear": BUILT_IN_FUNC,
"celu_": BUILT_IN_FUNC,
"channel_shuffle": BUILT_IN_FUNC,
"native_channel_shuffle": BUILT_IN_FUNC,
"conv1d": BUILT_IN_FUNC,
"conv2d": BUILT_IN_FUNC,
"conv3d": BUILT_IN_FUNC,
"conv_tbc": BUILT_IN_FUNC,
"conv_transpose1d": BUILT_IN_FUNC,
"conv_transpose2d": BUILT_IN_FUNC,
"conv_transpose3d": BUILT_IN_FUNC,
"cosine_similarity": BUILT_IN_FUNC,
"elu_": BUILT_IN_FUNC,
"gelu": BUILT_IN_FUNC,
"hardshrink": BUILT_IN_FUNC,
"hardtanh_": BUILT_IN_FUNC,
"leaky_relu_": BUILT_IN_FUNC,
"linear": BUILT_IN_FUNC,
"logsigmoid": BUILT_IN_FUNC,
"one_hot": BUILT_IN_FUNC,
"pairwise_distance": BUILT_IN_FUNC,
"pdist": BUILT_IN_FUNC,
"pixel_shuffle": BUILT_IN_FUNC,
"pixel_unshuffle": BUILT_IN_FUNC,
"prelu": BUILT_IN_FUNC,
"relu_": BUILT_IN_FUNC,
"rrelu_": BUILT_IN_FUNC,
"selu_": BUILT_IN_FUNC,
"softplus": BUILT_IN_FUNC,
"softshrink": BUILT_IN_FUNC,
"threshold_": BUILT_IN_FUNC,
"adaptive_avg_pool2d": LEN_ERROR,
"adaptive_avg_pool3d": LEN_ERROR,
"adaptive_max_pool2d_with_indices": LEN_ERROR,
"adaptive_max_pool3d_with_indices": LEN_ERROR,
"instance_norm": CONTROL_FLOW,
"pad": LEN_ERROR,
"adaptive_max_pool1d": PROXY_ITERABLE,
"adaptive_max_pool2d": PROXY_ITERABLE,
"adaptive_max_pool3d": PROXY_ITERABLE,
"fractional_max_pool2d": PROXY_ITERABLE,
"fractional_max_pool3d": PROXY_ITERABLE,
"max_pool1d": PROXY_ITERABLE,
"max_pool2d": PROXY_ITERABLE,
"max_pool3d": PROXY_ITERABLE,
"group_norm": PROXY_ITERATED,
"lp_pool2d": PROXY_ITERATED,
"max_unpool1d": PROXY_ITERATED,
"max_unpool2d": PROXY_ITERATED,
"max_unpool3d": PROXY_ITERATED,
"adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
"fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
"layer_norm": ARG_TYPE_MISMATCH,
"lp_pool1d": ARG_TYPE_MISMATCH,
"affine_grid": CONTROL_FLOW,
"alpha_dropout": CONTROL_FLOW,
"batch_norm": CONTROL_FLOW,
"binary_cross_entropy": CONTROL_FLOW,
"binary_cross_entropy_with_logits": CONTROL_FLOW,
"celu": CONTROL_FLOW,
"cosine_embedding_loss": CONTROL_FLOW,
"cross_entropy": CONTROL_FLOW,
"ctc_loss": CONTROL_FLOW,
"dropout": CONTROL_FLOW,
"dropout2d": CONTROL_FLOW,
"dropout3d": CONTROL_FLOW,
"elu": CONTROL_FLOW,
"embedding": CONTROL_FLOW,
"embedding_bag": CONTROL_FLOW,
"feature_alpha_dropout": CONTROL_FLOW,
"fold": CONTROL_FLOW,
"gaussian_nll_loss": CONTROL_FLOW,
"glu": CONTROL_FLOW,
"grid_sample": CONTROL_FLOW,
"gumbel_softmax": CONTROL_FLOW,
"hardsigmoid": CONTROL_FLOW,
"hardswish": CONTROL_FLOW,
"hardtanh": CONTROL_FLOW,
"hinge_embedding_loss": CONTROL_FLOW,
"huber_loss": CONTROL_FLOW,
"interpolate": CONTROL_FLOW,
"kl_div": CONTROL_FLOW,
"l1_loss": CONTROL_FLOW,
"leaky_relu": CONTROL_FLOW,
"local_response_norm": CONTROL_FLOW,
"margin_ranking_loss": CONTROL_FLOW,
"max_pool1d_with_indices": CONTROL_FLOW,
"max_pool2d_with_indices": CONTROL_FLOW,
"max_pool3d_with_indices": CONTROL_FLOW,
"mse_loss": CONTROL_FLOW,
"multi_head_attention_forward": CONTROL_FLOW,
"multi_margin_loss": CONTROL_FLOW,
"multilabel_margin_loss": CONTROL_FLOW,
"multilabel_soft_margin_loss": CONTROL_FLOW,
"nll_loss": CONTROL_FLOW,
"poisson_nll_loss": CONTROL_FLOW,
"relu": CONTROL_FLOW,
"relu6": CONTROL_FLOW,
"rrelu": CONTROL_FLOW,
"selu": CONTROL_FLOW,
"silu": CONTROL_FLOW,
"mish": CONTROL_FLOW,
"smooth_l1_loss": CONTROL_FLOW,
"soft_margin_loss": CONTROL_FLOW,
"threshold": CONTROL_FLOW,
"triplet_margin_loss": CONTROL_FLOW,
"triplet_margin_with_distance_loss": CONTROL_FLOW,
"unfold": CONTROL_FLOW,
"upsample": CONTROL_FLOW,
"upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
"upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
"normalize" : MUTABLE,
}
    # List of nn.functionals that take Tensor inputs but lack type annotations
FUNCTIONALS_WITHOUT_ANNOTATION = (
"adaptive_max_pool1d",
"adaptive_max_pool2d",
"adaptive_max_pool3d",
"fractional_max_pool2d",
"fractional_max_pool3d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"gaussian_nll_loss",
"upsample",
"upsample_bilinear",
"upsample_nearest",
)
# Inconsistent behavior between Python 3.8 and other Python versions:
# - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
# - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
# internal exception above
# Use the following map to override the expected exception for Python 3.8
UNTRACEABLE_FUNCTIONALS_PY38 = {
"adaptive_max_pool1d": PROXY_ITERATED,
"adaptive_max_pool2d": PROXY_ITERATED,
"adaptive_max_pool3d": PROXY_ITERATED,
"fractional_max_pool2d": PROXY_ITERATED,
"fractional_max_pool3d": PROXY_ITERATED,
"max_pool1d": PROXY_ITERATED,
"max_pool2d": PROXY_ITERATED,
"max_pool3d": PROXY_ITERATED,
"group_norm": LEN_ERROR
}
@classmethod
def _get_functional(cls):
functional_list = []
for f in dir(torch.nn.functional):
if not f.islower():
continue
# Ignore internal functions
if f.startswith('_'):
continue
# Ignore supporting functions
if f in cls.IGNORE_FUNCS:
continue
fn = getattr(torch.nn.functional, f)
            # Ignore non-callable objects like modules
if not isinstance(fn, Callable):
continue
if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
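                # Skip functionals that do not declare at least one
                # torch.Tensor-annotated parameter.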
try:
sig = inspect.signature(fn)
has_tensor_arg = False
for arg, param in sig.parameters.items():
if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
has_tensor_arg = True
if not has_tensor_arg:
continue
                # inspect.signature raises ValueError when no signature is
                # available or the object is not supported
except ValueError:
pass
functional_list.append((f, fn))
return functional_list
@classmethod
def generate_test_func(cls, func_name, fn):
def functional_test(self):
if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
sys.version_info >= (3, 8) and sys.version_info < (3, 10):
exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
elif func_name in self.UNTRACEABLE_FUNCTIONALS:
exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
with self.assertRaisesRegex(exc, err):
symbolic_trace(fn)
else:
symbolic_trace(fn)
return functional_test
@classmethod
def generate_tests(cls):
functional_list = cls._get_functional()
for func_name, fn in functional_list:
test_name = "test_nn_functional_" + func_name
functional_test = cls.generate_test_func(func_name, fn)
setattr(cls, test_name, functional_test)
@classmethod
def setUpClass(cls):
def no(*args, **kwargs):
return False
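        # Patch the `has_torch_function*` helpers to always return False so the
        # functional implementations skip the __torch_function__ dispatch path
        # during tracing.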
for name in cls.TO_PATCH.keys():
cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
setattr(torch.nn.functional, name, no)
@classmethod
def tearDownClass(cls):
for name in cls.TO_PATCH.keys():
setattr(torch.nn.functional, name, cls.TO_PATCH[name])
TestFunctionalTracing.generate_tests()
instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
def setUp(self):
        # Checking for mutable operations while tracing is feature flagged
# Enable it in testing but not by default
self.orig_tracer_mutable_flag = torch.fx.proxy.TracerBase.check_mutable_operations
torch.fx.proxy.TracerBase.check_mutable_operations = True
def tearDown(self):
torch.fx.proxy.TracerBase.check_mutable_operations = self.orig_tracer_mutable_flag
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
RuntimeError,
r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)
UNTRACEABLE_MODELS = {
"fasterrcnn_resnet50_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
"fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
"maskrcnn_resnet50_fpn": PROXY_ITERATED,
"keypointrcnn_resnet50_fpn": PROXY_ITERATED,
"retinanet_resnet50_fpn": PROXY_ITERATED,
}
UNSCRIPTABLE_MODELS = {
"googlenet": INCONSISTENT_TYPE,
"inception_v3": INCONSISTENT_TYPE,
}
output_transform = {
"fcn_resnet50": lambda x: x["out"],
"fcn_resnet101": lambda x: x["out"],
"deeplabv3_resnet50": lambda x: x["out"],
"deeplabv3_resnet101": lambda x: x["out"],
"deeplabv3_mobilenet_v3_large": lambda x: x["out"],
"lraspp_mobilenet_v3_large": lambda x: x["out"],
"fasterrcnn_resnet50_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
"fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
"maskrcnn_resnet50_fpn": lambda x: x[1],
"keypointrcnn_resnet50_fpn": lambda x: x[1],
"retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
def run_test(self):
model = model_fn(**kwargs)
model = model.eval()
if name in self.UNTRACEABLE_MODELS:
err, exc = self.UNTRACEABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
graph = symbolic_trace(model)
else:
out_transform = self.output_transform.get(name, lambda x: x)
graph : torch.fx.GraphModule = symbolic_trace(model)
a = out_transform(model(x))
b = out_transform(graph(x))
self.assertEqual(a, b)
if name in self.UNSCRIPTABLE_MODELS:
err, exc = self.UNSCRIPTABLE_MODELS[name]
with self.assertRaisesRegex(err, exc):
script = torch.jit.script(graph)
else:
script = torch.jit.script(graph)
c = out_transform(script(x))
self.assertEqual(a, c)
return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
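            # Model factory functions are public, lowercase callables;
            # skip classes and private names.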
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
if HAS_TORCHVISION:
TestVisionTracing.generate_tests()
if __name__ == '__main__':
run_tests()
| [] | [] | [
"FX_PATCH_GETITEM"
] | [] | ["FX_PATCH_GETITEM"] | python | 1 | 0 | |
cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ModelUtils.java | /*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.operator.cluster.model;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.fabric8.kubernetes.api.model.Affinity;
import io.fabric8.kubernetes.api.model.AffinityBuilder;
import io.fabric8.kubernetes.api.model.Container;
import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.HasMetadata;
import io.fabric8.kubernetes.api.model.LabelSelector;
import io.fabric8.kubernetes.api.model.NodeSelectorRequirement;
import io.fabric8.kubernetes.api.model.NodeSelectorRequirementBuilder;
import io.fabric8.kubernetes.api.model.NodeSelectorTerm;
import io.fabric8.kubernetes.api.model.NodeSelectorTermBuilder;
import io.fabric8.kubernetes.api.model.OwnerReference;
import io.fabric8.kubernetes.api.model.OwnerReferenceBuilder;
import io.fabric8.kubernetes.api.model.Quantity;
import io.fabric8.kubernetes.api.model.ResourceRequirements;
import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;
import io.fabric8.kubernetes.api.model.Toleration;
import io.fabric8.kubernetes.api.model.apps.StatefulSet;
import io.fabric8.kubernetes.api.model.networking.v1.NetworkPolicyPeer;
import io.strimzi.api.kafka.model.CertificateAuthority;
import io.strimzi.api.kafka.model.HasConfigurableMetrics;
import io.strimzi.api.kafka.model.JvmOptions;
import io.strimzi.api.kafka.model.SystemProperty;
import io.strimzi.api.kafka.model.storage.JbodStorage;
import io.strimzi.api.kafka.model.storage.PersistentClaimStorage;
import io.strimzi.api.kafka.model.storage.Storage;
import io.strimzi.api.kafka.model.TlsSidecar;
import io.strimzi.api.kafka.model.TlsSidecarLogLevel;
import io.strimzi.api.kafka.model.template.DeploymentTemplate;
import io.strimzi.api.kafka.model.template.InternalServiceTemplate;
import io.strimzi.api.kafka.model.template.PodDisruptionBudgetTemplate;
import io.strimzi.api.kafka.model.template.PodTemplate;
import io.strimzi.certs.CertAndKey;
import io.strimzi.operator.cluster.KafkaUpgradeException;
import io.strimzi.operator.common.Annotations;
import io.strimzi.operator.common.Reconciliation;
import io.strimzi.operator.common.ReconciliationLogger;
import io.strimzi.operator.common.Util;
import io.strimzi.operator.common.model.Labels;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import static java.util.Collections.emptyMap;
/**
* ModelUtils is a utility class that holds generic static helper functions
* These are generally to be used within the classes that extend the AbstractModel class
*/
public class ModelUtils {
private ModelUtils() {}
protected static final ReconciliationLogger LOGGER = ReconciliationLogger.create(ModelUtils.class.getName());
public static final String TLS_SIDECAR_LOG_LEVEL = "TLS_SIDECAR_LOG_LEVEL";
/**
* @param certificateAuthority The CA configuration.
* @return The cert validity.
*/
public static int getCertificateValidity(CertificateAuthority certificateAuthority) {
int validity = CertificateAuthority.DEFAULT_CERTS_VALIDITY_DAYS;
if (certificateAuthority != null
&& certificateAuthority.getValidityDays() > 0) {
validity = certificateAuthority.getValidityDays();
}
return validity;
}
/**
* @param certificateAuthority The CA configuration.
* @return The renewal days.
*/
public static int getRenewalDays(CertificateAuthority certificateAuthority) {
int renewalDays = CertificateAuthority.DEFAULT_CERTS_RENEWAL_DAYS;
if (certificateAuthority != null
&& certificateAuthority.getRenewalDays() > 0) {
renewalDays = certificateAuthority.getRenewalDays();
}
return renewalDays;
}
/**
     * Generates the label selector used by the entity operators to find the resources related to a given cluster
*
* @param cluster Name of the cluster
     * @return Label selector string in the form key=value
*/
public static String defaultResourceLabels(String cluster) {
return String.format("%s=%s",
Labels.STRIMZI_CLUSTER_LABEL, cluster);
}
/**
* @param sts The StatefulSet
     * @param containerName The name of the container whose environment variables are to be retrieved
     * @return The environment of the given container in the StatefulSet.
*/
public static Map<String, String> getContainerEnv(StatefulSet sts, String containerName) {
for (Container container : sts.getSpec().getTemplate().getSpec().getContainers()) {
if (containerName.equals(container.getName())) {
LinkedHashMap<String, String> map = new LinkedHashMap<>(container.getEnv() == null ? 2 : container.getEnv().size());
if (container.getEnv() != null) {
for (EnvVar envVar : container.getEnv()) {
map.put(envVar.getName(), envVar.getValue());
}
}
return map;
}
}
throw new KafkaUpgradeException("Could not find '" + containerName + "' container in StatefulSet " + sts.getMetadata().getName());
}
static EnvVar tlsSidecarLogEnvVar(TlsSidecar tlsSidecar) {
return AbstractModel.buildEnvVar(TLS_SIDECAR_LOG_LEVEL,
(tlsSidecar != null && tlsSidecar.getLogLevel() != null ?
tlsSidecar.getLogLevel() : TlsSidecarLogLevel.NOTICE).toValue());
}
public static Secret buildSecret(Reconciliation reconciliation, ClusterCa clusterCa, Secret secret, String namespace, String secretName,
String commonName, String keyCertName, Labels labels, OwnerReference ownerReference, boolean isMaintenanceTimeWindowsSatisfied) {
Map<String, String> data = new HashMap<>(4);
CertAndKey certAndKey = null;
boolean shouldBeRegenerated = false;
List<String> reasons = new ArrayList<>(2);
if (secret == null) {
reasons.add("certificate doesn't exist yet");
shouldBeRegenerated = true;
} else {
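            // Regenerate when the CA key was replaced or the CA certificate renewed, when the certificate
            // is expiring inside a satisfied maintenance window, or when the CA certificate generation changed.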
if (clusterCa.keyCreated() || clusterCa.certRenewed() ||
(isMaintenanceTimeWindowsSatisfied && clusterCa.isExpiring(secret, keyCertName + ".crt")) ||
clusterCa.hasCaCertGenerationChanged(secret)) {
reasons.add("certificate needs to be renewed");
shouldBeRegenerated = true;
}
}
if (shouldBeRegenerated) {
LOGGER.debugCr(reconciliation, "Certificate for pod {} need to be regenerated because: {}", keyCertName, String.join(", ", reasons));
try {
certAndKey = clusterCa.generateSignedCert(commonName, Ca.IO_STRIMZI);
} catch (IOException e) {
LOGGER.warnCr(reconciliation, "Error while generating certificates", e);
}
LOGGER.debugCr(reconciliation, "End generating certificates");
} else {
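            // No renewal needed: reuse the existing key and certificate stored in the Secret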
if (secret.getData().get(keyCertName + ".p12") != null &&
!secret.getData().get(keyCertName + ".p12").isEmpty() &&
secret.getData().get(keyCertName + ".password") != null &&
!secret.getData().get(keyCertName + ".password").isEmpty()) {
certAndKey = new CertAndKey(
decodeFromSecret(secret, keyCertName + ".key"),
decodeFromSecret(secret, keyCertName + ".crt"),
null,
decodeFromSecret(secret, keyCertName + ".p12"),
new String(decodeFromSecret(secret, keyCertName + ".password"), StandardCharsets.US_ASCII)
);
} else {
try {
// coming from an older operator version, the secret exists but without keystore and password
certAndKey = clusterCa.addKeyAndCertToKeyStore(commonName,
decodeFromSecret(secret, keyCertName + ".key"),
decodeFromSecret(secret, keyCertName + ".crt"));
} catch (IOException e) {
LOGGER.errorCr(reconciliation, "Error generating the keystore for {}", keyCertName, e);
}
}
}
if (certAndKey != null) {
data.put(keyCertName + ".key", certAndKey.keyAsBase64String());
data.put(keyCertName + ".crt", certAndKey.certAsBase64String());
data.put(keyCertName + ".p12", certAndKey.keyStoreAsBase64String());
data.put(keyCertName + ".password", certAndKey.storePasswordAsBase64String());
}
return createSecret(secretName, namespace, labels, ownerReference, data,
Collections.singletonMap(clusterCa.caCertGenerationAnnotation(), String.valueOf(clusterCa.certGeneration())), emptyMap());
}
public static Secret createSecret(String name, String namespace, Labels labels, OwnerReference ownerReference,
Map<String, String> data, Map<String, String> customAnnotations, Map<String, String> customLabels) {
if (ownerReference == null) {
return new SecretBuilder()
.withNewMetadata()
.withName(name)
.withNamespace(namespace)
.withLabels(Util.mergeLabelsOrAnnotations(labels.toMap(), customLabels))
.withAnnotations(customAnnotations)
.endMetadata()
.withType("Opaque")
.withData(data)
.build();
} else {
return new SecretBuilder()
.withNewMetadata()
.withName(name)
.withOwnerReferences(ownerReference)
.withNamespace(namespace)
.withLabels(Util.mergeLabelsOrAnnotations(labels.toMap(), customLabels))
.withAnnotations(customAnnotations)
.endMetadata()
.withType("Opaque")
.withData(data)
.build();
}
}
/**
* Parses the values from the PodDisruptionBudgetTemplate in CRD model into the component model
*
* @param model AbstractModel class where the values from the PodDisruptionBudgetTemplate should be set
     * @param pdb PodDisruptionBudgetTemplate with the values from the CRD
*/
public static void parsePodDisruptionBudgetTemplate(AbstractModel model, PodDisruptionBudgetTemplate pdb) {
if (pdb != null) {
if (pdb.getMetadata() != null) {
model.templatePodDisruptionBudgetLabels = pdb.getMetadata().getLabels();
model.templatePodDisruptionBudgetAnnotations = pdb.getMetadata().getAnnotations();
}
model.templatePodDisruptionBudgetMaxUnavailable = pdb.getMaxUnavailable();
}
}
/**
* Parses the values from the PodTemplate in CRD model into the component model
*
* @param model AbstractModel class where the values from the PodTemplate should be set
     * @param pod PodTemplate with the values from the CRD
*/
public static void parsePodTemplate(AbstractModel model, PodTemplate pod) {
if (pod != null) {
if (pod.getMetadata() != null) {
model.templatePodLabels = pod.getMetadata().getLabels();
model.templatePodAnnotations = pod.getMetadata().getAnnotations();
}
if (pod.getAffinity() != null) {
model.setUserAffinity(pod.getAffinity());
}
if (pod.getTolerations() != null) {
model.setTolerations(removeEmptyValuesFromTolerations(pod.getTolerations()));
}
model.templateTerminationGracePeriodSeconds = pod.getTerminationGracePeriodSeconds();
model.templateImagePullSecrets = pod.getImagePullSecrets();
model.templateSecurityContext = pod.getSecurityContext();
model.templatePodPriorityClassName = pod.getPriorityClassName();
model.templatePodSchedulerName = pod.getSchedulerName();
model.templatePodHostAliases = pod.getHostAliases();
model.templatePodTopologySpreadConstraints = pod.getTopologySpreadConstraints();
model.templatePodEnableServiceLinks = pod.getEnableServiceLinks();
model.templateTmpDirSizeLimit = pod.getTmpDirSizeLimit();
}
}
/**
* Parses the values from the InternalServiceTemplate in CRD model into the component model
*
     * @param model AbstractModel class where the values from the InternalServiceTemplate should be set
     * @param service InternalServiceTemplate with the values from the CRD
*/
public static void parseInternalServiceTemplate(AbstractModel model, InternalServiceTemplate service) {
if (service != null) {
if (service.getMetadata() != null) {
model.templateServiceLabels = service.getMetadata().getLabels();
model.templateServiceAnnotations = service.getMetadata().getAnnotations();
}
model.templateServiceIpFamilyPolicy = service.getIpFamilyPolicy();
model.templateServiceIpFamilies = service.getIpFamilies();
}
}
/**
* Parses the values from the InternalServiceTemplate of a headless service in CRD model into the component model
*
     * @param model AbstractModel class where the values from the InternalServiceTemplate should be set
     * @param service InternalServiceTemplate with the values from the CRD
*/
public static void parseInternalHeadlessServiceTemplate(AbstractModel model, InternalServiceTemplate service) {
if (service != null) {
if (service.getMetadata() != null) {
model.templateHeadlessServiceLabels = service.getMetadata().getLabels();
model.templateHeadlessServiceAnnotations = service.getMetadata().getAnnotations();
}
model.templateHeadlessServiceIpFamilyPolicy = service.getIpFamilyPolicy();
model.templateHeadlessServiceIpFamilies = service.getIpFamilies();
}
}
/**
* Parses the values from the DeploymentTemplate in CRD model into the component model
*
* @param model AbstractModel class where the values from the DeploymentTemplate should be set
     * @param template DeploymentTemplate with the values from the CRD
*/
public static void parseDeploymentTemplate(AbstractModel model, DeploymentTemplate template) {
if (template != null) {
if (template.getMetadata() != null) {
model.templateDeploymentLabels = template.getMetadata().getLabels();
model.templateDeploymentAnnotations = template.getMetadata().getAnnotations();
}
if (template.getDeploymentStrategy() != null) {
model.templateDeploymentStrategy = template.getDeploymentStrategy();
} else {
model.templateDeploymentStrategy = io.strimzi.api.kafka.model.template.DeploymentStrategy.ROLLING_UPDATE;
}
}
}
/**
* Returns whether the given {@code Storage} instance is a persistent claim one or
* a JBOD containing at least one persistent volume.
*
* @param storage the Storage instance to check
* @return Whether the give Storage contains any persistent storage.
*/
public static boolean containsPersistentStorage(Storage storage) {
boolean isPersistentClaimStorage = storage instanceof PersistentClaimStorage;
if (!isPersistentClaimStorage && storage instanceof JbodStorage) {
isPersistentClaimStorage |= ((JbodStorage) storage).getVolumes()
.stream().anyMatch(volume -> volume instanceof PersistentClaimStorage);
}
return isPersistentClaimStorage;
}
public static Storage decodeStorageFromJson(String json) {
try {
return new ObjectMapper().readValue(json, Storage.class);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
public static String encodeStorageToJson(Storage storage) {
try {
return new ObjectMapper().writeValueAsString(storage);
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
}
/**
* Gets a map with custom labels or annotations from an environment variable
*
* @param envVarName Name of the environment variable which should be used as input
*
* @return A map with labels or annotations
*/
public static Map<String, String> getCustomLabelsOrAnnotations(String envVarName) {
return Util.parseMap(System.getenv().get(envVarName));
}
private static byte[] decodeFromSecret(Secret secret, String key) {
return Base64.getDecoder().decode(secret.getData().get(key));
}
/**
* Compares two Secrets with certificates and checks whether any value for a key which exists in both Secrets
* changed. This method is used to evaluate whether rolling update of existing brokers is needed when secrets with
     * certificates change. It separates changes to existing certificates from other changes to the secret such as
* added or removed certificates (scale-up or scale-down).
*
* @param current Existing secret
* @param desired Desired secret
*
* @return True if there is a key which exists in the data sections of both secrets and which changed.
*/
public static boolean doExistingCertificatesDiffer(Secret current, Secret desired) {
Map<String, String> currentData = current.getData();
Map<String, String> desiredData = desired.getData();
if (currentData == null) {
return true;
} else {
for (Map.Entry<String, String> entry : currentData.entrySet()) {
String desiredValue = desiredData.get(entry.getKey());
if (entry.getValue() != null
&& desiredValue != null
&& !entry.getValue().equals(desiredValue)) {
return true;
}
}
}
return false;
}
public static <T> List<T> asListOrEmptyList(List<T> list) {
return Optional.ofNullable(list)
.orElse(Collections.emptyList());
}
private static String getJavaSystemPropertiesToString(List<SystemProperty> javaSystemProperties) {
if (javaSystemProperties == null) {
return null;
}
List<String> javaSystemPropertiesList = new ArrayList<>(javaSystemProperties.size());
for (SystemProperty property: javaSystemProperties) {
javaSystemPropertiesList.add("-D" + property.getName() + "=" + property.getValue());
}
return String.join(" ", javaSystemPropertiesList);
}
/**
* This method transforms a String into a List of Strings, where each entry is an uncommented line of input.
     * Lines beginning with '#' (comments) and empty lines are ignored.
* @param config ConfigMap data as a String
* @return List of String key=value
*/
public static List<String> getLinesWithoutCommentsAndEmptyLines(String config) {
List<String> validLines = new ArrayList<>();
if (config != null) {
List<String> allLines = Arrays.asList(config.split("\\r?\\n"));
for (String line : allLines) {
if (!line.isEmpty() && !line.matches("\\s*\\#.*")) {
validLines.add(line);
}
}
}
return validLines;
}
/**
     * Gets the JVM options, including the Java system properties, and fills the corresponding Strimzi environment variables
* in order to pass them to the running application on the command line
*
* @param envVars environment variables list to put the JVM options and Java system properties
* @param jvmOptions JVM options
*/
public static void javaOptions(List<EnvVar> envVars, JvmOptions jvmOptions) {
StringBuilder strimziJavaOpts = new StringBuilder();
String xms = jvmOptions != null ? jvmOptions.getXms() : null;
if (xms != null) {
strimziJavaOpts.append("-Xms").append(xms);
}
String xmx = jvmOptions != null ? jvmOptions.getXmx() : null;
if (xmx != null) {
strimziJavaOpts.append(" -Xmx").append(xmx);
}
Map<String, String> xx = jvmOptions != null ? jvmOptions.getXx() : null;
if (xx != null) {
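            // Boolean -XX options are emitted as -XX:+Name / -XX:-Name; all other
            // options are emitted as -XX:Name=value.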
xx.forEach((k, v) -> {
strimziJavaOpts.append(' ').append("-XX:");
if ("true".equalsIgnoreCase(v)) {
strimziJavaOpts.append("+").append(k);
} else if ("false".equalsIgnoreCase(v)) {
strimziJavaOpts.append("-").append(k);
} else {
strimziJavaOpts.append(k).append("=").append(v);
}
});
}
String optsTrim = strimziJavaOpts.toString().trim();
if (!optsTrim.isEmpty()) {
envVars.add(AbstractModel.buildEnvVar(AbstractModel.ENV_VAR_STRIMZI_JAVA_OPTS, optsTrim));
}
List<SystemProperty> javaSystemProperties = jvmOptions != null ? jvmOptions.getJavaSystemProperties() : null;
if (javaSystemProperties != null) {
String propsTrim = ModelUtils.getJavaSystemPropertiesToString(javaSystemProperties).trim();
if (!propsTrim.isEmpty()) {
envVars.add(AbstractModel.buildEnvVar(AbstractModel.ENV_VAR_STRIMZI_JAVA_SYSTEM_PROPERTIES, propsTrim));
}
}
}
/**
* Adds the STRIMZI_JAVA_SYSTEM_PROPERTIES variable to the EnvVar list if any system properties were specified
* through the provided JVM options
*
* @param envVars list of the Environment Variables to add to
* @param jvmOptions JVM options
*/
public static void jvmSystemProperties(List<EnvVar> envVars, JvmOptions jvmOptions) {
if (jvmOptions != null) {
String jvmSystemPropertiesString = ModelUtils.getJavaSystemPropertiesToString(jvmOptions.getJavaSystemProperties());
if (jvmSystemPropertiesString != null && !jvmSystemPropertiesString.isEmpty()) {
envVars.add(AbstractModel.buildEnvVar(AbstractModel.ENV_VAR_STRIMZI_JAVA_SYSTEM_PROPERTIES, jvmSystemPropertiesString));
}
}
}
/**
* Adds the KAFKA_JVM_PERFORMANCE_OPTS variable to the EnvVar list if any performance related options were specified
* through the provided JVM options
*
* @param envVars list of the Environment Variables to add to
* @param jvmOptions JVM options
*/
public static void jvmPerformanceOptions(List<EnvVar> envVars, JvmOptions jvmOptions) {
StringBuilder jvmPerformanceOpts = new StringBuilder();
Map<String, String> xx = jvmOptions != null ? jvmOptions.getXx() : null;
if (xx != null) {
xx.forEach((k, v) -> {
jvmPerformanceOpts.append(' ').append("-XX:");
if ("true".equalsIgnoreCase(v)) {
jvmPerformanceOpts.append("+").append(k);
} else if ("false".equalsIgnoreCase(v)) {
jvmPerformanceOpts.append("-").append(k);
} else {
jvmPerformanceOpts.append(k).append("=").append(v);
}
});
}
String jvmPerformanceOptsString = jvmPerformanceOpts.toString().trim();
if (!jvmPerformanceOptsString.isEmpty()) {
envVars.add(AbstractModel.buildEnvVar(AbstractModel.ENV_VAR_KAFKA_JVM_PERFORMANCE_OPTS, jvmPerformanceOptsString));
}
}
/**
* Adds the KAFKA_HEAP_OPTS variable to the EnvVar list if any heap-related options were specified through the provided JVM options.
* If the Xmx Java option is not set, STRIMZI_DYNAMIC_HEAP_PERCENTAGE and STRIMZI_DYNAMIC_HEAP_MAX may also be set based on the ResourceRequirements.
*
* @param envVars list of the Environment Variables to add to
* @param dynamicHeapPercentage value to set for the STRIMZI_DYNAMIC_HEAP_PERCENTAGE
* @param dynamicHeapMaxBytes value to set for the STRIMZI_DYNAMIC_HEAP_MAX
* @param jvmOptions JVM options
* @param resources the resource requirements
*/
public static void heapOptions(List<EnvVar> envVars, int dynamicHeapPercentage, long dynamicHeapMaxBytes, JvmOptions jvmOptions, ResourceRequirements resources) {
if (dynamicHeapPercentage <= 0 || dynamicHeapPercentage > 100) {
throw new RuntimeException("The Heap percentage " + dynamicHeapPercentage + " is invalid. It has to be >0 and <= 100.");
}
StringBuilder kafkaHeapOpts = new StringBuilder();
String xms = jvmOptions != null ? jvmOptions.getXms() : null;
if (xms != null) {
kafkaHeapOpts.append("-Xms")
.append(xms);
}
String xmx = jvmOptions != null ? jvmOptions.getXmx() : null;
if (xmx != null) {
// Honour user provided explicit max heap
kafkaHeapOpts.append(' ').append("-Xmx").append(xmx);
} else {
// Get the resources => if requests are set, take request. If requests are not set, try limits
Quantity configuredMemory = null;
if (resources != null) {
if (resources.getRequests() != null && resources.getRequests().get("memory") != null) {
configuredMemory = resources.getRequests().get("memory");
} else if (resources.getLimits() != null && resources.getLimits().get("memory") != null) {
configuredMemory = resources.getLimits().get("memory");
}
}
if (configuredMemory != null) {
// Delegate to the container to figure out only when CGroup memory limits are defined to prevent allocating
// too much memory on the kubelet.
envVars.add(AbstractModel.buildEnvVar(AbstractModel.ENV_VAR_DYNAMIC_HEAP_PERCENTAGE, Integer.toString(dynamicHeapPercentage)));
if (dynamicHeapMaxBytes > 0) {
envVars.add(AbstractModel.buildEnvVar(AbstractModel.ENV_VAR_DYNAMIC_HEAP_MAX, Long.toString(dynamicHeapMaxBytes)));
}
} else if (xms == null) {
// When no memory limit, `Xms`, and `Xmx` are defined then set a default `Xms` and
// leave `Xmx` undefined.
kafkaHeapOpts.append("-Xms").append(AbstractModel.DEFAULT_JVM_XMS);
}
}
String kafkaHeapOptsString = kafkaHeapOpts.toString().trim();
if (!kafkaHeapOptsString.isEmpty()) {
envVars.add(AbstractModel.buildEnvVar(AbstractModel.ENV_VAR_KAFKA_HEAP_OPTS, kafkaHeapOptsString));
}
}
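// Illustrative example: with no -Xmx set but a memory request configured, the method does not put -Xmx into
// KAFKA_HEAP_OPTS; it sets STRIMZI_DYNAMIC_HEAP_PERCENTAGE (and STRIMZI_DYNAMIC_HEAP_MAX when dynamicHeapMaxBytes > 0)
// so the container can size the heap from its cgroup memory limit at startup. With neither -Xms, -Xmx nor memory
// requests/limits configured, only the default -Xms is emitted.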
/**
* If the toleration.value is an empty string, set it to null. That solves an issue where the built StatefulSet contains a field
* with an empty property value. Kubernetes removes properties like this, and thus we cannot fetch a StatefulSet equal to the one
* which was created with (some) empty value.
*
* @param tolerations Tolerations list to check whether toleration.value is an empty string and eventually replace it by null
*
* @return List of tolerations with fixed empty strings
*/
public static List<Toleration> removeEmptyValuesFromTolerations(List<Toleration> tolerations) {
if (tolerations != null) {
tolerations.stream().filter(toleration -> toleration.getValue() != null && toleration.getValue().isEmpty()).forEach(emptyValTol -> emptyValTol.setValue(null));
return tolerations;
} else {
return null;
}
}
/**
*
* @param builder the builder which is used to populate the node affinity
* @param userAffinity the userAffinity which is defined by the user
* @param topologyKey the topology key which is used to select the node
* @return the AffinityBuilder with a node selector on the topology key, which is needed to make sure
* the pods are scheduled only on nodes with the rack label
*/
public static AffinityBuilder populateAffinityBuilderWithRackLabelSelector(AffinityBuilder builder, Affinity userAffinity, String topologyKey) {
// We need to add node affinity to make sure the pods are scheduled only on nodes with the rack label
NodeSelectorRequirement selector = new NodeSelectorRequirementBuilder()
.withOperator("Exists")
.withKey(topologyKey)
.build();
if (userAffinity != null
&& userAffinity.getNodeAffinity() != null
&& userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution() != null
&& userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms() != null) {
// User has specified some Node Selector Terms => we should enhance them
List<NodeSelectorTerm> oldTerms = userAffinity.getNodeAffinity().getRequiredDuringSchedulingIgnoredDuringExecution().getNodeSelectorTerms();
List<NodeSelectorTerm> enhancedTerms = new ArrayList<>(oldTerms.size());
for (NodeSelectorTerm term : oldTerms) {
NodeSelectorTerm enhancedTerm = new NodeSelectorTermBuilder(term)
.addToMatchExpressions(selector)
.build();
enhancedTerms.add(enhancedTerm);
}
builder = builder
.editOrNewNodeAffinity()
.withNewRequiredDuringSchedulingIgnoredDuringExecution()
.withNodeSelectorTerms(enhancedTerms)
.endRequiredDuringSchedulingIgnoredDuringExecution()
.endNodeAffinity();
} else {
// User has not specified any selector terms => we add our own
builder = builder
.editOrNewNodeAffinity()
.editOrNewRequiredDuringSchedulingIgnoredDuringExecution()
.addNewNodeSelectorTerm()
.withMatchExpressions(selector)
.endNodeSelectorTerm()
.endRequiredDuringSchedulingIgnoredDuringExecution()
.endNodeAffinity();
}
return builder;
}
/**
* Decides whether the Cluster Operator needs a namespaceSelector to be configured in the network policies in order
* to talk with the operands. It follows these rules:
*     - If it runs in the same namespace as the operand, do not set a namespace selector
*     - If it runs in a different namespace, but the user provided selector labels, use the labels
*     - If it runs in a different namespace, and the user didn't provide selector labels, open it to COs in all namespaces
*
* @param peer Network policy peer where the namespace selector should be set
* @param operandNamespace Namespace of the operand
* @param operatorNamespace Namespace of the Strimzi CO
* @param operatorNamespaceLabels Namespace labels provided by the user
*/
public static void setClusterOperatorNetworkPolicyNamespaceSelector(NetworkPolicyPeer peer, String operandNamespace, String operatorNamespace, Labels operatorNamespaceLabels) {
if (!operandNamespace.equals(operatorNamespace)) {
// If CO and the operand do not run in the same namespace, we need to handle cross namespace access
if (operatorNamespaceLabels != null && !operatorNamespaceLabels.toMap().isEmpty()) {
// If user specified the namespace labels, we can use them to make the network policy as tight as possible
LabelSelector nsLabelSelector = new LabelSelector();
nsLabelSelector.setMatchLabels(operatorNamespaceLabels.toMap());
peer.setNamespaceSelector(nsLabelSelector);
} else {
// If no namespace labels were specified, we open the network policy to COs in all namespaces
peer.setNamespaceSelector(new LabelSelector());
}
}
}
/**
* Checks if the section of the custom resource has any metrics configuration and sets it in the AbstractModel.
*
* @param model The cluster model where the metrics will be configured
* @param resourceWithMetrics The section of the resource with metrics configuration
*/
public static void parseMetrics(AbstractModel model, HasConfigurableMetrics resourceWithMetrics) {
if (resourceWithMetrics.getMetricsConfig() != null) {
model.setMetricsEnabled(true);
model.setMetricsConfigInCm(resourceWithMetrics.getMetricsConfig());
}
}
/**
* Creates the OwnerReference based on the resource passed as parameter
*
* @param owner The resource which should be the owner
*
* @return The new OwnerReference
*/
public static OwnerReference createOwnerReference(HasMetadata owner) {
return new OwnerReferenceBuilder()
.withApiVersion(owner.getApiVersion())
.withKind(owner.getKind())
.withName(owner.getMetadata().getName())
.withUid(owner.getMetadata().getUid())
.withBlockOwnerDeletion(false)
.withController(false)
.build();
}
/**
* Checks whether the resource has given Owner Reference among its list of owner references
*
* @param resource Resource which should be checked for OwnerReference
* @param owner OwnerReference which should be verified
*
* @return True if the owner reference is found. False otherwise.
*/
public static boolean hasOwnerReference(HasMetadata resource, OwnerReference owner) {
if (resource.getMetadata().getOwnerReferences() != null) {
return resource.getMetadata().getOwnerReferences()
.stream()
.anyMatch(o -> owner.getApiVersion().equals(o.getApiVersion())
&& owner.getKind().equals(o.getKind())
&& owner.getName().equals(o.getName())
&& owner.getUid().equals(o.getUid()));
} else {
return false;
}
}
/**
* Extracts the CA generation from the CA
*
* @param ca CA from which the generation should be extracted
*
* @return CA generation or the initial generation if no generation is set
*/
public static int caCertGeneration(Ca ca) {
return Annotations.intAnnotation(ca.caCertSecret(), Ca.ANNO_STRIMZI_IO_CA_CERT_GENERATION, Ca.INIT_GENERATION);
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
benchmarking/pytorch_geometric/train_resdel_gnn.py | import argparse
import datetime
import os
import time
import dotenv as de
import numpy as np
de.load_dotenv()
import torch
import torch.nn as nn
from torch_geometric.data import DataLoader
from torch_geometric.nn import GCNConv
from torch.nn import Linear
import torch.nn.functional as F
# import atom3d.util.datatypes as dt
import resdel_dataloader as dl
class GCN(torch.nn.Module):
def __init__(self, num_features, hidden_dim):
super(GCN, self).__init__()
self.conv1 = GCNConv(num_features, hidden_dim)
self.bn1 = nn.BatchNorm1d(hidden_dim)
self.conv2 = GCNConv(hidden_dim, hidden_dim*2)
self.bn2 = nn.BatchNorm1d(hidden_dim*2)
self.conv3 = GCNConv(hidden_dim*2, hidden_dim*4)
self.bn3 = nn.BatchNorm1d(hidden_dim*4)
self.conv4 = GCNConv(hidden_dim*4, hidden_dim*4)
self.bn4 = nn.BatchNorm1d(hidden_dim*4)
self.conv5 = GCNConv(hidden_dim*4, hidden_dim*2)
self.bn5 = nn.BatchNorm1d(hidden_dim*2)
self.fc1 = Linear(hidden_dim*2, hidden_dim*2)
self.fc2 = Linear(hidden_dim*2, 20)
def forward(self, x, edge_index, edge_weight, ca_idx, batch):
x = self.conv1(x, edge_index, edge_weight)
x = F.relu(x)
x = self.bn1(x)
x = self.conv2(x, edge_index, edge_weight)
x = F.relu(x)
x = self.bn2(x)
x = self.conv3(x, edge_index, edge_weight)
x = F.relu(x)
x = self.bn3(x)
x = self.conv4(x, edge_index, edge_weight)
x = self.bn4(x)
x = F.relu(x)
x = self.conv5(x, edge_index, edge_weight)
x = self.bn5(x)
# x = global_add_pool(x, batch)
x = torch.index_select(x, 0, ca_idx)
x = F.relu(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.25, training=self.training)
return self.fc2(x)
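# Illustrative usage (assumes a torch_geometric Data batch `graph` that carries CA-atom indices in `graph.ca_idx`):
# model = GCN(num_features=graph.num_features, hidden_dim=64)
# logits = model(graph.x, graph.edge_index, graph.edge_attr.view(-1), graph.ca_idx, graph.batch)  # shape (batch_size, 20)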
def get_acc(logits, label, cm=None):
pred = torch.argmax(logits, 1)
acc = float((pred == label).sum(-1)) / label.size()[0]
return acc
# from pytorch ...
def get_top_k_acc(output, target, k=3):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
batch_size = target.size(0)
_, pred = output.topk(k, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
#res.append(correct_k.mul_(100.0 / batch_size))
return correct_k.mul_(1.0 / batch_size).item()
def adjust_graph_indices(graph):
batch_size = len(graph.n_nodes)
total_n = 0
for i in range(batch_size-1):
n_nodes = graph.n_nodes[i].item()
total_n += n_nodes
graph.ca_idx[i+1] += total_n
return graph
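# Illustrative example: for a batch with n_nodes = [10, 12, 8] and per-graph ca_idx = [3, 5, 2],
# adjust_graph_indices shifts the indices to [3, 15, 24] so that torch.index_select picks the CA-atom
# rows out of the concatenated node-feature matrix rather than out of each individual graph.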
@torch.no_grad()
def test(model, loader, criterion, device, batch_size):
model.eval()
losses = []
avg_acc = []
avg_top_k_acc = []
for i, graph in enumerate(loader):
graph = graph.to(device)
if len(graph.ca_idx) != batch_size:
# print(f'skipping batch, {len(graph1.ca_idx)} CA atoms with batch size {batch_size}')
continue
graph = adjust_graph_indices(graph)
out = model(graph.x, graph.edge_index, graph.edge_attr.view(-1), graph.ca_idx, graph.batch)
loss = criterion(out, graph.y)
acc = get_acc(out, graph.y)
top_k_acc = get_top_k_acc(out, graph.y, k=3)
losses.append(loss.item())
avg_acc.append(acc)
avg_top_k_acc.append(top_k_acc)
return np.mean(losses), np.mean(avg_acc), np.mean(avg_top_k_acc)
def train(data_dir, device, log_dir, checkpoint, seed=None, test_mode=False):
epochs = 5
batch_size = 64
in_channels = 5
learning_rate = 1e-4
reg = 5e-6
if not os.path.exists(os.path.join(log_dir, 'params.txt')):
with open(os.path.join(log_dir, 'log.txt'), 'w') as f:
f.write(f'Epochs: {epochs}\n')
f.write(f'Batch size: {batch_size}\n')
f.write(f'Learning rate: {learning_rate}\n')
train_set = dl.Resdel_Dataset_PTG(os.path.join(data_dir, 'train'))
train_loader = DataLoader(train_set, batch_size=batch_size, num_workers=8, shuffle=True)
val_set = dl.Resdel_Dataset_PTG(os.path.join(data_dir, 'val'))
val_loader = DataLoader(val_set, batch_size=batch_size, num_workers=8, shuffle=True)
for graph in train_loader:
num_features = graph.num_features
break
model = GCN(num_features, hidden_dim=64)
model.to(device)
# if torch.cuda.device_count() > 1:
# print('using', torch.cuda.device_count(), 'GPUs')
# parallel = True
# model = DataParallel(model)
model.train()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)#, weight_decay=reg)
criterion = nn.CrossEntropyLoss()
criterion.to(device)
if checkpoint:
cpt = torch.load(checkpoint, map_location=device)
model.load_state_dict(cpt['model_state_dict'])
optimizer.load_state_dict(cpt['optimizer_state_dict'])
best_val_loss = 999
best_val_idx = 0
print_frequency = 100
for epoch in range(1, epochs+1):
print(f'EPOCH {epoch}\n------------')
start = time.time()
for it, graph in enumerate(train_loader):
graph = graph.to(device)
if len(graph.ca_idx) != batch_size:
# print(f'skipping batch, {len(graph1.ca_idx)} CA atoms with batch size {batch_size}')
continue
graph = adjust_graph_indices(graph)
optimizer.zero_grad()
out = model(graph.x, graph.edge_index, graph.edge_attr.view(-1), graph.ca_idx, graph.batch)
train_loss = criterion(out, graph.y)
train_loss.backward()
optimizer.step()
if it % print_frequency == 0:
elapsed = time.time() - start
print(f'Epoch {epoch}, iter {it}, train loss {train_loss}, avg it/sec {print_frequency / elapsed}')
start = time.time()
print('validating...')
curr_val_loss, val_acc, val_top_k_acc = test(model, val_loader, criterion, device, batch_size)
# logger.info('{:03d}\t{}\t{:.7f}\t{:.7f}\t{:.7f}\t{:.7f}\n'.format(epoch, it, train_loss, curr_val_loss, val_acc, val_top_k_acc))
# print('{:03d}\t{}\t{:.7f}\t{:.7f}\t{:.7f}\t{:.7f}\n'.format(epoch, it, train_loss, curr_val_loss, val_acc, val_top_k_acc))
print(f'Epoch {epoch}, iter {it}, val loss {curr_val_loss}, val acc {val_acc}, val top 3 acc {val_top_k_acc}')
if curr_val_loss < best_val_loss:
# save best validation score and iteration number
best_val_loss = curr_val_loss
best_val_idx = it
# overwrite best model
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': train_loss,
}, os.path.join(log_dir, f'best_weights.pt'))
model.train()
if test_mode:
print('testing...')
test_set = dl.Resdel_Dataset_PTG(os.path.join(data_dir, 'test_unbalanced'))
test_loader = DataLoader(test_set, batch_size=batch_size, num_workers=8, shuffle=True)
for graph in test_loader:
num_features = graph.num_features
break
model = GCN(num_features, hidden_dim=64).to(device)
model.eval()
cpt = torch.load(os.path.join(log_dir, f'best_weights.pt'))
model.load_state_dict(cpt['model_state_dict'])
test_loss, test_acc, test_top_k_acc = test(model, test_loader, criterion, device, batch_size)
print('Test loss: {:7f}, Test Accuracy {:.4f}, Top 3 Accuracy {:4f}'.format(test_loss, test_acc, test_top_k_acc))
return test_loss, test_acc, test_top_k_acc
return best_val_loss
if __name__=='__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--log_dir', type=str, default=None)
parser.add_argument('--checkpoint', type=str, default=None)
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
log_dir = args.log_dir
base_dir = '../../data/residue_deletion'
data_dir = os.environ['SC_DIR']+'atom3d/graph_pt'
if args.mode == 'train':
if log_dir is None:
now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
log_dir = os.path.join(base_dir, 'logs_cnn', now)
else:
log_dir = os.path.join(base_dir, 'logs_cnn', log_dir)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
train(data_dir, device, log_dir, args.checkpoint)
elif args.mode == 'test':
test_loss_list = []
acc_list = []
for seed in np.random.randint(0, 100, size=3):
print('seed:', seed)
log_dir = os.path.join(base_dir, 'logs_cnn', f'test_{seed}')
if not os.path.exists(log_dir):
os.makedirs(log_dir)
np.random.seed(seed)
torch.manual_seed(seed)
test_loss, test_acc, test_top_k_acc = train(data_dir, device, log_dir, args.checkpoint, seed=seed, test_mode=True)
test_loss_list.append(test_loss)
acc_list.append(test_acc)
print(f'Avg test_loss: {np.mean(test_loss_list)}, St.Dev test_loss {np.std(test_loss_list)}, \
Avg accuracy {np.mean(acc_list)}, St.Dev accuracy {np.std(acc_list)}')
| [] | [] | [
"SC_DIR"
] | [] | ["SC_DIR"] | python | 1 | 0 | |
cmd/utils_linux.go | // +build linux
package cmd
import (
"os"
"os/exec"
"strings"
)
// fetch OS name
func OS() string {
return strings.TrimSpace(os.Getenv("NAME"))
}
// fetch kernel info
func Kernel() string {
procVer, _ := os.ReadFile("/proc/version")
return strings.TrimSpace(strings.Split(string(procVer), " ")[2])
}
// fetch shell name
func Shell() string {
shellEnv := strings.Split(os.Getenv("SHELL"), "/")
return strings.TrimSpace(shellEnv[len(shellEnv)-1])
}
// fetch window manager name
func WM() string {
wm, _ := exec.Command("bash", "-c", `xprop -id $(xprop -root -notype | awk '$1=="_NET_SUPPORTING_WM_CHECK:"{print $5}') -notype -f _NET_WM_NAME 8t | grep "WM_NAME" | cut -f2 -d \"`).CombinedOutput()
return strings.TrimSpace(string(wm))
}
| [
"\"NAME\"",
"\"SHELL\""
] | [] | [
"SHELL",
"NAME"
] | [] | ["SHELL", "NAME"] | go | 2 | 0 | |
select_menu_entry_test.go | package main_test
import (
"bytes"
"io"
"os"
"testing"
"github.com/Nerdmaster/terminal"
"github.com/dhamidi/leader"
"github.com/stretchr/testify/assert"
)
type testTerminal struct {
In io.Reader
Out io.Writer
KeyReader *terminal.KeyReader
}
func newTestTerminal() *testTerminal {
return &testTerminal{
In: bytes.NewBufferString(""),
Out: bytes.NewBufferString(""),
}
}
func (term *testTerminal) MakeRaw() error { return nil }
func (term *testTerminal) Restore() error { return nil }
func (term *testTerminal) Write(p []byte) (int, error) { return term.Out.Write(p) }
func (term *testTerminal) OutputTo(out io.Writer) *testTerminal {
term.Out = out
return term
}
func (term *testTerminal) InputFrom(in io.Reader) *testTerminal {
term.In = in
term.KeyReader = terminal.NewKeyReader(in)
return term
}
func (term *testTerminal) ReadKey() (rune, error) {
key, err := term.KeyReader.ReadKeypress()
if err != nil {
return terminal.KeyCtrlC, nil
}
return key.Key, nil
}
func defineTestFile(ctx *main.Context, path string, contents string) {
ctx.Files.(*testFileSystem).Define(path, contents)
}
func newTestContextForConfig(t *testing.T) *main.Context {
return newTestContext(t, main.NewKeyMap("root"), bytes.NewBufferString(""), bytes.NewBufferString(""))
}
func newTestContext(t *testing.T, root *main.KeyMap, input io.Reader, output io.Writer) *main.Context {
testTerminal := newTestTerminal().InputFrom(input)
if output != nil {
testTerminal.OutputTo(output)
}
return &main.Context{
Files: newTestFileSystem(),
Shell: main.NewBashShell(os.Getenv),
Terminal: testTerminal,
CurrentKeyMap: root,
}
}
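// Illustrative note: newTestContext wires a testTerminal around in-memory buffers, so a test can script
// keypresses by pre-filling `input` (e.g. bytes.NewBufferString("ab")) and assert on the escape sequences
// written to `output`.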
func TestSelectMenuEntry_Execute_changes_current_key_map(t *testing.T) {
keymap := main.NewKeyMap("root")
input := bytes.NewBufferString("a")
keymap.Bind('a').Children().Rename("b").DefineKey('b', main.DoNothing)
context := newTestContext(t, keymap, input, nil)
selectMenuEntry := main.NewSelectMenuEntry(context)
selectMenuEntry.Execute()
assert.Equal(t, "b", context.CurrentKeyMap.Name())
}
func TestSelectMenuEntry_Execute_runs_command_associated_with_binding(t *testing.T) {
keymap := main.NewKeyMap("root")
command := newMockCommand()
input := bytes.NewBufferString("ab")
keymap.Bind('a').Children().Rename("b").DefineKey('b', command.Execute)
context := newTestContext(t, keymap, input, nil)
selectMenuEntry := main.NewSelectMenuEntry(context)
selectMenuEntry.Execute()
selectMenuEntry.Execute()
assert.Equal(t, 1, command.called)
}
func TestSelectMenuEntry_Execute_does_not_execute_command_on_binding_with_children(t *testing.T) {
keymap := main.NewKeyMap("root")
command := newMockCommand()
input := bytes.NewBufferString("ab")
keymap.Bind('a').Do(command.Execute).Children().DefineKey('b', main.DoNothing)
context := newTestContext(t, keymap, input, nil)
selectMenuEntry := main.NewSelectMenuEntry(context)
selectMenuEntry.Execute()
selectMenuEntry.Execute()
assert.Equal(t, 0, command.called)
}
func TestSelectMenuEntry_Execute_displays_breadscrumbs_for_the_current_path(t *testing.T) {
keymap := main.NewKeyMap("root")
input := bytes.NewBufferString("a")
output := bytes.NewBufferString("")
keymap.Bind('a').Children().Rename("a").DefineKey('b', main.DoNothing)
context := newTestContext(t, keymap, input, output)
selectMenuEntry := main.NewSelectMenuEntry(context)
selectMenuEntry.Execute()
assert.Contains(t, output.String(), "root > a")
}
func TestSelectMenuEntry_Execute_displays_the_current_keymap_as_a_menu(t *testing.T) {
keymap := main.NewKeyMap("root")
keymap.Bind('a').Do(main.DoNothing).Describe("do a")
keymap.Bind('b').Do(main.DoNothing).Describe("do b")
input := bytes.NewBufferString("")
output := bytes.NewBufferString("")
context := newTestContext(t, keymap, input, output)
selectMenuEntry := main.NewSelectMenuEntry(context)
selectMenuEntry.Execute()
expectedMenu := main.NewMenuView([]*main.MenuEntry{
{Key: 'a', Label: "do a"},
{Key: 'b', Label: "do b"},
})
expectedOutput := main.MustRenderViewToString(expectedMenu)
assert.Contains(t, output.String(), expectedOutput)
}
func TestSelectMenuEntry_Execute_erases_the_current_menu_before_selecting_a_child_menu(t *testing.T) {
keymap := main.NewKeyMap("root")
input := bytes.NewBufferString("a")
keymap.Bind('a').Children().Rename("b").DefineKey('b', main.DoNothing)
output := bytes.NewBufferString("")
context := newTestContext(t, keymap, input, output)
selectMenuEntry := main.NewSelectMenuEntry(context)
selectMenuEntry.Execute()
expectedMenu := main.NewMenuView([]*main.MenuEntry{
{Key: 'a', Label: "do a"},
})
eraseMenuBuffer := bytes.NewBufferString("")
expectedMenu.Erase(eraseMenuBuffer)
assert.True(t, bytes.Contains(output.Bytes(), eraseMenuBuffer.Bytes()),
"output %q does not contain instructions %q", output, eraseMenuBuffer)
}
func TestSelectMenuEntry_Execute_erases_the_current_menu_before_running_a_command(t *testing.T) {
keymap := main.NewKeyMap("root")
input := bytes.NewBufferString("ab")
keymap.Bind('a').Children().Rename("b").DefineKey('b', main.DoNothing)
output := bytes.NewBufferString("")
context := newTestContext(t, keymap, input, output)
selectMenuEntry := main.NewSelectMenuEntry(context)
selectMenuEntry.Execute()
expectedViews := []main.View{
main.NewVerticalBoxView(
main.NewBreadcrumbsView([]string{"root"}),
main.NewMenuView([]*main.MenuEntry{
{Key: 'a', Label: "b"},
}),
),
main.NewVerticalBoxView(
main.NewBreadcrumbsView([]string{"root", "b"}),
main.NewMenuView([]*main.MenuEntry{
{Key: 'b', Label: ""},
}),
),
}
outputBuffer := bytes.NewBufferString("")
expectedViews[0].Render(outputBuffer)
expectedViews[0].Erase(outputBuffer)
expectedViews[1].Render(outputBuffer)
expectedViews[1].Erase(outputBuffer)
assert.True(t, bytes.Contains(output.Bytes(), outputBuffer.Bytes()),
"output %q does not contain instructions %q", output, outputBuffer)
}
func TestSelectMenuEntry_Execute_keeps_executing_looping_keys_repeatedly(t *testing.T) {
command := newMockCommand()
input := bytes.NewBufferString("aaaa")
keymap := main.NewKeyMap("root")
keymap.Bind('a').Do(command.Execute).SetLooping(true)
context := newTestContext(t, keymap, input, nil)
selectMenuEntry := main.NewSelectMenuEntry(context)
selectMenuEntry.Execute()
assert.Equal(t, 4, command.called)
}
func TestSelectMenuEntry_Execute_looping_key_shows_child_menu_when_parent_and_child_are_bound_to_same_key(t *testing.T) {
input := bytes.NewBufferString("jjj")
keymap := main.NewKeyMap("root")
output := bytes.NewBufferString("")
context := newTestContext(t, keymap, input, output)
keymap.Bind('j').Children().
Rename("jump").
Bind('j').Describe("down").SetLooping(true).Do(
main.NewRunShellCommand(context, "pushd ..").Execute,
)
commands := bytes.NewBufferString("")
context.Executor = main.NewPrintingExecutor(context, commands)
main.NewSelectMenuEntry(context).Execute()
assert.Contains(t, commands.String(), context.Shell.EvalNext("pushd ..", []rune{'j'}))
}
| [] | [] | [] | [] | [] | go | 0 | 0 | |
examples/pybullet/gym/pybullet_envs/env_bases.py | import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
import pybullet
import os
from pybullet_utils import bullet_client
from pkg_resources import parse_version
try:
if os.environ["PYBULLET_EGL"]:
import pkgutil
except:
pass
class MJCFBaseBulletEnv(gym.Env):
"""
Base class for Bullet physics simulation loading MJCF (MuJoCo .xml) environments in a Scene.
These environments create single-player scenes and behave like normal Gym environments, if
you don't use multiplayer.
"""
metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 60}
def __init__(self, robot, render=False):
self.scene = None
self.physicsClientId = -1
self.ownsPhysicsClient = 0
self.camera = Camera()
self.isRender = render
self.robot = robot
self.seed()
self._cam_dist = 3
self._cam_yaw = 0
self._cam_pitch = -30
self._render_width = 320
self._render_height = 240
self.action_space = robot.action_space
self.observation_space = robot.observation_space
#self.reset()
def configure(self, args):
self.robot.args = args
def seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
self.robot.np_random = self.np_random # use the same np_randomizer for robot as for env
return [seed]
def reset(self):
if (self.physicsClientId < 0):
self.ownsPhysicsClient = True
if self.isRender:
self._p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
self._p = bullet_client.BulletClient()
self._p.resetSimulation()
#optionally enable EGL for faster headless rendering
try:
if os.environ["PYBULLET_EGL"]:
con_mode = self._p.getConnectionInfo()['connectionMethod']
if con_mode==self._p.DIRECT:
egl = pkgutil.get_loader('eglRenderer')
if (egl):
self._p.loadPlugin(egl.get_filename(), "_eglRendererPlugin")
else:
self._p.loadPlugin("eglRendererPlugin")
except:
pass
self.physicsClientId = self._p._client
self._p.configureDebugVisualizer(pybullet.COV_ENABLE_GUI, 0)
if self.scene is None:
self.scene = self.create_single_player_scene(self._p)
if not self.scene.multiplayer and self.ownsPhysicsClient:
self.scene.episode_restart(self._p)
self.robot.scene = self.scene
self.frame = 0
self.done = 0
self.reward = 0
dump = 0
s = self.robot.reset(self._p)
self.potential = self.robot.calc_potential()
return s
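# Illustrative usage (assumes a concrete subclass that supplies a robot and create_single_player_scene):
# env = SomeBulletEnv(render=False)    # hypothetical subclass of MJCFBaseBulletEnv
# obs = env.reset()                    # lazily connects to pybullet and restarts the scene
# frame = env.render(mode='rgb_array') # (240, 320, 3) uint8 image with the default camera settings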
def render(self, mode='human', close=False):
if mode == "human":
self.isRender = True
if mode != "rgb_array":
return np.array([])
base_pos = [0, 0, 0]
if (hasattr(self, 'robot')):
if (hasattr(self.robot, 'body_xyz')):
base_pos = self.robot.body_xyz
if (self.physicsClientId>=0):
view_matrix = self._p.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._p.computeProjectionMatrixFOV(fov=60,
aspect=float(self._render_width) /
self._render_height,
nearVal=0.1,
farVal=100.0)
(_, _, px, _, _) = self._p.getCameraImage(width=self._render_width,
height=self._render_height,
viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=pybullet.ER_BULLET_HARDWARE_OPENGL)
try:
# Keep the previous orientation of the camera set by the user.
con_mode = self._p.getConnectionInfo()['connectionMethod']
if con_mode==self._p.SHARED_MEMORY or con_mode == self._p.GUI:
[yaw, pitch, dist] = self._p.getDebugVisualizerCamera()[8:11]
self._p.resetDebugVisualizerCamera(dist, yaw, pitch, base_pos)
except:
pass
else:
px = np.array([[[255,255,255,255]]*self._render_width]*self._render_height, dtype=np.uint8)
rgb_array = np.array(px, dtype=np.uint8)
rgb_array = np.reshape(np.array(px), (self._render_height, self._render_width, -1))
rgb_array = rgb_array[:, :, :3]
return rgb_array
def close(self):
if (self.ownsPhysicsClient):
if (self.physicsClientId >= 0):
self._p.disconnect()
self.physicsClientId = -1
def HUD(self, state, a, done):
pass
# def step(self, *args, **kwargs):
# if self.isRender:
# base_pos=[0,0,0]
# if (hasattr(self,'robot')):
# if (hasattr(self.robot,'body_xyz')):
# base_pos = self.robot.body_xyz
# # Keep the previous orientation of the camera set by the user.
# #[yaw, pitch, dist] = self._p.getDebugVisualizerCamera()[8:11]
# self._p.resetDebugVisualizerCamera(3,0,0, base_pos)
#
#
# return self.step(*args, **kwargs)
if parse_version(gym.__version__) < parse_version('0.9.6'):
_render = render
_reset = reset
_seed = seed
class Camera:
def __init__(self):
pass
def move_and_look_at(self, i, j, k, x, y, z):
lookat = [x, y, z]
distance = 10
yaw = 10
self._p.resetDebugVisualizerCamera(distance, yaw, -20, lookat)
| [] | [] | [
"PYBULLET_EGL"
] | [] | ["PYBULLET_EGL"] | python | 1 | 0 | |
auth-appengine/classes/__init__.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import traceback
from googleapiclient.discovery import Resource
from googleapiclient.errors import HttpError
from typing import Any, Dict, List, Mapping
from classes.credentials import Credentials
from classes.decorators import lazy_property, retry
from classes.firestore import Firestore
from classes.gmail import GMail, GMailMessage
from classes.report_type import Type
class Fetcher(object):
@retry(exceptions=HttpError, tries=3, backoff=2)
def fetch(self, method, **kwargs: Mapping[str, str]) -> Dict[str, Any]:
"""Fetch results from a Resource connection.
Args:
method (class): method to execute.
**kwargs (Dict[str, Any]): the Resource method arguments.
Returns:
Dict[str, Any]: results.
"""
result = method(**kwargs).execute()
return result
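# Illustrative usage (hypothetical service and arguments):
# rows = Fetcher().fetch(service.reports().list, profileId='12345')
# The @retry decorator re-issues the call up to 3 times on HttpError, backing off by a factor of 2.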
class ReportFetcher(object):
report_type: Type = None
def read_header(self, report_details: dict) -> list:
pass
def stream_to_gcs(self, bucket: str, report_details: Dict[str, Any]) -> None:
pass
def normalize_report_details(self, report_object: Dict[str, Any], report_id: str):
pass
def fetch_report_config(self, report_object: Dict[str, Any], report_id: str):
pass
def get_latest_report_file(self, report_id: str):
pass
def run_report(self, report_id: int):
pass
def check_running_report(self, config: Dict[str, Any]):
pass
def get_reports(self) -> Dict[str, Any]:
pass
def service(self) -> Resource:
pass
class ReportRunner(object):
report_type = None
project = None
email = None
@lazy_property
def firestore(self) -> Firestore:
return Firestore(project=self.project, email=self.email)
def run(self, unattended: bool):
"""Run the report.
Args:
unattended (bool): log the report for later or wait for the result
"""
pass
def _email_error(self,
message: str,
email: str=None,
error: Exception=None) -> None:
"""Email the error to the administrator
Send an email (with errors) to the administrator and/or job owner.
Args:
message (str): the message.
email (str, optional): job owner's email. Defaults to None.
error (Exception, optional): any error found. Defaults to None.
"""
_to = [email] if email else []
_administrator = \
os.environ.get('ADMINISTRATOR_EMAIL') or self.firestore.get_document(
Type._ADMIN, 'admin').get('email')
_cc = [_administrator] if _administrator else []
if _trace := \
''.join(traceback.TracebackException.from_exception(error).format()) \
if error else None:
_trace = 'Error\n\n' + _trace
if _to or _cc:
message = GMailMessage(
to=_to,
cc=_cc,
subject=f'Error in report_loader',
body=f'{message}{_trace if _trace else ""}',
project=os.environ.get('GCP_PROJECT'))
GMail().send_message(
message=message,
credentials=Credentials(
email=email, project=os.environ.get('GCP_PROJECT'))
)
| [] | [] | [
"ADMINISTRATOR_EMAIL",
"GCP_PROJECT"
] | [] | ["ADMINISTRATOR_EMAIL", "GCP_PROJECT"] | python | 2 | 0 | |
src/radical/pilot/agent/resource_manager/slurm.py |
__copyright__ = 'Copyright 2016-2021, The RADICAL-Cybertools Team'
__license__ = 'MIT'
import os
import radical.utils as ru
from .base import RMInfo, ResourceManager
# ------------------------------------------------------------------------------
#
class Slurm(ResourceManager):
# --------------------------------------------------------------------------
#
def _init_from_scratch(self, rm_info: RMInfo) -> RMInfo:
nodelist = os.environ.get('SLURM_NODELIST')
if nodelist is None:
raise RuntimeError('$SLURM_NODELIST not set')
# Parse SLURM nodefile environment variable
node_names = ru.get_hostlist(nodelist)
self._log.info('found SLURM_NODELIST %s. Expanded to: %s',
nodelist, node_names)
if not rm_info.cores_per_node:
# $SLURM_CPUS_ON_NODE = Number of physical cores per node
cpn_str = os.environ.get('SLURM_CPUS_ON_NODE')
if cpn_str is None:
raise RuntimeError('$SLURM_CPUS_ON_NODE not set')
rm_info.cores_per_node = int(cpn_str)
nodes = [(node, rm_info.cores_per_node) for node in node_names]
rm_info.node_list = self._get_node_list(nodes, rm_info)
return rm_info
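# Illustrative example: SLURM_NODELIST='node[01-02],gpu03' is expanded by ru.get_hostlist to
# ['node01', 'node02', 'gpu03']; combined with SLURM_CPUS_ON_NODE=16 this yields node tuples such as
# ('node01', 16), which are handed to self._get_node_list.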
# ------------------------------------------------------------------------------
| [] | [] | [
"SLURM_NODELIST",
"SLURM_CPUS_ON_NODE"
] | [] | ["SLURM_NODELIST", "SLURM_CPUS_ON_NODE"] | python | 2 | 0 | |
k8s-aks-dns-ingress/azure/auth.go | package azure
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"unicode/utf16"
"github.com/dimchansky/utfbom"
)
// Authentication represents the authentication file for Azure.
type Authentication struct {
ClientID string `json:"clientId,omitempty"`
ClientSecret string `json:"clientSecret,omitempty"`
SubscriptionID string `json:"subscriptionId,omitempty"`
TenantID string `json:"tenantId,omitempty"`
ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"`
ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"`
GraphResourceID string `json:"activeDirectoryGraphResourceId,omitempty"`
SQLManagementEndpoint string `json:"sqlManagementEndpointUrl,omitempty"`
GalleryEndpoint string `json:"galleryEndpointUrl,omitempty"`
ManagementEndpoint string `json:"managementEndpointUrl,omitempty"`
}
// NewAuthentication returns an authentication struct from user provided
// credentials.
func NewAuthentication(azureCloud, clientID, clientSecret, subscriptionID, tenantID string) *Authentication {
environment := PublicCloud
switch azureCloud {
case PublicCloud.Name:
environment = PublicCloud
case USGovernmentCloud.Name:
environment = USGovernmentCloud
case ChinaCloud.Name:
environment = ChinaCloud
case GermanCloud.Name:
environment = GermanCloud
}
return &Authentication{
ClientID: clientID,
ClientSecret: clientSecret,
SubscriptionID: subscriptionID,
TenantID: tenantID,
ActiveDirectoryEndpoint: environment.ActiveDirectoryEndpoint,
ResourceManagerEndpoint: environment.ResourceManagerEndpoint,
GraphResourceID: environment.GraphEndpoint,
SQLManagementEndpoint: environment.SQLDatabaseDNSSuffix,
GalleryEndpoint: environment.GalleryEndpoint,
ManagementEndpoint: environment.ServiceManagementEndpoint,
}
}
// NewAuthenticationFromFile returns an authentication struct from file path
func NewAuthenticationFromFile(filepath string) (*Authentication, error) {
b, err := ioutil.ReadFile(filepath)
if err != nil {
return nil, fmt.Errorf("Reading authentication file %q failed: %v", filepath, err)
}
// Authentication file might be encoded.
decoded, err := decode(b)
if err != nil {
return nil, fmt.Errorf("Decoding authentication file %q failed: %v", filepath, err)
}
// Unmarshal the authentication file.
var auth Authentication
if err := json.Unmarshal(decoded, &auth); err != nil {
return nil, err
}
return &auth, nil
}
// GetAuthCreds returns the authentication credentials either from the file or the environment.
func GetAuthCreds(config string) (*Authentication, error) {
if len(config) > 0 {
if _, err := os.Stat(config); os.IsNotExist(err) {
// The file does not exist, let's tell the user.
return nil, fmt.Errorf("azure config was specified as %q but does not exist", config)
}
// If we have a config file specified let's just use it.
auth, err := NewAuthenticationFromFile(config)
if err != nil {
return nil, err
}
return auth, nil
}
auth := &Authentication{}
if clientID := os.Getenv("AZURE_CLIENT_ID"); clientID != "" {
auth.ClientID = clientID
}
if clientSecret := os.Getenv("AZURE_CLIENT_SECRET"); clientSecret != "" {
auth.ClientSecret = clientSecret
}
if tenantID := os.Getenv("AZURE_TENANT_ID"); tenantID != "" {
auth.TenantID = tenantID
}
if subscriptionID := os.Getenv("AZURE_SUBSCRIPTION_ID"); subscriptionID != "" {
auth.SubscriptionID = subscriptionID
}
return auth, nil
}
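// Illustrative example: GetAuthCreds("") skips the file path and falls back to the AZURE_CLIENT_ID,
// AZURE_CLIENT_SECRET, AZURE_TENANT_ID and AZURE_SUBSCRIPTION_ID environment variables, while
// GetAuthCreds("/path/to/auth.json") parses the (possibly UTF-16 encoded) Azure auth file instead.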
func decode(b []byte) ([]byte, error) {
reader, enc := utfbom.Skip(bytes.NewReader(b))
switch enc {
case utfbom.UTF16LittleEndian:
u16 := make([]uint16, (len(b)/2)-1)
err := binary.Read(reader, binary.LittleEndian, &u16)
if err != nil {
return nil, err
}
return []byte(string(utf16.Decode(u16))), nil
case utfbom.UTF16BigEndian:
u16 := make([]uint16, (len(b)/2)-1)
err := binary.Read(reader, binary.BigEndian, &u16)
if err != nil {
return nil, err
}
return []byte(string(utf16.Decode(u16))), nil
}
return ioutil.ReadAll(reader)
}
| [
"\"AZURE_CLIENT_ID\"",
"\"AZURE_CLIENT_SECRET\"",
"\"AZURE_TENANT_ID\"",
"\"AZURE_SUBSCRIPTION_ID\""
] | [] | [
"AZURE_CLIENT_ID",
"AZURE_SUBSCRIPTION_ID",
"AZURE_CLIENT_SECRET",
"AZURE_TENANT_ID"
] | [] | ["AZURE_CLIENT_ID", "AZURE_SUBSCRIPTION_ID", "AZURE_CLIENT_SECRET", "AZURE_TENANT_ID"] | go | 4 | 0 | |
cmd/frontend/internal/cli/serve_cmd.go | package cli
import (
"context"
"database/sql"
"errors"
"fmt"
"log"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/graph-gophers/graphql-go"
"github.com/inconshreveable/log15"
"github.com/keegancsmith/tmpfriend"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/throttled/throttled/v2/store/redigostore"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise"
"github.com/sourcegraph/sourcegraph/cmd/frontend/envvar"
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/ui"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/updatecheck"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/bg"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/cli/loghandlers"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/siteid"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/vfsutil"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbconn"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
"github.com/sourcegraph/sourcegraph/internal/debugserver"
"github.com/sourcegraph/sourcegraph/internal/encryption/keyring"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/httpserver"
"github.com/sourcegraph/sourcegraph/internal/logging"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/oobmigration"
"github.com/sourcegraph/sourcegraph/internal/profiler"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/sysreq"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/tracer"
"github.com/sourcegraph/sourcegraph/internal/version"
)
var (
traceFields = env.Get("SRC_LOG_TRACE", "HTTP", "space separated list of trace logs to show. Options: all, HTTP, build, github")
traceThreshold = env.Get("SRC_LOG_TRACE_THRESHOLD", "", "show traces that take longer than this")
printLogo, _ = strconv.ParseBool(env.Get("LOGO", "false", "print Sourcegraph logo upon startup"))
httpAddr = env.Get("SRC_HTTP_ADDR", ":3080", "HTTP listen address for app and HTTP API")
httpAddrInternal = envvar.HTTPAddrInternal
nginxAddr = env.Get("SRC_NGINX_HTTP_ADDR", "", "HTTP listen address for nginx reverse proxy to SRC_HTTP_ADDR. Has preference over SRC_HTTP_ADDR for ExternalURL.")
// dev browser extension ID. You can find this by going to chrome://extensions
devExtension = "chrome-extension://bmfbcejdknlknpncfpeloejonjoledha"
// production browser extension ID. This is found by viewing our extension in the chrome store.
prodExtension = "chrome-extension://dgjhfomjieaadpoljlnidmbgkdffpack"
)
func init() {
// If CACHE_DIR is specified, use that
cacheDir := env.Get("CACHE_DIR", "/tmp", "directory to store cached archives.")
vfsutil.ArchiveCacheDir = filepath.Join(cacheDir, "frontend-archive-cache")
}
// defaultExternalURL returns the default external URL of the application.
func defaultExternalURL(nginxAddr, httpAddr string) *url.URL {
addr := nginxAddr
if addr == "" {
addr = httpAddr
}
var hostPort string
if strings.HasPrefix(addr, ":") {
// Prepend localhost if HTTP listen addr is just a port.
hostPort = "127.0.0.1" + addr
} else {
hostPort = addr
}
return &url.URL{Scheme: "http", Host: hostPort}
}
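// Illustrative example: defaultExternalURL("", ":3080") yields http://127.0.0.1:3080, while
// defaultExternalURL("0.0.0.0:80", ":3080") yields http://0.0.0.0:80 because the nginx address wins.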
// InitDB initializes and returns the global database connection and sets the
// version of the frontend in our versions table.
func InitDB() (*sql.DB, error) {
if err := dbconn.SetupGlobalConnection(""); err != nil {
return nil, fmt.Errorf("failed to connect to frontend database: %s", err)
}
ctx := context.Background()
migrate := true
for {
// We need this loop so that we handle the missing versions table,
// which would be added by running the migrations. Once we detect that
// it's missing, we run the migrations and try to update the version again.
err := backend.UpdateServiceVersion(ctx, "frontend", version.Version())
if err != nil && !dbutil.IsPostgresError(err, "42P01") {
return nil, err
}
if !migrate {
return dbconn.Global, nil
}
if err := dbconn.MigrateDB(dbconn.Global, dbconn.Frontend); err != nil {
return nil, err
}
migrate = false
}
}
// Main is the main entrypoint for the frontend server program.
func Main(enterpriseSetupHook func(db dbutil.DB, outOfBandMigrationRunner *oobmigration.Runner) enterprise.Services) error {
ctx := context.Background()
log.SetFlags(0)
log.SetPrefix("")
if err := profiler.Init(); err != nil {
log.Fatalf("failed to initialize profiling: %v", err)
}
ready := make(chan struct{})
go debugserver.NewServerRoutine(ready).Start()
db, err := InitDB()
if err != nil {
log.Fatalf("ERROR: %v", err)
}
ui.InitRouter(db)
// override site config first
if err := overrideSiteConfig(ctx); err != nil {
log.Fatalf("failed to apply site config overrides: %v", err)
}
globals.ConfigurationServerFrontendOnly = conf.InitConfigurationServerFrontendOnly(&configurationSource{})
conf.MustValidateDefaults()
// now we can init the keyring, as it depends on site config
if err := keyring.Init(ctx); err != nil {
log.Fatalf("failed to initialize encryption keyring: %v", err)
}
if err := overrideGlobalSettings(ctx, db); err != nil {
log.Fatalf("failed to override global settings: %v", err)
}
// now the keyring is configured it's safe to override the rest of the config
// and that config can access the keyring
if err := overrideExtSvcConfig(ctx, db); err != nil {
log.Fatalf("failed to override external service config: %v", err)
}
// Filter trace logs
d, _ := time.ParseDuration(traceThreshold)
logging.Init(logging.Filter(loghandlers.Trace(strings.Fields(traceFields), d)))
tracer.Init()
trace.Init(true)
// Create an out-of-band migration runner onto which each enterprise init function
// can register migration routines to run in the background while they still have
// work remaining.
outOfBandMigrationRunner := oobmigration.NewRunnerWithDB(db, time.Second*30, &observation.Context{
Logger: log15.Root(),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
})
// Run a background job to handle encryption of external service configuration.
extsvcMigrator := database.NewExternalServiceConfigMigratorWithDB(db)
extsvcMigrator.AllowDecrypt = os.Getenv("ALLOW_DECRYPT_MIGRATION") == "true"
if err := outOfBandMigrationRunner.Register(extsvcMigrator.ID(), extsvcMigrator, oobmigration.MigratorOptions{Interval: 3 * time.Second}); err != nil {
log.Fatalf("failed to run external service encryption job: %v", err)
}
// Run a background job to handle encryption of external service configuration.
extAccMigrator := database.NewExternalAccountsMigratorWithDB(db)
extAccMigrator.AllowDecrypt = os.Getenv("ALLOW_DECRYPT_MIGRATION") == "true"
if err := outOfBandMigrationRunner.Register(extAccMigrator.ID(), extAccMigrator, oobmigration.MigratorOptions{Interval: 3 * time.Second}); err != nil {
log.Fatalf("failed to run user external account encryption job: %v", err)
}
// Run enterprise setup hook
enterprise := enterpriseSetupHook(db, outOfBandMigrationRunner)
if len(os.Args) >= 2 {
switch os.Args[1] {
case "help", "-h", "--help":
log.Printf("Version: %s", version.Version())
log.Print()
env.PrintHelp()
log.Print()
ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
for _, st := range sysreq.Check(ctx, skippedSysReqs()) {
log.Printf("%s:", st.Name)
if st.OK() {
log.Print("\tOK")
continue
}
if st.Skipped {
log.Print("\tSkipped")
continue
}
if st.Problem != "" {
log.Print("\t" + st.Problem)
}
if st.Err != nil {
log.Printf("\tError: %s", st.Err)
}
if st.Fix != "" {
log.Printf("\tPossible fix: %s", st.Fix)
}
}
return nil
}
}
printConfigValidation()
cleanup := tmpfriend.SetupOrNOOP()
defer cleanup()
// Don't proceed if system requirements are missing, to avoid
// presenting users with a half-working experience.
if err := checkSysReqs(context.Background(), os.Stderr); err != nil {
return err
}
siteid.Init()
globals.WatchExternalURL(defaultExternalURL(nginxAddr, httpAddr))
globals.WatchPermissionsUserMapping()
goroutine.Go(func() { bg.CheckRedisCacheEvictionPolicy() })
goroutine.Go(func() { bg.DeleteOldCacheDataInRedis() })
goroutine.Go(func() { bg.DeleteOldEventLogsInPostgres(context.Background(), db) })
go updatecheck.Start(db)
// Parse GraphQL schema and set up resolvers that depend on dbconn.Global
// being initialized
if dbconn.Global == nil {
return errors.New("dbconn.Global is nil when trying to parse GraphQL schema")
}
schema, err := graphqlbackend.NewSchema(db, enterprise.BatchChangesResolver, enterprise.CodeIntelResolver, enterprise.InsightsResolver, enterprise.AuthzResolver, enterprise.CodeMonitorsResolver, enterprise.LicenseResolver, enterprise.DotcomResolver)
if err != nil {
return err
}
ratelimitStore, err := redigostore.New(redispool.Cache, "gql:rl:", 0)
if err != nil {
return err
}
rateLimitWatcher := graphqlbackend.NewBasicLimitWatcher(ratelimitStore)
server, err := makeExternalAPI(db, schema, enterprise, rateLimitWatcher)
if err != nil {
return err
}
internalAPI, err := makeInternalAPI(schema, db, enterprise, rateLimitWatcher)
if err != nil {
return err
}
routines := []goroutine.BackgroundRoutine{
server,
outOfBandMigrationRunner,
}
if internalAPI != nil {
routines = append(routines, internalAPI)
}
if printLogo {
fmt.Println(" ")
fmt.Println(logoColor)
fmt.Println(" ")
}
fmt.Printf("✱ Sourcegraph is ready at: %s\n", globals.ExternalURL())
close(ready)
goroutine.MonitorBackgroundRoutines(context.Background(), routines...)
return nil
}
func makeExternalAPI(db dbutil.DB, schema *graphql.Schema, enterprise enterprise.Services, rateLimiter graphqlbackend.LimitWatcher) (goroutine.BackgroundRoutine, error) {
// Create the external HTTP handler.
externalHandler, err := newExternalHTTPHandler(db, schema, enterprise.GitHubWebhook, enterprise.GitLabWebhook, enterprise.BitbucketServerWebhook, enterprise.NewCodeIntelUploadHandler, enterprise.NewExecutorProxyHandler, rateLimiter)
if err != nil {
return nil, err
}
listener, err := httpserver.NewListener(httpAddr)
if err != nil {
return nil, err
}
server := httpserver.New(listener, &http.Server{
Handler: externalHandler,
ReadTimeout: 75 * time.Second,
WriteTimeout: 10 * time.Minute,
})
log15.Debug("HTTP running", "on", httpAddr)
return server, nil
}
func makeInternalAPI(schema *graphql.Schema, db dbutil.DB, enterprise enterprise.Services, rateLimiter graphqlbackend.LimitWatcher) (goroutine.BackgroundRoutine, error) {
if httpAddrInternal == "" {
return nil, nil
}
listener, err := httpserver.NewListener(httpAddrInternal)
if err != nil {
return nil, err
}
// The internal HTTP handler does not include the auth handlers.
internalHandler := newInternalHTTPHandler(schema, db, enterprise.NewCodeIntelUploadHandler, rateLimiter)
server := httpserver.New(listener, &http.Server{
Handler: internalHandler,
ReadTimeout: 75 * time.Second,
// Higher since for internal RPCs which can have large responses
// (eg git archive). Should match the timeout used for git archive
// in gitserver.
WriteTimeout: time.Hour,
})
log15.Debug("HTTP (internal) running", "on", httpAddrInternal)
return server, nil
}
func isAllowedOrigin(origin string, allowedOrigins []string) bool {
for _, o := range allowedOrigins {
if o == "*" || o == origin {
return true
}
}
return false
}
| [
"\"ALLOW_DECRYPT_MIGRATION\"",
"\"ALLOW_DECRYPT_MIGRATION\""
] | [] | [
"ALLOW_DECRYPT_MIGRATION"
] | [] | ["ALLOW_DECRYPT_MIGRATION"] | go | 1 | 0 | |
e2e/e2e_test.go | package e2e
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
"github.com/golang/protobuf/jsonpb"
"github.com/btcsuite/btcd/btcec"
"github.com/cosmos/cosmos-sdk/crypto/hd"
"github.com/cosmos/go-bip39"
datadealtypes "github.com/medibloc/panacea-core/v2/x/datadeal/types"
"github.com/stretchr/testify/require"
)
func TestDataDealValidateData(t *testing.T) {
buyerMnemonic := os.Getenv("E2E_DATA_BUYER_MNEMONIC")
require.NotEmpty(t, buyerMnemonic)
oracleHTTPAddr := os.Getenv("E2E_ORACLE_HTTP_ADDR")
require.NotEmpty(t, oracleHTTPAddr)
dealID := 1
requesterAddr := "panacea1c7yh0ql0rhvyqm4vuwgaqu0jypafnwqdc6x60e"
data := `{
"name": "This is a name",
"description": "This is a description",
"body": [{ "type": "markdown", "attributes": { "value": "val1" } }]
}`
req, err := http.NewRequest(
http.MethodPost,
fmt.Sprintf("http://%s/v0/data-deal/deals/%d/data?requester_address=%s", oracleHTTPAddr, dealID, requesterAddr),
strings.NewReader(data),
)
require.NoError(t, err)
req.Header.Set("Content-Type", "application/json")
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
require.Equal(t, http.StatusCreated, resp.StatusCode)
var cert datadealtypes.DataCert
unmarshaler := &jsonpb.Unmarshaler{}
err = unmarshaler.Unmarshal(resp.Body, &cert)
require.NoError(t, err)
privKey := getPrivateKey(t, buyerMnemonic)
dataURL := string(decrypt(t, privKey, cert.UnsignedCert.EncryptedDataUrl))
t.Logf("dataURL: %v", dataURL)
downloadedData := downloadFile(t, dataURL)
decryptedData := decrypt(t, privKey, downloadedData)
require.Equal(t, data, string(decryptedData))
}
// TODO: When the data pool module is done, the e2e unit test will be added.
//func TestDataPoolValidateData(t *testing.T) {
// buyerMnemonic := os.Getenv("E2E_DATA_BUYER_MNEMONIC")
// require.NotEmpty(t, buyerMnemonic)
// dataValMnemonic := os.Getenv("E2E_ORACLE_MNEMONIC")
// require.NotEmpty(t, dataValMnemonic)
// oracleHTTPAddr := os.Getenv("E2E_ORACLE_HTTP_ADDR")
// require.NotEmpty(t, oracleHTTPAddr)
//
// poolID := 1
// round := 1
// requesterAddr := "panacea1c7yh0ql0rhvyqm4vuwgaqu0jypafnwqdc6x60e"
// data := `{
// "name": "This is a name",
// "description": "This is a description",
// "body": [{ "type": "markdown", "attributes": { "value": "val1" } }]
// }`
//
// req, err := http.NewRequest(
// http.MethodPost,
// fmt.Sprintf("http://%s/v0/data-pool/pools/%d/rounds/%d/data?requester_address=%s", oracleHTTPAddr, poolID, round, requesterAddr),
// strings.NewReader(data),
// )
// require.NoError(t, err)
// req.Header.Set("Content-Type", "application/json")
//
// resp, err := http.DefaultClient.Do(req)
// require.NoError(t, err)
// defer resp.Body.Close()
// fmt.Println(resp)
// require.Equal(t, http.StatusCreated, resp.StatusCode)
//
// var cert datapooltypes.DataCert
// unmarshaler := &jsonpb.Unmarshaler{}
// err = unmarshaler.Unmarshal(resp.Body, &cert)
// require.NoError(t, err)
//
// // TODO Check if MED is sent to the seller normally after the sale data.
//
// // TODO Confirm that the buyer who purchased NFT Token can import data normally.
//}
const (
accountNum = 0
coinType = 371
addressIdx = 0
)
func getPrivateKey(t *testing.T, mnemonic string) []byte {
seed, err := bip39.NewSeedWithErrorChecking(mnemonic, "")
require.NoError(t, err)
hdPath := hd.NewFundraiserParams(accountNum, coinType, addressIdx).String()
masterPriv, chainCode := hd.ComputeMastersFromSeed(seed)
privKey, err := hd.DerivePrivateKeyForPath(masterPriv, chainCode, hdPath)
require.NoError(t, err)
return privKey
}
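// Illustrative note: with accountNum=0, coinType=371 and addressIdx=0 the derivation path built by
// hd.NewFundraiserParams resolves to "m/44'/371'/0'/0/0", i.e. the BIP-44 path for the coin type used here.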
func decrypt(t *testing.T, privKeyBz []byte, data []byte) []byte {
privKey, _ := btcec.PrivKeyFromBytes(btcec.S256(), privKeyBz[:])
decrypted, err := btcec.Decrypt(privKey, data)
require.NoError(t, err)
return decrypted
}
func downloadFile(t *testing.T, url string) []byte {
req, err := http.NewRequest(http.MethodGet, url, nil)
require.NoError(t, err)
resp, err := http.DefaultClient.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
require.Equal(t, http.StatusOK, resp.StatusCode)
data, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
return data
}
| [
"\"E2E_DATA_BUYER_MNEMONIC\"",
"\"E2E_ORACLE_HTTP_ADDR\"",
"\"E2E_DATA_BUYER_MNEMONIC\"",
"\"E2E_ORACLE_MNEMONIC\"",
"\"E2E_ORACLE_HTTP_ADDR\""
] | [] | [
"E2E_ORACLE_HTTP_ADDR",
"E2E_ORACLE_MNEMONIC",
"E2E_DATA_BUYER_MNEMONIC"
] | [] | ["E2E_ORACLE_HTTP_ADDR", "E2E_ORACLE_MNEMONIC", "E2E_DATA_BUYER_MNEMONIC"] | go | 3 | 0 | |
main.go | package main
import (
"flag"
"fmt"
"log"
"os"
"time"
"github.com/0xAX/notificator"
"github.com/boltdb/bolt"
"github.com/manishrjain/asanawarrior/asana"
"github.com/manishrjain/asanawarrior/taskwarrior"
"github.com/manishrjain/asanawarrior/x"
"github.com/pkg/errors"
)
var duration = flag.Int("dur", 1, "How often to run sync, specified in minutes.")
var dbpath = flag.String("db", os.Getenv("HOME")+"/.task/asanawarrior.db",
"File path for db which stores certain sync information.")
var notifyInterval = flag.Int("interval", 10,
"Minimum duration in seconds between successive notifications. Set to zero for no notifications.")
var maxDeletes = flag.Int("deletes", 5,
"If Asanawarrior sees more than these number of deletes, it's going to crash to"+
" protect your Asana from mass deletion.")
var db *bolt.DB
var bucketName = []byte("aw")
var notify *notificator.Notificator
type Match struct {
Xid string
Asana x.WarriorTask
TaskWr x.WarriorTask
}
type notification struct {
Title string
Text string
}
var notifications = make(chan notification, 100)
// generateMatches matches all tasks from Asana to Taskwarrior, and stores non-matches as
// individual entries from each, without the other being present.
func generateMatches(atasks []x.WarriorTask, twtasks []x.WarriorTask) []*Match {
amap := make(map[string]*Match)
for _, at := range atasks {
m := &Match{
Xid: at.Xid,
Asana: at,
}
amap[at.Xid] = m
}
matches := make([]*Match, 0, 1000)
// Iterate over task warrior tasks.
for _, tw := range twtasks {
if m, ok := amap[tw.Xid]; !ok {
// Not in Asana.
match := &Match{
TaskWr: tw,
}
matches = append(matches, match)
} else {
// Also in Asana.
m.TaskWr = tw
delete(amap, tw.Xid)
matches = append(matches, m)
}
}
// Left over tasks from Asana not in task warrior.
for _, m := range amap {
matches = append(matches, m)
}
return matches
}
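// approxAfter reports whether t1 is later than t2 by more than one second,
// so sub-second timestamp jitter between Asana and Taskwarrior is ignored.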
func approxAfter(t1, t2 time.Time) bool {
return t1.Sub(t2) > time.Second
}
func asanaKey(xid string) []byte {
return []byte(fmt.Sprintf("asana-%s", xid))
}
func taskwKey(uuid string) []byte {
return []byte(fmt.Sprintf("taskw-%s", uuid))
}
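// storeInDb records the last-synced modification timestamps of the Asana task
// (keyed by Xid) and its Taskwarrior counterpart (keyed by UUID) in bolt.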
func storeInDb(asanaTask, twTask x.WarriorTask) {
if err := db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketName)
if err := b.Put(asanaKey(asanaTask.Xid),
[]byte(asanaTask.Modified.Format(time.RFC3339))); err != nil {
return err
}
if err := b.Put(taskwKey(twTask.Uuid),
[]byte(twTask.Modified.Format(time.RFC3339))); err != nil {
return err
}
return nil
}); err != nil {
log.Fatalf("Write to db failed with error: %v", err)
}
}
func getSyncTimestamps(xid string, uuid string) (time.Time, time.Time) {
var at, tt time.Time
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucketName)
ats := string(b.Get(asanaKey(xid)))
tts := string(b.Get(taskwKey(uuid)))
var err error
if at, err = time.Parse(time.RFC3339, ats); err != nil {
log.Fatalf("Unable to find asana ts: %v %v", xid, uuid)
}
if tt, err = time.Parse(time.RFC3339, tts); err != nil {
log.Fatalf("Unable to find taskwarrior ts: %v %v", xid, uuid)
}
return nil
})
return at, tt
}
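// syncMatch reconciles one matched Asana/Taskwarrior pair: it creates missing
// counterparts, propagates whichever side was modified since the last sync, and
// handles deletions (queueing Asana deletions when a deleteFromAsana slice is supplied).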
func syncMatch(m *Match, deleteFromAsana *[]*Match) error {
if m.Xid == "" {
// Task not present in Asana, but present in TW.
if m.TaskWr.Xid != "" {
if m.TaskWr.Deleted {
// Already deleted from TW. Do nothing.
return nil
}
// This task used to have an Asana ID. But, we can't find the corresponding Asana task.
// It can happen when Asana task was deleted.
// If so, delete the task from TW as well.
fmt.Printf("Delete from Taskwarrior: [%q]\n", m.TaskWr.Name)
pushNotification("Delete", m.TaskWr.Name)
if err := taskwarrior.Delete(m.TaskWr); err != nil {
return errors.Wrap(err, "Delete from Taskwarrior")
}
return nil
}
// Create in Asana.
fmt.Printf("Create in Asana: [%q]\n", m.TaskWr.Name)
asanaUpdated, err := asana.AddNew(m.TaskWr)
if err != nil {
return errors.Wrap(err, "create asana addnew")
}
// Update TW with the Xid.
if err := taskwarrior.OverwriteUuid(asanaUpdated, m.TaskWr.Uuid); err != nil {
return errors.Wrap(err, "create asana overwriteuuid")
}
taskwUpdated, err := taskwarrior.GetTask(m.TaskWr.Uuid)
if err != nil {
return errors.Wrap(err, "create asana GetTask")
}
// Store Asana and Taskwarrior timestamps as of this sync.
storeInDb(asanaUpdated, taskwUpdated)
return nil
}
if m.TaskWr.Xid == "" {
// No Asana xid found in Taskwarrior. So, create it.
fmt.Printf("Create in Taskwarrior: [%q]\n", m.Asana.Name)
pushNotification("Create", m.Asana.Name)
uuid, err := taskwarrior.AddNew(m.Asana)
if err != nil {
return errors.Wrap(err, "syncMatch create in taskwarrior")
}
if len(uuid) == 0 {
log.Fatalf("Unable to parse UUID of new task: %+v", m.Asana)
}
updated, err := taskwarrior.GetTask(uuid)
if err != nil {
return err
}
// Store Asana and Taskwarrior timestamps as of this sync.
storeInDb(m.Asana, updated)
return nil
}
if m.Asana.Xid != m.TaskWr.Xid {
log.Fatalf("Xids should be matched: %+v\n", m)
}
// Task is present in both Asana and TW.
asanaTs, taskwTs := getSyncTimestamps(m.Asana.Xid, m.TaskWr.Uuid)
if approxAfter(m.Asana.Modified, asanaTs) {
// Asana was updated. Overwrite TW.
fmt.Printf("Overwrite Taskwarrior: [%q] [time diff: %v]\n",
m.Asana.Name, m.Asana.Modified.Sub(asanaTs))
pushNotification("Update", m.Asana.Name)
if err := taskwarrior.OverwriteUuid(m.Asana, m.TaskWr.Uuid); err != nil {
return errors.Wrap(err, "Overwrite Taskwarrior")
}
updated, err := taskwarrior.GetTask(m.TaskWr.Uuid)
if err != nil {
return errors.Wrap(err, "Overwrite Taskwarrior GetTask")
}
storeInDb(m.Asana, updated)
return nil
}
// If task has been marked as deleted since the last modification.
if m.TaskWr.Deleted && approxAfter(m.TaskWr.Modified, taskwTs) {
if deleteFromAsana != nil {
*deleteFromAsana = append(*deleteFromAsana, m)
return nil
}
fmt.Printf("Deleting task from Asana: [%q]\n", m.TaskWr.Name)
pushNotification("Deleting from Asana", m.TaskWr.Name)
if err := asana.Delete(m.Xid); err != nil {
return errors.Wrap(err, "Delete task from Asana")
}
// Don't delete from boltdb, but update the timestamps,
// so we don't reapply this deletion.
// We don't need to retrieve Asana task back, because we won't be able to. It has been deleted.
// If the task gets undeleted, Asana won't modify the timestamp. So, let's set it to zero
// in our records, so if it comes back, we'll see it as an update.
m.Asana.Modified = time.Time{}
storeInDb(m.Asana, m.TaskWr)
return nil
}
if approxAfter(m.TaskWr.Modified, taskwTs) {
// TW was updated. Overwrite Asana.
fmt.Printf("Overwrite Asana: [%q] [time diff: %v]\n",
m.TaskWr.Name, m.TaskWr.Modified.Sub(taskwTs))
if err := asana.UpdateTask(m.TaskWr, m.Asana); err != nil {
return errors.Wrap(err, "syncMatch overwrite asana")
}
updated, err := asana.GetOneTask(m.Xid)
if err != nil {
return errors.Wrap(err, "syncMatch GetOneTask")
}
storeInDb(updated, m.TaskWr)
return nil
}
return nil
}
func runSync() {
atasks, err := asana.GetTasks()
// atasks, err := asana.GetTasks(1)
if err != nil {
log.Fatalf("%+v", err)
}
fmt.Printf("%27s: %d active\n", "Asana results found", len(atasks))
twtasks, err := taskwarrior.GetTasks()
if err != nil {
log.Fatal(err)
}
deleted := 0
for _, t := range twtasks {
if t.Deleted {
deleted++
}
}
fmt.Printf("%27s: %d active, %d deleted\n",
"Taskwarrior results found", len(twtasks)-deleted, deleted)
matches := generateMatches(atasks, twtasks)
deletes := make([]*Match, 0, 10)
for _, m := range matches {
if err := syncMatch(m, &deletes); err != nil {
log.Printf("syncMatch error: %v %+v", err, m)
}
}
if len(deletes) > *maxDeletes {
fmt.Printf(`
==========================================
Task deletions requested from Asana : %d
Max allowed per sync : %d
Most likely this is a mistake!
If so, please run 'rm -f ~/.task/*' to clean up Taskwarrior (or wherever else you store its state).
If you genuinely want to delete all these tasks, then set the 'deletes' flag to a higher value.
Crashing to avoid mass deletes from Asana!
==========================================
`, len(deletes), *maxDeletes)
os.Exit(1)
}
for _, m := range deletes {
if err := syncMatch(m, nil); err != nil {
log.Printf("syncMatch error: %v %+v", err, m)
}
}
fmt.Println("All synced up. DONE.")
}
func pushNotification(title, text string) {
if notify == nil {
return
}
n := notification{Title: title, Text: text}
select {
case notifications <- n:
default:
// Let it go.
}
}
func processNotifications() {
if *notifyInterval <= 0 {
return // ticker requires a positive interval; zero disables notifications
}
ni := time.Duration(*notifyInterval)
ticker := time.NewTicker(ni * time.Second)
l := make([]notification, 0, 10)
for {
select {
case <-ticker.C:
if len(l) == 0 {
// pass
} else if len(l) == 1 {
n := l[0]
notify.Push("Asanawarrior "+n.Title, n.Text, "", notificator.UR_NORMAL)
} else {
notify.Push("Asanawarrior "+l[0].Title,
fmt.Sprintf("%q and %d more updates", l[0].Text, len(l)-1), "", notificator.UR_NORMAL)
}
l = l[:0]
case n := <-notifications:
if *notifyInterval > 0 {
l = append(l, n)
}
}
}
}
func main() {
flag.Parse()
fmt.Println("Asanawarrior v1.0 - Bringing the power of Taskwarrior to Asana")
notify = notificator.New(notificator.Options{
AppName: "Asanawarrior",
})
go processNotifications()
var err error
db, err = bolt.Open(*dbpath, 0600, nil)
if err != nil {
log.Fatalf("Unable to open bolt db at %v. Error: %v", *dbpath, err)
}
defer db.Close()
db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucketName)
if err != nil {
log.Fatalf("Unable to create bucket in bolt db.")
}
return nil
})
// Initiate a sync right away.
fmt.Println()
fmt.Println("Starting sync at", time.Now())
runSync()
// And then do it at regular intervals.
ticker := time.NewTicker(time.Duration(*duration) * time.Minute)
for t := range ticker.C {
fmt.Println()
fmt.Println("Starting sync at", t)
runSync()
}
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
main.go | package main
import (
"errors"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
)
var (
source = flag.String("source", "vendor", "source directory")
target = flag.String("target", "", "target directory (defaults to $GOBIN, if not set $GOPATH/bin)")
commands = flag.String("commands", "", "comma separated list of commands to execute after go install in temporary environment")
quiet = flag.Bool("quiet", false, "disable output")
)
func main() {
flag.Parse()
packages := flag.Args()
if len(packages) < 1 {
fail(errors.New("no packages: specify a package"))
}
gopath, err := ioutil.TempDir("", "go-vendorinstall-gopath")
if err != nil {
fail(err)
}
print(fmt.Sprintf("gopath: %s", gopath))
defer func() {
if err := os.RemoveAll(gopath); err != nil {
fail(err)
}
}()
if len(*target) == 0 {
if gobin := os.Getenv("GOBIN"); len(gobin) > 0 {
target = &gobin
} else {
bin := fmt.Sprintf("%s/bin", os.Getenv("GOPATH"))
target = &bin
}
}
gobin, err := filepath.Abs(*target)
if err != nil {
fail(err)
}
print(fmt.Sprintf("gobin: %s", gobin))
if err := link(gopath, *source); err != nil {
fail(err)
}
oldpath := os.Getenv("PATH")
path := fmt.Sprintf("%s%s%s", gobin, string(os.PathListSeparator), os.Getenv("PATH"))
os.Setenv("PATH", fmt.Sprintf("%s%s%s", gobin, string(os.PathListSeparator), os.Getenv("PATH")))
defer os.Setenv("PATH", oldpath)
env := []string{fmt.Sprintf("PATH=%s", path), fmt.Sprintf("GOPATH=%s", gopath), fmt.Sprintf("GOBIN=%s", gobin)}
args := append([]string{"install"}, packages...)
if out, err := doexec("go", gopath, args, env); err != nil {
print(string(out))
fail(err)
}
if len(*commands) > 0 {
for _, cmd := range strings.Split(*commands, ",") {
split := strings.Split(cmd, " ")
if out, err := doexec(split[0], gopath, split[1:], env); err != nil {
print(string(out))
fail(err)
}
}
}
}
func print(msg string) {
if !*quiet {
fmt.Println(msg)
}
}
func fail(err error) {
fmt.Printf("error: %s", err.Error())
os.Exit(1)
}
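// link symlinks every top-level entry of the vendor source directory into
// <gopath>/src, so the vendored packages resolve like regular GOPATH packages.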
func link(gopath, source string) error {
srcdir, err := filepath.Abs(source)
if err != nil {
return err
}
linkto := filepath.Join(gopath, "src")
if err := os.MkdirAll(linkto, 0777); err != nil {
return err
}
files, err := ioutil.ReadDir(srcdir)
if err != nil {
return err
}
for _, file := range files {
real := filepath.Join(srcdir, file.Name())
link := filepath.Join(linkto, file.Name())
if err := os.Symlink(real, link); err != nil {
return err
}
}
return nil
}
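// doexec runs bin with args in dir using the supplied environment and returns
// the combined stdout/stderr output.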
func doexec(bin, dir string, args []string, env []string) ([]byte, error) {
print(fmt.Sprintf("%s %s", bin, strings.Join(args, " ")))
cmd := exec.Command(bin, args...)
cmd.Env = env
cmd.Dir = dir
return cmd.CombinedOutput()
}
| ["\"GOBIN\"", "\"GOPATH\"", "\"PATH\"", "\"PATH\"", "\"PATH\""] | [] | ["GOPATH", "GOBIN", "PATH"] | [] | ["GOPATH", "GOBIN", "PATH"] | go | 3 | 0 | |
pkg/process/config/config.go | package config
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
model "github.com/DataDog/agent-payload/process"
"github.com/DataDog/datadog-agent/cmd/agent/api/pb"
sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config"
"github.com/DataDog/datadog-agent/pkg/config"
oconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config"
"github.com/DataDog/datadog-agent/pkg/process/util"
apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config"
"github.com/DataDog/datadog-agent/pkg/util/fargate"
ddgrpc "github.com/DataDog/datadog-agent/pkg/util/grpc"
"github.com/DataDog/datadog-agent/pkg/util/hostname/validate"
"github.com/DataDog/datadog-agent/pkg/util/log"
"google.golang.org/grpc"
)
const (
// defaultProxyPort is the default port used for proxies.
// This mirrors the configuration for the infrastructure agent.
defaultProxyPort = 3128
defaultGRPCConnectionTimeout = 60 * time.Second
)
// Name for check performed by process-agent or system-probe
const (
ProcessCheckName = "process"
RTProcessCheckName = "rtprocess"
ContainerCheckName = "container"
RTContainerCheckName = "rtcontainer"
ConnectionsCheckName = "connections"
PodCheckName = "pod"
NetworkCheckName = "Network"
OOMKillCheckName = "OOM Kill"
TCPQueueLengthCheckName = "TCP queue length"
ProcessModuleCheckName = "Process Module"
)
var (
processChecks = []string{ProcessCheckName, RTProcessCheckName}
containerChecks = []string{ContainerCheckName, RTContainerCheckName}
moduleCheckMap = map[sysconfig.ModuleName][]string{
sysconfig.NetworkTracerModule: {ConnectionsCheckName, NetworkCheckName},
sysconfig.OOMKillProbeModule: {OOMKillCheckName},
sysconfig.TCPQueueLengthTracerModule: {TCPQueueLengthCheckName},
sysconfig.ProcessModule: {ProcessModuleCheckName},
}
)
type proxyFunc func(*http.Request) (*url.URL, error)
type cmdFunc = func(name string, arg ...string) *exec.Cmd
// WindowsConfig stores all windows-specific configuration for the process-agent and system-probe.
type WindowsConfig struct {
// Number of checks runs between refreshes of command-line arguments
ArgsRefreshInterval int
// Controls getting process arguments immediately when a new process is discovered
AddNewArgs bool
}
// AgentConfig is the global config for the process-agent. This information
// is sourced from config files and the environment variables.
type AgentConfig struct {
Enabled bool
HostName string
APIEndpoints []apicfg.Endpoint
LogFile string
LogLevel string
LogToConsole bool
QueueSize int // The number of items allowed in each delivery queue.
ProcessQueueBytes int // The total number of bytes that can be enqueued for delivery to the process intake endpoint
Blacklist []*regexp.Regexp
Scrubber *DataScrubber
MaxPerMessage int
MaxCtrProcessesPerMessage int // The maximum number of processes that belong to a container for a given message
MaxConnsPerMessage int
AllowRealTime bool
Transport *http.Transport `json:"-"`
DDAgentBin string
StatsdHost string
StatsdPort int
ProcessExpVarPort int
ProfilingEnabled bool
ProfilingSite string
ProfilingURL string
ProfilingAPIKey string
ProfilingEnvironment string
// host type of the agent, used to populate container payload with additional host information
ContainerHostType model.ContainerHostType
// System probe collection configuration
EnableSystemProbe bool
SystemProbeAddress string
// Orchestrator config
Orchestrator *oconfig.OrchestratorConfig
// Check config
EnabledChecks []string
CheckIntervals map[string]time.Duration
// Internal store of a proxy used for generating the Transport
proxy proxyFunc
// Windows-specific config
Windows WindowsConfig
grpcConnectionTimeout time.Duration
}
// CheckIsEnabled returns a bool indicating if the given check name is enabled.
func (a AgentConfig) CheckIsEnabled(checkName string) bool {
return util.StringInSlice(a.EnabledChecks, checkName)
}
// CheckInterval returns the interval for the given check name, defaulting to 10s if not found.
func (a AgentConfig) CheckInterval(checkName string) time.Duration {
d, ok := a.CheckIntervals[checkName]
if !ok {
log.Errorf("missing check interval for '%s', you must set a default", checkName)
d = 10 * time.Second
}
return d
}
const (
defaultProcessEndpoint = "https://process.datadoghq.com"
maxMessageBatch = 100
defaultMaxCtrProcsMessageBatch = 10000
maxCtrProcsMessageBatch = 30000
)
// NewDefaultTransport provides a http transport configuration with sane default timeouts
func NewDefaultTransport() *http.Transport {
return &http.Transport{
MaxIdleConns: 5,
IdleConnTimeout: 90 * time.Second,
Dial: (&net.Dialer{
Timeout: 10 * time.Second,
KeepAlive: 10 * time.Second,
}).Dial,
TLSHandshakeTimeout: 5 * time.Second,
ResponseHeaderTimeout: 5 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
}
// NewDefaultAgentConfig returns an AgentConfig with defaults initialized
func NewDefaultAgentConfig(canAccessContainers bool) *AgentConfig {
processEndpoint, err := url.Parse(defaultProcessEndpoint)
if err != nil {
// This is a hardcoded URL so parsing it should not fail
panic(err)
}
var enabledChecks []string
if canAccessContainers {
enabledChecks = containerChecks
}
ac := &AgentConfig{
Enabled: canAccessContainers, // We'll always run inside of a container.
APIEndpoints: []apicfg.Endpoint{{Endpoint: processEndpoint}},
LogFile: defaultLogFilePath,
LogLevel: "info",
LogToConsole: false,
// Allow buffering up to 75 megabytes of payload data in total
ProcessQueueBytes: 60 * 1000 * 1000,
// This can be fairly high as the input should get throttled by queue bytes first.
// Assuming we generate ~8 checks/minute (for process/network), this should allow buffering of ~30 minutes of data assuming it fits within the queue bytes memory budget
QueueSize: 256,
MaxPerMessage: maxMessageBatch,
MaxCtrProcessesPerMessage: defaultMaxCtrProcsMessageBatch,
MaxConnsPerMessage: 600,
AllowRealTime: true,
HostName: "",
Transport: NewDefaultTransport(),
ProcessExpVarPort: 6062,
ContainerHostType: model.ContainerHostType_notSpecified,
// Statsd for internal instrumentation
StatsdHost: "127.0.0.1",
StatsdPort: 8125,
// System probe collection configuration
EnableSystemProbe: false,
SystemProbeAddress: defaultSystemProbeAddress,
// Orchestrator config
Orchestrator: oconfig.NewDefaultOrchestratorConfig(),
// Check config
EnabledChecks: enabledChecks,
CheckIntervals: map[string]time.Duration{
ProcessCheckName: 10 * time.Second,
RTProcessCheckName: 2 * time.Second,
ContainerCheckName: 10 * time.Second,
RTContainerCheckName: 2 * time.Second,
ConnectionsCheckName: 30 * time.Second,
PodCheckName: 10 * time.Second,
},
// DataScrubber to hide command line sensitive words
Scrubber: NewDefaultDataScrubber(),
Blacklist: make([]*regexp.Regexp, 0),
// Windows process config
Windows: WindowsConfig{
ArgsRefreshInterval: 15, // with default 20s check interval we refresh every 5m
AddNewArgs: true,
},
grpcConnectionTimeout: defaultGRPCConnectionTimeout,
}
// Set default values for proc/sys paths if unset.
// Don't set these if /host is not mounted, so the container's own context is used.
// Generally only applicable for container-only cases like Fargate.
if config.IsContainerized() && util.PathExists("/host") {
if v := os.Getenv("HOST_PROC"); v == "" {
os.Setenv("HOST_PROC", "/host/proc")
}
if v := os.Getenv("HOST_SYS"); v == "" {
os.Setenv("HOST_SYS", "/host/sys")
}
}
return ac
}
func loadConfigIfExists(path string) error {
if path != "" {
if util.PathExists(path) {
config.Datadog.AddConfigPath(path)
if strings.HasSuffix(path, ".yaml") { // If they set a config file directly, let's try to honor that
config.Datadog.SetConfigFile(path)
}
if _, err := config.LoadWithoutSecret(); err != nil {
return err
}
} else {
log.Infof("no config exists at %s, ignoring...", path)
}
}
return nil
}
// NewAgentConfig returns an AgentConfig using a configuration file. It can be nil
// if there is no file available. In this case we'll configure only via environment.
func NewAgentConfig(loggerName config.LoggerName, yamlPath, netYamlPath string) (*AgentConfig, error) {
var err error
// Note: This only considers container sources that are already setup. It's possible that container sources may
// need a few minutes to be ready on newly provisioned hosts.
_, err = util.GetContainers()
canAccessContainers := err == nil
cfg := NewDefaultAgentConfig(canAccessContainers)
// For Agent 6 we will have a YAML config file to use.
if err := loadConfigIfExists(yamlPath); err != nil {
return nil, err
}
if err := cfg.LoadProcessYamlConfig(yamlPath); err != nil {
return nil, err
}
if err := cfg.Orchestrator.Load(); err != nil {
return nil, err
}
// (Re)configure the logging from our configuration
if err := setupLogger(loggerName, cfg.LogFile, cfg); err != nil {
log.Errorf("failed to setup configured logger: %s", err)
return nil, err
}
// For system probe, there is an additional config file that is shared with the system-probe
syscfg, err := sysconfig.Merge(netYamlPath)
if err != nil {
return nil, err
}
if syscfg.Enabled {
cfg.EnableSystemProbe = true
cfg.MaxConnsPerMessage = syscfg.MaxConnsPerMessage
cfg.SystemProbeAddress = syscfg.SocketAddress
// enable corresponding checks to system-probe modules
for mod := range syscfg.EnabledModules {
if checks, ok := moduleCheckMap[mod]; ok {
cfg.EnabledChecks = append(cfg.EnabledChecks, checks...)
}
}
if !cfg.Enabled {
log.Info("enabling process-agent for connections check as the system-probe is enabled")
cfg.Enabled = true
}
}
// TODO: Once proxies have been moved to common config util, remove this
if cfg.proxy, err = proxyFromEnv(cfg.proxy); err != nil {
log.Errorf("error parsing environment proxy settings, not using a proxy: %s", err)
cfg.proxy = nil
}
// Python-style log level has WARNING vs WARN
if strings.ToLower(cfg.LogLevel) == "warning" {
cfg.LogLevel = "warn"
}
if err := validate.ValidHostname(cfg.HostName); err != nil {
// lookup hostname if there is no config override or if the override is invalid
if hostname, err := getHostname(cfg.DDAgentBin, cfg.grpcConnectionTimeout); err == nil {
cfg.HostName = hostname
} else {
log.Errorf("Cannot get hostname: %v", err)
}
}
cfg.ContainerHostType = getContainerHostType()
if cfg.proxy != nil {
cfg.Transport.Proxy = cfg.proxy
}
// sanity check. This element is used with the modulo operator (%), so it can't be zero.
// if it is, log the error, and assume the config was attempting to disable
if cfg.Windows.ArgsRefreshInterval == 0 {
log.Warnf("invalid configuration: windows_collect_skip_new_args was set to 0. Disabling argument collection")
cfg.Windows.ArgsRefreshInterval = -1
}
// activate the pod collection if enabled and we have the cluster name set
if cfg.Orchestrator.OrchestrationCollectionEnabled {
if cfg.Orchestrator.KubeClusterName != "" {
cfg.EnabledChecks = append(cfg.EnabledChecks, PodCheckName)
} else {
log.Warnf("Failed to auto-detect a Kubernetes cluster name. Pod collection will not start. To fix this, set it manually via the cluster_name config option")
}
}
return cfg, nil
}
// getContainerHostType uses the fargate library to detect container environment and returns the protobuf version of it
func getContainerHostType() model.ContainerHostType {
switch fargate.GetOrchestrator() {
case fargate.ECS:
return model.ContainerHostType_fargateECS
case fargate.EKS:
return model.ContainerHostType_fargateEKS
}
return model.ContainerHostType_notSpecified
}
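// loadEnvVariables copies supported environment variables into the matching
// Datadog config keys, with special handling for API keys and the JSON-encoded
// additional-endpoint maps.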
func loadEnvVariables() {
// The following environment variables will be loaded in the order listed, meaning variables
// further down the list may override prior variables.
for _, variable := range []struct{ env, cfg string }{
{"DD_PROCESS_AGENT_CONTAINER_SOURCE", "process_config.container_source"},
{"DD_SCRUB_ARGS", "process_config.scrub_args"},
{"DD_STRIP_PROCESS_ARGS", "process_config.strip_proc_arguments"},
{"DD_PROCESS_AGENT_URL", "process_config.process_dd_url"},
{"DD_PROCESS_AGENT_INTERNAL_PROFILING_ENABLED", "process_config.internal_profiling.enabled"},
{"DD_PROCESS_AGENT_REMOTE_TAGGER", "process_config.remote_tagger"},
{"DD_ORCHESTRATOR_URL", "orchestrator_explorer.orchestrator_dd_url"},
{"DD_HOSTNAME", "hostname"},
{"DD_DOGSTATSD_PORT", "dogstatsd_port"},
{"DD_BIND_HOST", "bind_host"},
{"HTTPS_PROXY", "proxy.https"},
{"DD_PROXY_HTTPS", "proxy.https"},
{"DD_LOGS_STDOUT", "log_to_console"},
{"LOG_TO_CONSOLE", "log_to_console"},
{"DD_LOG_TO_CONSOLE", "log_to_console"},
{"LOG_LEVEL", "log_level"}, // Support LOG_LEVEL and DD_LOG_LEVEL but prefer DD_LOG_LEVEL
{"DD_LOG_LEVEL", "log_level"},
} {
if v, ok := os.LookupEnv(variable.env); ok {
config.Datadog.Set(variable.cfg, v)
}
}
// Support API_KEY and DD_API_KEY but prefer DD_API_KEY.
apiKey, envKey := os.Getenv("DD_API_KEY"), "DD_API_KEY"
if apiKey == "" {
apiKey, envKey = os.Getenv("API_KEY"), "API_KEY"
}
if apiKey != "" { // We don't want to overwrite the API KEY provided as an environment variable
log.Infof("overriding API key from env %s value", envKey)
config.Datadog.Set("api_key", config.SanitizeAPIKey(strings.Split(apiKey, ",")[0]))
}
if v := os.Getenv("DD_CUSTOM_SENSITIVE_WORDS"); v != "" {
config.Datadog.Set("process_config.custom_sensitive_words", strings.Split(v, ","))
}
if v := os.Getenv("DD_PROCESS_ADDITIONAL_ENDPOINTS"); v != "" {
endpoints := make(map[string][]string)
if err := json.Unmarshal([]byte(v), &endpoints); err != nil {
log.Errorf(`Could not parse DD_PROCESS_ADDITIONAL_ENDPOINTS: %v. It must be of the form '{"https://process.agent.datadoghq.com": ["apikey1", ...], ...}'.`, err)
} else {
config.Datadog.Set("process_config.additional_endpoints", endpoints)
}
}
if v := os.Getenv("DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS"); v != "" {
endpoints := make(map[string][]string)
if err := json.Unmarshal([]byte(v), &endpoints); err != nil {
log.Errorf(`Could not parse DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS: %v. It must be of the form '{"https://process.agent.datadoghq.com": ["apikey1", ...], ...}'.`, err)
} else {
config.Datadog.Set("orchestrator_explorer.orchestrator_additional_endpoints", endpoints)
}
}
}
// IsBlacklisted returns a boolean indicating if the given command is blacklisted by our config.
func IsBlacklisted(cmdline []string, blacklist []*regexp.Regexp) bool {
cmd := strings.Join(cmdline, " ")
for _, b := range blacklist {
if b.MatchString(cmd) {
return true
}
}
return false
}
func isAffirmative(value string) (bool, error) {
if value == "" {
return false, fmt.Errorf("value is empty")
}
v := strings.ToLower(value)
return v == "true" || v == "yes" || v == "1", nil
}
// getHostname attempts to resolve the hostname in the following order: the main datadog agent via grpc, the main agent
// via cli and lastly falling back to os.Hostname() if it is unavailable
func getHostname(ddAgentBin string, grpcConnectionTimeout time.Duration) (string, error) {
// Fargate is handled as an exceptional case (there is no concept of a host, so we use the ARN in-place).
if fargate.IsFargateInstance() {
hostname, err := fargate.GetFargateHost()
if err == nil {
return hostname, nil
}
log.Errorf("failed to get Fargate host: %v", err)
}
// Get the hostname via gRPC from the main agent if a hostname has not been set either from config/fargate
hostname, err := getHostnameFromGRPC(ddgrpc.GetDDAgentClient, grpcConnectionTimeout)
if err == nil {
return hostname, nil
}
log.Errorf("failed to get hostname from grpc: %v", err)
// If the hostname is not set then we fallback to use the agent binary
hostname, err = getHostnameFromCmd(ddAgentBin, exec.Command)
if err == nil {
return hostname, nil
}
log.Errorf("failed to get hostname from cmd: %v", err)
return os.Hostname()
}
// getHostnameCmd shells out to obtain the hostname used by the infra agent
func getHostnameFromCmd(ddAgentBin string, cmdFn cmdFunc) (string, error) {
cmd := cmdFn(ddAgentBin, "hostname")
// Copying all environment variables to child process
// Windows: Required, so the child process can load DLLs, etc.
// Linux: Optional, but will make use of DD_HOSTNAME and DOCKER_DD_AGENT if they exist
cmd.Env = append(cmd.Env, os.Environ()...)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return "", err
}
hostname := strings.TrimSpace(stdout.String())
if hostname == "" {
return "", fmt.Errorf("error retrieving dd-agent hostname %s", stderr.String())
}
return hostname, nil
}
// getHostnameFromGRPC retrieves the hostname from the main datadog agent via GRPC
func getHostnameFromGRPC(grpcClientFn func(ctx context.Context, opts ...grpc.DialOption) (pb.AgentClient, error), grpcConnectionTimeout time.Duration) (string, error) {
ctx, cancel := context.WithTimeout(context.Background(), grpcConnectionTimeout)
defer cancel()
ddAgentClient, err := grpcClientFn(ctx)
if err != nil {
return "", fmt.Errorf("cannot connect to datadog agent via grpc: %w", err)
}
reply, err := ddAgentClient.GetHostname(ctx, &pb.HostnameRequest{})
if err != nil {
return "", fmt.Errorf("cannot get hostname from datadog agent via grpc: %w", err)
}
log.Debugf("retrieved hostname:%s from datadog agent via grpc", reply.Hostname)
return reply.Hostname, nil
}
// proxyFromEnv parses out the proxy configuration from the ENV variables in a
// similar way to getProxySettings and, if enough values are available, returns
// a new proxy URL value. If the environment is not set for this then the
// `defaultVal` is returned.
func proxyFromEnv(defaultVal proxyFunc) (proxyFunc, error) {
var host string
scheme := "http"
if v := os.Getenv("PROXY_HOST"); v != "" {
// accept either http://myproxy.com or myproxy.com
if i := strings.Index(v, "://"); i != -1 {
// when available, parse the scheme from the url
scheme = v[0:i]
host = v[i+3:]
} else {
host = v
}
}
if host == "" {
return defaultVal, nil
}
port := defaultProxyPort
if v := os.Getenv("PROXY_PORT"); v != "" {
port, _ = strconv.Atoi(v)
}
var user, password string
if v := os.Getenv("PROXY_USER"); v != "" {
user = v
}
if v := os.Getenv("PROXY_PASSWORD"); v != "" {
password = v
}
return constructProxy(host, scheme, port, user, password)
}
// constructProxy constructs a proxy function from the parts of a proxy URL.
// Note that we assume we have at least a non-empty host for this call, but
// all other values can be their defaults (empty string or 0).
func constructProxy(host, scheme string, port int, user, password string) (proxyFunc, error) {
var userpass *url.Userinfo
if user != "" {
if password != "" {
userpass = url.UserPassword(user, password)
} else {
userpass = url.User(user)
}
}
var path string
if userpass != nil {
path = fmt.Sprintf("%s@%s:%v", userpass.String(), host, port)
} else {
path = fmt.Sprintf("%s:%v", host, port)
}
if scheme != "" {
path = fmt.Sprintf("%s://%s", scheme, path)
}
u, err := url.Parse(path)
if err != nil {
return nil, err
}
return http.ProxyURL(u), nil
}
func setupLogger(loggerName config.LoggerName, logFile string, cfg *AgentConfig) error {
return config.SetupLogger(
loggerName,
cfg.LogLevel,
logFile,
config.GetSyslogURI(),
config.Datadog.GetBool("syslog_rfc"),
config.Datadog.GetBool("log_to_console"),
config.Datadog.GetBool("log_format_json"),
)
}
| ["\"HOST_PROC\"", "\"HOST_SYS\"", "\"DD_API_KEY\"", "\"API_KEY\"", "\"DD_CUSTOM_SENSITIVE_WORDS\"", "\"DD_PROCESS_ADDITIONAL_ENDPOINTS\"", "\"DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS\"", "\"PROXY_HOST\"", "\"PROXY_PORT\"", "\"PROXY_USER\"", "\"PROXY_PASSWORD\""] | [] | ["HOST_SYS", "PROXY_PASSWORD", "PROXY_HOST", "API_KEY", "HOST_PROC", "PROXY_USER", "PROXY_PORT", "DD_PROCESS_ADDITIONAL_ENDPOINTS", "DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS", "DD_API_KEY", "DD_CUSTOM_SENSITIVE_WORDS"] | [] | ["HOST_SYS", "PROXY_PASSWORD", "PROXY_HOST", "API_KEY", "HOST_PROC", "PROXY_USER", "PROXY_PORT", "DD_PROCESS_ADDITIONAL_ENDPOINTS", "DD_ORCHESTRATOR_ADDITIONAL_ENDPOINTS", "DD_API_KEY", "DD_CUSTOM_SENSITIVE_WORDS"] | go | 11 | 0 | |
services/emr/struct_job_instance_workers.go | package emr
//Licensed under the Apache License, Version 2.0 (the "License");
//you may not use this file except in compliance with the License.
//You may obtain a copy of the License at
//
//http://www.apache.org/licenses/LICENSE-2.0
//
//Unless required by applicable law or agreed to in writing, software
//distributed under the License is distributed on an "AS IS" BASIS,
//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//See the License for the specific language governing permissions and
//limitations under the License.
//
// Code generated by Alibaba Cloud SDK Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// JobInstanceWorkers is a nested struct in emr response
type JobInstanceWorkers struct {
JobInstanceWorkerInfo []JobInstanceWorkerInfo `json:"JobInstanceWorkerInfo" xml:"JobInstanceWorkerInfo"`
}
| [] | [] | [] | [] | [] | go | null | null | null |
app/SpacyDans.py | import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
from nltk.chunk import conlltags2tree, tree2conlltags
from pprint import pprint
from textacy import extract
import io
import sys
import os
from langdetect import detect
nlp = en_core_web_sm.load()
nlp_nl = spacy.load("nl_core_news_sm")
def load_model(lang, text, orig_text):
# Language models supported: nlp_en, nlp_nl, nlp_ru, nlp_de, nlp_es, nlp_fr, nlp_nb, nlp_it, nlp_pl, nlp_pt
doc = False
docNER = False
if lang == 'en':
doc = nlp(text)
docNER = nlp(orig_text)
if lang == 'nl':
doc = nlp_nl(text)
docNER = nlp_nl(orig_text)
if lang == 'ru':
nlp_ru = spacy.load("ru_core_news_sm")  # assumes the Russian spaCy model is installed
doc = nlp_ru(text)
docNER = nlp_ru(orig_text)
if lang == 'de':
nlp_de = spacy.load("de_core_news_sm")
doc = nlp_de(text)
docNER = nlp_de(orig_text)
if lang == 'es':
nlp_es = spacy.load("es_core_news_sm")
doc = nlp_es(text)
docNER = nlp_es(orig_text)
if lang == 'fr':
nlp_fr = spacy.load("fr_core_news_sm")
doc = nlp_fr(text)
docNER = nlp_fr(orig_text)
if lang == 'nb':
nlp_nb = spacy.load("nb_core_news_sm")
doc = nlp_nb(text)
docNER = nlp_nb(orig_text)
if lang == 'it':
nlp_it = spacy.load("it_core_news_sm")
doc = nlp_it(text)
docNER = nlp_it(orig_text)
if lang == 'pl':
nlp_pl = spacy.load("pl_core_news_sm")
doc = nlp_pl(text)
docNER = nlp_pl(orig_text)
if lang == 'pt':
nlp_pt = spacy.load("pt_core_news_sm")
doc = nlp_pt(text)
docNER = nlp_pt(orig_text)
return (doc, docNER)
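# ngrams_tokens detects the text language, runs the matching spaCy pipeline for
# NER and n-gram keyword extraction, and returns the collected metadata dict.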
def ngrams_tokens(filename=None, article=False, params={}):
text = False
data = {}
lines = False
grams_ammount = 2
SUMMARY_LEN = 250
MIN_FREQ=2
if 'plain_text' in article:
summary = ''
for sentence in article['plain_text']:
if len(summary) < SUMMARY_LEN:
summary = summary + ' ' + str(sentence['text'])
if summary:
article['summary'] = summary
article['plain_text'].insert(0, {'text': article['title'] + '. '})
text = str(article['plain_text'])
if filename:
print("NLP process for %s..." % filename)
fo = io.open(filename, mode="r", encoding="utf-8")
lines = fo.readlines()
text = str(lines)
text = text.replace('\n', ' ')
if lines:
lang = detect(" ".join(lines[0:5]))
else:
lang = detect(text)
data['lang'] = lang
orig_text = text
text = text.lower()
if 'disable_nlp' in os.environ:
lang = 'unknown'
skip = True
(doc, docNER) = load_model(lang, text, orig_text)
compoundentities = []
entities = []
savedkeywords = {}
# If there is language model
if doc:
for X in docNER.ents:
tmpres = {}
tmpres['entity'] = X.text
tmpres['label'] = X.label_
compoundentities.append(tmpres)
for token in docNER:
entinfo = {'text': token.text, 'pos': token.pos_, 'dep': token.dep_}
entities.append(entinfo)
#pprint([(X.text, X.label_) for X in doc.ents])
if 'ngrams' in params:
grams_ammount = int(params['ngrams'])
if 'minfreq' in params:
MIN_FREQ = int(params['minfreq'])
ngrams = list(extract.basics.ngrams(doc, grams_ammount, min_freq=MIN_FREQ))
for ngram in ngrams:
savedkeywords[str(ngram)] = str(ngram)
for token in savedkeywords:
print("%s" % (token))
if 'showcontent' in params:
thisdoc = {}
thisdoc['title'] = article['title']
thisdoc['text'] = article['plain_text']
thisdoc['content'] = article['content']
if 'summary' in article:
thisdoc['summary'] = article['summary']
data['content'] = thisdoc
if 'showpos' in params:
if entities:
data['pos'] = entities
if compoundentities:
data['original_entities'] = compoundentities
entities = []
known = {}
thisent = []
for e in compoundentities:
known[e['entity']] = e['label']
for e in known:
entities.append(e)
data['entities'] = ", ".join(entities)
if savedkeywords:
data['keywords'] = ", ".join(savedkeywords)
data['original_keywords'] = savedkeywords
return data
| [] | [] | [] | [] | [] | python | 0 | 0 | |
teste5/asgi.py | """
ASGI config for teste5 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teste5.settings')
application = get_asgi_application()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tensorflow/imagerecognition/openmpi-cifar10-resnet20-all-reduce/main.py | r"""Distributed TensorFlow with Monitored Training Session.
This implements the 1a image recognition benchmark task, see https://mlbench.readthedocs.io/en/latest/benchmark-tasks.html#a-image-classification-resnet-cifar-10
for more details
Adapted from official tutorial::
https://www.tensorflow.org/deploy/distributed
Launch::
mpirun -n 3 --allow-run-as-root python ....
"""
import argparse
import logging
import os
import tensorflow as tf
from mlbench_core.controlflow.tensorflow.train_validation import train_round, \
validation_round
from mlbench_core.dataset.imagerecognition.tensorflow.cifar10 import \
DatasetCifar
from mlbench_core.evaluation.goals import task1_time_to_accuracy_light_goal, \
task1_time_to_accuracy_goal
from mlbench_core.evaluation.tensorflow.criterion import \
softmax_cross_entropy_with_logits_v2_l2_regularized
from mlbench_core.evaluation.tensorflow.metrics import TopKAccuracy
from mlbench_core.lr_scheduler.tensorflow.lr import manual_stepping
from mlbench_core.models.tensorflow.resnet_model import Cifar10Model
from mlbench_core.utils import Tracker
def define_graph(inputs, labels, is_training, batch_size, replicas_to_aggregate):
"""
Define graph for synchronized training.
"""
model = Cifar10Model(
resnet_size=20,
data_format='channels_last',
resnet_version=2,
dtype=tf.float32)
logits = model(inputs, is_training)
loss = softmax_cross_entropy_with_logits_v2_l2_regularized(
logits=logits,
labels=labels,
l2=2e-4,
# Exclude BN weights from L2 regularizer
loss_filter_fn=lambda name: 'batch_normalization' not in name)
# Use Top K accuracy as metrics
metrics = [
TopKAccuracy(logits, labels, topk=1),
TopKAccuracy(logits, labels, topk=5),
]
global_step = tf.train.get_or_create_global_step()
# scheduling learning steps.
lr_scheduler = manual_stepping(
global_step=global_step,
boundaries=[32000 // replicas_to_aggregate,
48000 // replicas_to_aggregate],
rates=[0.1, 0.01, 0.001],
warmup=False)
# Define the optimizer
optimizer_ = tf.train.MomentumOptimizer(
learning_rate=lr_scheduler,
momentum=0.9,
use_nesterov=True)
# Wrap optimizer with `SyncReplicasOptimizer`
optimizer = tf.train.SyncReplicasOptimizer(
optimizer_,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=replicas_to_aggregate)
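# `rank` is the module-level rank set in __main__ (already offset past the
# parameter server); only the chief worker (rank 0) manages the token queue.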
hooks = [
optimizer.make_session_run_hook((rank == 0), num_tokens=0)
]
# The update for batch normalization.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
# Not all of the processes contribute one update. Some faster procs can push more updates.
grads_and_vars = list(optimizer.compute_gradients(
loss, tf.trainable_variables()))
train_op = optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
return train_op, loss, metrics, hooks
def main(is_ps, run_id, rank, world_size, cluster_spec, batch_size,
replicas_to_aggregate, light_target=False):
logging.info("Initial.")
job_name = "ps" if is_ps else "worker"
cluster = tf.train.ClusterSpec(cluster_spec)
gpu_options = tf.GPUOptions(allow_growth=True,
per_process_gpu_memory_fraction=0.2)
session_conf = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=False)
server = tf.train.Server(
cluster, job_name=job_name, task_index=rank, config=session_conf)
if is_ps:
server.join()
else:
# Pin variables to parameter server.
device_fn = tf.train.replica_device_setter(
ps_tasks=None,
ps_device="/job:ps",
worker_device="/job:{}/task:{}/device:GPU:{}".format(
job_name, rank, rank),
merge_devices=True,
cluster=cluster,
ps_ops=None,
ps_strategy=None)
with tf.Graph().as_default():
with tf.device(device_fn):
data_loader = DatasetCifar(
dataset='cifar-10',
dataset_root='/datasets',
batch_size=batch_size,
world_size=world_size,
rank=rank,
seed=42,
tf_dtype=tf.float32)
train_op, loss, metrics, hooks = define_graph(
data_loader.inputs,
data_loader.labels,
data_loader.training,
batch_size,
replicas_to_aggregate)
local_init_op = tf.group(
tf.local_variables_initializer(),
data_loader.train_init_op,
data_loader.validation_init_op)
scaffold = tf.train.Scaffold(
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
ready_for_local_init_op=None,
local_init_op=local_init_op)
lr_tensor_name = tf.get_default_graph().get_tensor_by_name("learning_rate:0")
with tf.train.MonitoredTrainingSession(config=session_conf,
master=server.target,
scaffold=scaffold,
is_chief=(rank == 0),
checkpoint_dir=None,
save_checkpoint_secs=None,
save_summaries_steps=None,
stop_grace_period_secs=5,
hooks=hooks) as sess:
logging.info("Begin training.")
final_epoch = 164
if light_target:
goal = task1_time_to_accuracy_light_goal()
else:
goal = task1_time_to_accuracy_goal()
tracker = Tracker(metrics, run_id, rank, goal=goal)
tracker.start()
for i_epoch in range(final_epoch):
logging.debug("=> Epoch {}".format(i_epoch))
train_round(sess, data_loader.train_init_op, train_op,
loss, metrics, batch_size,
data_loader.num_batches_per_epoch_for_train,
tracker, lr_tensor=lr_tensor_name,
lr_scheduler_level='epoch')
validation_round(sess, data_loader.validation_init_op,
loss, metrics, batch_size,
data_loader.num_batches_per_epoch_for_eval,
tracker)
tracker.epoch_end()
if tracker.goal_reached:
print("Goal Reached!")
return
logging.info("Finish.")
def configure_logger(log_dir, is_ps, rank):
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'{:6} rank={} : %(message)s'.format("ps" if is_ps else "worker", rank),
"%Y-%m-%d %H:%M:%S")
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
log_name = '{}-{}.log'.format("ps" if is_ps else "worker", rank)
log_name = os.path.join(log_dir, log_name)
if os.path.exists(log_name):
os.remove(log_name)
fh = logging.FileHandler(log_name)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process run parameters')
parser.add_argument('--run_id', type=str, help='The id of the run')
parser.add_argument('--hosts', type=str, help='The hosts participating in this run')
parser.add_argument('--light', action='store_true', default=False,
help='Train to light target metric goal')
args = parser.parse_args()
rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
hosts = args.hosts.split(",")
if len(hosts) < 2:
raise ValueError("At least 2 pods are needed for this benchmark (1 parameter server, 1 worker)")
workers = [h + ":22222" for h in hosts[1:]]
ps = hosts[0] + ":22222" # First host acts as the parameter server
cluster_spec = {"worker": workers,
"ps": [ps]}
# Parse role in the cluster by rank.
is_ps = rank < len(cluster_spec['ps'])
rank = rank if is_ps else rank - len(cluster_spec['ps'])
world_size = size - len(cluster_spec['ps'])
# Configure Logging
if not os.path.exists('/mlbench'):
os.makedirs('/mlbench')
configure_logger('/mlbench', is_ps, rank)
batch_size = 128
replicas_to_aggregate = len(cluster_spec['worker'])
main(is_ps, args.run_id, rank, world_size, cluster_spec,
batch_size, replicas_to_aggregate, light_target=args.light)
| [] | [] | ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"] | [] | ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"] | python | 2 | 0 | |
server/opts_test.go | // Copyright 2012-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"flag"
"fmt"
"io/ioutil"
"net/url"
"os"
"reflect"
"runtime"
"strings"
"testing"
"time"
"github.com/nats-io/jwt/v2"
"github.com/nats-io/nats.go"
"github.com/nats-io/nkeys"
)
func checkOptionsEqual(t *testing.T, golden, opts *Options) {
t.Helper()
// Clone them so we can remove private fields that we don't
// want to be compared.
goldenClone := golden.Clone()
goldenClone.inConfig, goldenClone.inCmdLine = nil, nil
optsClone := opts.Clone()
optsClone.inConfig, optsClone.inCmdLine = nil, nil
if !reflect.DeepEqual(goldenClone, optsClone) {
t.Fatalf("Options are incorrect.\nexpected: %+v\ngot: %+v", goldenClone, optsClone)
}
}
func TestDefaultOptions(t *testing.T) {
golden := &Options{
Host: DEFAULT_HOST,
Port: DEFAULT_PORT,
MaxConn: DEFAULT_MAX_CONNECTIONS,
HTTPHost: DEFAULT_HOST,
PingInterval: DEFAULT_PING_INTERVAL,
MaxPingsOut: DEFAULT_PING_MAX_OUT,
TLSTimeout: float64(TLS_TIMEOUT) / float64(time.Second),
AuthTimeout: float64(AUTH_TIMEOUT) / float64(time.Second),
MaxControlLine: MAX_CONTROL_LINE_SIZE,
MaxPayload: MAX_PAYLOAD_SIZE,
MaxPending: MAX_PENDING_SIZE,
WriteDeadline: DEFAULT_FLUSH_DEADLINE,
MaxClosedClients: DEFAULT_MAX_CLOSED_CLIENTS,
LameDuckDuration: DEFAULT_LAME_DUCK_DURATION,
LameDuckGracePeriod: DEFAULT_LAME_DUCK_GRACE_PERIOD,
LeafNode: LeafNodeOpts{
ReconnectInterval: DEFAULT_LEAF_NODE_RECONNECT,
},
ConnectErrorReports: DEFAULT_CONNECT_ERROR_REPORTS,
ReconnectErrorReports: DEFAULT_RECONNECT_ERROR_REPORTS,
MaxTracedMsgLen: 0,
JetStreamMaxMemory: -1,
JetStreamMaxStore: -1,
}
opts := &Options{}
setBaselineOptions(opts)
checkOptionsEqual(t, golden, opts)
}
func TestOptions_RandomPort(t *testing.T) {
opts := &Options{Port: RANDOM_PORT}
setBaselineOptions(opts)
if opts.Port != 0 {
t.Fatalf("Process of options should have resolved random port to "+
"zero.\nexpected: %d\ngot: %d", 0, opts.Port)
}
}
func TestConfigFile(t *testing.T) {
golden := &Options{
ConfigFile: "./configs/test.conf",
ServerName: "testing_server",
Host: "127.0.0.1",
Port: 4242,
Username: "derek",
Password: "porkchop",
AuthTimeout: 1.0,
Debug: false,
Trace: true,
Logtime: false,
HTTPPort: 8222,
HTTPBasePath: "/nats",
PidFile: "/tmp/nats-server.pid",
ProfPort: 6543,
Syslog: true,
RemoteSyslog: "udp://foo.com:33",
MaxControlLine: 2048,
MaxPayload: 65536,
MaxConn: 100,
MaxSubs: 1000,
MaxPending: 10000000,
PingInterval: 60 * time.Second,
MaxPingsOut: 3,
WriteDeadline: 3 * time.Second,
LameDuckDuration: 4 * time.Minute,
ConnectErrorReports: 86400,
ReconnectErrorReports: 5,
}
opts, err := ProcessConfigFile("./configs/test.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
checkOptionsEqual(t, golden, opts)
}
func TestTLSConfigFile(t *testing.T) {
golden := &Options{
ConfigFile: "./configs/tls.conf",
Host: "127.0.0.1",
Port: 4443,
Username: "derek",
Password: "foo",
AuthTimeout: 1.0,
TLSTimeout: 2.0,
}
opts, err := ProcessConfigFile("./configs/tls.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
tlsConfig := opts.TLSConfig
if tlsConfig == nil {
t.Fatal("Expected opts.TLSConfig to be non-nil")
}
opts.TLSConfig = nil
checkOptionsEqual(t, golden, opts)
// Now check TLSConfig a bit more closely
// CipherSuites
ciphers := defaultCipherSuites()
if !reflect.DeepEqual(tlsConfig.CipherSuites, ciphers) {
t.Fatalf("Got incorrect cipher suite list: [%+v]", tlsConfig.CipherSuites)
}
if tlsConfig.MinVersion != tls.VersionTLS12 {
t.Fatalf("Expected MinVersion of 1.2 [%v], got [%v]", tls.VersionTLS12, tlsConfig.MinVersion)
}
if !tlsConfig.PreferServerCipherSuites {
t.Fatal("Expected PreferServerCipherSuites to be true")
}
// Verify hostname is correct in certificate
if len(tlsConfig.Certificates) != 1 {
t.Fatal("Expected 1 certificate")
}
cert := tlsConfig.Certificates[0].Leaf
if err := cert.VerifyHostname("127.0.0.1"); err != nil {
t.Fatalf("Could not verify hostname in certificate: %v", err)
}
// Now test adding cipher suites.
opts, err = ProcessConfigFile("./configs/tls_ciphers.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
tlsConfig = opts.TLSConfig
if tlsConfig == nil {
t.Fatal("Expected opts.TLSConfig to be non-nil")
}
// CipherSuites listed in the config - test all of them.
ciphers = []uint16{
tls.TLS_RSA_WITH_RC4_128_SHA,
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
}
if !reflect.DeepEqual(tlsConfig.CipherSuites, ciphers) {
t.Fatalf("Got incorrect cipher suite list: [%+v]", tlsConfig.CipherSuites)
}
// Test an unrecognized/bad cipher
if _, err := ProcessConfigFile("./configs/tls_bad_cipher.conf"); err == nil {
t.Fatal("Did not receive an error from a unrecognized cipher")
}
// Test an empty cipher entry in a config file.
if _, err := ProcessConfigFile("./configs/tls_empty_cipher.conf"); err == nil {
t.Fatal("Did not receive an error from empty cipher_suites")
}
// Test a curve preference from the config.
curves := []tls.CurveID{
tls.CurveP256,
}
// test on a file that will load the curve preference defaults
opts, err = ProcessConfigFile("./configs/tls_ciphers.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
if !reflect.DeepEqual(opts.TLSConfig.CurvePreferences, defaultCurvePreferences()) {
t.Fatalf("Got incorrect curve preference list: [%+v]", tlsConfig.CurvePreferences)
}
// Test specifying a single curve preference
opts, err = ProcessConfigFile("./configs/tls_curve_prefs.conf")
if err != nil {
t.Fatal("Did not receive an error from a unrecognized cipher.")
}
if !reflect.DeepEqual(opts.TLSConfig.CurvePreferences, curves) {
t.Fatalf("Got incorrect cipher suite list: [%+v]", tlsConfig.CurvePreferences)
}
// Test an unrecognized/bad curve preference
if _, err := ProcessConfigFile("./configs/tls_bad_curve_prefs.conf"); err == nil {
t.Fatal("Did not receive an error from a unrecognized curve preference")
}
// Test an empty curve preference
if _, err := ProcessConfigFile("./configs/tls_empty_curve_prefs.conf"); err == nil {
t.Fatal("Did not receive an error from empty curve preferences")
}
}
func TestMergeOverrides(t *testing.T) {
golden := &Options{
ConfigFile: "./configs/test.conf",
ServerName: "testing_server",
Host: "127.0.0.1",
Port: 2222,
Username: "derek",
Password: "porkchop",
AuthTimeout: 1.0,
Debug: true,
Trace: true,
Logtime: false,
HTTPPort: DEFAULT_HTTP_PORT,
HTTPBasePath: DEFAULT_HTTP_BASE_PATH,
PidFile: "/tmp/nats-server.pid",
ProfPort: 6789,
Syslog: true,
RemoteSyslog: "udp://foo.com:33",
MaxControlLine: 2048,
MaxPayload: 65536,
MaxConn: 100,
MaxSubs: 1000,
MaxPending: 10000000,
PingInterval: 60 * time.Second,
MaxPingsOut: 3,
Cluster: ClusterOpts{
NoAdvertise: true,
ConnectRetries: 2,
},
WriteDeadline: 3 * time.Second,
LameDuckDuration: 4 * time.Minute,
ConnectErrorReports: 86400,
ReconnectErrorReports: 5,
}
fopts, err := ProcessConfigFile("./configs/test.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
// Overrides via flags
opts := &Options{
Port: 2222,
Password: "porkchop",
Debug: true,
HTTPPort: DEFAULT_HTTP_PORT,
HTTPBasePath: DEFAULT_HTTP_BASE_PATH,
ProfPort: 6789,
Cluster: ClusterOpts{
NoAdvertise: true,
ConnectRetries: 2,
},
}
merged := MergeOptions(fopts, opts)
checkOptionsEqual(t, golden, merged)
}
func TestRemoveSelfReference(t *testing.T) {
url1, _ := url.Parse("nats-route://user:[email protected]:4223")
url2, _ := url.Parse("nats-route://user:[email protected]:4223")
url3, _ := url.Parse("nats-route://user:[email protected]:4223")
routes := []*url.URL{url1, url2, url3}
newroutes, err := RemoveSelfReference(4223, routes)
if err != nil {
t.Fatalf("Error during RemoveSelfReference: %v", err)
}
if len(newroutes) != 1 {
t.Fatalf("Wrong number of routes: %d", len(newroutes))
}
if newroutes[0] != routes[0] {
t.Fatalf("Self reference IP address %s in Routes", routes[0])
}
}
func TestAllowRouteWithDifferentPort(t *testing.T) {
url1, _ := url.Parse("nats-route://user:[email protected]:4224")
routes := []*url.URL{url1}
newroutes, err := RemoveSelfReference(4223, routes)
if err != nil {
t.Fatalf("Error during RemoveSelfReference: %v", err)
}
if len(newroutes) != 1 {
t.Fatalf("Wrong number of routes: %d", len(newroutes))
}
}
func TestRouteFlagOverride(t *testing.T) {
routeFlag := "nats-route://ruser:[email protected]:8246"
rurl, _ := url.Parse(routeFlag)
golden := &Options{
ConfigFile: "./configs/srv_a.conf",
Host: "127.0.0.1",
Port: 7222,
Cluster: ClusterOpts{
Name: "abc",
Host: "127.0.0.1",
Port: 7244,
Username: "ruser",
Password: "top_secret",
AuthTimeout: 0.5,
},
Routes: []*url.URL{rurl},
RoutesStr: routeFlag,
}
fopts, err := ProcessConfigFile("./configs/srv_a.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
// Overrides via flags
opts := &Options{
RoutesStr: routeFlag,
}
merged := MergeOptions(fopts, opts)
checkOptionsEqual(t, golden, merged)
}
func TestClusterFlagsOverride(t *testing.T) {
routeFlag := "nats-route://ruser:[email protected]:7246"
rurl, _ := url.Parse(routeFlag)
// In this test, we override the cluster listen string. Note that in
// the golden options, the cluster's other fields correspond to what
// is recovered from the configuration file, which explains the
// discrepancy between ClusterListenStr and the rest.
// The server would then process the ClusterListenStr override and
// correctly override ClusterHost/ClusterPort/etc.
golden := &Options{
ConfigFile: "./configs/srv_a.conf",
Host: "127.0.0.1",
Port: 7222,
Cluster: ClusterOpts{
Name: "abc",
Host: "127.0.0.1",
Port: 7244,
ListenStr: "nats://127.0.0.1:8224",
Username: "ruser",
Password: "top_secret",
AuthTimeout: 0.5,
},
Routes: []*url.URL{rurl},
}
fopts, err := ProcessConfigFile("./configs/srv_a.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
// Overrides via flags
opts := &Options{
Cluster: ClusterOpts{
ListenStr: "nats://127.0.0.1:8224",
},
}
merged := MergeOptions(fopts, opts)
checkOptionsEqual(t, golden, merged)
}
func TestRouteFlagOverrideWithMultiple(t *testing.T) {
routeFlag := "nats-route://ruser:[email protected]:8246, nats-route://ruser:[email protected]:8266"
rurls := RoutesFromStr(routeFlag)
golden := &Options{
ConfigFile: "./configs/srv_a.conf",
Host: "127.0.0.1",
Port: 7222,
Cluster: ClusterOpts{
Host: "127.0.0.1",
Name: "abc",
Port: 7244,
Username: "ruser",
Password: "top_secret",
AuthTimeout: 0.5,
},
Routes: rurls,
RoutesStr: routeFlag,
}
fopts, err := ProcessConfigFile("./configs/srv_a.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
// Overrides via flags
opts := &Options{
RoutesStr: routeFlag,
}
merged := MergeOptions(fopts, opts)
checkOptionsEqual(t, golden, merged)
}
func TestDynamicPortOnListen(t *testing.T) {
opts, err := ProcessConfigFile("./configs/listen-1.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
if opts.Port != -1 {
t.Fatalf("Received incorrect port %v, expected -1", opts.Port)
}
if opts.HTTPPort != -1 {
t.Fatalf("Received incorrect monitoring port %v, expected -1", opts.HTTPPort)
}
if opts.HTTPSPort != -1 {
t.Fatalf("Received incorrect secure monitoring port %v, expected -1", opts.HTTPSPort)
}
}
func TestListenConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/listen.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
// Normal clients
host := "10.0.1.22"
port := 4422
monHost := "127.0.0.1"
if opts.Host != host {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, host)
}
if opts.HTTPHost != monHost {
t.Fatalf("Received incorrect host %q, expected %q", opts.HTTPHost, monHost)
}
if opts.Port != port {
t.Fatalf("Received incorrect port %v, expected %v", opts.Port, port)
}
// Clustering
clusterHost := "127.0.0.1"
clusterPort := 4244
if opts.Cluster.Host != clusterHost {
t.Fatalf("Received incorrect cluster host %q, expected %q", opts.Cluster.Host, clusterHost)
}
if opts.Cluster.Port != clusterPort {
t.Fatalf("Received incorrect cluster port %v, expected %v", opts.Cluster.Port, clusterPort)
}
// HTTP
httpHost := "127.0.0.1"
httpPort := 8422
if opts.HTTPHost != httpHost {
t.Fatalf("Received incorrect http host %q, expected %q", opts.HTTPHost, httpHost)
}
if opts.HTTPPort != httpPort {
t.Fatalf("Received incorrect http port %v, expected %v", opts.HTTPPort, httpPort)
}
// HTTPS
httpsPort := 9443
if opts.HTTPSPort != httpsPort {
t.Fatalf("Received incorrect https port %v, expected %v", opts.HTTPSPort, httpsPort)
}
}
func TestListenPortOnlyConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/listen_port.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
port := 8922
if opts.Host != DEFAULT_HOST {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, DEFAULT_HOST)
}
if opts.HTTPHost != DEFAULT_HOST {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, DEFAULT_HOST)
}
if opts.Port != port {
t.Fatalf("Received incorrect port %v, expected %v", opts.Port, port)
}
}
func TestListenPortWithColonConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/listen_port_with_colon.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
port := 8922
if opts.Host != DEFAULT_HOST {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, DEFAULT_HOST)
}
if opts.HTTPHost != DEFAULT_HOST {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, DEFAULT_HOST)
}
if opts.Port != port {
t.Fatalf("Received incorrect port %v, expected %v", opts.Port, port)
}
}
func TestListenMonitoringDefault(t *testing.T) {
opts := &Options{
Host: "10.0.1.22",
}
setBaselineOptions(opts)
host := "10.0.1.22"
if opts.Host != host {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, host)
}
if opts.HTTPHost != host {
t.Fatalf("Received incorrect host %q, expected %q", opts.Host, host)
}
if opts.Port != DEFAULT_PORT {
t.Fatalf("Received incorrect port %v, expected %v", opts.Port, DEFAULT_PORT)
}
}
func TestMultipleUsersConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/multiple_users.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
}
// Test highly depends on contents of the config file listed below. Any changes to that file
// may very well break this test.
func TestAuthorizationConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/authorization.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
lu := len(opts.Users)
if lu != 5 {
t.Fatalf("Expected 5 users, got %d", lu)
}
// Build a map
mu := make(map[string]*User)
for _, u := range opts.Users {
mu[u.Username] = u
}
// Alice
alice, ok := mu["alice"]
if !ok {
t.Fatalf("Expected to see user Alice")
}
// Check for permissions details
if alice.Permissions == nil {
t.Fatalf("Expected Alice's permissions to be non-nil")
}
if alice.Permissions.Publish == nil {
t.Fatalf("Expected Alice's publish permissions to be non-nil")
}
if len(alice.Permissions.Publish.Allow) != 1 {
t.Fatalf("Expected Alice's publish permissions to have 1 element, got %d",
len(alice.Permissions.Publish.Allow))
}
pubPerm := alice.Permissions.Publish.Allow[0]
if pubPerm != "*" {
t.Fatalf("Expected Alice's publish permissions to be '*', got %q", pubPerm)
}
if alice.Permissions.Subscribe == nil {
t.Fatalf("Expected Alice's subscribe permissions to be non-nil")
}
if len(alice.Permissions.Subscribe.Allow) != 1 {
t.Fatalf("Expected Alice's subscribe permissions to have 1 element, got %d",
len(alice.Permissions.Subscribe.Allow))
}
subPerm := alice.Permissions.Subscribe.Allow[0]
if subPerm != ">" {
t.Fatalf("Expected Alice's subscribe permissions to be '>', got %q", subPerm)
}
// Bob
bob, ok := mu["bob"]
if !ok {
t.Fatalf("Expected to see user Bob")
}
if bob.Permissions == nil {
t.Fatalf("Expected Bob's permissions to be non-nil")
}
// Susan
susan, ok := mu["susan"]
if !ok {
t.Fatalf("Expected to see user Susan")
}
if susan.Permissions == nil {
t.Fatalf("Expected Susan's permissions to be non-nil")
}
// Check susan closely since she inherited the default permissions.
if susan.Permissions == nil {
t.Fatalf("Expected Susan's permissions to be non-nil")
}
if susan.Permissions.Publish != nil {
t.Fatalf("Expected Susan's publish permissions to be nil")
}
if susan.Permissions.Subscribe == nil {
t.Fatalf("Expected Susan's subscribe permissions to be non-nil")
}
if len(susan.Permissions.Subscribe.Allow) != 1 {
t.Fatalf("Expected Susan's subscribe permissions to have 1 element, got %d",
len(susan.Permissions.Subscribe.Allow))
}
subPerm = susan.Permissions.Subscribe.Allow[0]
if subPerm != "PUBLIC.>" {
t.Fatalf("Expected Susan's subscribe permissions to be 'PUBLIC.>', got %q", subPerm)
}
// Service A
svca, ok := mu["svca"]
if !ok {
t.Fatalf("Expected to see user Service A")
}
if svca.Permissions == nil {
t.Fatalf("Expected Service A's permissions to be non-nil")
}
if svca.Permissions.Subscribe == nil {
t.Fatalf("Expected Service A's subscribe permissions to be non-nil")
}
if len(svca.Permissions.Subscribe.Allow) != 1 {
t.Fatalf("Expected Service A's subscribe permissions to have 1 element, got %d",
len(svca.Permissions.Subscribe.Allow))
}
subPerm = svca.Permissions.Subscribe.Allow[0]
if subPerm != "my.service.req" {
t.Fatalf("Expected Service A's subscribe permissions to be 'my.service.req', got %q", subPerm)
}
// We want allow_responses to essentially set deny all, or allow none in this case.
if svca.Permissions.Publish == nil {
t.Fatalf("Expected Service A's publish permissions to be non-nil")
}
if len(svca.Permissions.Publish.Allow) != 0 {
t.Fatalf("Expected Service A's publish permissions to have no elements, got %d",
len(svca.Permissions.Publish.Allow))
}
// We should have a ResponsePermission present with default values.
if svca.Permissions.Response == nil {
t.Fatalf("Expected Service A's response permissions to be non-nil")
}
if svca.Permissions.Response.MaxMsgs != DEFAULT_ALLOW_RESPONSE_MAX_MSGS {
t.Fatalf("Expected Service A's response permissions of max msgs to be %d, got %d",
DEFAULT_ALLOW_RESPONSE_MAX_MSGS, svca.Permissions.Response.MaxMsgs,
)
}
if svca.Permissions.Response.Expires != DEFAULT_ALLOW_RESPONSE_EXPIRATION {
t.Fatalf("Expected Service A's response permissions of expiration to be %v, got %v",
DEFAULT_ALLOW_RESPONSE_EXPIRATION, svca.Permissions.Response.Expires,
)
}
// Service B
svcb, ok := mu["svcb"]
if !ok {
t.Fatalf("Expected to see user Service B")
}
if svcb.Permissions == nil {
t.Fatalf("Expected Service B's permissions to be non-nil")
}
if svcb.Permissions.Subscribe == nil {
t.Fatalf("Expected Service B's subscribe permissions to be non-nil")
}
if len(svcb.Permissions.Subscribe.Allow) != 1 {
t.Fatalf("Expected Service B's subscribe permissions to have 1 element, got %d",
len(svcb.Permissions.Subscribe.Allow))
}
subPerm = svcb.Permissions.Subscribe.Allow[0]
if subPerm != "my.service.req" {
t.Fatalf("Expected Service B's subscribe permissions to be 'my.service.req', got %q", subPerm)
}
// We want allow_responses to essentially set deny all, or allow none in this case.
if svcb.Permissions.Publish == nil {
t.Fatalf("Expected Service B's publish permissions to be non-nil")
}
if len(svcb.Permissions.Publish.Allow) != 0 {
t.Fatalf("Expected Service B's publish permissions to have no elements, got %d",
len(svcb.Permissions.Publish.Allow))
}
// We should have a ResponsePermission present with default values.
if svcb.Permissions.Response == nil {
t.Fatalf("Expected Service B's response permissions to be non-nil")
}
if svcb.Permissions.Response.MaxMsgs != 10 {
t.Fatalf("Expected Service B's response permissions of max msgs to be %d, got %d",
10, svcb.Permissions.Response.MaxMsgs,
)
}
if svcb.Permissions.Response.Expires != time.Minute {
t.Fatalf("Expected Service B's response permissions of expiration to be %v, got %v",
time.Minute, svcb.Permissions.Response.Expires,
)
}
}
// Test highly depends on contents of the config file listed below. Any changes to that file
// may very well break this test.
func TestNewStyleAuthorizationConfig(t *testing.T) {
opts, err := ProcessConfigFile("./configs/new_style_authorization.conf")
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
setBaselineOptions(opts)
lu := len(opts.Users)
if lu != 2 {
t.Fatalf("Expected 2 users, got %d", lu)
}
// Build a map
mu := make(map[string]*User)
for _, u := range opts.Users {
mu[u.Username] = u
}
// Alice
alice, ok := mu["alice"]
if !ok {
t.Fatalf("Expected to see user Alice")
}
if alice.Permissions == nil {
t.Fatalf("Expected Alice's permissions to be non-nil")
}
if alice.Permissions.Publish == nil {
t.Fatalf("Expected Alice's publish permissions to be non-nil")
}
if len(alice.Permissions.Publish.Allow) != 3 {
t.Fatalf("Expected Alice's allowed publish permissions to have 3 elements, got %d",
len(alice.Permissions.Publish.Allow))
}
pubPerm := alice.Permissions.Publish.Allow[0]
if pubPerm != "foo" {
t.Fatalf("Expected Alice's first allowed publish permission to be 'foo', got %q", pubPerm)
}
pubPerm = alice.Permissions.Publish.Allow[1]
if pubPerm != "bar" {
t.Fatalf("Expected Alice's second allowed publish permission to be 'bar', got %q", pubPerm)
}
pubPerm = alice.Permissions.Publish.Allow[2]
if pubPerm != "baz" {
t.Fatalf("Expected Alice's third allowed publish permission to be 'baz', got %q", pubPerm)
}
if len(alice.Permissions.Publish.Deny) != 0 {
t.Fatalf("Expected Alice's denied publish permissions to have 0 elements, got %d",
len(alice.Permissions.Publish.Deny))
}
if alice.Permissions.Subscribe == nil {
t.Fatalf("Expected Alice's subscribe permissions to be non-nil")
}
if len(alice.Permissions.Subscribe.Allow) != 0 {
t.Fatalf("Expected Alice's allowed subscribe permissions to have 0 elements, got %d",
len(alice.Permissions.Subscribe.Allow))
}
if len(alice.Permissions.Subscribe.Deny) != 1 {
t.Fatalf("Expected Alice's denied subscribe permissions to have 1 element, got %d",
len(alice.Permissions.Subscribe.Deny))
}
subPerm := alice.Permissions.Subscribe.Deny[0]
if subPerm != "$SYS.>" {
t.Fatalf("Expected Alice's only denied subscribe permission to be '$SYS.>', got %q", subPerm)
}
// Bob
bob, ok := mu["bob"]
if !ok {
t.Fatalf("Expected to see user Bob")
}
if bob.Permissions == nil {
t.Fatalf("Expected Bob's permissions to be non-nil")
}
if bob.Permissions.Publish == nil {
t.Fatalf("Expected Bobs's publish permissions to be non-nil")
}
if len(bob.Permissions.Publish.Allow) != 1 {
t.Fatalf("Expected Bob's allowed publish permissions to have 1 element, got %d",
len(bob.Permissions.Publish.Allow))
}
pubPerm = bob.Permissions.Publish.Allow[0]
if pubPerm != "$SYS.>" {
t.Fatalf("Expected Bob's first allowed publish permission to be '$SYS.>', got %q", pubPerm)
}
if len(bob.Permissions.Publish.Deny) != 0 {
t.Fatalf("Expected Bob's denied publish permissions to have 0 elements, got %d",
len(bob.Permissions.Publish.Deny))
}
if bob.Permissions.Subscribe == nil {
t.Fatalf("Expected Bob's subscribe permissions to be non-nil")
}
if len(bob.Permissions.Subscribe.Allow) != 0 {
t.Fatalf("Expected Bob's allowed subscribe permissions to have 0 elements, got %d",
len(bob.Permissions.Subscribe.Allow))
}
if len(bob.Permissions.Subscribe.Deny) != 3 {
t.Fatalf("Expected Bobs's denied subscribe permissions to have 3 elements, got %d",
len(bob.Permissions.Subscribe.Deny))
}
subPerm = bob.Permissions.Subscribe.Deny[0]
if subPerm != "foo" {
t.Fatalf("Expected Bobs's first denied subscribe permission to be 'foo', got %q", subPerm)
}
subPerm = bob.Permissions.Subscribe.Deny[1]
if subPerm != "bar" {
t.Fatalf("Expected Bobs's second denied subscribe permission to be 'bar', got %q", subPerm)
}
subPerm = bob.Permissions.Subscribe.Deny[2]
if subPerm != "baz" {
t.Fatalf("Expected Bobs's third denied subscribe permission to be 'baz', got %q", subPerm)
}
}
// Test new nkey users
func TestNkeyUsersConfig(t *testing.T) {
confFileName := createConfFile(t, []byte(`
authorization {
users = [
{nkey: "UDKTV7HZVYJFJN64LLMYQBUR6MTNNYCDC3LAZH4VHURW3GZLL3FULBXV"}
{nkey: "UA3C5TBZYK5GJQJRWPMU6NFY5JNAEVQB2V2TUZFZDHFJFUYVKTTUOFKZ"}
]
}`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
lu := len(opts.Nkeys)
if lu != 2 {
t.Fatalf("Expected 2 nkey users, got %d", lu)
}
}
func TestNkeyUsersDefaultPermissionsConfig(t *testing.T) {
confFileName := createConfFile(t, []byte(`
authorization {
default_permissions = {
publish = "foo"
}
users = [
{ user: "user", password: "pwd"}
{ user: "other", password: "pwd",
permissions = {
subscribe = "bar"
}
}
{ nkey: "UDKTV7HZVYJFJN64LLMYQBUR6MTNNYCDC3LAZH4VHURW3GZLL3FULBXV" }
{ nkey: "UA3C5TBZYK5GJQJRWPMU6NFY5JNAEVQB2V2TUZFZDHFJFUYVKTTUOFKZ",
permissions = {
subscribe = "bar"
}
}
]
}
accounts {
A {
default_permissions = {
publish = "foo"
}
users = [
{ user: "accuser", password: "pwd"}
{ user: "accother", password: "pwd",
permissions = {
subscribe = "bar"
}
}
{ nkey: "UC4YEYJHYKTU4LHROX7UEKEIO5RP5OUWDYXELHWXZOQHZYXHUD44LCRS" }
{ nkey: "UDLSDF4UY3YW7JJQCYE6T2D4KFDCH6RGF3R65KHK247G3POJPI27VMQ3",
permissions = {
subscribe = "bar"
}
}
]
}
}
`))
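	// checkPerms verifies that the user relying on default_permissions got the
	// publish "foo" allow (and no subscribe permissions), while the user with an
	// explicit permissions block got only the subscribe "bar" allow.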
checkPerms := func(permsDef *Permissions, permsNonDef *Permissions) {
if permsDef.Publish.Allow[0] != "foo" {
t.Fatal("Publish allow foo missing")
} else if permsDef.Subscribe != nil {
t.Fatal("Has unexpected Subscribe permission")
} else if permsNonDef.Subscribe.Allow[0] != "bar" {
t.Fatal("Subscribe allow bar missing")
} else if permsNonDef.Publish != nil {
t.Fatal("Has unexpected Publish permission")
}
}
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
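	// findUsers returns the parsed users matching the two given usernames, in order.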
findUsers := func(u1, u2 string) (found []*User) {
find := []string{u1, u2}
for _, f := range find {
for _, u := range opts.Users {
if u.Username == f {
found = append(found, u)
break
}
}
}
return
}
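	// findNkeyUsers returns the parsed nkey users whose public keys start with the given prefixes, in order.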
findNkeyUsers := func(nk1, nk2 string) (found []*NkeyUser) {
find := []string{nk1, nk2}
for _, f := range find {
for _, u := range opts.Nkeys {
if strings.HasPrefix(u.Nkey, f) {
found = append(found, u)
break
}
}
}
return
}
if lu := len(opts.Users); lu != 4 {
t.Fatalf("Expected 4 nkey users, got %d", lu)
}
foundU := findUsers("user", "other")
checkPerms(foundU[0].Permissions, foundU[1].Permissions)
foundU = findUsers("accuser", "accother")
checkPerms(foundU[0].Permissions, foundU[1].Permissions)
if lu := len(opts.Nkeys); lu != 4 {
t.Fatalf("Expected 4 nkey users, got %d", lu)
}
foundNk := findNkeyUsers("UDK", "UA3")
checkPerms(foundNk[0].Permissions, foundNk[1].Permissions)
foundNk = findNkeyUsers("UC4", "UDL")
checkPerms(foundNk[0].Permissions, foundNk[1].Permissions)
}
func TestNkeyUsersWithPermsConfig(t *testing.T) {
confFileName := createConfFile(t, []byte(`
authorization {
users = [
{nkey: "UDKTV7HZVYJFJN64LLMYQBUR6MTNNYCDC3LAZH4VHURW3GZLL3FULBXV",
permissions = {
publish = "$SYS.>"
subscribe = { deny = ["foo", "bar", "baz"] }
}
}
]
}`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
lu := len(opts.Nkeys)
if lu != 1 {
t.Fatalf("Expected 1 nkey user, got %d", lu)
}
nk := opts.Nkeys[0]
if nk.Permissions == nil {
t.Fatal("Expected to have permissions")
}
if nk.Permissions.Publish == nil {
t.Fatal("Expected to have publish permissions")
}
if nk.Permissions.Publish.Allow[0] != "$SYS.>" {
t.Fatalf("Expected publish to allow \"$SYS.>\", but got %v", nk.Permissions.Publish.Allow[0])
}
if nk.Permissions.Subscribe == nil {
t.Fatal("Expected to have subscribe permissions")
}
if nk.Permissions.Subscribe.Allow != nil {
t.Fatal("Expected to have no subscribe allow permissions")
}
deny := nk.Permissions.Subscribe.Deny
if deny == nil || len(deny) != 3 ||
deny[0] != "foo" || deny[1] != "bar" || deny[2] != "baz" {
t.Fatalf("Expected to have subscribe deny permissions, got %v", deny)
}
}
func TestBadNkeyConfig(t *testing.T) {
confFileName := "nkeys_bad.conf"
defer os.Remove(confFileName)
content := `
authorization {
users = [ {nkey: "Ufoo"}]
}`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
if _, err := ProcessConfigFile(confFileName); err == nil {
t.Fatalf("Expected an error from nkey entry with password")
}
}
func TestNkeyWithPassConfig(t *testing.T) {
confFileName := "nkeys_pass.conf"
defer os.Remove(confFileName)
content := `
authorization {
users = [
{nkey: "UDKTV7HZVYJFJN64LLMYQBUR6MTNNYCDC3LAZH4VHURW3GZLL3FULBXV", pass: "foo"}
]
}`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
if _, err := ProcessConfigFile(confFileName); err == nil {
t.Fatalf("Expected an error from bad nkey entry")
}
}
func TestTokenWithUserPass(t *testing.T) {
confFileName := "test.conf"
defer os.Remove(confFileName)
content := `
authorization={
user: user
pass: password
token: $2a$11$whatever
}`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
_, err := ProcessConfigFile(confFileName)
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.Contains(err.Error(), "token") {
t.Fatalf("Expected error related to token, got %v", err)
}
}
func TestTokenWithUsers(t *testing.T) {
confFileName := "test.conf"
defer os.Remove(confFileName)
content := `
authorization={
token: $2a$11$whatever
users: [
{user: test, password: test}
]
}`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
_, err := ProcessConfigFile(confFileName)
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.Contains(err.Error(), "token") {
t.Fatalf("Expected error related to token, got %v", err)
}
}
func TestParseWriteDeadline(t *testing.T) {
confFile := "test.conf"
defer os.Remove(confFile)
if err := ioutil.WriteFile(confFile, []byte("write_deadline: \"1x\""), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
_, err := ProcessConfigFile(confFile)
if err == nil {
t.Fatal("Expected error, got none")
}
if !strings.Contains(err.Error(), "parsing") {
t.Fatalf("Expected error related to parsing, got %v", err)
}
os.Remove(confFile)
if err := ioutil.WriteFile(confFile, []byte("write_deadline: \"1s\""), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
opts, err := ProcessConfigFile(confFile)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if opts.WriteDeadline != time.Second {
t.Fatalf("Expected write_deadline to be 1s, got %v", opts.WriteDeadline)
}
os.Remove(confFile)
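	// Redirect stdout while parsing the bare-number form below; the parser is
	// expected to print a deprecation notice for the old integer write_deadline.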
oldStdout := os.Stdout
_, w, _ := os.Pipe()
defer func() {
w.Close()
os.Stdout = oldStdout
}()
os.Stdout = w
if err := ioutil.WriteFile(confFile, []byte("write_deadline: 2"), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
opts, err = ProcessConfigFile(confFile)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if opts.WriteDeadline != 2*time.Second {
t.Fatalf("Expected write_deadline to be 2s, got %v", opts.WriteDeadline)
}
}
func TestOptionsClone(t *testing.T) {
opts := &Options{
ConfigFile: "./configs/test.conf",
Host: "127.0.0.1",
Port: 2222,
Username: "derek",
Password: "porkchop",
AuthTimeout: 1.0,
Debug: true,
Trace: true,
Logtime: false,
HTTPPort: DEFAULT_HTTP_PORT,
HTTPBasePath: DEFAULT_HTTP_BASE_PATH,
PidFile: "/tmp/nats-server.pid",
ProfPort: 6789,
Syslog: true,
RemoteSyslog: "udp://foo.com:33",
MaxControlLine: 2048,
MaxPayload: 65536,
MaxConn: 100,
PingInterval: 60 * time.Second,
MaxPingsOut: 3,
Cluster: ClusterOpts{
NoAdvertise: true,
ConnectRetries: 2,
},
Gateway: GatewayOpts{
Name: "A",
Gateways: []*RemoteGatewayOpts{
{Name: "B", URLs: []*url.URL{{Scheme: "nats", Host: "host:5222"}}},
{Name: "C"},
},
},
WriteDeadline: 3 * time.Second,
Routes: []*url.URL{{}},
Users: []*User{{Username: "foo", Password: "bar"}},
}
clone := opts.Clone()
if !reflect.DeepEqual(opts, clone) {
t.Fatalf("Cloned Options are incorrect.\nexpected: %+v\ngot: %+v",
clone, opts)
}
clone.Users[0].Password = "baz"
if reflect.DeepEqual(opts, clone) {
t.Fatal("Expected Options to be different")
}
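	// Mutating a gateway URL on the original must not be reflected in the clone (deep copy).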
opts.Gateway.Gateways[0].URLs[0] = nil
if reflect.DeepEqual(opts.Gateway.Gateways[0], clone.Gateway.Gateways[0]) {
t.Fatal("Expected Options to be different")
}
if clone.Gateway.Gateways[0].URLs[0].Host != "host:5222" {
t.Fatalf("Unexpected URL: %v", clone.Gateway.Gateways[0].URLs[0])
}
}
func TestOptionsCloneNilLists(t *testing.T) {
opts := &Options{}
clone := opts.Clone()
if clone.Routes != nil {
t.Fatalf("Expected Routes to be nil, got: %v", clone.Routes)
}
if clone.Users != nil {
t.Fatalf("Expected Users to be nil, got: %v", clone.Users)
}
}
func TestOptionsCloneNil(t *testing.T) {
opts := (*Options)(nil)
clone := opts.Clone()
if clone != nil {
t.Fatalf("Expected nil, got: %+v", clone)
}
}
func TestEmptyConfig(t *testing.T) {
opts, err := ProcessConfigFile("")
if err != nil {
t.Fatalf("Expected no error from empty config, got: %+v", err)
}
if opts.ConfigFile != "" {
t.Fatalf("Expected empty config, got: %+v", opts)
}
}
func TestMalformedListenAddress(t *testing.T) {
opts, err := ProcessConfigFile("./configs/malformed_listen_address.conf")
if err == nil {
t.Fatalf("Expected an error reading config file: got %+v", opts)
}
}
func TestMalformedClusterAddress(t *testing.T) {
opts, err := ProcessConfigFile("./configs/malformed_cluster_address.conf")
if err == nil {
t.Fatalf("Expected an error reading config file: got %+v", opts)
}
}
func TestPanic(t *testing.T) {
conf := createConfFile(t, []byte(`port: "this_string_trips_a_panic"`))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err == nil {
t.Fatalf("Expected an error reading config file: got %+v", opts)
} else {
if !strings.Contains(err.Error(), ":1:0: interface conversion:") {
t.Fatalf("This was supposed to trip a panic on interface conversion right at the beginning")
}
}
}
func TestPingIntervalOld(t *testing.T) {
conf := createConfFile(t, []byte(`ping_interval: 5`))
defer os.Remove(conf)
opts := &Options{}
err := opts.ProcessConfigFile(conf)
if err == nil {
t.Fatalf("expected an error")
}
errTyped, ok := err.(*processConfigErr)
if !ok {
t.Fatalf("expected an error of type processConfigErr")
}
if len(errTyped.warnings) != 1 {
t.Fatalf("expected processConfigErr to have one warning")
}
if len(errTyped.errors) != 0 {
t.Fatalf("expected processConfigErr to have no error")
}
if opts.PingInterval != 5*time.Second {
t.Fatalf("expected ping interval to be 5 seconds")
}
}
func TestPingIntervalNew(t *testing.T) {
conf := createConfFile(t, []byte(`ping_interval: "5m"`))
defer os.Remove(conf)
opts := &Options{}
if err := opts.ProcessConfigFile(conf); err != nil {
t.Fatalf("expected no error")
}
if opts.PingInterval != 5*time.Minute {
t.Fatalf("expected ping interval to be 5 minutes")
}
}
func TestOptionsProcessConfigFile(t *testing.T) {
// Create options with default values of Debug and Trace
// that are the opposite of what is in the config file.
// Set another option that is not present in the config file.
logFileName := "test.log"
opts := &Options{
Debug: true,
Trace: false,
LogFile: logFileName,
}
configFileName := "./configs/test.conf"
if err := opts.ProcessConfigFile(configFileName); err != nil {
t.Fatalf("Error processing config file: %v", err)
}
// Verify that values are as expected
if opts.ConfigFile != configFileName {
t.Fatalf("Expected ConfigFile to be set to %q, got %v", configFileName, opts.ConfigFile)
}
if opts.Debug {
t.Fatal("Debug option should have been set to false from config file")
}
if !opts.Trace {
t.Fatal("Trace option should have been set to true from config file")
}
if opts.LogFile != logFileName {
t.Fatalf("Expected LogFile to be %q, got %q", logFileName, opts.LogFile)
}
}
func TestConfigureOptions(t *testing.T) {
// Options.Configure() will snapshot the flags. This is used by the reload code.
// We need to set it back to nil otherwise it will impact reload tests.
defer func() { FlagSnapshot = nil }()
ch := make(chan bool, 1)
checkPrintInvoked := func() {
ch <- true
}
usage := func() { panic("should not get there") }
var fs *flag.FlagSet
type testPrint struct {
args []string
version, help, tlsHelp func()
}
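	// Each entry runs ConfigureOptions with args that should trigger exactly one
	// of the print callbacks (version, help or TLS help) instead of returning options.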
testFuncs := []testPrint{
{[]string{"-v"}, checkPrintInvoked, usage, PrintTLSHelpAndDie},
{[]string{"version"}, checkPrintInvoked, usage, PrintTLSHelpAndDie},
{[]string{"-h"}, PrintServerAndExit, checkPrintInvoked, PrintTLSHelpAndDie},
{[]string{"help"}, PrintServerAndExit, checkPrintInvoked, PrintTLSHelpAndDie},
{[]string{"-help_tls"}, PrintServerAndExit, usage, checkPrintInvoked},
}
for _, tf := range testFuncs {
fs = flag.NewFlagSet("test", flag.ContinueOnError)
opts, err := ConfigureOptions(fs, tf.args, tf.version, tf.help, tf.tlsHelp)
if err != nil {
t.Fatalf("Error on configure: %v", err)
}
if opts != nil {
t.Fatalf("Expected options to be nil, got %v", opts)
}
select {
case <-ch:
case <-time.After(time.Second):
t.Fatalf("Should have invoked print function for args=%v", tf.args)
}
}
	// Helper function that expects parsing with the given args to not produce an error.
mustNotFail := func(args []string) *Options {
fs := flag.NewFlagSet("test", flag.ContinueOnError)
opts, err := ConfigureOptions(fs, args, PrintServerAndExit, fs.Usage, PrintTLSHelpAndDie)
if err != nil {
stackFatalf(t, "Error on configure: %v", err)
}
return opts
}
	// Helper function that expects configuration to fail.
expectToFail := func(args []string, errContent ...string) {
fs := flag.NewFlagSet("test", flag.ContinueOnError)
// Silence the flagSet so that on failure nothing is printed.
		// (flagSet would print error messages about unknown flags, etc.)
		silenceOutput := &bytes.Buffer{}
		fs.SetOutput(silenceOutput)
opts, err := ConfigureOptions(fs, args, PrintServerAndExit, fs.Usage, PrintTLSHelpAndDie)
if opts != nil || err == nil {
stackFatalf(t, "Expected no option and an error, got opts=%v and err=%v", opts, err)
}
for _, testErr := range errContent {
if strings.Contains(err.Error(), testErr) {
// We got the error we wanted.
return
}
}
stackFatalf(t, "Expected errors containing any of those %v, got %v", errContent, err)
}
// Basic test with port number
opts := mustNotFail([]string{"-p", "1234"})
if opts.Port != 1234 {
t.Fatalf("Expected port to be 1234, got %v", opts.Port)
}
// Should fail because of unknown parameter
expectToFail([]string{"foo"}, "command")
// Should fail because unknown flag
expectToFail([]string{"-xxx", "foo"}, "flag")
// Should fail because of config file missing
expectToFail([]string{"-c", "xxx.cfg"}, "file")
// Should fail because of too many args for signal command
expectToFail([]string{"-sl", "quit=pid=foo"}, "signal")
// Should fail because of invalid pid
	// On Windows, if not running with admin privileges, you would get access denied.
expectToFail([]string{"-sl", "quit=pid"}, "pid", "denied")
// The config file set Trace to true.
opts = mustNotFail([]string{"-c", "./configs/test.conf"})
if !opts.Trace {
t.Fatal("Trace should have been set to true")
}
// The config file set Trace to true, but was overridden by param -V=false
opts = mustNotFail([]string{"-c", "./configs/test.conf", "-V=false"})
if opts.Trace {
t.Fatal("Trace should have been set to false")
}
// The config file set Trace to true, but was overridden by param -DV=false
opts = mustNotFail([]string{"-c", "./configs/test.conf", "-DV=false"})
if opts.Debug || opts.Trace {
t.Fatal("Debug and Trace should have been set to false")
}
// The config file set Trace to true, but was overridden by param -DV
opts = mustNotFail([]string{"-c", "./configs/test.conf", "-DV"})
if !opts.Debug || !opts.Trace {
t.Fatal("Debug and Trace should have been set to true")
}
// This should fail since -cluster is missing
expectedURL, _ := url.Parse("nats://127.0.0.1:6223")
expectToFail([]string{"-routes", expectedURL.String()}, "solicited routes")
// Ensure that we can set cluster and routes from command line
opts = mustNotFail([]string{"-cluster", "nats://127.0.0.1:6222", "-routes", expectedURL.String()})
if opts.Cluster.ListenStr != "nats://127.0.0.1:6222" {
t.Fatalf("Unexpected Cluster.ListenStr=%q", opts.Cluster.ListenStr)
}
if opts.RoutesStr != "nats://127.0.0.1:6223" || len(opts.Routes) != 1 || opts.Routes[0].String() != expectedURL.String() {
t.Fatalf("Unexpected RoutesStr: %q and Routes: %v", opts.RoutesStr, opts.Routes)
}
// Use a config with cluster configuration and explicit route defined.
// Override with empty routes string.
opts = mustNotFail([]string{"-c", "./configs/srv_a.conf", "-routes", ""})
if opts.RoutesStr != "" || len(opts.Routes) != 0 {
t.Fatalf("Unexpected RoutesStr: %q and Routes: %v", opts.RoutesStr, opts.Routes)
}
// Use a config with cluster configuration and override cluster listen string
expectedURL, _ = url.Parse("nats-route://ruser:[email protected]:7246")
opts = mustNotFail([]string{"-c", "./configs/srv_a.conf", "-cluster", "nats://ivan:[email protected]:6222"})
if opts.Cluster.Username != "ivan" || opts.Cluster.Password != "pwd" || opts.Cluster.Port != 6222 ||
len(opts.Routes) != 1 || opts.Routes[0].String() != expectedURL.String() {
t.Fatalf("Unexpected Cluster and/or Routes: %#v - %v", opts.Cluster, opts.Routes)
}
// Disable clustering from command line
opts = mustNotFail([]string{"-c", "./configs/srv_a.conf", "-cluster", ""})
if opts.Cluster.Port != 0 {
t.Fatalf("Unexpected Cluster: %v", opts.Cluster)
}
	// Various errors due to malformed cluster listen string.
// (adding -routes to have more than 1 set flag to check
// that Visit() stops when an error is found).
expectToFail([]string{"-cluster", ":", "-routes", ""}, "protocol")
expectToFail([]string{"-cluster", "nats://127.0.0.1", "-routes", ""}, "port")
expectToFail([]string{"-cluster", "nats://127.0.0.1:xxx", "-routes", ""}, "invalid port")
expectToFail([]string{"-cluster", "nats://ivan:127.0.0.1:6222", "-routes", ""}, "colons")
expectToFail([]string{"-cluster", "nats://[email protected]:6222", "-routes", ""}, "password")
// Override config file's TLS configuration from command line, and completely disable TLS
opts = mustNotFail([]string{"-c", "./configs/tls.conf", "-tls=false"})
if opts.TLSConfig != nil || opts.TLS {
t.Fatal("Expected TLS to be disabled")
}
// Override config file's TLS configuration from command line, and force TLS verification.
	// However, since the TLS config has to be regenerated, the user needs to provide -tlscert and -tlskey too.
// So this should fail.
expectToFail([]string{"-c", "./configs/tls.conf", "-tlsverify"}, "valid")
	// Now same as above, but with all valid params.
opts = mustNotFail([]string{"-c", "./configs/tls.conf", "-tlsverify", "-tlscert", "./configs/certs/server.pem", "-tlskey", "./configs/certs/key.pem"})
if opts.TLSConfig == nil || !opts.TLSVerify {
t.Fatal("Expected TLS to be configured and force verification")
}
// Configure TLS, but some TLS params missing
expectToFail([]string{"-tls"}, "valid")
expectToFail([]string{"-tls", "-tlscert", "./configs/certs/server.pem"}, "valid")
	// One of the files does not exist
expectToFail([]string{"-tls", "-tlscert", "./configs/certs/server.pem", "-tlskey", "./configs/certs/notfound.pem"}, "file")
// Configure TLS and check that this results in a TLSConfig option.
opts = mustNotFail([]string{"-tls", "-tlscert", "./configs/certs/server.pem", "-tlskey", "./configs/certs/key.pem"})
if opts.TLSConfig == nil || !opts.TLS {
t.Fatal("Expected TLSConfig to be set")
}
}
func TestClusterPermissionsConfig(t *testing.T) {
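	// Template with a %s placeholder so that a permissions block can also be
	// injected at the top cluster level (outside authorization) in a later sub-case.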
template := `
cluster {
port: 1234
%s
authorization {
user: ivan
password: pwd
permissions {
import {
allow: "foo"
}
export {
allow: "bar"
}
}
}
}
`
conf := createConfFile(t, []byte(fmt.Sprintf(template, "")))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) > 0 {
t.Fatalf("Error processing config file: %v", err)
}
}
if opts.Cluster.Permissions == nil {
t.Fatal("Expected cluster permissions to be set")
}
if opts.Cluster.Permissions.Import == nil {
t.Fatal("Expected cluster import permissions to be set")
}
if len(opts.Cluster.Permissions.Import.Allow) != 1 || opts.Cluster.Permissions.Import.Allow[0] != "foo" {
t.Fatalf("Expected cluster import permissions to have %q, got %v", "foo", opts.Cluster.Permissions.Import.Allow)
}
if opts.Cluster.Permissions.Export == nil {
t.Fatal("Expected cluster export permissions to be set")
}
if len(opts.Cluster.Permissions.Export.Allow) != 1 || opts.Cluster.Permissions.Export.Allow[0] != "bar" {
t.Fatalf("Expected cluster export permissions to have %q, got %v", "bar", opts.Cluster.Permissions.Export.Allow)
}
// Now add permissions in top level cluster and check
// that this is the one that is being used.
conf = createConfFile(t, []byte(fmt.Sprintf(template, `
permissions {
import {
allow: "baz"
}
export {
allow: "bat"
}
}
`)))
defer os.Remove(conf)
opts, err = ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing config file: %v", err)
}
if opts.Cluster.Permissions == nil {
t.Fatal("Expected cluster permissions to be set")
}
if opts.Cluster.Permissions.Import == nil {
t.Fatal("Expected cluster import permissions to be set")
}
if len(opts.Cluster.Permissions.Import.Allow) != 1 || opts.Cluster.Permissions.Import.Allow[0] != "baz" {
t.Fatalf("Expected cluster import permissions to have %q, got %v", "baz", opts.Cluster.Permissions.Import.Allow)
}
if opts.Cluster.Permissions.Export == nil {
t.Fatal("Expected cluster export permissions to be set")
}
if len(opts.Cluster.Permissions.Export.Allow) != 1 || opts.Cluster.Permissions.Export.Allow[0] != "bat" {
t.Fatalf("Expected cluster export permissions to have %q, got %v", "bat", opts.Cluster.Permissions.Export.Allow)
}
// Tests with invalid permissions
invalidPerms := []string{
`permissions: foo`,
`permissions {
unknown_field: "foo"
}`,
`permissions {
import: [1, 2, 3]
}`,
`permissions {
import {
unknown_field: "foo"
}
}`,
`permissions {
import {
allow {
x: y
}
}
}`,
`permissions {
import {
deny {
x: y
}
}
}`,
`permissions {
export: [1, 2, 3]
}`,
`permissions {
export {
unknown_field: "foo"
}
}`,
`permissions {
export {
allow {
x: y
}
}
}`,
`permissions {
export {
deny {
x: y
}
}
}`,
}
for _, perms := range invalidPerms {
conf = createConfFile(t, []byte(fmt.Sprintf(`
cluster {
port: 1234
%s
}
`, perms)))
_, err := ProcessConfigFile(conf)
os.Remove(conf)
if err == nil {
t.Fatalf("Expected failure for permissions %s", perms)
}
}
for _, perms := range invalidPerms {
conf = createConfFile(t, []byte(fmt.Sprintf(`
cluster {
port: 1234
authorization {
user: ivan
password: pwd
%s
}
}
`, perms)))
_, err := ProcessConfigFile(conf)
os.Remove(conf)
if err == nil {
t.Fatalf("Expected failure for permissions %s", perms)
}
}
}
func TestParseServiceLatency(t *testing.T) {
cases := []struct {
name string
conf string
want *serviceLatency
wantErr bool
}{
{
name: "block with percent sample default value",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: {
sampling: 100%
subject: latency.tracking.add
}
}]
}
}`,
want: &serviceLatency{
subject: "latency.tracking.add",
sampling: 100,
},
},
{
name: "block with percent sample nondefault value",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: {
sampling: 33%
subject: latency.tracking.add
}
}]
}
}`,
want: &serviceLatency{
subject: "latency.tracking.add",
sampling: 33,
},
},
{
name: "block with number sample nondefault value",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: {
sampling: 87
subject: latency.tracking.add
}
}]
}
}`,
want: &serviceLatency{
subject: "latency.tracking.add",
sampling: 87,
},
},
{
name: "field with subject",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: latency.tracking.add
}]
}
}`,
want: &serviceLatency{
subject: "latency.tracking.add",
sampling: 100,
},
},
{
name: "block with missing subject",
conf: `system_account = nats.io
accounts {
nats.io {
exports [{
service: nats.add
latency: {
sampling: 87
}
}]
}
}`,
wantErr: true,
},
}
for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
f := createConfFile(t, []byte(c.conf))
opts, err := ProcessConfigFile(f)
os.Remove(f)
switch {
case c.wantErr && err == nil:
t.Fatalf("Expected ProcessConfigFile to fail, but didn't")
case c.wantErr && err != nil:
// We wanted an error and got one, test passed.
return
case !c.wantErr && err == nil:
// We didn't want an error and didn't get one, keep going.
break
case !c.wantErr && err != nil:
t.Fatalf("Failed to process config: %v", err)
}
if len(opts.Accounts) != 1 {
t.Fatalf("Expected accounts to have len %d, got %d", 1, len(opts.Accounts))
}
if len(opts.Accounts[0].exports.services) != 1 {
t.Fatalf("Expected export services to have len %d, got %d", 1, len(opts.Accounts[0].exports.services))
}
s, ok := opts.Accounts[0].exports.services["nats.add"]
if !ok {
t.Fatalf("Expected export service nats.add, missing")
}
if !reflect.DeepEqual(s.latency, c.want) {
t.Fatalf("Expected latency to be %#v, got %#v", c.want, s.latency)
}
})
}
}
func TestAccountUsersLoadedProperly(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
authorization {
users [
{user: ivan, password: bar}
{nkey : UC6NLCN7AS34YOJVCYD4PJ3QB7QGLYG5B5IMBT25VW5K4TNUJODM7BOX}
]
}
accounts {
synadia {
users [
{user: derek, password: foo}
{nkey : UBAAQWTW6CG2G6ANGNKB5U2B7HRWHSGMZEZX3AQSAJOQDAUGJD46LD2E}
]
}
}
`))
check := func(t *testing.T) {
t.Helper()
s, _ := RunServerWithConfig(conf)
defer s.Shutdown()
opts := s.getOpts()
if n := len(opts.Users); n != 2 {
t.Fatalf("Should have 2 users, got %v", n)
}
if n := len(opts.Nkeys); n != 2 {
t.Fatalf("Should have 2 nkeys, got %v", n)
}
}
	// Repeat the test since the issue was with the ordering of processing
	// authorization vs accounts, which depends on ranging over a map (after actual parsing).
for i := 0; i < 20; i++ {
check(t)
}
}
func TestParsingGateways(t *testing.T) {
content := `
gateway {
name: "A"
listen: "127.0.0.1:4444"
host: "127.0.0.1"
port: 4444
authorization {
user: "ivan"
password: "pwd"
timeout: 2.0
}
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/key.pem"
timeout: 3.0
}
advertise: "me:1"
connect_retries: 10
gateways: [
{
name: "B"
urls: ["nats://user1:pwd1@host2:5222", "nats://user1:pwd1@host3:6222"]
}
{
name: "C"
url: "nats://host4:7222"
}
]
}
`
file := "server_config_gateways.conf"
defer os.Remove(file)
if err := ioutil.WriteFile(file, []byte(content), 0600); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
opts, err := ProcessConfigFile(file)
if err != nil {
t.Fatalf("Error processing file: %v", err)
}
expected := &GatewayOpts{
Name: "A",
Host: "127.0.0.1",
Port: 4444,
Username: "ivan",
Password: "pwd",
AuthTimeout: 2.0,
Advertise: "me:1",
ConnectRetries: 10,
TLSTimeout: 3.0,
}
u1, _ := url.Parse("nats://user1:pwd1@host2:5222")
u2, _ := url.Parse("nats://user1:pwd1@host3:6222")
urls := []*url.URL{u1, u2}
gw := &RemoteGatewayOpts{
Name: "B",
URLs: urls,
}
expected.Gateways = append(expected.Gateways, gw)
u1, _ = url.Parse("nats://host4:7222")
urls = []*url.URL{u1}
gw = &RemoteGatewayOpts{
Name: "C",
URLs: urls,
}
expected.Gateways = append(expected.Gateways, gw)
	// Just make sure that TLSConfig is set; we have another test
	// to check proper generation of the TLSConfig from the config file.
if opts.Gateway.TLSConfig == nil {
t.Fatalf("Expected TLSConfig, got none")
}
opts.Gateway.TLSConfig = nil
if !reflect.DeepEqual(&opts.Gateway, expected) {
t.Fatalf("Expected %v, got %v", expected, opts.Gateway)
}
}
func TestParsingGatewaysErrors(t *testing.T) {
for _, test := range []struct {
name string
content string
expectedErr string
}{
{
"bad_type",
`gateway: "bad_type"`,
"Expected gateway to be a map",
},
{
"bad_listen",
`gateway {
name: "A"
port: -1
listen: "bad::address"
}`,
"parse address",
},
{
"bad_auth",
`gateway {
name: "A"
port: -1
authorization {
users {
}
}
}`,
"be an array",
},
{
"unknown_field",
`gateway {
name: "A"
port: -1
reject_unknown: true
unknown_field: 1
}`,
"unknown field",
},
{
"users_not_supported",
`gateway {
name: "A"
port: -1
authorization {
users [
{user: alice, password: foo}
{user: bob, password: bar}
]
}
}`,
"does not allow multiple users",
},
{
"tls_error",
`gateway {
name: "A"
port: -1
tls {
cert_file: 123
}
}`,
"to be filename",
},
{
"tls_gen_error_cert_file_not_found",
`gateway {
name: "A"
port: -1
tls {
cert_file: "./configs/certs/missing.pem"
key_file: "./configs/certs/server-key.pem"
}
}`,
"certificate/key pair",
},
{
"tls_gen_error_key_file_not_found",
`gateway {
name: "A"
port: -1
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/missing.pem"
}
}`,
"certificate/key pair",
},
{
"tls_gen_error_key_file_missing",
`gateway {
name: "A"
port: -1
tls {
cert_file: "./configs/certs/server.pem"
}
}`,
`missing 'key_file' in TLS configuration`,
},
{
"tls_gen_error_cert_file_missing",
`gateway {
name: "A"
port: -1
tls {
key_file: "./configs/certs/server-key.pem"
}
}`,
`missing 'cert_file' in TLS configuration`,
},
{
"gateways_needs_to_be_an_array",
`gateway {
name: "A"
gateways {
name: "B"
}
}`,
"Expected gateways field to be an array",
},
{
"gateways_entry_needs_to_be_a_map",
`gateway {
name: "A"
gateways [
"g1", "g2"
]
}`,
"Expected gateway entry to be a map",
},
{
"bad_url",
`gateway {
name: "A"
gateways [
{
name: "B"
url: "nats://wrong url"
}
]
}`,
"error parsing gateway url",
},
{
"bad_urls",
`gateway {
name: "A"
gateways [
{
name: "B"
urls: ["nats://wrong url", "nats://host:5222"]
}
]
}`,
"error parsing gateway url",
},
{
"gateway_tls_error",
`gateway {
name: "A"
port: -1
gateways [
{
name: "B"
tls {
cert_file: 123
}
}
]
}`,
"to be filename",
},
{
"gateway_unknown_field",
`gateway {
name: "A"
port: -1
gateways [
{
name: "B"
unknown_field: 1
}
]
}`,
"unknown field",
},
} {
t.Run(test.name, func(t *testing.T) {
file := fmt.Sprintf("server_config_gateways_%s.conf", test.name)
defer os.Remove(file)
if err := ioutil.WriteFile(file, []byte(test.content), 0600); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
_, err := ProcessConfigFile(file)
if err == nil {
t.Fatalf("Expected to fail, did not. Content:\n%s", test.content)
} else if !strings.Contains(err.Error(), test.expectedErr) {
t.Fatalf("Expected error containing %q, got %q, for content:\n%s", test.expectedErr, err, test.content)
}
})
}
}
func TestParsingLeafNodesListener(t *testing.T) {
content := `
leafnodes {
listen: "127.0.0.1:3333"
host: "127.0.0.1"
port: 3333
advertise: "me:22"
authorization {
user: "derek"
password: "s3cr3t!"
timeout: 2.2
}
tls {
cert_file: "./configs/certs/server.pem"
key_file: "./configs/certs/key.pem"
timeout: 3.3
}
}
`
conf := createConfFile(t, []byte(content))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing file: %v", err)
}
expected := &LeafNodeOpts{
Host: "127.0.0.1",
Port: 3333,
Username: "derek",
Password: "s3cr3t!",
AuthTimeout: 2.2,
Advertise: "me:22",
TLSTimeout: 3.3,
}
if opts.LeafNode.TLSConfig == nil {
t.Fatalf("Expected TLSConfig, got none")
}
opts.LeafNode.TLSConfig = nil
if !reflect.DeepEqual(&opts.LeafNode, expected) {
t.Fatalf("Expected %v, got %v", expected, opts.LeafNode)
}
}
func TestParsingLeafNodeRemotes(t *testing.T) {
t.Run("parse config file with relative path", func(t *testing.T) {
content := `
leafnodes {
remotes = [
{
url: nats-leaf://127.0.0.1:2222
account: foobar // Local Account to bind to..
credentials: "./my.creds"
}
]
}
`
conf := createConfFile(t, []byte(content))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing file: %v", err)
}
if len(opts.LeafNode.Remotes) != 1 {
t.Fatalf("Expected 1 remote, got %d", len(opts.LeafNode.Remotes))
}
expected := &RemoteLeafOpts{
LocalAccount: "foobar",
Credentials: "./my.creds",
}
u, _ := url.Parse("nats-leaf://127.0.0.1:2222")
expected.URLs = append(expected.URLs, u)
if !reflect.DeepEqual(opts.LeafNode.Remotes[0], expected) {
t.Fatalf("Expected %v, got %v", expected, opts.LeafNode.Remotes[0])
}
})
t.Run("parse config file with tilde path", func(t *testing.T) {
if runtime.GOOS == "windows" {
t.SkipNow()
}
origHome := os.Getenv("HOME")
defer os.Setenv("HOME", origHome)
os.Setenv("HOME", "/home/foo")
content := `
leafnodes {
remotes = [
{
url: nats-leaf://127.0.0.1:2222
account: foobar // Local Account to bind to..
credentials: "~/my.creds"
}
]
}
`
conf := createConfFile(t, []byte(content))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Error processing file: %v", err)
}
expected := &RemoteLeafOpts{
LocalAccount: "foobar",
Credentials: "/home/foo/my.creds",
}
u, _ := url.Parse("nats-leaf://127.0.0.1:2222")
expected.URLs = append(expected.URLs, u)
if !reflect.DeepEqual(opts.LeafNode.Remotes[0], expected) {
t.Fatalf("Expected %v, got %v", expected, opts.LeafNode.Remotes[0])
}
})
}
func TestLargeMaxControlLine(t *testing.T) {
confFileName := "big_mcl.conf"
defer os.Remove(confFileName)
content := `
max_control_line = 3000000000
`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
if _, err := ProcessConfigFile(confFileName); err == nil {
t.Fatalf("Expected an error from too large of a max_control_line entry")
}
}
func TestLargeMaxPayload(t *testing.T) {
confFileName := "big_mp.conf"
defer os.Remove(confFileName)
content := `
max_payload = 3000000000
`
if err := ioutil.WriteFile(confFileName, []byte(content), 0666); err != nil {
t.Fatalf("Error writing config file: %v", err)
}
if _, err := ProcessConfigFile(confFileName); err == nil {
t.Fatalf("Expected an error from too large of a max_payload entry")
}
}
func TestHandleUnknownTopLevelConfigurationField(t *testing.T) {
conf := createConfFile(t, []byte(`
port: 1234
streaming {
id: "me"
}
`))
defer os.Remove(conf)
// Verify that we get an error because of unknown "streaming" field.
opts := &Options{}
if err := opts.ProcessConfigFile(conf); err == nil || !strings.Contains(err.Error(), "streaming") {
t.Fatal("Expected error, got none")
}
// Verify that if that is set, we get no error
NoErrOnUnknownFields(true)
defer NoErrOnUnknownFields(false)
if err := opts.ProcessConfigFile(conf); err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if opts.Port != 1234 {
t.Fatalf("Port was not parsed correctly: %v", opts.Port)
}
// Verify that ignore works only on top level fields.
changeCurrentConfigContentWithNewContent(t, conf, []byte(`
port: 1234
cluster {
non_top_level_unknown_field: 123
}
streaming {
id: "me"
}
`))
if err := opts.ProcessConfigFile(conf); err == nil || !strings.Contains(err.Error(), "non_top_level") {
t.Fatal("Expected error, got none")
}
}
func TestSublistNoCacheConfig(t *testing.T) {
confFileName := createConfFile(t, []byte(`
disable_sublist_cache: true
`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received an error reading config file: %v", err)
}
if !opts.NoSublistCache {
t.Fatalf("Expected sublist cache to be disabled")
}
}
func TestSublistNoCacheConfigOnAccounts(t *testing.T) {
confFileName := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
disable_sublist_cache: true
accounts {
synadia {
users [ {nkey : UBAAQWTW6CG2G6ANGNKB5U2B7HRWHSGMZEZX3AQSAJOQDAUGJD46LD2E} ]
}
nats.io {
users [ {nkey : UC6NLCN7AS34YOJVCYD4PJ3QB7QGLYG5B5IMBT25VW5K4TNUJODM7BOX} ]
}
}
no_sys_acc = true
`))
defer os.Remove(confFileName)
s, _ := RunServerWithConfig(confFileName)
defer s.Shutdown()
// Check that all account sublists do not have caching enabled.
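	// Total is the two configured accounts plus the server's reserved accounts.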
ta := s.numReservedAccounts() + 2
if la := s.numAccounts(); la != ta {
t.Fatalf("Expected to have a server with %d active accounts, got %v", ta, la)
}
s.accounts.Range(func(k, v interface{}) bool {
acc := v.(*Account)
if acc == nil {
t.Fatalf("Expected non-nil sublist for account")
}
if acc.sl.CacheEnabled() {
t.Fatalf("Expected the account sublist to not have caching enabled")
}
return true
})
}
func TestParsingResponsePermissions(t *testing.T) {
template := `
listen: "127.0.0.1:-1"
authorization {
users [
{
user: ivan
password: pwd
permissions {
allow_responses {
%s
%s
}
}
}
]
}
`
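	// check processes the given config file and verifies either the expected parse
	// error, or the resulting response permissions (max msgs and expiration TTL).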
check := func(t *testing.T, conf string, expectedError string, expectedMaxMsgs int, expectedTTL time.Duration) {
t.Helper()
opts, err := ProcessConfigFile(conf)
if expectedError != "" {
if err == nil || !strings.Contains(err.Error(), expectedError) {
t.Fatalf("Expected error about %q, got %q", expectedError, err)
}
// OK!
return
}
if err != nil {
t.Fatalf("Error on process: %v", err)
}
u := opts.Users[0]
p := u.Permissions.Response
if p == nil {
t.Fatalf("Expected response permissions to be set, it was not")
}
if n := p.MaxMsgs; n != expectedMaxMsgs {
t.Fatalf("Expected response max msgs to be %v, got %v", expectedMaxMsgs, n)
}
if ttl := p.Expires; ttl != expectedTTL {
t.Fatalf("Expected response ttl to be %v, got %v", expectedTTL, ttl)
}
}
// Check defaults
conf := createConfFile(t, []byte(fmt.Sprintf(template, "", "")))
defer os.Remove(conf)
check(t, conf, "", DEFAULT_ALLOW_RESPONSE_MAX_MSGS, DEFAULT_ALLOW_RESPONSE_EXPIRATION)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", "")))
defer os.Remove(conf)
check(t, conf, "", 10, DEFAULT_ALLOW_RESPONSE_EXPIRATION)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "", "ttl: 5s")))
defer os.Remove(conf)
check(t, conf, "", DEFAULT_ALLOW_RESPONSE_MAX_MSGS, 5*time.Second)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 0", "")))
defer os.Remove(conf)
check(t, conf, "", DEFAULT_ALLOW_RESPONSE_MAX_MSGS, DEFAULT_ALLOW_RESPONSE_EXPIRATION)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "", `ttl: "0s"`)))
defer os.Remove(conf)
check(t, conf, "", DEFAULT_ALLOW_RESPONSE_MAX_MSGS, DEFAULT_ALLOW_RESPONSE_EXPIRATION)
// Check normal values
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", `ttl: "5s"`)))
defer os.Remove(conf)
check(t, conf, "", 10, 5*time.Second)
// Check negative values ok
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: -1", `ttl: "5s"`)))
defer os.Remove(conf)
check(t, conf, "", -1, 5*time.Second)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", `ttl: "-1s"`)))
defer os.Remove(conf)
check(t, conf, "", 10, -1*time.Second)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: -1", `ttl: "-1s"`)))
defer os.Remove(conf)
check(t, conf, "", -1, -1*time.Second)
// Check parsing errors
conf = createConfFile(t, []byte(fmt.Sprintf(template, "unknown_field: 123", "")))
defer os.Remove(conf)
check(t, conf, "Unknown field", 0, 0)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", "ttl: 123")))
defer os.Remove(conf)
check(t, conf, "not a duration string", 0, 0)
conf = createConfFile(t, []byte(fmt.Sprintf(template, "max: 10", "ttl: xyz")))
defer os.Remove(conf)
check(t, conf, "error parsing expires", 0, 0)
}
func TestExpandPath(t *testing.T) {
if runtime.GOOS == "windows" {
origUserProfile := os.Getenv("USERPROFILE")
origHomeDrive, origHomePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
defer func() {
os.Setenv("USERPROFILE", origUserProfile)
os.Setenv("HOMEDRIVE", origHomeDrive)
os.Setenv("HOMEPATH", origHomePath)
}()
cases := []struct {
path string
userProfile string
homeDrive string
homePath string
wantPath string
wantErr bool
}{
// Missing HOMEDRIVE and HOMEPATH.
{path: "/Foo/Bar", userProfile: `C:\Foo\Bar`, wantPath: "/Foo/Bar"},
{path: "Foo/Bar", userProfile: `C:\Foo\Bar`, wantPath: "Foo/Bar"},
{path: "~/Fizz", userProfile: `C:\Foo\Bar`, wantPath: `C:\Foo\Bar\Fizz`},
{path: `${HOMEDRIVE}${HOMEPATH}\Fizz`, homeDrive: `C:`, homePath: `\Foo\Bar`, wantPath: `C:\Foo\Bar\Fizz`},
// Missing USERPROFILE.
{path: "~/Fizz", homeDrive: "X:", homePath: `\Foo\Bar`, wantPath: `X:\Foo\Bar\Fizz`},
// Set all environment variables. HOMEDRIVE and HOMEPATH take
// precedence.
{path: "~/Fizz", userProfile: `C:\Foo\Bar`,
homeDrive: "X:", homePath: `\Foo\Bar`, wantPath: `X:\Foo\Bar\Fizz`},
// Missing all environment variables.
{path: "~/Fizz", wantErr: true},
}
for i, c := range cases {
t.Run(fmt.Sprintf("windows case %d", i), func(t *testing.T) {
os.Setenv("USERPROFILE", c.userProfile)
os.Setenv("HOMEDRIVE", c.homeDrive)
os.Setenv("HOMEPATH", c.homePath)
gotPath, err := expandPath(c.path)
if !c.wantErr && err != nil {
t.Fatalf("unexpected error: got=%v; want=%v", err, nil)
} else if c.wantErr && err == nil {
t.Fatalf("unexpected success: got=%v; want=%v", nil, "err")
}
if gotPath != c.wantPath {
t.Fatalf("unexpected path: got=%v; want=%v", gotPath, c.wantPath)
}
})
}
return
}
// Unix tests
origHome := os.Getenv("HOME")
defer os.Setenv("HOME", origHome)
cases := []struct {
path string
home string
wantPath string
wantErr bool
}{
{path: "/foo/bar", home: "/fizz/buzz", wantPath: "/foo/bar"},
{path: "foo/bar", home: "/fizz/buzz", wantPath: "foo/bar"},
{path: "~/fizz", home: "/foo/bar", wantPath: "/foo/bar/fizz"},
{path: "$HOME/fizz", home: "/foo/bar", wantPath: "/foo/bar/fizz"},
// missing HOME env var
{path: "~/fizz", wantErr: true},
}
for i, c := range cases {
t.Run(fmt.Sprintf("unix case %d", i), func(t *testing.T) {
os.Setenv("HOME", c.home)
gotPath, err := expandPath(c.path)
if !c.wantErr && err != nil {
t.Fatalf("unexpected error: got=%v; want=%v", err, nil)
} else if c.wantErr && err == nil {
t.Fatalf("unexpected success: got=%v; want=%v", nil, "err")
}
if gotPath != c.wantPath {
t.Fatalf("unexpected path: got=%v; want=%v", gotPath, c.wantPath)
}
})
}
}
func TestNoAuthUserCode(t *testing.T) {
confFileName := createConfFile(t, []byte(`
listen: "127.0.0.1:-1"
no_auth_user: $NO_AUTH_USER
accounts {
synadia {
users [
{user: "a", password: "a"},
{nkey : UBAAQWTW6CG2G6ANGNKB5U2B7HRWHSGMZEZX3AQSAJOQDAUGJD46LD2E},
]
}
acc {
users [
{user: "c", password: "c"}
]
}
}
# config for $G
authorization {
users [
{user: "b", password: "b"}
]
}
`))
defer os.Remove(confFileName)
defer os.Unsetenv("NO_AUTH_USER")
for _, user := range []string{"a", "b", "b"} {
t.Run(user, func(t *testing.T) {
os.Setenv("NO_AUTH_USER", user)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
} else {
opts.NoLog = true
srv := RunServer(opts)
nc, err := nats.Connect(fmt.Sprintf("nats://127.0.0.1:%d", opts.Port))
if err != nil {
t.Fatalf("couldn't connect %s", err)
}
nc.Close()
srv.Shutdown()
}
})
}
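	// An unknown username or a bare nkey public key must be rejected as no_auth_user when the server is created.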
for _, badUser := range []string{"notthere", "UBAAQWTW6CG2G6ANGNKB5U2B7HRWHSGMZEZX3AQSAJOQDAUGJD46LD2E"} {
t.Run(badUser, func(t *testing.T) {
os.Setenv("NO_AUTH_USER", badUser)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err != nil {
if !strings.HasPrefix(err.Error(), "no_auth_user") {
t.Fatalf("Received unexpected error %s", err)
}
return // error looks as expected
}
s.Shutdown()
t.Fatalf("Received no error, where no_auth_user error was expected")
})
}
}
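// Operator JWT that embeds a system account and an account_server_url resolver,
// used by TestReadOperatorJWT below.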
const operatorJwtWithSysAccAndUrlResolver = `
listen: "127.0.0.1:-1"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJJVEdJNjNCUUszM1VNN1pBSzZWT1RXNUZEU01ESlNQU1pRQ0RMNUlLUzZQTVhBU0ROQ01RIiwiaWF0IjoxNTg5ODM5MjA1LCJpc3MiOiJPQ1k2REUyRVRTTjNVT0RGVFlFWEJaTFFMSTdYNEdTWFI1NE5aQzRCQkxJNlFDVFpVVDY1T0lWTiIsIm5hbWUiOiJPUCIsInN1YiI6Ik9DWTZERTJFVFNOM1VPREZUWUVYQlpMUUxJN1g0R1NYUjU0TlpDNEJCTEk2UUNUWlVUNjVPSVZOIiwidHlwZSI6Im9wZXJhdG9yIiwibmF0cyI6eyJhY2NvdW50X3NlcnZlcl91cmwiOiJodHRwOi8vbG9jYWxob3N0OjgwMDAvand0L3YxIiwib3BlcmF0b3Jfc2VydmljZV91cmxzIjpbIm5hdHM6Ly9sb2NhbGhvc3Q6NDIyMiJdLCJzeXN0ZW1fYWNjb3VudCI6IkFEWjU0N0IyNFdIUExXT0s3VE1MTkJTQTdGUUZYUjZVTTJOWjRISE5JQjdSREZWWlFGT1o0R1FRIn19.3u710KqMLwgXwsMvhxfEp9xzK84XyAZ-4dd6QY0T6hGj8Bw9mS-HcQ7HbvDDNU01S61tNFfpma_JR6LtB3ixBg
`
func TestReadOperatorJWT(t *testing.T) {
confFileName := createConfFile(t, []byte(operatorJwtWithSysAccAndUrlResolver))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
if opts.SystemAccount != "ADZ547B24WHPLWOK7TMLNBSA7FQFXR6UM2NZ4HHNIB7RDFVZQFOZ4GQQ" {
t.Fatalf("Expected different SystemAccount: %s", opts.SystemAccount)
}
	if r, ok := opts.AccountResolver.(*URLAccResolver); !ok {
		t.Fatalf("Expected an account resolver of type *URLAccResolver, got: %T", opts.AccountResolver)
	} else if r.url != "http://localhost:8000/jwt/v1/accounts/" {
		t.Fatalf("Expected different resolver URL: %s", r.url)
	}
}
}
// Using the memory resolver so these tests do not have to start an external account server.
const operatorJwtWithSysAccAndMemResolver = `
listen: "127.0.0.1:-1"
// Operator "TESTOP"
operator: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiJLRTZRU0tWTU1VWFFKNFZCTDNSNDdGRFlIWElaTDRZSE1INjVIT0k1UjZCNUpPUkxVQlZBIiwiaWF0IjoxNTg5OTE2MzgyLCJpc3MiOiJPQVRUVkJYTElVTVRRT1FXVUEySU0zRkdUQlFRSEFHUEZaQTVET05NTlFSUlRQUjYzTERBTDM1WiIsIm5hbWUiOiJURVNUT1AiLCJzdWIiOiJPQVRUVkJYTElVTVRRT1FXVUEySU0zRkdUQlFRSEFHUEZaQTVET05NTlFSUlRQUjYzTERBTDM1WiIsInR5cGUiOiJvcGVyYXRvciIsIm5hdHMiOnsic3lzdGVtX2FjY291bnQiOiJBRFNQT1lNSFhKTjZKVllRQ0xSWjVYUTVJVU42QTNTMzNYQTROVjRWSDc0NDIzVTdVN1lSNFlWVyJ9fQ.HiyUtlk8kectKHeQHtuqFcjFt0RbYZE_WAqPCcoWlV2IFVdXuOTzShYEMgDmtgvsFG_zxNQOj08Gr6a06ovwBA
resolver: MEMORY
resolver_preload: {
// Account "TESTSYS"
ADSPOYMHXJN6JVYQCLRZ5XQ5IUN6A3S33XA4NV4VH74423U7U7YR4YVW: eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5In0.eyJqdGkiOiI2WEtYUFZNTjdEVFlBSUE0R1JDWUxXUElSM1ZEM1Q2UVk2RFg3NURHTVFVWkdVWTJSRFNRIiwiaWF0IjoxNTg5OTE2MzIzLCJpc3MiOiJPQVRUVkJYTElVTVRRT1FXVUEySU0zRkdUQlFRSEFHUEZaQTVET05NTlFSUlRQUjYzTERBTDM1WiIsIm5hbWUiOiJURVNUU1lTIiwic3ViIjoiQURTUE9ZTUhYSk42SlZZUUNMUlo1WFE1SVVONkEzUzMzWEE0TlY0Vkg3NDQyM1U3VTdZUjRZVlciLCJ0eXBlIjoiYWNjb3VudCIsIm5hdHMiOnsibGltaXRzIjp7InN1YnMiOi0xLCJjb25uIjotMSwibGVhZiI6LTEsImltcG9ydHMiOi0xLCJleHBvcnRzIjotMSwiZGF0YSI6LTEsInBheWxvYWQiOi0xLCJ3aWxkY2FyZHMiOnRydWV9fX0.vhtWanIrOncdNfg-yO-7L61ccc-yRacvVtEsaIgWBEmW4czlEPhsiF1MkUKG91rtgcbwUf73ZIFEfja5MgFBAQ
}
`
func TestReadOperatorJWTSystemAccountMatch(t *testing.T) {
confFileName := createConfFile(t, []byte(operatorJwtWithSysAccAndMemResolver+`
system_account: ADSPOYMHXJN6JVYQCLRZ5XQ5IUN6A3S33XA4NV4VH74423U7U7YR4YVW
`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s.Shutdown()
}
func TestReadOperatorJWTSystemAccountMismatch(t *testing.T) {
confFileName := createConfFile(t, []byte(operatorJwtWithSysAccAndMemResolver+`
system_account: ADXJJCDCSRSMCOV25FXQW7R4QOG7R763TVEXBNWJHLBMBGWOJYG5XZBG
`))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err == nil {
s.Shutdown()
t.Fatalf("Received no error")
} else if !strings.Contains(err.Error(), "system_account in config and operator JWT must be identical") {
t.Fatalf("Received unexpected error %s", err)
}
}
func TestReadOperatorAssertVersion(t *testing.T) {
kp, _ := nkeys.CreateOperator()
pk, _ := kp.PublicKey()
op := jwt.NewOperatorClaims(pk)
op.AssertServerVersion = "1.2.3"
jwt, err := op.Encode(kp)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
confFileName := createConfFile(t, []byte(fmt.Sprintf(`
operator: %s
resolver: MEM
`, jwt)))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s.Shutdown()
}
func TestReadOperatorAssertVersionFail(t *testing.T) {
kp, _ := nkeys.CreateOperator()
pk, _ := kp.PublicKey()
op := jwt.NewOperatorClaims(pk)
op.AssertServerVersion = "10.20.30"
jwt, err := op.Encode(kp)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
confFileName := createConfFile(t, []byte(fmt.Sprintf(`
operator: %s
resolver: MEM
`, jwt)))
defer os.Remove(confFileName)
opts, err := ProcessConfigFile(confFileName)
if err != nil {
t.Fatalf("Received unexpected error %s", err)
}
s, err := NewServer(opts)
if err == nil {
s.Shutdown()
t.Fatalf("Received no error")
} else if !strings.Contains(err.Error(), "expected major version 10 > server major version") {
t.Fatal("expected different error got: ", err)
}
}
func TestClusterNameAndGatewayNameConflict(t *testing.T) {
conf := createConfFile(t, []byte(`
listen: 127.0.0.1:-1
cluster {
name: A
listen: 127.0.0.1:-1
}
gateway {
name: B
listen: 127.0.0.1:-1
}
`))
defer os.Remove(conf)
opts, err := ProcessConfigFile(conf)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if err := validateOptions(opts); err != ErrClusterNameConfigConflict {
t.Fatalf("Expected ErrClusterNameConfigConflict got %v", err)
}
}
func TestDefaultAuthTimeout(t *testing.T) {
opts := DefaultOptions()
opts.AuthTimeout = 0
s := RunServer(opts)
defer s.Shutdown()
sopts := s.getOpts()
if at := time.Duration(sopts.AuthTimeout * float64(time.Second)); at != AUTH_TIMEOUT {
t.Fatalf("Expected auth timeout to be %v, got %v", AUTH_TIMEOUT, at)
}
s.Shutdown()
opts = DefaultOptions()
tc := &TLSConfigOpts{
CertFile: "../test/configs/certs/server-cert.pem",
KeyFile: "../test/configs/certs/server-key.pem",
CaFile: "../test/configs/certs/ca.pem",
Timeout: 4.0,
}
tlsConfig, err := GenTLSConfig(tc)
if err != nil {
t.Fatalf("Error generating tls config: %v", err)
}
opts.TLSConfig = tlsConfig
opts.TLSTimeout = tc.Timeout
s = RunServer(opts)
defer s.Shutdown()
sopts = s.getOpts()
if sopts.AuthTimeout != 5 {
t.Fatalf("Expected auth timeout to be %v, got %v", 5, sopts.AuthTimeout)
}
}
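// Editorial sketch (not part of the original file): the second half of the test above pins
// down how the auth timeout is derived when TLS is configured but auth_timeout is left
// unset - with a 4s TLS handshake timeout the effective auth timeout becomes 5s, i.e. the
// TLS timeout plus one second. In config form the exercised setup is roughly:
//
//	tls {
//	    cert_file: "server-cert.pem"
//	    key_file:  "server-key.pem"
//	    timeout:   4
//	}
//	# auth_timeout not set -> derived default of 5 seconds in this test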
| [
"\"HOME\"",
"\"USERPROFILE\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"HOME\""
] | [] | [
"HOME",
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE"
] | [] | ["HOME", "HOMEPATH", "HOMEDRIVE", "USERPROFILE"] | go | 4 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cowrywise.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 | |
tests/support/case.py | """
:codeauthor: Pedro Algarvio ([email protected])
====================================
Custom Salt TestCase Implementations
====================================
Custom reusable :class:`TestCase<python2:unittest.TestCase>`
implementations.
"""
import errno
import io
import json
import logging
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import time
from datetime import datetime, timedelta
import pytest
import salt.utils.files
from saltfactories.utils.processes import terminate_process
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.helpers import SKIP_IF_NOT_RUNNING_PYTEST, RedirectStdStreams
from tests.support.mixins import ( # pylint: disable=unused-import
AdaptedConfigurationTestCaseMixin,
SaltClientTestCaseMixin,
SaltMultimasterClientTestCaseMixin,
)
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase
STATE_FUNCTION_RUNNING_RE = re.compile(
r"""The function (?:"|')(?P<state_func>.*)(?:"|') is running as PID """
r"(?P<pid>[\d]+) and was started at (?P<date>.*) with jid (?P<jid>[\d]+)"
)
log = logging.getLogger(__name__)
class ShellCase(TestCase, AdaptedConfigurationTestCaseMixin, ScriptPathMixin):
"""
Execute a test for a shell command
"""
RUN_TIMEOUT = 30
def run_salt(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
timeout=None,
popen_kwargs=None,
config_dir=None,
):
r'''
Run the ``salt`` CLI tool with the provided arguments
.. code-block:: python
class MatchTest(ShellCase):
def test_list(self):
"""
test salt -L matcher
"""
data = self.run_salt('-L minion test.ping')
data = '\n'.join(data)
self.assertIn('minion', data)
'''
if timeout is None:
timeout = self.RUN_TIMEOUT
arg_str = "-t {} {}".format(timeout, arg_str)
return self.run_script(
"salt",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
def run_ssh(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
timeout=None,
wipe=False,
raw=False,
roster_file=None,
ssh_opts="",
log_level="error",
config_dir=None,
**kwargs
):
"""
Execute salt-ssh
"""
if timeout is None:
timeout = self.RUN_TIMEOUT
if not roster_file:
roster_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
arg_str = (
"{wipe} {raw} -l {log_level} --ignore-host-keys --priv {client_key} --roster-file "
"{roster_file} {ssh_opts} localhost {arg_str} --out=json"
).format(
wipe=" -W" if wipe else "",
raw=" -r" if raw else "",
log_level=log_level,
client_key=os.path.join(RUNTIME_VARS.TMP_SSH_CONF_DIR, "client_key"),
roster_file=roster_file,
ssh_opts=ssh_opts,
arg_str=arg_str,
)
ret = self.run_script(
"salt-ssh",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
raw=True,
timeout=timeout,
config_dir=config_dir,
**kwargs
)
log.debug("Result of run_ssh for command '%s %s': %s", arg_str, kwargs, ret)
return ret
def run_run(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
asynchronous=False,
timeout=None,
config_dir=None,
**kwargs
):
"""
Execute salt-run
"""
if timeout is None:
timeout = self.RUN_TIMEOUT
asynchronous = kwargs.get("async", asynchronous)
arg_str = "{async_flag} -t {timeout} {}".format(
arg_str, timeout=timeout, async_flag=" --async" if asynchronous else "",
)
ret = self.run_script(
"salt-run",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
log.debug("Result of run_run for command '%s': %s", arg_str, ret)
return ret
def run_run_plus(self, fun, *arg, **kwargs):
"""
Execute the runner function and return the return data and output in a dict
"""
output = kwargs.pop("_output", None)
opts_overrides = kwargs.pop("opts_overrides", None)
ret = {"fun": fun}
# Late import
import salt.config
import salt.output
import salt.runner
opts = salt.config.client_config(self.get_config_file_path("master"))
if opts_overrides:
opts.update(opts_overrides)
opts_arg = list(arg)
if kwargs:
opts_arg.append({"__kwarg__": True})
opts_arg[-1].update(kwargs)
opts.update({"doc": False, "fun": fun, "arg": opts_arg})
with RedirectStdStreams():
runner = salt.runner.Runner(opts)
ret["return"] = runner.run()
try:
ret["jid"] = runner.jid
except AttributeError:
ret["jid"] = None
# Compile output
# TODO: Support outputters other than nested
opts["color"] = False
opts["output_file"] = io.StringIO()
try:
salt.output.display_output(ret["return"], opts=opts, out=output)
out = opts["output_file"].getvalue()
if output is None:
out = out.splitlines()
elif output == "json":
out = json.loads(out)
ret["out"] = out
finally:
opts["output_file"].close()
log.debug(
"Result of run_run_plus for fun '%s' with arg '%s': %s", fun, opts_arg, ret
)
return ret
def run_key(self, arg_str, catch_stderr=False, with_retcode=False, config_dir=None):
"""
Execute salt-key
"""
return self.run_script(
"salt-key",
arg_str,
catch_stderr=catch_stderr,
with_retcode=with_retcode,
config_dir=config_dir,
)
def run_cp(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
timeout=None,
config_dir=None,
):
"""
Execute salt-cp
"""
if timeout is None:
timeout = self.RUN_TIMEOUT
# Note: not logging result of run_cp because it will log a bunch of
# bytes which will not be very helpful.
return self.run_script(
"salt-cp",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
def run_call(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
local=False,
timeout=None,
config_dir=None,
):
if timeout is None:
timeout = self.RUN_TIMEOUT
if not config_dir:
config_dir = RUNTIME_VARS.TMP_MINION_CONF_DIR
arg_str = "{} {}".format("--local" if local else "", arg_str)
ret = self.run_script(
"salt-call",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
log.debug("Result of run_call for command '%s': %s", arg_str, ret)
return ret
def run_function(
self,
function,
arg=(),
with_retcode=False,
catch_stderr=False,
local=False,
timeout=RUN_TIMEOUT,
**kwargs
):
"""
Execute function with salt-call.
This function is added for compatibility with ModuleCase. This makes it possible to use
decorators like @with_system_user.
"""
arg_str = "{} {} {}".format(
function,
" ".join(str(arg_) for arg_ in arg),
" ".join("{}={}".format(*item) for item in kwargs.items()),
)
return self.run_call(arg_str, with_retcode, catch_stderr, local, timeout)
def run_cloud(self, arg_str, catch_stderr=False, timeout=None, config_dir=None):
"""
Execute salt-cloud
"""
if timeout is None:
timeout = self.RUN_TIMEOUT
ret = self.run_script(
"salt-cloud", arg_str, catch_stderr, timeout=timeout, config_dir=config_dir
)
log.debug("Result of run_cloud for command '%s': %s", arg_str, ret)
return ret
def run_spm(
self,
arg_str,
with_retcode=False,
catch_stderr=False,
timeout=None,
config_dir=None,
):
"""
Execute spm
"""
if timeout is None:
timeout = self.RUN_TIMEOUT
ret = self.run_script(
"spm",
arg_str,
with_retcode=with_retcode,
catch_stderr=catch_stderr,
timeout=timeout,
config_dir=config_dir,
)
log.debug("Result of run_spm for command '%s': %s", arg_str, ret)
return ret
def run_script(
self,
script,
arg_str,
catch_stderr=False,
with_retcode=False,
catch_timeout=False,
# FIXME A timeout of zero or disabling timeouts may not return results!
timeout=15,
raw=False,
popen_kwargs=None,
log_output=None,
config_dir=None,
**kwargs
):
"""
Execute a script with the given argument string
        The ``log_output`` argument is ternary: it can be True, False, or None.
If the value is boolean, then it forces the results to either be logged
or not logged. If it is None, then the return code of the subprocess
determines whether or not to log results.
"""
import salt.utils.platform
script_path = self.get_script_path(script)
if not os.path.isfile(script_path):
return False
popen_kwargs = popen_kwargs or {}
if salt.utils.platform.is_windows():
cmd = "python "
if "cwd" not in popen_kwargs:
popen_kwargs["cwd"] = os.getcwd()
if "env" not in popen_kwargs:
popen_kwargs["env"] = os.environ.copy()
popen_kwargs["env"]["PYTHONPATH"] = RUNTIME_VARS.CODE_DIR
else:
cmd = "PYTHONPATH="
python_path = os.environ.get("PYTHONPATH", None)
if python_path is not None:
cmd += "{}:".format(python_path)
if sys.version_info[0] < 3:
cmd += "{} ".format(":".join(sys.path[1:]))
else:
cmd += "{} ".format(":".join(sys.path[0:]))
cmd += "python{}.{} ".format(*sys.version_info)
cmd += "{} --config-dir={} {} ".format(
script_path, config_dir or RUNTIME_VARS.TMP_CONF_DIR, arg_str
)
if kwargs:
# late import
import salt.utils.json
for key, value in kwargs.items():
cmd += "'{}={} '".format(key, salt.utils.json.dumps(value))
tmp_file = tempfile.SpooledTemporaryFile()
popen_kwargs = dict(
{"shell": True, "stdout": tmp_file, "universal_newlines": True},
**popen_kwargs
)
if catch_stderr is True:
popen_kwargs["stderr"] = subprocess.PIPE
if not sys.platform.lower().startswith("win"):
popen_kwargs["close_fds"] = True
def detach_from_parent_group():
# detach from parent group (no more inherited signals!)
os.setpgrp()
popen_kwargs["preexec_fn"] = detach_from_parent_group
def format_return(retcode, stdout, stderr=None, timed_out=False):
"""
DRY helper to log script result if it failed, and then return the
desired output based on whether or not stderr was desired, and
            whether or not a retcode was desired.
"""
log_func = log.debug
if timed_out:
log.error(
"run_script timed out after %d seconds (process killed)", timeout
)
log_func = log.error
if log_output is True or timed_out or (log_output is None and retcode != 0):
log_func(
"run_script results for: %s %s\n"
"return code: %s\n"
"stdout:\n"
"%s\n\n"
"stderr:\n"
"%s",
script,
arg_str,
retcode,
stdout,
stderr,
)
stdout = stdout or ""
stderr = stderr or ""
if not raw:
stdout = stdout.splitlines()
stderr = stderr.splitlines()
ret = [stdout]
if catch_stderr:
ret.append(stderr)
if with_retcode:
ret.append(retcode)
if catch_timeout:
ret.append(timed_out)
return ret[0] if len(ret) == 1 else tuple(ret)
log.debug("Running Popen(%r, %r)", cmd, popen_kwargs)
process = subprocess.Popen(cmd, **popen_kwargs)
if timeout is not None:
stop_at = datetime.now() + timedelta(seconds=timeout)
term_sent = False
while True:
process.poll()
time.sleep(0.1)
if datetime.now() <= stop_at:
# We haven't reached the timeout yet
if process.returncode is not None:
break
else:
terminate_process(process.pid, kill_children=True)
return format_return(
process.returncode, *process.communicate(), timed_out=True
)
tmp_file.seek(0)
try:
out = tmp_file.read().decode(__salt_system_encoding__)
except (NameError, UnicodeDecodeError):
# Let's cross our fingers and hope for the best
out = tmp_file.read().decode("utf-8")
if catch_stderr:
if sys.version_info < (2, 7):
                # On python 2.6, the subprocess's communicate() method uses
                # select, which is limited by the OS to 1024 file descriptors
# We need more available descriptors to run the tests which
# need the stderr output.
# So instead of .communicate() we wait for the process to
# finish, but, as the python docs state "This will deadlock
# when using stdout=PIPE and/or stderr=PIPE and the child
# process generates enough output to a pipe such that it
# blocks waiting for the OS pipe buffer to accept more data.
# Use communicate() to avoid that." <- a catch, catch situation
#
                # Use this workaround where it's needed only, python 2.6
process.wait()
err = process.stderr.read()
else:
_, err = process.communicate()
# Force closing stderr/stdout to release file descriptors
if process.stdout is not None:
process.stdout.close()
if process.stderr is not None:
process.stderr.close()
# pylint: disable=maybe-no-member
try:
return format_return(process.returncode, out, err or "")
finally:
try:
if os.path.exists(tmp_file.name):
if isinstance(tmp_file.name, str):
# tmp_file.name is an int when using SpooledTemporaryFiles
# int types cannot be used with os.remove() in Python 3
os.remove(tmp_file.name)
else:
# Clean up file handles
tmp_file.close()
process.terminate()
except OSError as err:
# process already terminated
pass
# pylint: enable=maybe-no-member
# TODO Remove this?
process.communicate()
if process.stdout is not None:
process.stdout.close()
try:
return format_return(process.returncode, out)
finally:
try:
if os.path.exists(tmp_file.name):
if isinstance(tmp_file.name, str):
# tmp_file.name is an int when using SpooledTemporaryFiles
# int types cannot be used with os.remove() in Python 3
os.remove(tmp_file.name)
else:
# Clean up file handles
tmp_file.close()
process.terminate()
except OSError as err:
# process already terminated
pass
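# Editorial sketch (not from the original suite): the shape of run_script()'s return value
# depends on the flags passed, as assembled by format_return() above. A hypothetical caller
# unpacking all of them would look like:
#
#     class ExampleCase(ShellCase):
#         def test_example(self):
#             stdout, stderr, retcode, timed_out = self.run_script(
#                 "salt-call", "--local test.ping",
#                 catch_stderr=True, with_retcode=True, catch_timeout=True,
#             )
#             assert retcode == 0 and not timed_out
#
# With none of those flags set, only the (split) stdout lines are returned.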
class MultiMasterTestShellCase(ShellCase):
"""
Execute a test for a shell command when running multi-master tests
"""
@property
def config_dir(self):
return RUNTIME_VARS.TMP_MM_CONF_DIR
class SPMTestUserInterface:
"""
Test user interface to SPMClient
"""
def __init__(self):
self._status = []
self._confirm = []
self._error = []
def status(self, msg):
self._status.append(msg)
def confirm(self, action):
self._confirm.append(action)
def error(self, msg):
self._error.append(msg)
class SPMCase(TestCase, AdaptedConfigurationTestCaseMixin):
"""
Class for handling spm commands
"""
def _spm_build_files(self, config):
self.formula_dir = os.path.join(
" ".join(config["file_roots"]["base"]), "formulas"
)
self.formula_sls_dir = os.path.join(self.formula_dir, "apache")
self.formula_sls = os.path.join(self.formula_sls_dir, "apache.sls")
self.formula_file = os.path.join(self.formula_dir, "FORMULA")
dirs = [self.formula_dir, self.formula_sls_dir]
for f_dir in dirs:
os.makedirs(f_dir)
with salt.utils.files.fopen(self.formula_sls, "w") as fp:
fp.write(
textwrap.dedent(
"""\
install-apache:
pkg.installed:
- name: apache2
"""
)
)
with salt.utils.files.fopen(self.formula_file, "w") as fp:
fp.write(
textwrap.dedent(
"""\
name: apache
os: RedHat, Debian, Ubuntu, Suse, FreeBSD
os_family: RedHat, Debian, Suse, FreeBSD
version: 201506
release: 2
summary: Formula for installing Apache
description: Formula for installing Apache
"""
)
)
def _spm_config(self, assume_yes=True):
self._tmp_spm = tempfile.mkdtemp()
config = self.get_temp_config(
"minion",
**{
"spm_logfile": os.path.join(self._tmp_spm, "log"),
"spm_repos_config": os.path.join(self._tmp_spm, "etc", "spm.repos"),
"spm_cache_dir": os.path.join(self._tmp_spm, "cache"),
"spm_build_dir": os.path.join(self._tmp_spm, "build"),
"spm_build_exclude": ["apache/.git"],
"spm_db_provider": "sqlite3",
"spm_files_provider": "local",
"spm_db": os.path.join(self._tmp_spm, "packages.db"),
"extension_modules": os.path.join(self._tmp_spm, "modules"),
"file_roots": {"base": [self._tmp_spm]},
"formula_path": os.path.join(self._tmp_spm, "salt"),
"pillar_path": os.path.join(self._tmp_spm, "pillar"),
"reactor_path": os.path.join(self._tmp_spm, "reactor"),
"assume_yes": True if assume_yes else False,
"force": False,
"verbose": False,
"cache": "localfs",
"cachedir": os.path.join(self._tmp_spm, "cache"),
"spm_repo_dups": "ignore",
"spm_share_dir": os.path.join(self._tmp_spm, "share"),
}
)
import salt.utils.yaml
if not os.path.isdir(config["formula_path"]):
os.makedirs(config["formula_path"])
with salt.utils.files.fopen(os.path.join(self._tmp_spm, "spm"), "w") as fp:
salt.utils.yaml.safe_dump(config, fp)
return config
def _spm_create_update_repo(self, config):
build_spm = self.run_spm("build", self.config, self.formula_dir)
c_repo = self.run_spm("create_repo", self.config, self.config["spm_build_dir"])
repo_conf_dir = self.config["spm_repos_config"] + ".d"
os.makedirs(repo_conf_dir)
with salt.utils.files.fopen(os.path.join(repo_conf_dir, "spm.repo"), "w") as fp:
fp.write(
textwrap.dedent(
"""\
local_repo:
url: file://{}
""".format(
self.config["spm_build_dir"]
)
)
)
u_repo = self.run_spm("update_repo", self.config)
def _spm_client(self, config):
import salt.spm
self.ui = SPMTestUserInterface()
client = salt.spm.SPMClient(self.ui, config)
return client
def run_spm(self, cmd, config, arg=None):
client = self._spm_client(config)
client.run([cmd, arg])
client._close()
return self.ui._status
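# Editorial sketch (not from the original suite): the SPM helpers above are meant to be
# chained in a test, roughly in this order - build the config, lay down formula files,
# then build/create/update the local repo before installing from it:
#
#     class ExampleSPMTest(SPMCase):
#         def test_install(self):
#             self.config = self._spm_config()
#             self._spm_build_files(self.config)
#             self._spm_create_update_repo(self.config)
#             self.run_spm("install", self.config, "apache")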
class ModuleCase(TestCase, SaltClientTestCaseMixin):
"""
Execute a module function
"""
def wait_for_all_jobs(self, minions=("minion", "sub_minion"), sleep=0.3):
"""
Wait for all jobs currently running on the list of minions to finish
"""
for minion in minions:
while True:
ret = self.run_function(
"saltutil.running", minion_tgt=minion, timeout=300
)
if ret:
log.debug("Waiting for minion's jobs: %s", minion)
time.sleep(sleep)
else:
break
def minion_run(self, _function, *args, **kw):
"""
Run a single salt function on the 'minion' target and condition
the return down to match the behavior of the raw function call
"""
return self.run_function(_function, args, **kw)
def run_function(
self,
function,
arg=(),
minion_tgt="minion",
timeout=300,
master_tgt=None,
**kwargs
):
"""
Run a single salt function and condition the return down to match the
behavior of the raw function call
"""
known_to_return_none = (
"data.get",
"file.chown",
"file.chgrp",
"pkg.refresh_db",
"ssh.recv_known_host_entries",
"time.sleep",
"grains.delkey",
"grains.delval",
)
if "f_arg" in kwargs:
kwargs["arg"] = kwargs.pop("f_arg")
if "f_timeout" in kwargs:
kwargs["timeout"] = kwargs.pop("f_timeout")
client = self.client if master_tgt is None else self.clients[master_tgt]
log.debug(
"Running client.cmd(minion_tgt=%r, function=%r, arg=%r, timeout=%r, kwarg=%r)",
minion_tgt,
function,
arg,
timeout,
kwargs,
)
orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
if RUNTIME_VARS.PYTEST_SESSION:
fail_or_skip_func = self.fail
else:
fail_or_skip_func = self.skipTest
if minion_tgt not in orig:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
"from the minion '{}'. Command output: {}".format(minion_tgt, orig)
)
elif orig[minion_tgt] is None and function not in known_to_return_none:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{}' from "
"the minion '{}'. Command output: {}".format(function, minion_tgt, orig)
)
# Try to match stalled state functions
orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
return orig[minion_tgt]
def run_state(self, function, **kwargs):
"""
Run the state.single command and return the state return structure
"""
ret = self.run_function("state.single", [function], **kwargs)
return self._check_state_return(ret)
def _check_state_return(self, ret):
if isinstance(ret, dict):
# This is the supposed return format for state calls
return ret
if isinstance(ret, list):
jids = []
# These are usually errors
for item in ret[:]:
if not isinstance(item, str):
# We don't know how to handle this
continue
match = STATE_FUNCTION_RUNNING_RE.match(item)
if not match:
# We don't know how to handle this
continue
jid = match.group("jid")
if jid in jids:
continue
jids.append(jid)
job_data = self.run_function("saltutil.find_job", [jid])
job_kill = self.run_function("saltutil.kill_job", [jid])
msg = (
"A running state.single was found causing a state lock. "
"Job details: '{}' Killing Job Returned: '{}'".format(
job_data, job_kill
)
)
ret.append(
"[TEST SUITE ENFORCED]{}" "[/TEST SUITE ENFORCED]".format(msg)
)
return ret
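# Editorial sketch (not from the original suite): ModuleCase.run_function targets a running
# minion through the Salt client and already unwraps the per-minion return, so a
# hypothetical test only deals with the module's own return value:
#
#     class ExamplePingTest(ModuleCase):
#         def test_ping(self):
#             self.assertTrue(self.run_function("test.ping", minion_tgt="minion"))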
class MultimasterModuleCase(ModuleCase, SaltMultimasterClientTestCaseMixin):
"""
Execute a module function
"""
def run_function(
self,
function,
arg=(),
minion_tgt="mm-minion",
timeout=300,
master_tgt="mm-master",
**kwargs
):
"""
Run a single salt function and condition the return down to match the
behavior of the raw function call
"""
known_to_return_none = (
"data.get",
"file.chown",
"file.chgrp",
"pkg.refresh_db",
"ssh.recv_known_host_entries",
"time.sleep",
)
if minion_tgt == "mm-sub-minion":
known_to_return_none += ("mine.update",)
if "f_arg" in kwargs:
kwargs["arg"] = kwargs.pop("f_arg")
if "f_timeout" in kwargs:
kwargs["timeout"] = kwargs.pop("f_timeout")
if master_tgt is None:
client = self.clients["mm-master"]
elif isinstance(master_tgt, int):
client = self.clients[list(self.clients)[master_tgt]]
else:
client = self.clients[master_tgt]
orig = client.cmd(minion_tgt, function, arg, timeout=timeout, kwarg=kwargs)
if RUNTIME_VARS.PYTEST_SESSION:
fail_or_skip_func = self.fail
else:
fail_or_skip_func = self.skipTest
if minion_tgt not in orig:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
"from the minion '{}'. Command output: {}".format(minion_tgt, orig)
)
elif orig[minion_tgt] is None and function not in known_to_return_none:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get '{}' from "
"the minion '{}'. Command output: {}".format(function, minion_tgt, orig)
)
# Try to match stalled state functions
orig[minion_tgt] = self._check_state_return(orig[minion_tgt])
return orig[minion_tgt]
def run_function_all_masters(
self, function, arg=(), minion_tgt="mm-minion", timeout=300, **kwargs
):
"""
Run a single salt function from all the masters in multimaster environment
and condition the return down to match the behavior of the raw function call
"""
ret = []
for master_id in self.clients:
ret.append(
self.run_function(
function,
arg=arg,
minion_tgt=minion_tgt,
timeout=timeout,
master_tgt=master_id,
**kwargs
)
)
return ret
class SyndicCase(TestCase, SaltClientTestCaseMixin):
"""
Execute a syndic based execution test
"""
_salt_client_config_file_name_ = "syndic_master"
def run_function(self, function, arg=(), timeout=90):
"""
Run a single salt function and condition the return down to match the
behavior of the raw function call
"""
orig = self.client.cmd("minion", function, arg, timeout=timeout)
if RUNTIME_VARS.PYTEST_SESSION:
fail_or_skip_func = self.fail
else:
fail_or_skip_func = self.skipTest
if "minion" not in orig:
fail_or_skip_func(
"WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply "
"from the minion. Command output: {}".format(orig)
)
return orig["minion"]
@SKIP_IF_NOT_RUNNING_PYTEST
@pytest.mark.usefixtures("salt_ssh_cli")
@pytest.mark.requires_sshd_server
class SSHCase(ShellCase):
"""
Execute a command via salt-ssh
"""
def _arg_str(self, function, arg):
return "{} {}".format(function, " ".join(arg))
# pylint: disable=arguments-differ
def run_function(
self, function, arg=(), timeout=180, wipe=True, raw=False, **kwargs
):
"""
We use a 180s timeout here, which some slower systems do end up needing
"""
ret = self.run_ssh(
self._arg_str(function, arg), timeout=timeout, wipe=wipe, raw=raw, **kwargs
)
log.debug(
"SSHCase run_function executed %s with arg %s and kwargs %s",
function,
arg,
kwargs,
)
log.debug("SSHCase JSON return: %s", ret)
# Late import
import salt.utils.json
try:
return salt.utils.json.loads(ret)["localhost"]
except Exception: # pylint: disable=broad-except
return ret
# pylint: enable=arguments-differ
def custom_roster(self, new_roster, data):
"""
        Helper method to create a custom roster to use for an ssh test
"""
        # Late import
        import salt.utils.yaml

        roster = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "roster")
with salt.utils.files.fopen(roster, "r") as fp_:
conf = salt.utils.yaml.safe_load(fp_)
conf["localhost"].update(data)
with salt.utils.files.fopen(new_roster, "w") as fp_:
salt.utils.yaml.safe_dump(conf, fp_)
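# Editorial sketch (not from the original suite): custom_roster() copies the generated
# roster and overlays extra data for the "localhost" target, e.g. to point a single test
# at a different ssh user; the roster key below is hypothetical:
#
#     new_roster = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "custom_roster")
#     self.custom_roster(new_roster, {"user": "other-user"})
#     self.run_ssh("test.ping", roster_file=new_roster)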
class ClientCase(AdaptedConfigurationTestCaseMixin, TestCase):
"""
A base class containing relevant options for starting the various Salt
Python API entrypoints
"""
def get_opts(self):
# Late import
import salt.config
return salt.config.client_config(self.get_config_file_path("master"))
def mkdir_p(self, path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
| [] | [] | [
"PYTHONPATH"
] | [] | ["PYTHONPATH"] | python | 1 | 0 | |
waiting/src/lib/mine/cert/bin/helper/Python/my_configuration.py | import os
def get_cluster_user_pass():
return os.environ["CB_Cluster_PassSuper"]
def get_cluster_user_name():
return os.environ["CB_Cluster_UserSuper"]
def get_directory_answer():
return os.environ["CB_All_DirAnswer"]
def get_directory_data():
return os.environ["CB_Cluster_DirData"]
def get_directory_dataset():
return os.environ["CB_Client_DirDataset"]
def get_directory_index():
return os.environ["CB_Cluster_DirIndex"]
def get_directory_output():
return os.environ["CB_Client_DirOutput"]
def get_directory_package():
return os.environ["CB_Client_DirPackage"]
def get_exam_id():
return os.environ["CB_ExamId"]
def get_file_package_server():
return os.environ["CB_PackageServer"]
def get_file_ssh_key():
return os.environ["CB_All_FileSshKey"]
def get_group_a_name():
return os.environ["CB_GroupA_Name"]
def get_group_b_name():
return os.environ["CB_GroupB_Name"]
def get_host_client():
return os.environ["CB_HostClient"]
def get_hosts_server():
return os.environ["CB_HostsServer"]
def get_machine_username():
return os.environ["CB_All_UserExaminee"]
def get_port_web_ui():
return int(os.environ["CB_Cluster_PortWebUi"])
def get_project_name():
return os.environ["BO_ProjectName"]
def get_project_version():
return os.environ["CB_Version"]
def get_session_id():
return os.environ["CB_SessionId"]
def get_sleep_in_seconds():
return int(os.environ["CB_SleepSeconds"])
def get_task_count_init():
return int(os.environ["CB_ExamTasksInit"])
def get_task_count_main():
return int(os.environ["CB_ExamTasksMain"])
""" Disabled content
"""
| [] | [] | [
"CB_Cluster_PassSuper",
"CB_Client_DirPackage",
"CB_Client_DirDataset",
"CB_Cluster_DirIndex",
"CB_Cluster_PortWebUi",
"CB_SleepSeconds",
"CB_Cluster_UserSuper",
"CB_Client_DirOutput",
"CB_ExamTasksMain",
"CB_GroupA_Name",
"CB_Cluster_DirData",
"CB_ExamTasksInit",
"BO_ProjectName",
"CB_All_UserExaminee",
"CB_All_DirAnswer",
"CB_GroupB_Name",
"CB_SessionId",
"CB_All_FileSshKey",
"CB_HostsServer",
"CB_ExamId",
"CB_PackageServer",
"CB_HostClient",
"CB_Version"
] | [] | ["CB_Cluster_PassSuper", "CB_Client_DirPackage", "CB_Client_DirDataset", "CB_Cluster_DirIndex", "CB_Cluster_PortWebUi", "CB_SleepSeconds", "CB_Cluster_UserSuper", "CB_Client_DirOutput", "CB_ExamTasksMain", "CB_GroupA_Name", "CB_Cluster_DirData", "CB_ExamTasksInit", "BO_ProjectName", "CB_All_UserExaminee", "CB_All_DirAnswer", "CB_GroupB_Name", "CB_SessionId", "CB_All_FileSshKey", "CB_HostsServer", "CB_ExamId", "CB_PackageServer", "CB_HostClient", "CB_Version"] | python | 23 | 0 | |
pkg/service/config.go | package service
import (
"github.com/spacetab-io/solar-staff-sdk-go/pkg/contracts"
"github.com/spacetab-io/solar-staff-sdk-go/pkg/models"
)
func NewService(r Repository) Service {
return &service{r}
}
type service struct {
r Repository
}
type Service interface {
Balance() (*models.Balance, error)
TaskByID(taskID uint64) (*models.Task, error)
TaskByCustomerTaskID(customerTaskID string) (*models.Task, error)
WorkerList() ([]models.Worker, error)
PaymentOnTask(task contracts.PaymentRequest) (*models.Payment, error)
}
type Repository interface {
//Info
GetBalance() (*models.Balance, error)
//Task
GetTaskByID(taskID uint64) (*models.Task, error)
GetTaskByMerchantTransaction(customerTaskID string) (*models.Task, error)
//Worker
GetWorkerList() ([]models.Worker, error)
//Payment
GetPaymentOnTask(task contracts.PaymentRequest) (*models.Payment, error)
}
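
// The example below is an editorial sketch and not part of the original package: it shows
// how any type satisfying Repository can back the Service returned by NewService. The
// stubRepository type is hypothetical and only returns zero values.
type stubRepository struct{}

func (stubRepository) GetBalance() (*models.Balance, error)            { return nil, nil }
func (stubRepository) GetTaskByID(taskID uint64) (*models.Task, error) { return nil, nil }
func (stubRepository) GetTaskByMerchantTransaction(customerTaskID string) (*models.Task, error) {
	return nil, nil
}
func (stubRepository) GetWorkerList() ([]models.Worker, error) { return nil, nil }
func (stubRepository) GetPaymentOnTask(task contracts.PaymentRequest) (*models.Payment, error) {
	return nil, nil
}

// exampleUsage wires the hypothetical stub into the service and reads the balance.
func exampleUsage() (*models.Balance, error) {
	svc := NewService(stubRepository{})
	return svc.Balance()
}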
| [] | [] | [] | [] | [] | go | null | null | null |
test/e2e/cli_test.go | // +build e2e
package e2e
import (
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/suite"
corev1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo/test/e2e/fixtures"
)
type CLISuite struct {
fixtures.E2ESuite
}
func (s *CLISuite) BeforeTest(suiteName, testName string) {
s.E2ESuite.BeforeTest(suiteName, testName)
_ = os.Unsetenv("ARGO_SERVER")
_ = os.Unsetenv("ARGO_TOKEN")
}
func (s *CLISuite) testNeedsOffloading() {
serverUnavailable := os.Getenv("ARGO_SERVER") == ""
if s.Persistence.IsEnabled() && serverUnavailable {
if !serverUnavailable {
s.T().Skip("test needs offloading, but the Argo Server is unavailable - if `testNeedsOffloading()` is the first line of your test test, you should move your test to `CliWithServerSuite`?")
}
s.T().Skip("test needs offloading, but offloading not enabled")
}
}
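// Editorial note (assumption, not from the original file): "offloading" here refers to
// storing large workflow node status in the persistence database, so tests that rely on it
// need either persistence enabled or a reachable Argo Server, typically advertised to the
// CLI via the ARGO_SERVER environment variable, e.g.
//
//	ARGO_SERVER=localhost:2746 go test -tags e2e ./test/e2e/...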
func (s *CLISuite) TestCompletion() {
s.Given().RunCli([]string{"completion", "bash"}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
assert.Contains(t, output, "bash completion for argo")
})
}
func (s *CLISuite) TestVersion() {
// check we can run this without error
s.Given().
RunCli([]string{"version"}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
})
}
func (s *CLISuite) TestSubmitDryRun() {
s.Given().
RunCli([]string{"submit", "smoke/basic.yaml", "--dry-run", "-o", "yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "generateName: basic")
// dry-run should never get a UID
assert.NotContains(t, output, "uid:")
}
})
}
func (s *CLISuite) TestSubmitServerDryRun() {
s.Given().
RunCli([]string{"submit", "smoke/basic.yaml", "--server-dry-run", "-o", "yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "name: basic")
// server-dry-run should get a UID
assert.Contains(t, output, "uid:")
}
})
}
func (s *CLISuite) TestTokenArg() {
if os.Getenv("CI") != "true" {
s.T().SkipNow()
}
s.Run("ListWithBadToken", func() {
s.Given().RunCli([]string{"list", "--user", "fake_token_user", "--token", "badtoken"}, func(t *testing.T, output string, err error) {
assert.Error(t, err)
})
})
var goodToken string
s.Run("GetSAToken", func() {
token, err := s.GetServiceAccountToken()
assert.NoError(s.T(), err)
goodToken = token
})
s.Run("ListWithGoodToken", func() {
s.Given().RunCli([]string{"list", "--user", "fake_token_user", "--token", goodToken}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
assert.Contains(t, output, "NAME")
assert.Contains(t, output, "STATUS")
})
})
}
func (s *CLISuite) TestLogs() {
var name string
s.Given().
Workflow(`@smoke/basic.yaml`).
When().
SubmitWorkflow().
WaitForWorkflow(fixtures.ToStart).
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.Nodes.FindByDisplayName(wf.Name) != nil
}), "pod running", 10*time.Second).
Then().
ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
name = metadata.Name
})
s.Run("FollowWorkflowLogs", func() {
s.Given().
RunCli([]string{"logs", name, "--follow"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, ":) Hello Argo!")
}
})
})
s.Run("FollowPodLogs", func() {
s.Given().
RunCli([]string{"logs", name, name, "--follow"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, ":) Hello Argo!")
}
})
})
s.Run("ContainerLogs", func() {
s.Given().
RunCli([]string{"logs", name, name, "-c", "wait"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Executor")
}
})
})
s.Run("Since", func() {
s.Given().
RunCli([]string{"logs", name, "--since=1s"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.NotContains(t, output, ":) Hello Argo!")
}
})
})
s.Run("SinceTime", func() {
s.Given().
RunCli([]string{"logs", name, "--since-time=" + time.Now().Format(time.RFC3339)}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.NotContains(t, output, ":) Hello Argo!")
}
})
})
s.Run("TailLines", func() {
s.Given().
RunCli([]string{"logs", name, "--tail=0"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.NotContains(t, output, ":) Hello Argo!")
}
})
})
s.Run("CompletedWorkflow", func() {
s.Given().
When().
WaitForWorkflow().
Then().
RunCli([]string{"logs", name, "--tail=10"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, ":) Hello Argo!")
}
})
})
}
// this test probably should be in the ArgoServerSuite, but it's just much easier to write the test
// for the CLI
func (s *CLISuite) TestLogProblems() {
s.Given().
Workflow(`@testdata/log-problems.yaml`).
When().
SubmitWorkflow().
WaitForWorkflow(fixtures.ToStart).
Then().
// logs should come in order
RunCli([]string{"logs", "log-problems", "--follow"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
lines := strings.Split(output, "\n")
if assert.Len(t, lines, 6) {
assert.Contains(t, lines[0], "one")
assert.Contains(t, lines[1], "two")
assert.Contains(t, lines[2], "three")
assert.Contains(t, lines[3], "four")
assert.Contains(t, lines[4], "five")
}
}
}).
When().
		// Next check that all log entries are received and in the correct order.
WaitForWorkflow().
Then().
RunCli([]string{"logs", "log-problems"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
lines := strings.Split(output, "\n")
if assert.Len(t, lines, 6) {
assert.Contains(t, lines[0], "one")
assert.Contains(t, lines[1], "two")
assert.Contains(t, lines[2], "three")
assert.Contains(t, lines[3], "four")
assert.Contains(t, lines[4], "five")
}
}
})
}
func (s *CLISuite) TestRoot() {
s.Run("Submit", func() {
s.Given().RunCli([]string{"submit", "testdata/basic-workflow.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "ServiceAccount:")
assert.Contains(t, output, "Status:")
assert.Contains(t, output, "Created:")
}
})
})
s.Run("List", func() {
s.testNeedsOffloading()
for i := 0; i < 3; i++ {
s.Given().
Workflow("@smoke/basic-generate-name.yaml").
When().
SubmitWorkflow().
WaitForWorkflow()
}
s.Given().RunCli([]string{"list", "--chunk-size", "1"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "NAME")
assert.Contains(t, output, "STATUS")
assert.Contains(t, output, "AGE")
assert.Contains(t, output, "DURATION")
assert.Contains(t, output, "PRIORITY")
}
})
})
s.Run("Get", func() {
s.testNeedsOffloading()
s.Given().RunCli([]string{"get", "basic"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "ServiceAccount:")
assert.Contains(t, output, "Status:")
assert.Contains(t, output, "Created:")
}
})
})
var createdWorkflowName string
s.Run("From", func() {
s.Given().CronWorkflow("@cron/basic.yaml").
When().
CreateCronWorkflow().
RunCli([]string{"submit", "--from", "cronwf/test-cron-wf-basic", "-l", "argo-e2e=true"}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
assert.Contains(t, output, "Name: test-cron-wf-basic-")
r := regexp.MustCompile(`Name:\s+?(test-cron-wf-basic-[a-z0-9]+)`)
res := r.FindStringSubmatch(output)
if len(res) != 2 {
assert.Fail(t, "Internal test error, please report a bug")
}
createdWorkflowName = res[1]
}).
WaitForWorkflow(createdWorkflowName).
Then().
ExpectWorkflowName(createdWorkflowName, func(t *testing.T, metadata *corev1.ObjectMeta, status *wfv1.WorkflowStatus) {
assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
})
})
}
func (s *CLIWithServerSuite) TestWorkflowSuspendResume() {
s.testNeedsOffloading()
s.Given().
Workflow("@testdata/sleep-3s.yaml").
When().
SubmitWorkflow().
WaitForWorkflow(fixtures.ToStart).
RunCli([]string{"suspend", "sleep-3s"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "workflow sleep-3s suspended")
}
}).
RunCli([]string{"resume", "sleep-3s"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "workflow sleep-3s resumed")
}
}).
WaitForWorkflow().
Then().
ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) {
assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
})
}
func (s *CLIWithServerSuite) TestNodeSuspendResume() {
s.testNeedsOffloading()
s.Given().
Workflow("@testdata/node-suspend.yaml").
When().
SubmitWorkflow().
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.AnyActiveSuspendNode()
}), "suspended node").
RunCli([]string{"resume", "node-suspend", "--node-field-selector", "inputs.parameters.tag.value=suspend1-tag1"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "workflow node-suspend resumed")
}
}).
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.AnyActiveSuspendNode()
}), "suspended node").
RunCli([]string{"stop", "node-suspend", "--node-field-selector", "inputs.parameters.tag.value=suspend2-tag1", "--message", "because"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "workflow node-suspend stopped")
}
}).
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.Phase == wfv1.NodeFailed
}), "suspended node").
Then().
ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) {
if assert.Equal(t, wfv1.NodeFailed, status.Phase) {
r := regexp.MustCompile(`child '(node-suspend-[0-9]+)' failed`)
res := r.FindStringSubmatch(status.Message)
assert.Equal(t, len(res), 2)
assert.Equal(t, status.Nodes[res[1]].Message, "because")
}
})
}
func (s *CLISuite) TestWorkflowDeleteByName() {
var name string
s.Given().
Workflow("@smoke/basic.yaml").
When().
SubmitWorkflow().
Then().
ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
name = metadata.Name
}).
RunCli([]string{"delete", name}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Regexp(t, "Workflow 'basic-.*' deleted", output)
}
})
}
func (s *CLISuite) TestWorkflowDeleteDryRun() {
s.Given().
When().
RunCli([]string{"delete", "--dry-run", "basic"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Workflow 'basic' deleted (dry-run)")
}
})
}
func (s *CLISuite) TestWorkflowDeleteNothing() {
s.Given().
Workflow("@smoke/basic.yaml").
When().
SubmitWorkflow().
RunCli([]string{"delete"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.NotContains(t, output, "deleted")
}
})
}
func (s *CLISuite) TestWorkflowDeleteNotFound() {
s.Given().
When().
RunCli([]string{"delete", "not-found"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Workflow 'not-found' not found")
}
})
}
func (s *CLISuite) TestWorkflowDeleteAll() {
s.Given().
Workflow("@smoke/basic.yaml").
When().
SubmitWorkflow().
Given().
RunCli([]string{"delete", "--all", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Regexp(t, "Workflow 'basic-.*' deleted", output)
}
})
}
func (s *CLISuite) TestWorkflowDeleteCompleted() {
s.Given().
Workflow("@testdata/sleep-3s.yaml").
When().
SubmitWorkflow().
Given().
RunCli([]string{"delete", "--completed", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
// nothing should be deleted yet
assert.NotContains(t, output, "deleted")
}
}).
When().
WaitForWorkflow().
Given().
RunCli([]string{"delete", "--completed", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "deleted")
}
})
}
func (s *CLISuite) TestWorkflowDeleteResubmitted() {
s.Given().
Workflow("@testdata/exit-1.yaml").
When().
SubmitWorkflow().
WaitForWorkflow().
Given().
RunCli([]string{"resubmit", "--memoized", "exit-1"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "ServiceAccount:")
assert.Contains(t, output, "Status:")
assert.Contains(t, output, "Created:")
}
}).
When().
Given().
RunCli([]string{"delete", "--resubmitted", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "deleted")
}
})
}
func (s *CLISuite) TestWorkflowDeleteOlder() {
s.Given().
Workflow("@smoke/basic.yaml").
When().
SubmitWorkflow().
WaitForWorkflow().
Given().
RunCli([]string{"delete", "--older", "1d", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
// nothing over a day should be deleted
assert.NotContains(t, output, "deleted")
}
}).
RunCli([]string{"delete", "--older", "0s", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "deleted")
}
})
}
func (s *CLISuite) TestWorkflowDeleteByPrefix() {
s.Given().
Workflow("@smoke/basic.yaml").
When().
SubmitWorkflow().
Given().
RunCli([]string{"delete", "--prefix", "missing-prefix", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
// nothing should be deleted
assert.NotContains(t, output, "deleted")
}
}).
RunCli([]string{"delete", "--prefix", "basic", "-l", "argo-e2e"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "deleted")
}
})
}
func (s *CLISuite) TestWorkflowLint() {
s.Run("LintFile", func() {
s.Given().RunCli([]string{"lint", "smoke/basic.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "smoke/basic.yaml is valid")
}
})
})
s.Run("LintFileEmptyParamDAG", func() {
s.Given().RunCli([]string{"lint", "expectedfailures/empty-parameter-dag.yaml"}, func(t *testing.T, output string, err error) {
if assert.EqualError(t, err, "exit status 1") {
assert.Contains(t, output, "templates.abc.tasks.a templates.whalesay inputs.parameters.message was not supplied")
}
})
})
s.Run("LintFileEmptyParamSteps", func() {
s.Given().RunCli([]string{"lint", "expectedfailures/empty-parameter-steps.yaml"}, func(t *testing.T, output string, err error) {
if assert.EqualError(t, err, "exit status 1") {
assert.Contains(t, output, "templates.abc.steps[0].a templates.whalesay inputs.parameters.message was not supplied")
}
})
})
s.Run("LintFileWithTemplate", func() {
s.Given().
WorkflowTemplate("@smoke/workflow-template-whalesay-template.yaml").
When().
CreateWorkflowTemplates().
Given().
RunCli([]string{"lint", "smoke/hello-world-workflow-tmpl.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "smoke/hello-world-workflow-tmpl.yaml is valid")
}
})
})
s.Run("LintDir", func() {
tmp, err := ioutil.TempDir("", "")
s.CheckError(err)
defer func() { _ = os.RemoveAll(tmp) }()
// Read all content of src to data
data, err := ioutil.ReadFile("smoke/basic.yaml")
s.CheckError(err)
// Write data to dst
err = ioutil.WriteFile(filepath.Join(tmp, "my-workflow.yaml"), data, 0644)
s.CheckError(err)
s.Given().
RunCli([]string{"lint", tmp}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "my-workflow.yaml is valid")
}
})
})
s.Run("Different Kind", func() {
s.Given().
RunCli([]string{"lint", "testdata/workflow-template-nested-template.yaml"}, func(t *testing.T, output string, err error) {
if assert.Error(t, err) {
assert.Contains(t, output, "WorkflowTemplate 'workflow-template-nested-template' is not of kind Workflow. Ignoring...")
assert.Contains(t, output, "Error in file testdata/workflow-template-nested-template.yaml: there was nothing to validate")
}
})
})
s.Run("Valid", func() {
s.Given().
RunCli([]string{"lint", "testdata/exit-1.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "exit-1.yaml is valid")
}
})
})
s.Run("Invalid", func() {
s.Given().
RunCli([]string{"lint", "expectedfailures/empty-parameter-dag.yaml"}, func(t *testing.T, output string, err error) {
if assert.Error(t, err) {
assert.Contains(t, output, "Error in file expectedfailures/empty-parameter-dag.yaml:")
}
})
})
// Not all files in this directory are Workflows, expect failure
s.Run("NotAllWorkflows", func() {
s.Given().
RunCli([]string{"lint", "testdata"}, func(t *testing.T, output string, err error) {
if assert.Error(t, err) {
assert.Contains(t, output, "WorkflowTemplate 'workflow-template-nested-template' is not of kind Workflow. Ignoring...")
assert.Contains(t, output, "Error in file testdata/workflow-template-nested-template.yaml: there was nothing to validate")
}
})
})
// All files in this directory are Workflows, expect success
s.Run("AllWorkflows", func() {
s.Given().
RunCli([]string{"lint", "stress"}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
})
})
}
func (s *CLIWithServerSuite) TestWorkflowRetry() {
s.testNeedsOffloading()
var retryTime corev1.Time
s.Given().
Workflow("@testdata/retry-test.yaml").
When().
SubmitWorkflow().
WaitForWorkflow(fixtures.ToStart).
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.AnyActiveSuspendNode()
}), "suspended node").
RunCli([]string{"terminate", "retry-test"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "workflow retry-test terminated")
}
}).
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
retryTime = wf.Status.FinishedAt
return wf.Status.Phase == wfv1.NodeFailed
}), "is terminated", 20*time.Second).
RunCli([]string{"retry", "retry-test", "--restart-successful", "--node-field-selector", "templateName==steps-inner"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
}
}).
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.AnyActiveSuspendNode()
}), "suspended node").
Then().
ExpectWorkflow(func(t *testing.T, _ *corev1.ObjectMeta, status *wfv1.WorkflowStatus) {
outerStepsPodNode := status.Nodes.FindByDisplayName("steps-outer-step1")
innerStepsPodNode := status.Nodes.FindByDisplayName("steps-inner-step1")
assert.True(t, outerStepsPodNode.FinishedAt.Before(&retryTime))
assert.True(t, retryTime.Before(&innerStepsPodNode.FinishedAt))
})
}
func (s *CLISuite) TestWorkflowTerminate() {
var name string
s.Given().
Workflow("@smoke/basic.yaml").
When().
SubmitWorkflow().
Then().
ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
name = metadata.Name
}).
RunCli([]string{"terminate", name}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Regexp(t, "workflow basic-.* terminated", output)
}
})
}
func (s *CLIWithServerSuite) TestWorkflowWait() {
s.testNeedsOffloading()
var name string
s.Given().
Workflow("@smoke/basic.yaml").
When().
SubmitWorkflow().
Then().
ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
name = metadata.Name
}).
RunCli([]string{"wait", name}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Regexp(t, "basic-.* Succeeded", output)
}
})
}
func (s *CLIWithServerSuite) TestWorkflowWatch() {
s.testNeedsOffloading()
s.Given().
Workflow("@smoke/basic.yaml").
When().
SubmitWorkflow().
Then().
RunCli([]string{"watch", "@latest"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name: ")
}
})
}
func (s *CLISuite) TestTemplate() {
s.Run("Lint", func() {
s.Given().RunCli([]string{"template", "lint", "smoke/workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "validated")
}
})
})
s.Run("Create", func() {
s.Given().RunCli([]string{"template", "create", "smoke/workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "Created:")
}
})
})
s.Run("List", func() {
s.Given().RunCli([]string{"template", "list"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "NAME")
}
})
})
s.Run("Get", func() {
s.Given().RunCli([]string{"template", "get", "not-found"}, func(t *testing.T, output string, err error) {
if assert.EqualError(t, err, "exit status 1") {
assert.Contains(t, output, `"not-found" not found`)
}
}).RunCli([]string{"template", "get", "workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "Created:")
}
})
})
s.Run("Submittable-Template", func() {
s.testNeedsOffloading()
s.Given().RunCli([]string{"submit", "--from", "workflowtemplate/workflow-template-whalesay-template", "-l", "argo-e2e=true"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "Created:")
}
})
var workflowName string
s.Given().RunCli([]string{"list"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
r := regexp.MustCompile(`\s+?(workflow-template-whalesay-template-[a-z0-9]+)`)
res := r.FindStringSubmatch(output)
if len(res) != 2 {
assert.Fail(t, "Internal test error, please report a bug")
}
workflowName = res[1]
}
})
s.Given().
WorkflowName(workflowName).
When().
WaitForWorkflow().
RunCli([]string{"get", workflowName}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, workflowName)
assert.Contains(t, output, "Succeeded")
}
})
})
s.Run("Delete", func() {
s.Given().RunCli([]string{"template", "delete", "workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
})
})
}
func (s *CLISuite) TestWorkflowResubmit() {
s.Given().
Workflow("@testdata/exit-1.yaml").
When().
SubmitWorkflow().
WaitForWorkflow().
Given().
RunCli([]string{"resubmit", "--memoized", "exit-1"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "ServiceAccount:")
assert.Contains(t, output, "Status:")
assert.Contains(t, output, "Created:")
}
})
}
func (s *CLISuite) TestCron() {
s.Run("Lint", func() {
s.Given().RunCli([]string{"cron", "lint", "cron/basic.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "cron/basic.yaml is valid")
assert.Contains(t, output, "Cron workflow manifests validated")
}
})
})
s.Run("Different Kind", func() {
s.Given().
RunCli([]string{"cron", "lint", "testdata/workflow-template-nested-template.yaml"}, func(t *testing.T, output string, err error) {
if assert.Error(t, err) {
assert.Contains(t, output, "WorkflowTemplate 'workflow-template-nested-template' is not of kind CronWorkflow. Ignoring...")
assert.Contains(t, output, "Error in file testdata/workflow-template-nested-template.yaml: there was nothing to validate")
}
})
})
// Not all files in this directory are CronWorkflows, expect failure
s.Run("NotAllWorkflows", func() {
s.Given().
RunCli([]string{"cron", "lint", "testdata"}, func(t *testing.T, output string, err error) {
if assert.Error(t, err) {
assert.Contains(t, output, "WorkflowTemplate 'workflow-template-nested-template' is not of kind CronWorkflow. Ignoring...")
assert.Contains(t, output, "Error in file testdata/workflow-template-nested-template.yaml: there was nothing to validate")
}
})
})
// All files in this directory are CronWorkflows, expect success
s.Run("AllCron", func() {
s.Given().
RunCli([]string{"cron", "lint", "cron"}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
})
})
s.Run("Create", func() {
s.Given().RunCli([]string{"cron", "create", "cron/basic.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "Created:")
assert.Contains(t, output, "Schedule:")
assert.Contains(t, output, "Suspended:")
assert.Contains(t, output, "StartingDeadlineSeconds:")
assert.Contains(t, output, "ConcurrencyPolicy:")
}
})
})
s.Run("Delete", func() {
s.Given().RunCli([]string{"cron", "delete", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
})
})
s.Run("Create Schedule Override", func() {
s.Given().RunCli([]string{"cron", "create", "cron/basic.yaml", "--schedule", "1 2 3 * *"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Schedule: 1 2 3 * *")
}
})
})
s.Run("Create Parameter Override", func() {
s.Given().RunCli([]string{"cron", "create", "cron/param.yaml", "-p", "message=\"bar test passed\""}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "bar test passed")
}
})
})
s.Run("List", func() {
s.Given().RunCli([]string{"cron", "list"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "NAME")
assert.Contains(t, output, "AGE")
assert.Contains(t, output, "LAST RUN")
assert.Contains(t, output, "SCHEDULE")
assert.Contains(t, output, "SUSPENDED")
}
})
})
s.Run("Suspend", func() {
s.Given().RunCli([]string{"cron", "suspend", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "CronWorkflow 'test-cron-wf-basic' suspended")
}
})
})
s.Run("Resume", func() {
s.Given().RunCli([]string{"cron", "resume", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "CronWorkflow 'test-cron-wf-basic' resumed")
}
})
})
s.Run("Get", func() {
s.Given().RunCli([]string{"cron", "get", "not-found"}, func(t *testing.T, output string, err error) {
if assert.EqualError(t, err, "exit status 1") {
assert.Contains(t, output, `\"not-found\" not found`)
}
}).RunCli([]string{"cron", "get", "test-cron-wf-basic"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "Created:")
assert.Contains(t, output, "Schedule:")
assert.Contains(t, output, "Suspended:")
assert.Contains(t, output, "StartingDeadlineSeconds:")
assert.Contains(t, output, "ConcurrencyPolicy:")
}
})
})
}
func (s *CLISuite) TestClusterTemplateCommands() {
s.Run("Create", func() {
s.Given().
RunCli([]string{"cluster-template", "create", "smoke/cluster-workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "cluster-workflow-template-whalesay-template")
}
})
})
s.Run("Get", func() {
s.Given().
RunCli([]string{"cluster-template", "get", "cluster-workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "cluster-workflow-template-whalesay-template")
}
})
})
s.Run("list", func() {
s.Given().
RunCli([]string{"cluster-template", "list"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "cluster-workflow-template-whalesay-template")
}
})
})
s.Run("Delete", func() {
s.Given().
RunCli([]string{"cluster-template", "delete", "cluster-workflow-template-whalesay-template"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "cluster-workflow-template-whalesay-template")
}
})
})
}
func (s *CLISuite) TestWorkflowTemplateRefSubmit() {
s.Run("CreateWFT", func() {
s.Given().RunCli([]string{"template", "create", "smoke/workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "Created:")
}
})
})
s.Run("CreateWF", func() {
s.Given().RunCli([]string{"submit", "testdata/workflow-template-ref.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "Created:")
}
})
})
s.Run("CreateCWFT", func() {
s.Given().RunCli([]string{"cluster-template", "create", "smoke/cluster-workflow-template-whalesay-template.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Created:")
}
})
})
s.Run("CreateWFWithCWFTRef", func() {
s.Given().RunCli([]string{"submit", "testdata/cluster-workflow-template-ref.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "Name:")
assert.Contains(t, output, "Namespace:")
assert.Contains(t, output, "Created:")
}
})
})
}
func (s *CLIWithServerSuite) TestWorkflowLevelSemaphore() {
semaphoreData := map[string]string{
"workflow": "1",
}
s.testNeedsOffloading()
s.Given().
Workflow("@testdata/semaphore-wf-level.yaml").
When().
CreateConfigMap("my-config", semaphoreData).
RunCli([]string{"submit", "testdata/semaphore-wf-level-1.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "semaphore-wf-level-1")
}
}).
SubmitWorkflow().
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.Phase == ""
}), "Workflow is waiting for lock").
WaitForWorkflow().
DeleteConfigMap("my-config").
Then().
ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
})
}
func (s *CLIWithServerSuite) TestTemplateLevelSemaphore() {
semaphoreData := map[string]string{
"template": "1",
}
s.testNeedsOffloading()
s.Given().
Workflow("@testdata/semaphore-tmpl-level.yaml").
When().
CreateConfigMap("my-config", semaphoreData).
SubmitWorkflow().
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.Phase == wfv1.NodeRunning
}), "waiting for Workflow to run", 10*time.Second).
RunCli([]string{"get", "semaphore-tmpl-level"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Waiting for")
}).
WaitForWorkflow()
}
func (s *CLISuite) TestRetryOmit() {
s.testNeedsOffloading()
s.Given().
Workflow("@testdata/retry-omit.yaml").
When().
SubmitWorkflow().
WaitForWorkflow(fixtures.Condition(func(wf *wfv1.Workflow) bool {
return wf.Status.Nodes.Any(func(node wfv1.NodeStatus) bool {
return node.Phase == wfv1.NodeOmitted
})
}), "any node omitted").
WaitForWorkflow(10*time.Second).
Then().
ExpectWorkflow(func(t *testing.T, metadata *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
node := status.Nodes.FindByDisplayName("should-not-execute")
if assert.NotNil(t, node) {
assert.Equal(t, wfv1.NodeOmitted, node.Phase)
}
}).
RunCli([]string{"retry", "dag-diamond-8q7vp"}, func(t *testing.T, output string, err error) {
assert.NoError(t, err)
assert.Contains(t, output, "Status: Running")
}).When().
WaitForWorkflow()
}
func (s *CLISuite) TestSynchronizationWfLevelMutex() {
s.testNeedsOffloading()
s.Given().
Workflow("@functional/synchronization-mutex-wf-level.yaml").
When().
RunCli([]string{"submit", "functional/synchronization-mutex-wf-level-1.yaml"}, func(t *testing.T, output string, err error) {
if assert.NoError(t, err) {
assert.Contains(t, output, "synchronization-wf-level-mutex")
}
}).
SubmitWorkflow().
Wait(1*time.Second).
RunCli([]string{"get", "synchronization-wf-level-mutex"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Pending")
}).
WaitForWorkflow().
Then().
ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
})
}
func (s *CLISuite) TestTemplateLevelMutex() {
s.testNeedsOffloading()
s.Given().
Workflow("@functional/synchronization-mutex-tmpl-level.yaml").
When().
SubmitWorkflow().
Wait(3*time.Second).
RunCli([]string{"get", "synchronization-tmpl-level-mutex"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Waiting for")
}).
WaitForWorkflow().
Then().
ExpectWorkflow(func(t *testing.T, _ *metav1.ObjectMeta, status *wfv1.WorkflowStatus) {
assert.Equal(t, wfv1.NodeSucceeded, status.Phase)
})
}
func (s *CLIWithServerSuite) TestResourceTemplateStopAndTerminate() {
s.testNeedsOffloading()
s.Run("ResourceTemplateStop", func() {
s.Given().
WorkflowName("resource-tmpl-wf").
When().
RunCli([]string{"submit", "functional/resource-template.yaml"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Pending")
}).
RunCli([]string{"get", "resource-tmpl-wf"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Running")
}).
RunCli([]string{"stop", "resource-tmpl-wf"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "workflow resource-tmpl-wf stopped")
}).
WaitForWorkflow().
RunCli([]string{"get", "resource-tmpl-wf"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Stopped with strategy 'Stop'")
}).
RunCli([]string{"delete", "resource-tmpl-wf"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "deleted")
})
})
s.Run("ResourceTemplateTerminate", func() {
s.Given().
WorkflowName("resource-tmpl-wf-1").
When().
RunCli([]string{"submit", "functional/resource-template.yaml", "--name", "resource-tmpl-wf-1"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Pending")
}).
RunCli([]string{"get", "resource-tmpl-wf-1"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Running")
}).
RunCli([]string{"terminate", "resource-tmpl-wf-1"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "workflow resource-tmpl-wf-1 terminated")
}).
WaitForWorkflow().
RunCli([]string{"get", "resource-tmpl-wf-1"}, func(t *testing.T, output string, err error) {
assert.Contains(t, output, "Stopped with strategy 'Terminate'")
})
})
}
func (s *CLIWithServerSuite) TestMetaDataNamespace() {
s.Given().
Exec("../../dist/argo", []string{"cron", "create", "testdata/wf-default-ns.yaml"}, func(t *testing.T, output string, err error) {
if assert.Error(t, err) {
assert.Contains(t, output, "PermissionDenied")
assert.Contains(t, output, `in the namespace "default"`)
}
}).
Exec("../../dist/argo", []string{"cron", "get", "test-cron-wf-basic", "-n", "default"}, func(t *testing.T, output string, err error) {
if assert.Error(t, err) {
assert.Contains(t, output, "PermissionDenied")
assert.Contains(t, output, `in the namespace \"default\"`)
}
}).
Exec("../../dist/argo", []string{"cron", "delete", "test-cron-wf-basic", "-n", "default"}, func(t *testing.T, output string, err error) {
if assert.Error(t, err) {
assert.Contains(t, output, "PermissionDenied")
assert.Contains(t, output, `in the namespace \"default\"`)
}
})
}
func TestCLISuite(t *testing.T) {
suite.Run(t, new(CLISuite))
}
| ["\"ARGO_SERVER\"", "\"CI\""] | [] | ["ARGO_SERVER", "CI"] | [] | ["ARGO_SERVER", "CI"] | go | 2 | 0 | |
ems/receiver.py | # Copyright (c) 2015 Fraunhofer FOKUS. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import json
import logging
import subprocess
import traceback
import yaml
from git import Repo, GitCommandError
import os
from sets import Set
__author__ = 'lto,ogo'
log = logging.getLogger(__name__)
logging_dir = '/var/log/openbaton/'
# The environment variable SCRIPTS_PATH is set whenever a script-path is contained in the JSON message; it can be used after the scripts have been cloned or saved.
scripts_path = "/opt/openbaton/scripts"
ob_parameters_file_name = "ob_parameters"
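# Decode the base64-encoded script payload from the message and store it under the requested (or default) scripts path, marking it executable.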
def save_scripts(dict_msg):
log.info("Recevied save scripts command")
payload = dict_msg.get('payload')
path = dict_msg.get('script-path')
try:
script = base64.b64decode(payload)
if path is None:
log.info("No path provided, saving into default directory")
path = scripts_path
if not os.path.exists(path):
os.makedirs(path)
name = dict_msg.get('name')
if path[-1] == "/":
path_name = path + name
else:
path_name = path + "/" + name
os.environ['SCRIPTS_PATH'] = path
f = open(path_name, "w")
f.write(script)
# log.info("Written %s into %s" % (script, path_name))
out = str(os.listdir(path))
err = ""
status = 0
st = os.stat(path)
os.chmod(path, st.st_mode | 0111)
st = os.stat(path_name)
os.chmod(path_name, st.st_mode | 0111)
except TypeError:  # raised when the message payload is not properly base64-encoded
log.error("Incorrect script encoding")
action = None
out = None
err = "Incorrect script encoding"
status = "1"
return generate_response(out=out, err=err, status=status)
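# Clone the scripts repository given in the payload into the scripts path and mark every file in it executable.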
def clone_scripts(dict_msg):
payload = dict_msg.get('payload')
path = dict_msg.get('script-path')
if path is None:
path = scripts_path
url = payload
os.environ['SCRIPTS_PATH'] = path
log.info("Cloning from: %s into %s" % (url, path))
try:
Repo.clone_from(url, path)
log.info('Cloned')
for file in os.listdir(path):
st = os.stat(path + "/" + file)
os.chmod(path + "/" + file, st.st_mode | 0111)
out = str(os.listdir(path))
err = ""
status = 0
except GitCommandError as e:
log.info("Encountered error while cloning")
err = traceback.format_exc()
status = e.status
out = None
return generate_response(out=out, err=err, status=status)
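# Execute the requested script from the scripts path (directly for .py files, through /bin/bash otherwise) with the given environment, capturing stdout/stderr and the exit status.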
def execute(dict_msg):
log.info("Received execute command")
payload = dict_msg.get('payload')
if payload[-1] == "/":
payload = scripts_path + payload
else:
payload = scripts_path + "/" + payload
env = dict_msg.get('env')
log.info("Executing: %s with env %s" % (payload, env))
if env is None or len(env) == 0:
env = None
else:
env.update(os.environ)
ems_out_log = open('/var/log/openbaton/ems-out.log', "w+")
ems_err_log = open('/var/log/openbaton/ems-err.log', "w+")
if '.py' in payload:
proc = subprocess.Popen(payload.split(), stdout=ems_out_log, stderr=ems_err_log, env=env)
else:
proc = subprocess.Popen(["/bin/bash"] + payload.split(), stdout=ems_out_log, stderr=ems_err_log, env=env)
status = proc.wait()
ems_out_log.seek(0)
ems_err_log.seek(0)
out = ems_out_log.read()
err = ems_err_log.read()
ems_out_log.close()
ems_err_log.close()
log.info("Executed: ERR: %s OUT: %s", err, out)
return generate_response(out=out, err=err, status=status)
def repos_scripts_update(dict_msg):
log.info("Updating scripts")
payload = dict_msg.get('payload')
url = payload
try:
# NOTE: GitPython's Repo has no pull() classmethod; assuming the scripts
# directory is an existing clone of the payload URL, pull from its origin remote.
Repo("/opt/openbaton/scripts/").remotes.origin.pull()
log.info("Updated")
except GitCommandError as e:
log.info("Encountered error while updatign scripts")
err = traceback.format_exc()
status = e.status
out = None
return generate_response(out=out, err=err, status=status)
else:
out = str(os.listdir(scripts_path))
err = ""
status = 0
return generate_response(out=out, err=err, status=status)
def scripts_update(dict_msg):
log.info("Updating scripts")
script_name = dict_msg.get('name')
payload = dict_msg.get('payload')
path = dict_msg.get('script-path')
os.environ['SCRIPTS_PATH'] = path
script_payload = base64.b64decode(payload)
try:
f = open(scripts_path + "/" + script_name, "w")
f.write(script_payload)
f.close()
log.info("Updated file %s" % script_name)
except GitCommandError as e:
log.info("Encountered error while updating scripts")
err = traceback.format_exc()
status = e.status
out = None
return generate_response(out=out, err=err, status=status)
else:
out = str(os.listdir(scripts_path))
err = ""
status = 0
return generate_response(out=out, err=err, status=status)
def save_vnf_parameters(parameters_file_path_bash, vnf_parameters):
log.info("Reading VNF Parameters")
with open(parameters_file_path_bash, 'a+') as f:
f.write("# VNF Parameters\n")
for vnf_type in vnf_parameters.keys():
log.debug("Reading VNF Parameters of VNF type: " + vnf_type)
vnf_param_str = "export OB_" + vnf_type + "_VNF_"
internal_parameters = vnf_parameters.get(vnf_type).get('parameters')
for vnf_parameter_key in internal_parameters.keys():
vnf_param_str += vnf_parameter_key + "=" + internal_parameters.get(vnf_parameter_key) + "\n"
log.debug(vnf_param_str)
f.write(vnf_param_str)
f.flush()
os.fsync(f.fileno())
def save_vnfc_parameters(parameters_file_path_bash, vnfc_parameters):
log.info("Reading VNFC Parameters")
with open(parameters_file_path_bash, 'a+') as f:
f.write("\n# VNFC Parameters\n")
# create a list of vnfc parameters existing in at least one vnfc of the same vnf_type
all_vnfc_parameter_keys_for_vnf_type = {}
for vnf_type in vnfc_parameters.keys():
all_vnfc_parameter_keys_for_vnf_type[vnf_type] = Set([])
for vnfc_id_for_vnf_type, vnfc_content_for_vnf_type in vnfc_parameters.get(vnf_type).get(
'parameters').iteritems():
parameters = vnfc_content_for_vnf_type.get('parameters')
for parameter_key in parameters.keys():
all_vnfc_parameter_keys_for_vnf_type[vnf_type].add(parameter_key)
all_vnfc_parameters = {}
for vnf_type in vnfc_parameters.keys():
log.debug("Reading VNFC parameters of VNF type: " + vnf_type + " which keys are:")
log.debug(all_vnfc_parameter_keys_for_vnf_type[vnf_type])
all_vnfc_parameters_for_vnf_type = {}
for parameter_key in all_vnfc_parameter_keys_for_vnf_type[vnf_type]:
log.debug("Reading values for VNFC parameter key: " + parameter_key)
vnfc_param_str = "export OB_" + vnf_type + "_VNFC_" + parameter_key + "="
# initialise list for values of a vnfc parameter key
if all_vnfc_parameters_for_vnf_type.get(parameter_key) is None:
all_vnfc_parameters_for_vnf_type.setdefault(parameter_key, [])
# read the values of each vnfc parameter of each vnf_type ...
for vnfc_content_for_vnf_type in vnfc_parameters.get(vnf_type).get('parameters').values():
parameters = vnfc_content_for_vnf_type.get('parameters')
# ... and for each parameter_key adds:
# - the vnfc parameter value, if this vnfc has this parameter
# - a "NA" value, otherwise
if parameter_key in parameters.keys():
vnfc_parameter_value = parameters.get(parameter_key)
else:
vnfc_parameter_value = "NA"
all_vnfc_parameters_for_vnf_type.get(parameter_key).append(vnfc_parameter_value)
vnfc_param_str += vnfc_parameter_value + ":"
# remove last ':'
vnfc_param_str = vnfc_param_str[:-1] + "\n"
log.debug(vnfc_param_str)
f.write(vnfc_param_str)
all_vnfc_parameters[vnf_type] = all_vnfc_parameters_for_vnf_type
f.flush()
os.fsync(f.fileno())
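# Persist the VNFR configuration/dependency parameters received from the NFVO as JSON, YAML and a sourceable shell script (ob_parameters.json/.yaml/.sh).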
def save_vnfr_dependency(dict_msg):
log.info("Saving VNFR configuration and dependency parameters")
# get the base path where to save the file
parameters_file_path_base_dir = dict_msg.get('script-path')
parameters_file_path_base = parameters_file_path_base_dir + "/" + ob_parameters_file_name
# create file path for yaml, json and bash
parameters_file_path_json = parameters_file_path_base + ".json"
parameters_file_path_yaml = parameters_file_path_base + ".yaml"
parameters_file_path_bash = parameters_file_path_base + ".sh"
# get vnfr_dependency json (as string)
vnfr_dependency = dict_msg.get('payload')
# save to file as ob_parameters.json
f = open(parameters_file_path_json, "w")
f.write(vnfr_dependency)
f.close()
log.info("Saved file %s" % parameters_file_path_json)
# convert the json to yaml and write to the file ob_parameters.yaml
# get vnfr_dependency json (as json object)
vnfr_dependency = json.loads(vnfr_dependency)
f = open(parameters_file_path_yaml, "w")
yaml.safe_dump(vnfr_dependency, f, allow_unicode=True, default_flow_style=False)
f.close()
log.info("Saved file %s" % parameters_file_path_yaml)
# read json vnfr_dependency and save the parameters to the file ob_parameters.sh (to be sourced)
vnf_parameters = vnfr_dependency.get('parameters')
vnfc_parameters = vnfr_dependency.get('vnfcParameters')
save_vnf_parameters(parameters_file_path_bash, vnf_parameters)
save_vnfc_parameters(parameters_file_path_bash, vnfc_parameters)
log.info("Saved file %s" % parameters_file_path_bash)
out = str(os.listdir(parameters_file_path_base_dir))
err = ""
status = 0
# return response
return generate_response(out=out, err=err, status=status)
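# Entry point for incoming messages: parse the JSON payload and dispatch to the handler matching its "action" field.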
def on_message(message):
logging.basicConfig(filename=logging_dir + '/ems-receiver.log', level=logging.INFO)
# log.info('received a message: %s' % message)
try:
dict_msg = json.loads(message)
try:
log.info("Received message:")
log.info(str(message))
except:
log.info("Error while logging")
action = dict_msg.get('action')
payload = dict_msg.get('payload')
except ValueError:  # the message is not valid JSON and cannot be processed; messages are generated by the NFVO, so this should not happen
log.info("Received a non-json object")
return generate_response(out=None, err="Not a json message", status="1")
if action == 'SAVE_SCRIPTS':
return save_scripts(dict_msg=dict_msg)
if action == 'CLONE_SCRIPTS':
return clone_scripts(dict_msg)
elif action == "EXECUTE":
return execute(dict_msg)
elif action == "REPO_SCRIPTS_UPDATE":
return repos_scripts_update(dict_msg)
elif action == "SCRIPTS_UPDATE":
return scripts_update(dict_msg)
elif action == "SAVE_VNFR_DEPENDENCY":
return save_vnfr_dependency(dict_msg)
def generate_response(out, err, status):
if out is None:
out = ""
if err is None:
err = ""
resp = {
'output': out,
'err': err,
'status': status
}
json_str = json.dumps(resp)
log.info("answer is: " + json_str)
return json_str
| [] | [] | ["SCRIPTS_PATH"] | [] | ["SCRIPTS_PATH"] | python | 1 | 0 | |
tapestry-ioc/src/main/java/org/apache/tapestry5/ioc/internal/services/SystemEnvSymbolProvider.java | // Copyright 2011 The Apache Software Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.apache.tapestry5.ioc.internal.services;
import org.apache.tapestry5.ioc.internal.util.CollectionFactory;
import org.apache.tapestry5.ioc.services.SymbolProvider;
import java.util.Map;
/**
* Provides <em>case insensitive</em> access to environment variables. Environment variable symbols
* are prefixed with "env.".
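 * For example, the environment variable {@code JAVA_HOME} can be referenced as the symbol {@code env.java_home} (lookups are case insensitive).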
*
* @since 5.3
*/
public class SystemEnvSymbolProvider implements SymbolProvider
{
private final Map<String, String> symbols = CollectionFactory.newCaseInsensitiveMap();
@Override
public synchronized String valueForSymbol(String symbolName)
{
if (symbols.isEmpty())
{
Map<String, String> env = System.getenv();
for (Map.Entry<String, String> entry : env.entrySet())
{
symbols.put("env." + entry.getKey(), entry.getValue());
}
}
return symbols.get(symbolName);
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
docs/conf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# SigFeat documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 8 08:16:45 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import mock
# Create Mock Modules
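# Mock heavy runtime dependencies so Sphinx autodoc can import the package (e.g. on Read the Docs) without installing them.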
MOCK_MODULES = [
'numpy',
'scipy',
'matplotlib.pyplot',
'pylab',
'scipy.signal',
'scipy.fftpack',
'scipy.stats',
'pyfilterbank',
'pyfilterbank.melbank',
'soundfile',
]
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'numpydoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SigFeat'
copyright = '2017, Siegfried Gündert'
author = 'Siegfried Gündert'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version
from sigfeat import get_version
version = get_version()
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'SigFeatdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'SigFeat.tex', 'SigFeat Documentation',
'Siegfried Gündert', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sigfeat', 'SigFeat Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'SigFeat', 'SigFeat Documentation',
author, 'SigFeat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [] | [] | ["READTHEDOCS"] | [] | ["READTHEDOCS"] | python | 1 | 0 | |
main.go | package main
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"github.com/gorilla/mux"
)
func HomeEndpoint(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "Hello from mlabouardy :)")
}
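// VerificationEndpoint completes Facebook's webhook verification handshake: it echoes hub.challenge when hub.verify_token matches the configured VERIFY_TOKEN.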
func VerificationEndpoint(w http.ResponseWriter, r *http.Request) {
challenge := r.URL.Query().Get("hub.challenge")
token := r.URL.Query().Get("hub.verify_token")
if token == os.Getenv("VERIFY_TOKEN") {
w.WriteHeader(200)
w.Write([]byte(challenge))
} else {
w.WriteHeader(404)
w.Write([]byte("Error, wrong validation token"))
}
}
type User struct {
ID string `json:"id,omitempty"`
}
type Messaging struct {
Sender User `json:"sender,omitempty"`
Recipient User `json:"recipient,omitempty"`
Timestamp int `json:"timestamp,omitempty"`
Message Message `json:"message,omitempty"`
}
type DefaultAction struct {
Type string `json:"type,omitempty"`
URL string `json:"url,omitempty"`
MessangerExtensions bool `json:"messenger_extensions,omitempty"`
WebviewHeightRatio string `json:"webview_height_ratio,omitempty"`
FallbackURL string `json:"fallback_url,omitempty"`
}
type Button struct {
Type string `json:"type,omitempty"`
URL string `json:"url,omitempty"`
Title string `json:"title,omitempty"`
}
type Element struct {
Title string `json:"title,omitempty"`
Subtitle string `json:"subtitle,omitempty"`
ImageURL string `json:"image_url,omitempty"`
DefaultAction DefaultAction `json:"default_action,omitempty"`
Buttons []Button `json:"buttons,omitempty"`
}
type Payload struct {
URL string `json:"url,omitempty"`
TemplateType string `json:"template_type,omitempty"`
Sharable bool `json:"sharable,omitempty"`
ImageAspectRation string `json:"image_aspect_ratio,omitempty"`
Elements []Element `json:"elements,omitempty"`
}
type Attachment struct {
Type string `json:"type,omitempty"`
Payload Payload `json:"payload,omitempty"`
}
type Message struct {
MID string `json:"mid,omitempty"`
Text string `json:"text,omitempty"`
QuickReply *struct {
Payload string `json:"payload,omitempty"`
} `json:"quick_reply,omitempty"`
Attachments *[]Attachment `json:"attachments,omitempty"`
Attachment *Attachment `json:"attachment,omitempty"`
}
type Callback struct {
Object string `json:"object,omitempty"`
Entry []struct {
ID string `json:"id,omitempty"`
Time int `json:"time,omitempty"`
Messaging []Messaging `json:"messaging,omitempty"`
} `json:"entry,omitempty"`
}
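// MessagesEndpoint receives webhook callbacks from Facebook and processes every messaging event contained in a "page" object.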
func MessagesEndpoint(w http.ResponseWriter, r *http.Request) {
var callback Callback
json.NewDecoder(r.Body).Decode(&callback)
if callback.Object == "page" {
for _, entry := range callback.Entry {
for _, event := range entry.Messaging {
ProcessMessage(event)
}
}
w.WriteHeader(200)
w.Write([]byte("Got your message"))
} else {
w.WriteHeader(404)
w.Write([]byte("Message not supported"))
}
}
type Response struct {
Recipient User `json:"recipient,omitempty"`
Message Message `json:"message,omitempty"`
}
const (
FACEBOOK_API = "https://graph.facebook.com/v2.6/me/messages?access_token=%s"
IMAGE = "http://37.media.tumblr.com/e705e901302b5925ffb2bcf3cacb5bcd/tumblr_n6vxziSQD11slv6upo3_500.gif"
)
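// ProcessMessage replies to the sender with a static GIF attachment via the Facebook Graph Send API, authenticated with PAGE_ACCESS_TOKEN.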
func ProcessMessage(event Messaging) {
client := http.Client{}
response := Response{
Recipient: User{
ID: event.Sender.ID,
},
Message: Message{
Attachment: &Attachment{
Type: "image",
Payload: Payload{
URL: IMAGE,
},
},
},
}
body := new(bytes.Buffer)
json.NewEncoder(body).Encode(&response)
url := fmt.Sprintf(FACEBOOK_API, os.Getenv("PAGE_ACCESS_TOKEN"))
req, err := http.NewRequest("POST", url, body)
if err != nil {
log.Fatal(err)
}
req.Header.Add("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
log.Fatal(err)
}
defer resp.Body.Close()
}
func main() {
r := mux.NewRouter()
r.HandleFunc("/webhook", VerificationEndpoint).Methods("GET")
r.HandleFunc("/webhook", MessagesEndpoint).Methods("POST")
log.Fatal(http.ListenAndServe(":8080", r))
}
| ["\"VERIFY_TOKEN\"", "\"PAGE_ACCESS_TOKEN\""] | [] | ["PAGE_ACCESS_TOKEN", "VERIFY_TOKEN"] | [] | ["PAGE_ACCESS_TOKEN", "VERIFY_TOKEN"] | go | 2 | 0 | |
tools/r3det_gwd/train.py | # -*- coding:utf-8 -*-
# Author: Xue Yang <[email protected]>
#
# License: Apache-2.0 license
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
sys.path.append("../../")
from tools.train_base import Train
from libs.configs import cfgs
from libs.models.detectors.r3det_gwd import build_whole_network
from libs.utils.coordinate_convert import backward_convert, get_horizen_minAreaRectangle
from dataloader.pretrained_weights.pretrain_zoo import PretrainModelZoo
os.environ["CUDA_VISIBLE_DEVICES"] = cfgs.GPU_GROUP
class TrainR3DetGWD(Train):
def get_gtboxes_and_label(self, gtboxes_and_label_h, gtboxes_and_label_r, num_objects):
return gtboxes_and_label_h[:int(num_objects), :].astype(np.float32), \
gtboxes_and_label_r[:int(num_objects), :].astype(np.float32)
def main(self):
with tf.Graph().as_default() as graph, tf.device('/cpu:0'):
num_gpu = len(cfgs.GPU_GROUP.strip().split(','))
global_step = slim.get_or_create_global_step()
lr = self.warmup_lr(cfgs.LR, global_step, cfgs.WARM_SETP, num_gpu)
tf.summary.scalar('lr', lr)
optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
r3det_gwd = build_whole_network.DetectionNetworkR3DetGWD(cfgs=self.cfgs,
is_training=True)
with tf.name_scope('get_batch'):
if cfgs.IMAGE_PYRAMID:
shortside_len_list = tf.constant(cfgs.IMG_SHORT_SIDE_LEN)
shortside_len = tf.random_shuffle(shortside_len_list)[0]
else:
shortside_len = cfgs.IMG_SHORT_SIDE_LEN
img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch, img_h_batch, img_w_batch = \
self.reader.next_batch(dataset_name=cfgs.DATASET_NAME,
batch_size=cfgs.BATCH_SIZE * num_gpu,
shortside_len=shortside_len,
is_training=True)
# data processing
inputs_list = []
for i in range(num_gpu):
img = tf.expand_dims(img_batch[i], axis=0)
pretrain_zoo = PretrainModelZoo()
if self.cfgs.NET_NAME in pretrain_zoo.pth_zoo or self.cfgs.NET_NAME in pretrain_zoo.mxnet_zoo:
img = img / tf.constant([cfgs.PIXEL_STD])
gtboxes_and_label_r = tf.py_func(backward_convert,
inp=[gtboxes_and_label_batch[i]],
Tout=tf.float32)
gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
gtboxes_and_label_h = get_horizen_minAreaRectangle(gtboxes_and_label_batch[i])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
num_objects = num_objects_batch[i]
num_objects = tf.cast(tf.reshape(num_objects, [-1, ]), tf.float32)
img_h = img_h_batch[i]
img_w = img_w_batch[i]
inputs_list.append([img, gtboxes_and_label_h, gtboxes_and_label_r, num_objects, img_h, img_w])
tower_grads = []
biases_regularizer = tf.no_regularizer
weights_regularizer = tf.contrib.layers.l2_regularizer(cfgs.WEIGHT_DECAY)
with tf.variable_scope(tf.get_variable_scope()):
for i in range(num_gpu):
with tf.device('/gpu:%d' % i):
with tf.name_scope('tower_%d' % i):
with slim.arg_scope(
[slim.model_variable, slim.variable],
device='/device:CPU:0'):
with slim.arg_scope([slim.conv2d, slim.conv2d_in_plane,
slim.conv2d_transpose, slim.separable_conv2d,
slim.fully_connected],
weights_regularizer=weights_regularizer,
biases_regularizer=biases_regularizer,
biases_initializer=tf.constant_initializer(0.0)):
gtboxes_and_label_h, gtboxes_and_label_r = tf.py_func(self.get_gtboxes_and_label,
inp=[inputs_list[i][1],
inputs_list[i][2],
inputs_list[i][3]],
Tout=[tf.float32, tf.float32])
gtboxes_and_label_h = tf.reshape(gtboxes_and_label_h, [-1, 5])
gtboxes_and_label_r = tf.reshape(gtboxes_and_label_r, [-1, 6])
img = inputs_list[i][0]
img_shape = inputs_list[i][-2:]
img = tf.image.crop_to_bounding_box(image=img,
offset_height=0,
offset_width=0,
target_height=tf.cast(img_shape[0], tf.int32),
target_width=tf.cast(img_shape[1], tf.int32))
outputs = r3det_gwd.build_whole_detection_network(input_img_batch=img,
gtboxes_batch_h=gtboxes_and_label_h,
gtboxes_batch_r=gtboxes_and_label_r,
gpu_id=i)
gtboxes_in_img_h = self.drawer.draw_boxes_with_categories(img_batch=img,
boxes=gtboxes_and_label_h[
:, :-1],
labels=gtboxes_and_label_h[
:, -1],
method=0)
gtboxes_in_img_r = self.drawer.draw_boxes_with_categories(img_batch=img,
boxes=gtboxes_and_label_r[
:, :-1],
labels=gtboxes_and_label_r[
:, -1],
method=1)
tf.summary.image('Compare/gtboxes_h_gpu:%d' % i, gtboxes_in_img_h)
tf.summary.image('Compare/gtboxes_r_gpu:%d' % i, gtboxes_in_img_r)
if cfgs.ADD_BOX_IN_TENSORBOARD:
detections_in_img = self.drawer.draw_boxes_with_categories_and_scores(
img_batch=img,
boxes=outputs[0],
scores=outputs[1],
labels=outputs[2],
method=1)
tf.summary.image('Compare/final_detection_gpu:%d' % i, detections_in_img)
loss_dict = outputs[-1]
total_loss_dict, total_losses = self.loss_dict(loss_dict, num_gpu)
if i == num_gpu - 1:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
# weight_decay_loss = tf.add_n(slim.losses.get_regularization_losses())
total_losses = total_losses + tf.add_n(regularization_losses)
tf.get_variable_scope().reuse_variables()
grads = optimizer.compute_gradients(total_losses)
if cfgs.GRADIENT_CLIPPING_BY_NORM is not None:
grads = slim.learning.clip_gradient_norms(grads, cfgs.GRADIENT_CLIPPING_BY_NORM)
tower_grads.append(grads)
self.log_printer(r3det_gwd, optimizer, global_step, tower_grads, total_loss_dict, num_gpu, graph)
if __name__ == '__main__':
trainer = TrainR3DetGWD(cfgs)
trainer.main()
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
main.py | # Copyright 2021 TerminalWarlord under the terms of the MIT
# license found at https://github.com/TerminalWarlord/Subtitle-Downloader-Bot/blob/master/LICENSE
# Encoding = 'utf-8'
# Fork and Deploy, do not modify this repo and claim it yours
# For collaboration mail me at [email protected]
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
import shutil
import requests
import os
import glob
from bs4 import BeautifulSoup as bs
import time
from datetime import timedelta
from dotenv import load_dotenv
import zipfile
load_dotenv()
bot_token = os.environ.get('BOT_TOKEN')
api = int(os.environ.get('API_KEY'))
hash = os.environ.get('API_HASH')
workers = int(os.environ.get('WORKERS'))
app = Client("JayBeeSubtitleDL", bot_token=bot_token, api_id=api, api_hash=hash, workers=workers)
cuttly = os.environ.get('CUTTLY_API')
timestarted = timedelta(seconds=int(time.time()))
@app.on_message(filters.command('start'))
def start(client,message):
kb = [[InlineKeyboardButton('Channel 🛡', url="https://t.me/Groupdcbots"),InlineKeyboardButton('Support Group 🔰', url="https://t.me/groupdc")]]
reply_markup = InlineKeyboardMarkup(kb)
app.send_message(chat_id=message.from_user.id, text=f"Hello there, I am a __**Subtitle Downloader Bot**__.\nGive me a Movie/Series name and I will fetch it from __**Subscene**__.\n\n"
"__**Developer :**__ __@selfiebd__\n"
"__**Language :**__ __Python__\n"
"__**Framework :**__ __🔥 Pyrogram__",
parse_mode='md',
reply_markup=reply_markup)
@app.on_message(filters.command('help'))
def help(client,message):
url = [[InlineKeyboardButton(f"PayPal Me ❤️", url=f"https://paypal.me/Balaselfie")],
[InlineKeyboardButton(f"Pay by UPI", url=f"9677804820@postbank")]]
reply_markup = InlineKeyboardMarkup(url)
message.reply_text(reply_to_message_id= message.message_id,text=f"Send me any Movie/Series name and I will -\n"
f"__ * Search for it on `Subscene.com`\n"
f" * Let you choose your preferable language.\n"
f" * Download the subtitle, unzip and upload in `.srt/.ass` format__", parse_mode='md', reply_markup=reply_markup)
@app.on_message(filters.command('uptime'))
def uptime(client, message):
timecheck = timedelta(seconds=int(time.time()))
uptime = timecheck - timestarted
app.send_message(chat_id=message.from_user.id, text=f"__**Uptime :**__ __{uptime}__",
parse_mode='md')
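# Any plain text message is treated as a Movie/Series query and searched on subscene.com, paginated 10 results at a time.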
@app.on_message(filters.text)
def search(client, message):
query = message.text.replace(" ", "+")
data = {
'query' : query,
'l' : ''
}
res = requests.post('https://subscene.com/subtitles/searchbytitle', data=data)
soup = bs(res.text, 'html.parser')
results = soup.find('div', {'class': 'search-result'}).find_all('div', {'class': 'title'})
kb = []
i = 0
l = 0
for sub in results:
if l < 10:
sublink = sub.find('a').attrs['href'].split('/')[-1]
subtitlename = sub.find('a').text
if len(sublink)<64:
kb.append([InlineKeyboardButton(f"{subtitlename}", callback_data=f'LANG*{sublink}')])
i += 1
else:
pass
else:
pass
l += 1
if len(results) > i:
kb.append([InlineKeyboardButton(f"Next ⏭", callback_data=f'SRCNX*{i}*{query}')])
reply_markup = InlineKeyboardMarkup(kb)
app.send_message(chat_id=message.chat.id,
text=f"__Showing Result for **{query}**\n"
f"Choose your desired Movie/Series:__",
parse_mode='md',
reply_markup=reply_markup)
@app.on_callback_query(filters.regex('SRCNX'))
def searchnext(client, callback_query):
query = callback_query.data.split('*')[-1]
data = {
'query' : query,
'l' : ''
}
res = requests.post('https://subscene.com/subtitles/searchbytitle', data=data)
soup = bs(res.text, 'html.parser')
results = soup.find('div', {'class': 'search-result'}).find_all('div', {'class': 'title'})
kb = []
i = int(callback_query.data.split('*')[-2]) + 1
j = i - 1
k = i + 10
l = 0
for sub in results:
if l > j and l < k:
sublink = sub.find('a').attrs['href'].split('/')[-1]
subtitlename = sub.find('a').text
if len(sublink)<64:
kb.append([InlineKeyboardButton(f"{subtitlename}", callback_data=f'LANG*{sublink}')])
i += 1
else:
pass
else:
pass
l += 1
if len(results) > i:
kb.append([InlineKeyboardButton(f"Next ⏭", callback_data=f'SRCNX*{i}*{query}')])
kb.append([InlineKeyboardButton(f"Previous ⏮️", callback_data=f'SRCPR*{i}*{query}')])
reply_markup = InlineKeyboardMarkup(kb)
callback_query.edit_message_reply_markup(reply_markup=reply_markup)
@app.on_callback_query(filters.regex('SRCPR'))
def searchprev(client, callback_query):
query = callback_query.data.split('*')[-1]
data = {
'query' : query,
'l' : ''
}
res = requests.post('https://subscene.com/subtitles/searchbytitle', data=data)
soup = bs(res.text, 'html.parser')
results = soup.find('div', {'class': 'search-result'}).find_all('div', {'class': 'title'})
kb = []
i = int(callback_query.data.split('*')[-2])
j = i - 21
k = i - 10
l = 0
for sub in results:
if l > j and l < k:
sublink = sub.find('a').attrs['href'].split('/')[-1]
subtitlename = sub.find('a').text
if len(sublink)<64:
kb.append([InlineKeyboardButton(f"{subtitlename}", callback_data=f'LANG*{sublink}')])
i -= 1
else:
pass
else:
pass
l += 1
if j > 10:
kb.append([InlineKeyboardButton(f"Previous ⏮️", callback_data=f'SRCPR*{i}*{query}')])
if len(results) > i:
kb.append([InlineKeyboardButton(f"Next ⏭", callback_data=f'SRCNX*{i}*{query}')])
reply_markup = InlineKeyboardMarkup(kb)
callback_query.edit_message_reply_markup(reply_markup=reply_markup)
@app.on_callback_query(filters.regex('LANG'))
def chooselang(client, callback_query):
sublink = callback_query.data.split('*')[-1]
kb = [[InlineKeyboardButton("English 🇬🇧", callback_data=f'PREL*english*{sublink}')],
[InlineKeyboardButton("Bengali 🇧🇩", callback_data=f'PREL*bengali*{sublink}')],
[InlineKeyboardButton("Hindi 🇮🇳", callback_data=f'PRE*hindi*{sublink}')],
[InlineKeyboardButton("Indonesian 🇮🇩", callback_data=f'PREL*indonesian*{sublink}')]]
reply_markup = InlineKeyboardMarkup(kb)
app.edit_message_text(chat_id=callback_query.message.chat.id,
message_id=callback_query.message.message_id,
text=f"__Select a Subtitle Language__",
parse_mode='md',
reply_markup=reply_markup)
@app.on_callback_query(filters.regex('PREL'))
def langset(client, callback_query):
language = callback_query.data.split('*')[-2]
callback_query.answer(f"Preffered Language : {language.capitalize()}", show_alert=False)
suburl = callback_query.data.split('*')[-1]
url = f'https://subscene.com/subtitles/{suburl}/{language}'
r = requests.get(url)
soup = bs(r.text, 'html.parser')
allsubs = soup.find('tbody').find_all('tr')
kb = []
i = 0
for subs in allsubs:
try:
if i < 10:
subid = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-1]
sublink = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-3]
subname = subs.find('td', {'class': 'a1'}).find_all('span')[1].text.strip()
if len(sublink) < 64:
kb.append([InlineKeyboardButton(f"{subname}", callback_data=f'DTL*{language}*{sublink}*{subid}')])
i += 1
else:
pass
else:
break
except:
pass
if len(allsubs) > i:
kb.append([InlineKeyboardButton(f"Next ⏭️", callback_data=f'NXT*{i}*{language}*{suburl}')])
try:
reply_markup = InlineKeyboardMarkup(kb)
app.edit_message_text(chat_id=callback_query.message.chat.id,
message_id=callback_query.message.message_id,
text=f"__Select a Subtitle__",
parse_mode='md',
reply_markup=reply_markup)
except:
app.edit_message_text(chat_id=callback_query.message.chat.id,
message_id=callback_query.message.message_id,
text=f"__Sorry no subtitle available for that specific language!\n"
f"Try another one!__",
parse_mode='md')
@app.on_callback_query(filters.regex('DTL'))
def subdetails(client, callback_query):
language = callback_query.data.split('*')[-3]
suburl = callback_query.data.split('*')[-2]
subid = callback_query.data.split('*')[-1]
kb = []
# getsub
url = f'https://subscene.com/subtitles/{suburl}/{language}/{subid}'
callback_query.answer(f"Getting sub from : {url}", show_alert=False)
r = requests.get(url)
soup = bs(r.text, 'html.parser')
poster = soup.find('div', {'class': 'poster'}).find('img').attrs['src'].replace('154-', '')
info = soup.find('div', {'id': 'details'}).find('ul').find_all('li')
dload = "https://subscene.com" + soup.find('a', {'id': 'downloadButton'}).attrs['href']
subdetails = []
for a in info:
try:
w = a.text.replace('-', '')
a = "".join(line.strip() for line in w.split("\n"))
subdetails.append(a)
except:
pass
subtext = "\n".join(subdetails)
#cuttly
data = requests.get(f"https://cutt.ly/api/api.php?key={cuttly}&short={dload}").json()["url"]
shortened_url = data["shortLink"]
kb = [[InlineKeyboardButton(f"Download", callback_data=f'DOWNLOAD*{shortened_url}')]]
reply_markup = InlineKeyboardMarkup(kb)
app.send_photo(caption=f'__{subtext}__',
photo=poster,
chat_id=callback_query.message.chat.id,
parse_mode='md',
reply_markup=reply_markup)
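# Download the selected subtitle archive, extract it and send the contained .srt (or other) files back to the chat, then clean up.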
@app.on_callback_query(filters.regex('DOWNLOAD'))
def download(client, callback_query):
callback_query.answer(f"Downloading!!!", show_alert=False)
link = callback_query.data.split('*')[-1]
# unzip
url = requests.get(link).url
r = requests.head(url)
a = r.headers
filename = a['Content-Disposition'].split('=')[-1]
directory = a['Content-Disposition'].split('=')[-1].replace('.zip', '')
with open(filename, 'wb') as f:
im = requests.get(link)
f.write(im.content)
with zipfile.ZipFile(filename,"r") as zip_ref:
zip_ref.extractall(directory)
try:
a = glob.glob(f'./{directory}/*srt', recursive=True)
for file in a:
app.send_document(document=file,
chat_id=callback_query.message.chat.id,
parse_mode='md')
app.delete_messages(chat_id=callback_query.message.chat.id,
message_ids=callback_query.message.message_id)
except:
a = glob.glob(f'./{directory}/*', recursive=True)
for file in a:
app.send_document(document=file,
chat_id=callback_query.message.chat.id,
parse_mode='md')
app.delete_messages(chat_id=callback_query.message.chat.id,
message_ids=callback_query.message.message_id)
try:
os.remove(filename)
shutil.rmtree(directory)
except:
pass
@app.on_callback_query(filters.regex('NXT'))
def nextres(client, callback_query):
language = callback_query.data.split('*')[-2]
suburl = callback_query.data.split('*')[-1]
url = f'https://subscene.com/subtitles/{suburl}/{language}'
print(url)
r = requests.get(url)
soup = bs(r.text, 'html.parser')
allsubs = soup.find('tbody').find_all('tr')
kb = []
i = int(callback_query.data.split('*')[-3]) + 1
j = i - 1
k = i + 10
l = 0
for subs in allsubs:
try:
if l > j and l < k:
subid = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-1]
sublink = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-3]
subname = subs.find('td', {'class': 'a1'}).find_all('span')[1].text.strip()
if len(sublink) < 64:
kb.append([InlineKeyboardButton(f"{subname}", callback_data=f'DTL*{language}*{sublink}*{subid}')])
i += 1
else:
pass
else:
pass
l += 1
except:
pass
if len(allsubs) > i:
kb.append([InlineKeyboardButton(f"Next ⏭️", callback_data=f'NXT*{i}*{language}*{suburl}')])
kb.append([InlineKeyboardButton(f"Previous ⏮️", callback_data=f'PRV*{i}*{language}*{suburl}')])
reply_markup = InlineKeyboardMarkup(kb)
a = app.edit_message_text(chat_id=callback_query.message.chat.id,
message_id=callback_query.message.message_id,
text=f"__Select a Subtitle__",
parse_mode='md',
reply_markup=reply_markup)
@app.on_callback_query(filters.regex('PRV'))
def prevres(client, callback_query):
language = callback_query.data.split('*')[-2]
suburl = callback_query.data.split('*')[-1]
url = f'https://subscene.com/subtitles/{suburl}/{language}'
r = requests.get(url)
soup = bs(r.text, 'html.parser')
allsubs = soup.find('tbody').find_all('tr')
kb = []
i = int(callback_query.data.split('*')[-3])
j = i - 21
k = i - 10
l = 0
for subs in allsubs:
try:
if l > j and l < k:
subid = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-1]
sublink = subs.find('td', {'class': 'a1'}).find('a').attrs['href'].split('/')[-3]
subname = subs.find('td', {'class': 'a1'}).find_all('span')[1].text.strip()
if len(sublink) < 64:
kb.append([InlineKeyboardButton(f"{subname}", callback_data=f'DTL*{language}*{sublink}*{subid}')])
i -= 1
else:
pass
else:
pass
l += 1
except:
pass
if j > 10:
kb.append([InlineKeyboardButton(f"Previous ⏮️", callback_data=f'PRV*{i}*{language}*{suburl}')])
if len(allsubs) > i:
kb.append([InlineKeyboardButton(f"Next ⏭️", callback_data=f'NXT*{i}*{language}*{suburl}')])
reply_markup = InlineKeyboardMarkup(kb)
app.edit_message_text(chat_id=callback_query.message.chat.id,
message_id=callback_query.message.message_id,
text=f"__Select a Subtitle__",
parse_mode='md',
reply_markup=reply_markup)
app.run()
| [] | [] | ["API_KEY", "BOT_TOKEN", "CUTTLY_API", "API_HASH", "WORKERS"] | [] | ["API_KEY", "BOT_TOKEN", "CUTTLY_API", "API_HASH", "WORKERS"] | python | 5 | 0 | |
tg/tl_contacts_resolve_username_gen.go | // Code generated by gotdgen, DO NOT EDIT.
package tg
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"go.uber.org/multierr"
"github.com/nnqq/td/bin"
"github.com/nnqq/td/tdp"
"github.com/nnqq/td/tgerr"
)
// No-op definition for keeping imports.
var (
_ = bin.Buffer{}
_ = context.Background()
_ = fmt.Stringer(nil)
_ = strings.Builder{}
_ = errors.Is
_ = multierr.AppendInto
_ = sort.Ints
_ = tdp.Format
_ = tgerr.Error{}
)
// ContactsResolveUsernameRequest represents TL type `contacts.resolveUsername#f93ccba3`.
// Resolve a @username to get peer info
//
// See https://core.telegram.org/method/contacts.resolveUsername for reference.
type ContactsResolveUsernameRequest struct {
// @username to resolve
Username string
}
// ContactsResolveUsernameRequestTypeID is TL type id of ContactsResolveUsernameRequest.
const ContactsResolveUsernameRequestTypeID = 0xf93ccba3
// Ensuring interfaces in compile-time for ContactsResolveUsernameRequest.
var (
_ bin.Encoder = &ContactsResolveUsernameRequest{}
_ bin.Decoder = &ContactsResolveUsernameRequest{}
_ bin.BareEncoder = &ContactsResolveUsernameRequest{}
_ bin.BareDecoder = &ContactsResolveUsernameRequest{}
)
func (r *ContactsResolveUsernameRequest) Zero() bool {
if r == nil {
return true
}
if !(r.Username == "") {
return false
}
return true
}
// String implements fmt.Stringer.
func (r *ContactsResolveUsernameRequest) String() string {
if r == nil {
return "ContactsResolveUsernameRequest(nil)"
}
type Alias ContactsResolveUsernameRequest
return fmt.Sprintf("ContactsResolveUsernameRequest%+v", Alias(*r))
}
// FillFrom fills ContactsResolveUsernameRequest from given interface.
func (r *ContactsResolveUsernameRequest) FillFrom(from interface {
GetUsername() (value string)
}) {
r.Username = from.GetUsername()
}
// TypeID returns type id in TL schema.
//
// See https://core.telegram.org/mtproto/TL-tl#remarks.
func (*ContactsResolveUsernameRequest) TypeID() uint32 {
return ContactsResolveUsernameRequestTypeID
}
// TypeName returns name of type in TL schema.
func (*ContactsResolveUsernameRequest) TypeName() string {
return "contacts.resolveUsername"
}
// TypeInfo returns info about TL type.
func (r *ContactsResolveUsernameRequest) TypeInfo() tdp.Type {
typ := tdp.Type{
Name: "contacts.resolveUsername",
ID: ContactsResolveUsernameRequestTypeID,
}
if r == nil {
typ.Null = true
return typ
}
typ.Fields = []tdp.Field{
{
Name: "Username",
SchemaName: "username",
},
}
return typ
}
// Encode implements bin.Encoder.
func (r *ContactsResolveUsernameRequest) Encode(b *bin.Buffer) error {
if r == nil {
return fmt.Errorf("can't encode contacts.resolveUsername#f93ccba3 as nil")
}
b.PutID(ContactsResolveUsernameRequestTypeID)
return r.EncodeBare(b)
}
// EncodeBare implements bin.BareEncoder.
func (r *ContactsResolveUsernameRequest) EncodeBare(b *bin.Buffer) error {
if r == nil {
return fmt.Errorf("can't encode contacts.resolveUsername#f93ccba3 as nil")
}
b.PutString(r.Username)
return nil
}
// Decode implements bin.Decoder.
func (r *ContactsResolveUsernameRequest) Decode(b *bin.Buffer) error {
if r == nil {
return fmt.Errorf("can't decode contacts.resolveUsername#f93ccba3 to nil")
}
if err := b.ConsumeID(ContactsResolveUsernameRequestTypeID); err != nil {
return fmt.Errorf("unable to decode contacts.resolveUsername#f93ccba3: %w", err)
}
return r.DecodeBare(b)
}
// DecodeBare implements bin.BareDecoder.
func (r *ContactsResolveUsernameRequest) DecodeBare(b *bin.Buffer) error {
if r == nil {
return fmt.Errorf("can't decode contacts.resolveUsername#f93ccba3 to nil")
}
{
value, err := b.String()
if err != nil {
return fmt.Errorf("unable to decode contacts.resolveUsername#f93ccba3: field username: %w", err)
}
r.Username = value
}
return nil
}
// GetUsername returns value of Username field.
func (r *ContactsResolveUsernameRequest) GetUsername() (value string) {
return r.Username
}
// ContactsResolveUsername invokes method contacts.resolveUsername#f93ccba3 returning error if any.
// Resolve a @username to get peer info
//
// Possible errors:
// 401 AUTH_KEY_PERM_EMPTY: The temporary auth key must be binded to the permanent auth key to use these methods.
// 400 CONNECTION_DEVICE_MODEL_EMPTY: Device model empty
// 400 CONNECTION_LAYER_INVALID: Layer invalid
// 400 USERNAME_INVALID: The provided username is not valid
// 400 USERNAME_NOT_OCCUPIED: The provided username is not occupied
//
// See https://core.telegram.org/method/contacts.resolveUsername for reference.
// Can be used by bots.
func (c *Client) ContactsResolveUsername(ctx context.Context, username string) (*ContactsResolvedPeer, error) {
var result ContactsResolvedPeer
request := &ContactsResolveUsernameRequest{
Username: username,
}
if err := c.rpc.Invoke(ctx, request, &result); err != nil {
return nil, err
}
return &result, nil
}
| [] | [] | [] | [] | [] | go | null | null | null |
examples/classifications/run_user_task.py | import os
from PyCLUE.tasks.run_classifier import user_tasks, configs
# assign GPU devices or CPU devices
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# default configs: see PyCLUE.utils.classifier_utils.core
# below are some necessary parameters required to run this task
# task_name: default is "user_defined_task"
configs["task_name"] = ""
# pretrained_lm_name:
# If None, should assign `vocab_file`, `bert_config_file`, `init_checkpoint`.
# Or you can choose the following models:
# bert, bert_wwm_ext, albert_xlarge, albert_large, albert_base, albert_base_ext,
# albert_small, albert_tiny, roberta, roberta_wwm_ext, roberta_wwm_ext_large
configs["pretrained_lm_name"] = None
# actions
configs["do_train"] = True
configs["do_eval"] = True
configs["do_predict"] = True
# data_dir: your own data path
# If `do_train` = True, should contain at least train.txt
# If `do_eval` = True, should contain at least dev.txt
# If `do_predict` = True, should contain at least test.txt
configs["data_dir"] = ""
# data configs:
# below are some examples
configs["labels"] = ["0", "1"]
# label_position, text_a_position , text_b_position & delimiter:
# examples_1:
# 0_!_我想要回家_!_我准备回家
# 1_!_我想要回家_!_我准备吃饭
# >> label_position = 0, text_a_position = 1, text_b_position = 2, delimiter = "_!_"
# examples_2:
# 0_!_我很生气
# 1_!_我很开心
# >> label_position = 0, text_a_position = 1, text_b_position = None, delimiter = "_!_"
configs["label_position"] = 0
configs["text_a_position"] = 1
configs["text_b_position"] = 2
configs["delimiter"] = "_!_"
# ignore_header:
#   Whether to drop the first line of each file.
configs["ignore_header"] = True
# min_seq_length:
#   Whether to drop sequences whose length is less than `min_seq_length`
configs["min_seq_length"] = 3
# file_type:
# train, dev, test file type, can be "txt" or "tsv"
configs["file_type"] = "txt"
# output_dir: save trained model, evaluation results and tf_records data
configs["output_dir"] = ""
# your pretrained language model components
# If `pretrained_lm_name` is not None, these components will auto installed.
configs["vocab_file"] = "vocab.txt"
configs["bert_config_file"] = "XXX_config.json"
configs["init_checkpoint"] = "XXX_model.ckpt"
configs["max_seq_length"] = 128
configs["train_batch_size"] = 32
configs["learning_rate"] = 2e-5
configs["warmup_proportion"] = 0.1
configs["num_train_epochs"] = 3.0
if __name__ == "__main__":
user_tasks(configs)
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
pkg/issuer/acme/dns/route53/route53.go | // Package route53 implements a DNS provider for solving the DNS-01 challenge
// using AWS Route 53 DNS.
package route53
import (
"fmt"
"math/rand"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/jetstack/cert-manager/pkg/issuer/acme/dns/util"
)
const (
maxRetries = 5
route53TTL = 10
)
// DNSProvider implements the util.ChallengeProvider interface
type DNSProvider struct {
client *route53.Route53
hostedZoneID string
}
// customRetryer implements the client.Retryer interface by composing the
// DefaultRetryer. It controls the logic for retrying recoverable request
// errors (e.g. when rate limits are exceeded).
type customRetryer struct {
client.DefaultRetryer
}
// RetryRules overwrites the DefaultRetryer's method.
// It uses a basic exponential backoff algorithm that returns an initial
// delay of ~400ms with an upper limit of ~30 seconds which should prevent
// causing a high number of consecutive throttling errors.
// For reference: Route 53 enforces an account-wide(!) 5req/s query limit.
func (d customRetryer) RetryRules(r *request.Request) time.Duration {
retryCount := r.RetryCount
if retryCount > 7 {
retryCount = 7
}
delay := (1 << uint(retryCount)) * (rand.Intn(50) + 200)
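	// delay is in milliseconds: 2^retryCount * (200-249)ms, so early retries wait a few
	// hundred milliseconds and, with retryCount capped at 7, an attempt waits at most ~32s.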
return time.Duration(delay) * time.Millisecond
}
// NewDNSProvider returns a DNSProvider instance configured for the AWS
// Route 53 service.
//
// AWS Credentials are automatically detected in the following locations
// and prioritized in the following order:
// 1. Environment variables: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY,
// AWS_REGION, [AWS_SESSION_TOKEN]
// 2. Shared credentials file (defaults to ~/.aws/credentials)
// 3. Amazon EC2 IAM role
//
// If AWS_HOSTED_ZONE_ID is not set, Lego tries to determine the correct
// public hosted zone via the FQDN.
//
// See also: https://github.com/aws/aws-sdk-go/wiki/configuring-sdk
func NewDNSProvider() (*DNSProvider, error) {
hostedZoneID := os.Getenv("AWS_HOSTED_ZONE_ID")
r := customRetryer{}
r.NumMaxRetries = maxRetries
config := request.WithRetryer(aws.NewConfig(), r)
client := route53.New(session.New(config))
return &DNSProvider{
client: client,
hostedZoneID: hostedZoneID,
}, nil
}
// NewDNSProviderAccessKey returns a DNSProvider instance configured for the AWS
// Route 53 service using static credentials from its parameters
func NewDNSProviderAccessKey(accessKeyID, secretAccessKey, hostedZoneID, region string) (*DNSProvider, error) {
creds := credentials.NewStaticCredentials(accessKeyID, secretAccessKey, "")
r := customRetryer{}
r.NumMaxRetries = maxRetries
config := request.WithRetryer(aws.NewConfig(), r).WithCredentials(creds)
if region != "" {
config.WithRegion(region)
}
client := route53.New(session.New(config))
return &DNSProvider{
client: client,
hostedZoneID: hostedZoneID,
}, nil
}
// Timeout returns the timeout and interval to use when checking for DNS
// propagation. Adjusting here to cope with spikes in propagation times.
func (c *DNSProvider) Timeout() (timeout, interval time.Duration) {
return 120 * time.Second, 2 * time.Second
}
// Present creates a TXT record using the specified parameters
func (r *DNSProvider) Present(domain, token, keyAuth string) error {
fqdn, value, _ := util.DNS01Record(domain, keyAuth)
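	// Route 53 expects TXT record values to be wrapped in double quotes.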
value = `"` + value + `"`
return r.changeRecord("UPSERT", fqdn, value, route53TTL)
}
// CleanUp removes the TXT record matching the specified parameters
func (r *DNSProvider) CleanUp(domain, token, keyAuth string) error {
fqdn, value, _ := util.DNS01Record(domain, keyAuth)
value = `"` + value + `"`
return r.changeRecord("DELETE", fqdn, value, route53TTL)
}
func (r *DNSProvider) changeRecord(action, fqdn, value string, ttl int) error {
hostedZoneID, err := r.getHostedZoneID(fqdn)
if err != nil {
return fmt.Errorf("Failed to determine Route 53 hosted zone ID: %v", err)
}
recordSet := newTXTRecordSet(fqdn, value, ttl)
reqParams := &route53.ChangeResourceRecordSetsInput{
HostedZoneId: aws.String(hostedZoneID),
ChangeBatch: &route53.ChangeBatch{
Comment: aws.String("Managed by Lego"),
Changes: []*route53.Change{
{
Action: aws.String(action),
ResourceRecordSet: recordSet,
},
},
},
}
resp, err := r.client.ChangeResourceRecordSets(reqParams)
if err != nil {
return fmt.Errorf("Failed to change Route 53 record set: %v", err)
}
statusID := resp.ChangeInfo.Id
return util.WaitFor(120*time.Second, 4*time.Second, func() (bool, error) {
reqParams := &route53.GetChangeInput{
Id: statusID,
}
resp, err := r.client.GetChange(reqParams)
if err != nil {
return false, fmt.Errorf("Failed to query Route 53 change status: %v", err)
}
if *resp.ChangeInfo.Status == route53.ChangeStatusInsync {
return true, nil
}
return false, nil
})
}
func (r *DNSProvider) getHostedZoneID(fqdn string) (string, error) {
if r.hostedZoneID != "" {
return r.hostedZoneID, nil
}
authZone, err := util.FindZoneByFqdn(fqdn, util.RecursiveNameservers)
if err != nil {
return "", err
}
// .DNSName should not have a trailing dot
reqParams := &route53.ListHostedZonesByNameInput{
DNSName: aws.String(util.UnFqdn(authZone)),
}
resp, err := r.client.ListHostedZonesByName(reqParams)
if err != nil {
return "", err
}
var hostedZoneID string
for _, hostedZone := range resp.HostedZones {
// .Name has a trailing dot
if !*hostedZone.Config.PrivateZone && *hostedZone.Name == authZone {
hostedZoneID = *hostedZone.Id
break
}
}
if len(hostedZoneID) == 0 {
return "", fmt.Errorf("Zone %s not found in Route 53 for domain %s", authZone, fqdn)
}
if strings.HasPrefix(hostedZoneID, "/hostedzone/") {
hostedZoneID = strings.TrimPrefix(hostedZoneID, "/hostedzone/")
}
return hostedZoneID, nil
}
func newTXTRecordSet(fqdn, value string, ttl int) *route53.ResourceRecordSet {
return &route53.ResourceRecordSet{
Name: aws.String(fqdn),
Type: aws.String("TXT"),
TTL: aws.Int64(int64(ttl)),
ResourceRecords: []*route53.ResourceRecord{
{Value: aws.String(value)},
},
}
}
| ["\"AWS_HOSTED_ZONE_ID\""] | [] | ["AWS_HOSTED_ZONE_ID"] | [] | ["AWS_HOSTED_ZONE_ID"] | go | 1 | 0 | |
tests/library/test_ceph_volume.py | import sys
sys.path.append('./library')
import ceph_volume
import mock
import os
# Python 3
try:
from unittest.mock import MagicMock
except ImportError:
# Python 2
try:
from mock import MagicMock
except ImportError:
print('You need the mock library installed on python2.x to run tests')
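# CEPH_CONTAINER_BINARY is patched to 'docker' for every test in the class below, so
# the containerized command expectations can assume a 'docker run ...' prefix.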
@mock.patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': 'docker'})
class TestCephVolumeModule(object):
def test_data_no_vg(self):
result = ceph_volume.get_data("/dev/sda", None)
assert result == "/dev/sda"
def test_data_with_vg(self):
result = ceph_volume.get_data("data-lv", "data-vg")
assert result == "data-vg/data-lv"
def test_journal_no_vg(self):
result = ceph_volume.get_journal("/dev/sda1", None)
assert result == "/dev/sda1"
def test_journal_with_vg(self):
result = ceph_volume.get_journal("journal-lv", "journal-vg")
assert result == "journal-vg/journal-lv"
def test_db_no_vg(self):
result = ceph_volume.get_db("/dev/sda1", None)
assert result == "/dev/sda1"
def test_db_with_vg(self):
result = ceph_volume.get_db("db-lv", "db-vg")
assert result == "db-vg/db-lv"
def test_wal_no_vg(self):
result = ceph_volume.get_wal("/dev/sda1", None)
assert result == "/dev/sda1"
def test_wal_with_vg(self):
result = ceph_volume.get_wal("wal-lv", "wal-vg")
assert result == "wal-vg/wal-lv"
def test_container_exec(self):
fake_binary = "ceph-volume"
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
'--ulimit', 'nofile=1024:4096',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
'docker.io/ceph/daemon:latest-luminous']
result = ceph_volume.container_exec(fake_binary, fake_container_image)
assert result == expected_command_list
def test_zap_osd_container(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda'}
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
'--ulimit', 'nofile=1024:4096',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
'docker.io/ceph/daemon:latest-luminous',
'lvm',
'zap',
'--destroy',
'/dev/sda']
result = ceph_volume.zap_devices(fake_module, fake_container_image)
assert result == expected_command_list
def test_zap_osd(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda'}
fake_container_image = None
expected_command_list = ['ceph-volume',
'lvm',
'zap',
'--destroy',
'/dev/sda']
result = ceph_volume.zap_devices(fake_module, fake_container_image)
assert result == expected_command_list
def test_zap_osd_fsid(self):
fake_module = MagicMock()
fake_module.params = {'osd_fsid': 'a_uuid'}
fake_container_image = None
expected_command_list = ['ceph-volume',
'lvm',
'zap',
'--destroy',
'--osd-fsid',
'a_uuid']
result = ceph_volume.zap_devices(fake_module, fake_container_image)
assert result == expected_command_list
def test_activate_osd(self):
expected_command_list = ['ceph-volume',
'lvm',
'activate',
'--all']
result = ceph_volume.activate_osd()
assert result == expected_command_list
def test_list_osd(self):
fake_module = MagicMock()
fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
fake_container_image = None
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'list',
'/dev/sda',
'--format=json',
]
result = ceph_volume.list_osd(fake_module, fake_container_image)
assert result == expected_command_list
def test_list_osd_container(self):
fake_module = MagicMock()
fake_module.params = {'cluster': 'ceph', 'data': '/dev/sda'}
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
'--ulimit', 'nofile=1024:4096',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
'docker.io/ceph/daemon:latest-luminous',
'--cluster',
'ceph',
'lvm',
'list',
'/dev/sda',
'--format=json',
]
result = ceph_volume.list_osd(fake_module, fake_container_image)
assert result == expected_command_list
def test_list_storage_inventory(self):
fake_module = MagicMock()
fake_container_image = None
expected_command_list = ['ceph-volume',
'inventory',
'--format=json',
]
result = ceph_volume.list_storage_inventory(fake_module, fake_container_image)
assert result == expected_command_list
def test_list_storage_inventory_container(self):
fake_module = MagicMock()
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
'--ulimit', 'nofile=1024:4096',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
'docker.io/ceph/daemon:latest-luminous',
'inventory',
'--format=json',
]
result = ceph_volume.list_storage_inventory(fake_module, fake_container_image)
assert result == expected_command_list
def test_create_osd_container(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
'objectstore': 'filestore',
'cluster': 'ceph', }
fake_action = "create"
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
'--ulimit', 'nofile=1024:4096',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
'docker.io/ceph/daemon:latest-luminous',
'--cluster',
'ceph',
'lvm',
'create',
'--filestore',
'--data',
'/dev/sda']
result = ceph_volume.prepare_or_create_osd(
fake_module, fake_action, fake_container_image)
assert result == expected_command_list
def test_create_osd(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
'objectstore': 'filestore',
'cluster': 'ceph', }
fake_container_image = None
fake_action = "create"
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'create',
'--filestore',
'--data',
'/dev/sda']
result = ceph_volume.prepare_or_create_osd(
fake_module, fake_action, fake_container_image)
assert result == expected_command_list
def test_prepare_osd_container(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
'objectstore': 'filestore',
'cluster': 'ceph', }
fake_action = "prepare"
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
'--ulimit', 'nofile=1024:4096',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
'docker.io/ceph/daemon:latest-luminous',
'--cluster',
'ceph',
'lvm',
'prepare',
'--filestore',
'--data',
'/dev/sda']
result = ceph_volume.prepare_or_create_osd(
fake_module, fake_action, fake_container_image)
assert result == expected_command_list
def test_prepare_osd(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
'objectstore': 'filestore',
'cluster': 'ceph', }
fake_container_image = None
fake_action = "prepare"
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'prepare',
'--filestore',
'--data',
'/dev/sda']
result = ceph_volume.prepare_or_create_osd(
fake_module, fake_action, fake_container_image)
assert result == expected_command_list
def test_batch_osd_container(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
'objectstore': 'filestore',
'journal_size': '100',
'cluster': 'ceph',
'batch_devices': ["/dev/sda", "/dev/sdb"]}
fake_container_image = "docker.io/ceph/daemon:latest-luminous"
expected_command_list = ['docker', 'run', '--rm', '--privileged', '--net=host', '--ipc=host', # noqa E501
'--ulimit', 'nofile=1024:4096',
'-v', '/run/lock/lvm:/run/lock/lvm:z',
'-v', '/var/run/udev/:/var/run/udev/:z',
'-v', '/dev:/dev', '-v', '/etc/ceph:/etc/ceph:z', # noqa E501
'-v', '/run/lvm/:/run/lvm/', # noqa E501
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph-volume',
'docker.io/ceph/daemon:latest-luminous',
'--cluster',
'ceph',
'lvm',
'batch',
'--filestore',
'--yes',
'--prepare',
'--journal-size',
'100',
'/dev/sda',
'/dev/sdb']
result = ceph_volume.batch(
fake_module, fake_container_image)
assert result == expected_command_list
def test_batch_osd(self):
fake_module = MagicMock()
fake_module.params = {'data': '/dev/sda',
'objectstore': 'filestore',
'journal_size': '100',
'cluster': 'ceph',
'batch_devices': ["/dev/sda", "/dev/sdb"]}
fake_container_image = None
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'batch',
'--filestore',
'--yes',
'--journal-size',
'100',
'/dev/sda',
'/dev/sdb']
result = ceph_volume.batch(
fake_module, fake_container_image)
assert result == expected_command_list
def test_batch_filestore_with_dedicated_journal(self):
fake_module = MagicMock()
fake_module.params = {'objectstore': 'filestore',
'journal_size': '100',
'cluster': 'ceph',
'batch_devices': ["/dev/sda", "/dev/sdb"],
'journal_devices': ["/dev/sdc"]}
fake_container_image = None
expected_command_list = ['ceph-volume',
'--cluster',
'ceph',
'lvm',
'batch',
'--filestore',
'--yes',
'--journal-size',
'100',
'/dev/sda',
'/dev/sdb',
'--journal-devices',
'/dev/sdc']
result = ceph_volume.batch(
fake_module, fake_container_image)
assert result == expected_command_list
| [] | [] | [] | [] | [] | python | 0 | 0 | |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend UPCs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to an ultrapaycoind or ultrapaycoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the ultrapaycoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/UltraPayCoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "UltraPayCoin")
return os.path.expanduser("~/.ultrapaycoin")
def read_bitcoin_config(dbdir):
"""Read the ultrapaycoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
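        # ConfigParser needs at least one section header; this wrapper prepends a fake
        # "[all]" section and strips trailing '#' comments from each line it reads.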
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "ultrapaycoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a ultrapaycoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 39799 if testnet else 13334
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the ultrapaycoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(ultrapaycoind):
info = ultrapaycoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
ultrapaycoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = ultrapaycoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(ultrapaycoind):
address_summary = dict()
address_to_account = dict()
for info in ultrapaycoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = ultrapaycoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = ultrapaycoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-ultrapaycoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
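    # Greedy selection: walk the unspent outputs in order, accumulating inputs until
    # the needed amount is covered; the surplus over 'needed' is returned as change.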
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(ultrapaycoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(ultrapaycoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to ultrapaycoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = ultrapaycoind.createrawtransaction(inputs, outputs)
signed_rawtx = ultrapaycoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(ultrapaycoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = ultrapaycoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(ultrapaycoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = ultrapaycoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(ultrapaycoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get UPCs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send UPCs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of ultrapaycoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
ultrapaycoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(ultrapaycoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(ultrapaycoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(ultrapaycoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(ultrapaycoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = ultrapaycoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| [] | [] | ["APPDATA"] | [] | ["APPDATA"] | python | 1 | 0 | |
src/golang.org/x/crypto/ssh/agent/example_test.go | // Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package agent_test
import (
"log"
"net"
"os"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
func ExampleClientAgent() {
// ssh-agent has a UNIX socket under $SSH_AUTH_SOCK
socket := os.Getenv("SSH_AUTH_SOCK")
conn, err := net.Dial("unix", socket)
if err != nil {
log.Fatalf("net.Dial: %v", err)
}
agentClient := agent.NewClient(conn)
config := &ssh.ClientConfig{
User: "username",
Auth: []ssh.AuthMethod{
// Use a callback rather than PublicKeys
// so we only consult the agent once the remote server
// wants it.
ssh.PublicKeysCallback(agentClient.Signers),
},
}
sshc, err := ssh.Dial("tcp", "localhost:22", config)
if err != nil {
log.Fatalf("Dial: %v", err)
}
// .. use sshc
sshc.Close()
}
| ["\"SSH_AUTH_SOCK\""] | [] | ["SSH_AUTH_SOCK"] | [] | ["SSH_AUTH_SOCK"] | go | 1 | 0 | |
pkg/controller/acicontainersoperator.go | // Copyright 2020 Cisco Systems, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package controller
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/exec"
"reflect"
"sync"
"time"
accprovisioninput "github.com/noironetworks/aci-containers/pkg/accprovisioninput/apis/aci.ctrl/v1"
accprovisioninputclientset "github.com/noironetworks/aci-containers/pkg/accprovisioninput/clientset/versioned"
operators "github.com/noironetworks/aci-containers/pkg/acicontainersoperator/apis/aci.ctrl/v1"
operatorclientset "github.com/noironetworks/aci-containers/pkg/acicontainersoperator/clientset/versioned"
configv1 "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
routesv1 "github.com/openshift/api/route/v1"
routesClientset "github.com/openshift/client-go/route/clientset/versioned"
log "github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
)
// AciResources is a struct for handling the resources of the ACI fabric
type AciResources struct {
Deployment *appsv1.Deployment
HostDaemonset *appsv1.DaemonSet
OvsDaemonset *appsv1.DaemonSet
}
// Controller here defines the Operator code handler which list-watches the AciContainersOperator
// object and applies the aci_deployment.yaml in the cluster after creation/update
type Controller struct {
Logger *log.Entry
indexMutex sync.Mutex
Operator_Clientset operatorclientset.Interface
AccProvisionInput_Clientset accprovisioninputclientset.Interface
K8s_Clientset kubernetes.Interface
Operator_Queue workqueue.RateLimitingInterface
Deployment_Queue workqueue.RateLimitingInterface
Daemonset_Queue workqueue.RateLimitingInterface
Node_Queue workqueue.RateLimitingInterface
Route_Queue workqueue.RateLimitingInterface
Config_Map_Queue workqueue.RateLimitingInterface
Informer_Operator cache.SharedIndexInformer
Informer_Deployment cache.SharedIndexInformer
Informer_Daemonset cache.SharedIndexInformer
Informer_Node cache.SharedIndexInformer
Informer_Route cache.SharedIndexInformer
Informer_Config cache.SharedIndexInformer
Resources AciResources
	DnsOperatorClient client.Client // This client is specific to the OpenShift DNS operator
	RoutesClient routesClientset.Interface // This client is specific to the OpenShift routes operator
Openshiftflavor bool
routes map[string]bool // local cache to check the routes
}
var Version = map[string]bool{
"openshift-4.3": true,
"cloud": true,
"openshift-4.4-esx": true,
"openshift-4.4-openstack": true,
"openshift-4.5-openstack": true,
"openshift-4.6-openstack": true,
"openshift-4.6-baremetal": true,
"openshift-4.7-baremetal": true,
"openshift-4.8-baremetal": true,
"openshift-4.7-openstack": true,
"openshift-4.5-esx": true,
"openshift-4.6-esx": true,
"openshift-4.7-esx": true,
"openshift-4.8-esx": true,
}
var Dnsoper = map[string]bool{
"openshift-4.3": true,
}
const aciContainersController = "aci-containers-controller"
const aciContainersHostDaemonset = "aci-containers-host"
const aciContainersOvsDaemonset = "aci-containers-openvswitch"
var Aci_operator_config_path = "/usr/local/etc/aci-containers/aci-operator.conf"
var Acc_provision_config_path = "/usr/local/etc/acc-provision/acc-provision-operator.conf"
func NewAciContainersOperator(
acicnioperatorclient operatorclientset.Interface,
accprovisioninputclient accprovisioninputclientset.Interface,
k8sclient kubernetes.Interface) *Controller {
log.Info("Setting up the Queue")
operator_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
deployment_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
daemonset_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
node_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
route_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
config_map_queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
log.Info("Intializing Informer")
aci_operator_informer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return acicnioperatorclient.AciV1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return acicnioperatorclient.AciV1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).Watch(context.TODO(), options)
},
},
&operators.AciContainersOperator{},
0,
cache.Indexers{},
)
aci_deployment_informer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return k8sclient.AppsV1().Deployments(os.Getenv("SYSTEM_NAMESPACE")).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return k8sclient.AppsV1().Deployments(os.Getenv("SYSTEM_NAMESPACE")).Watch(context.TODO(), options)
},
},
&appsv1.Deployment{},
0,
cache.Indexers{},
)
aci_daemonset_informer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return k8sclient.AppsV1().DaemonSets(os.Getenv("SYSTEM_NAMESPACE")).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return k8sclient.AppsV1().DaemonSets(os.Getenv("SYSTEM_NAMESPACE")).Watch(context.TODO(), options)
},
},
&appsv1.DaemonSet{},
0,
cache.Indexers{},
)
node_informer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return k8sclient.CoreV1().Nodes().List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return k8sclient.CoreV1().Nodes().Watch(context.TODO(), options)
},
},
&v1.Node{},
0,
cache.Indexers{},
)
config_map_informer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return k8sclient.CoreV1().ConfigMaps(os.Getenv("SYSTEM_NAMESPACE")).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return k8sclient.CoreV1().ConfigMaps(os.Getenv("SYSTEM_NAMESPACE")).Watch(context.TODO(), options)
},
},
&v1.ConfigMap{},
0,
cache.Indexers{},
)
var routesClient routesClientset.Interface
var route_informer cache.SharedIndexInformer
flavor := os.Getenv("ACC_PROVISION_FLAVOR")
opflavor := false
	// initializes route watchers for the OpenShift flavor
if Dnsoper[flavor] {
restconfig, err := restclient.InClusterConfig()
if err != nil {
log.Error("Failed to intialize the restConfig: ", err)
} else {
routesClient, err = routesClientset.NewForConfig(restconfig)
if err != nil {
log.Error("Failed to intialize OpenshiftRoute client: ", err)
} else {
opflavor = true
log.Info("Intializing the route informer")
route_informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return routesClient.RouteV1().Routes(metav1.NamespaceAll).List(context.TODO(), options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return routesClient.RouteV1().Routes(metav1.NamespaceAll).Watch(context.TODO(), options)
},
},
&routesv1.Route{},
time.Duration(5)*time.Minute,
cache.Indexers{},
)
}
}
}
controller := &Controller{
Logger: log.NewEntry(log.New()),
Operator_Clientset: acicnioperatorclient,
AccProvisionInput_Clientset: accprovisioninputclient,
K8s_Clientset: k8sclient,
Informer_Operator: aci_operator_informer,
Informer_Deployment: aci_deployment_informer,
Informer_Daemonset: aci_daemonset_informer,
Informer_Node: node_informer,
Informer_Route: route_informer,
Informer_Config: config_map_informer,
Operator_Queue: operator_queue,
Deployment_Queue: deployment_queue,
Daemonset_Queue: daemonset_queue,
Node_Queue: node_queue,
Route_Queue: route_queue,
Config_Map_Queue: config_map_queue,
Resources: AciResources{},
DnsOperatorClient: nil,
RoutesClient: routesClient,
Openshiftflavor: opflavor,
routes: make(map[string]bool),
}
log.Info("Adding Event Handlers")
aci_operator_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("Added acicontainersoperator key: ", key)
if err == nil {
operator_queue.Add(key)
}
},
UpdateFunc: func(prevObj, currentObj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(currentObj)
log.Debug("Updated acicontainersoperator key: ", key)
if err == nil {
operator_queue.Add(key)
}
},
DeleteFunc: func(obj interface{}) {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
log.Debug("Deleted acicontainersoperator key: ", key)
if err == nil {
operator_queue.Add(key)
}
},
})
aci_deployment_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
dep_obj := obj.(*appsv1.Deployment)
if dep_obj.Name == aciContainersController {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("Added Deployment key :", key)
if err == nil {
deployment_queue.Add(key)
}
}
},
UpdateFunc: func(prevObj, currentObj interface{}) {
dep_obj := currentObj.(*appsv1.Deployment)
if dep_obj.Name == aciContainersController {
log.Debug("In UpdateFunc for Deployment")
controller.handledeploymentUpdate(prevObj, currentObj, deployment_queue)
}
},
DeleteFunc: func(obj interface{}) {
dep_obj := obj.(*appsv1.Deployment)
if dep_obj.Name == aciContainersController {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("Deleted Deployment key is :", key)
if err == nil {
deployment_queue.Add(key)
}
}
},
})
aci_daemonset_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("The daemonset key: ", key)
if err == nil {
daemonset_queue.Add(key)
}
},
UpdateFunc: func(prevObj, currentObj interface{}) {
log.Debug("In UpdateFunc for Daemonset")
controller.handledaemonsetUpdate(prevObj, currentObj, daemonset_queue)
},
DeleteFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("Deleted daemonset key is :", key)
if err == nil {
daemonset_queue.Add(key)
}
},
})
node_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("The Node key: ", key)
if err == nil {
node_queue.Add(key)
}
},
UpdateFunc: func(prevObj, currentObj interface{}) {
//TODO: need to handle update
log.Debug("In UpdateFunc for Node")
controller.handleNodeUpdate(prevObj, currentObj, node_queue)
},
DeleteFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("Deleted Node key is :", key)
if err == nil {
node_queue.Add(key)
}
},
})
config_map_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
current_Obj := obj.(*corev1.ConfigMap)
if current_Obj.Name == "aci-operator-config" {
log.Info("In AddFunc for ConfigMap : ", current_Obj.Name)
if err == nil {
config_map_queue.Add(key)
}
}
},
UpdateFunc: func(prevObj, currentObj interface{}) {
current_Obj := currentObj.(*corev1.ConfigMap)
if current_Obj.Name == "aci-operator-config" {
log.Info("In UpdateFunc for ConfigMap : ", current_Obj.Name)
controller.handleConfigMapUpdate(prevObj, currentObj, config_map_queue)
}
},
DeleteFunc: func(obj interface{}) {
current_Obj := obj.(*corev1.ConfigMap)
if current_Obj.Name == "aci-operator-config" {
log.Info("In DeleteFunc for ConfigMap : ", current_Obj.Name)
controller.handleConfigMapDelete(obj)
}
},
})
if opflavor { //openshift flavor
route_informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("Add Route key: ", key)
if err == nil {
route_queue.Add(key)
}
},
UpdateFunc: func(prevObj, currentObj interface{}) {
//TODO: need to handle update
log.Debug("In UpdateFunc for Route")
},
DeleteFunc: func(obj interface{}) {
key, err := cache.MetaNamespaceKeyFunc(obj)
log.Debug("Deleted route key: ", key)
if err == nil {
route_queue.Add(key)
}
},
})
}
return controller
}
func (c *Controller) handledeploymentUpdate(oldobj interface{}, newobj interface{}, queue workqueue.RateLimitingInterface) {
old_dep := oldobj.(*appsv1.Deployment)
new_dep := newobj.(*appsv1.Deployment)
if !reflect.DeepEqual(old_dep.OwnerReferences, new_dep.OwnerReferences) {
key, err := cache.MetaNamespaceKeyFunc(newobj)
if err == nil {
queue.Add(key)
}
} else {
log.Info("Owner Reference is unchanged for ", new_dep.Name)
}
}
func (c *Controller) handledaemonsetUpdate(oldobj interface{}, newobj interface{}, queue workqueue.RateLimitingInterface) {
old_ds := oldobj.(*appsv1.DaemonSet)
new_ds := newobj.(*appsv1.DaemonSet)
if !reflect.DeepEqual(old_ds.OwnerReferences, new_ds.OwnerReferences) {
key, err := cache.MetaNamespaceKeyFunc(newobj)
if err == nil {
queue.Add(key)
}
} else {
log.Info("Owner Reference is unchanged for ", new_ds.Name)
}
}
func (c *Controller) handleConfigMapCreate(newobj interface{}) bool {
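	// The boolean return value feeds processQueue: returning true requeues the key.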
new_config := newobj.(*corev1.ConfigMap)
if _, err := os.Stat("/tmp/aci-operator.conf"); os.IsNotExist(err) {
log.Info("File not present. Writing initial aci-operator-config configmap")
err := c.WriteConfigMap("/tmp/aci-operator.conf", new_config)
if err != nil {
log.Debugf("Failed to write ConfigMap, err: %v", err)
return true
}
return false
}
log.Info("Writing new aci-operator-config configmap")
err := c.WriteConfigMap("/tmp/aci-operator.conf", new_config)
if err != nil {
log.Debugf("Failed to write ConfigMap, err: %v", err)
return true
}
log.Info("Reading current aci-operator-config configmap")
rawSpec, err := c.ReadConfigMap("/tmp/aci-operator.conf")
if err != nil {
log.Debugf("Failed to read ConfigMap, err: %v", err)
return true
}
obj := c.CreateAciContainersOperatorObj()
log.Info("Unmarshalling the ConfigMap...")
err = json.Unmarshal(rawSpec, &obj.Spec)
if err != nil {
log.Infof("Failed to unmarshal ConfigMap, err: %v", err)
return true
}
acicnioperator, err := c.GetAciContainersOperatorCR()
if err != nil {
log.Errorf("Failed to find acicnioperator CR, err: %v", err)
return true
}
if (acicnioperator.Spec.Flavor != obj.Spec.Flavor) || (acicnioperator.Spec.Config != obj.Spec.Config) {
acicnioperator.Spec.Flavor = obj.Spec.Flavor
acicnioperator.Spec.Config = obj.Spec.Config
_, err = c.Operator_Clientset.AciV1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).
Update(context.TODO(), acicnioperator, metav1.UpdateOptions{})
if err != nil {
log.Errorf("Failed to update acicnioperator CR, err: %v", err)
}
}
return false
}
func (c *Controller) handleConfigMapUpdate(oldobj interface{}, newobj interface{}, queue workqueue.RateLimitingInterface) {
old_cm := oldobj.(*corev1.ConfigMap)
new_cm := newobj.(*corev1.ConfigMap)
log.Info("In ConfigMap update handler: ", new_cm.Name)
if !reflect.DeepEqual(old_cm.Data, new_cm.Data) {
key, err := cache.MetaNamespaceKeyFunc(newobj)
if err == nil {
queue.Add(key)
}
} else {
log.Info("ConfigMap is unchanged for ", new_cm.Name)
}
}
func (c *Controller) handleConfigMapDelete(obj interface{}) bool {
new_obj := obj.(*corev1.ConfigMap)
log.Info("aci-containers-operator ConfigMap deleted: ", new_obj.Name)
return false
}
func (c *Controller) GetAciContainersOperatorCR() (*operators.AciContainersOperator, error) {
var options metav1.GetOptions
acicnioperator, er := c.Operator_Clientset.AciV1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).Get(context.TODO(), "acicnioperator", options)
if er != nil {
return acicnioperator, er
}
return acicnioperator, nil
}
func (c *Controller) ReadConfigMap(field string) ([]byte, error) {
raw, err := ioutil.ReadFile(field)
if err != nil {
log.Error(err)
return nil, err
}
log.Debug("ConfigMap is: ", string(raw))
return raw, err
}
func (c *Controller) WriteConfigMap(field string, data *corev1.ConfigMap) error {
log.Info("Writing the ConfigMap: ", data.Name, " in the file")
rawIn := json.RawMessage(data.Data["spec"])
data_byte, err := rawIn.MarshalJSON()
if err != nil {
log.Error(err)
return err
}
err = ioutil.WriteFile(field, data_byte, 0777)
if err != nil {
log.Error(err)
return err
}
return nil
}
func (c *Controller) CreateAciContainersOperatorObj() *operators.AciContainersOperator {
obj := operators.AciContainersOperator{
ObjectMeta: metav1.ObjectMeta{
Name: "acicnioperator",
Namespace: os.Getenv("SYSTEM_NAMESPACE")},
}
obj.Status.Status = true //Setting it default true
return &obj
}
func (c *Controller) CreateAciContainersOperatorCR() error {
log.Info("Reading the aci-operator-config ConfigMap providing CR")
rawSpec, err := c.ReadConfigMap(Aci_operator_config_path)
if err != nil {
log.Info("Failed to read aci-operator-config ConfigMap")
log.Error(err)
return err
}
obj := c.CreateAciContainersOperatorObj()
log.Info("Unmarshalling the aci-operator-config ConfigMap...")
err = json.Unmarshal(rawSpec, &obj.Spec)
if err != nil {
log.Info("Failed to unmarshal aci-operator-config ConfigMap")
log.Error(err)
return err
}
log.Info("Unmarshalling Successful....")
log.Debug("acicnioperator CR recieved is", (obj.Spec))
if err = wait.PollInfinite(time.Second*2, func() (bool, error) {
_, er := c.Operator_Clientset.AciV1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).Create(context.TODO(), obj, metav1.CreateOptions{})
if er != nil {
if errors.IsAlreadyExists(er) { //Happens due to etcd timeout
log.Info(er)
return true, nil
}
log.Info("Waiting for CRD to get registered to etcd....: ", er)
return false, nil
}
return true, nil
}); err != nil {
return err
}
return nil
}
func (c *Controller) GetAccProvisionInputCR() (*accprovisioninput.AccProvisionInput, error) {
var options metav1.GetOptions
accprovisioninput, er := c.AccProvisionInput_Clientset.AciV1().AccProvisionInputs(os.Getenv("SYSTEM_NAMESPACE")).Get(context.TODO(), "accprovisioninput", options)
if er != nil {
return accprovisioninput, er
}
return accprovisioninput, nil
}
func (c *Controller) CreateAccProvisionInputObj() *accprovisioninput.AccProvisionInput {
obj := accprovisioninput.AccProvisionInput{
ObjectMeta: metav1.ObjectMeta{
Name: "accprovisioninput",
Namespace: os.Getenv("SYSTEM_NAMESPACE")},
}
obj.Status.Status = true //Setting it default true
return &obj
}
func (c *Controller) CreateAccProvisionInputCR() error {
obj := c.CreateAccProvisionInputObj()
log.Info("Reading the acc-provision-operator-config ConfigMap providing CR")
rawACCSpec, errACC := c.ReadConfigMap(Acc_provision_config_path)
if errACC != nil {
log.Info("Failed to read acc-provision-operator-config ConfigMap")
log.Error(errACC)
return errACC
}
log.Info("Unmarshalling the acc-provision-operator-config ConfigMap...")
errACC = json.Unmarshal(rawACCSpec, &obj.Spec)
if errACC != nil {
log.Info("Failed to unmarshal acc-provision-operator-config ConfigMap")
log.Error(errACC)
return errACC
}
log.Info("Unmarshalling Successful....")
log.Debug("accprovisioninput CR recieved is ", (obj.Spec))
if err := wait.PollInfinite(time.Second*2, func() (bool, error) {
_, er := c.AccProvisionInput_Clientset.AciV1().AccProvisionInputs(os.Getenv("SYSTEM_NAMESPACE")).Create(context.TODO(), obj, metav1.CreateOptions{})
if er != nil {
if errors.IsAlreadyExists(er) { //Happens due to etcd timeout
log.Info(er)
return true, nil
}
log.Info("Waiting for CRD to get registered to etcd....: ", er)
return false, nil
}
return true, nil
}); err != nil {
return err
}
return nil
}
func (c *Controller) Run(stopCh <-chan struct{}) {
c.Logger.Info("Controller.Run: initiating")
log.Info("Checking if acicnioperator CR already present")
acicnioperatorsuccess := true
acicnioperator, err := c.GetAciContainersOperatorCR()
if err != nil {
log.Info("Not Present ..Creating acicnioperator CR")
er := c.CreateAciContainersOperatorCR()
if er != nil {
log.Error(er)
acicnioperatorsuccess = false
}
} else {
log.Info("acicnioperator CR already present")
log.Debug("Reading current aci-operator-config configmap")
rawSpec, errSpec := c.ReadConfigMap(Aci_operator_config_path)
if errSpec != nil {
log.Error(errSpec)
acicnioperatorsuccess = false
} else {
obj := c.CreateAciContainersOperatorObj()
log.Debug("Unmarshalling the ConfigMap...")
err = json.Unmarshal(rawSpec, &obj.Spec)
if err != nil {
log.Error(err)
acicnioperatorsuccess = false
}
if acicnioperator.Spec.Config != obj.Spec.Config {
acicnioperator.Spec.Config = obj.Spec.Config
log.Info("New Configuration detected...applying changes")
_, er := c.Operator_Clientset.AciV1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).
Update(context.TODO(), acicnioperator, metav1.UpdateOptions{})
if er != nil {
log.Error(er)
acicnioperatorsuccess = false
}
}
}
}
if acicnioperatorsuccess == true {
log.Info("Checking if accprovisioninput CR already present")
accprovisioninput, err := c.GetAccProvisionInputCR()
if err != nil {
log.Info("Not Present ..Creating accprovisioninput CR")
er := c.CreateAccProvisionInputCR()
if er != nil {
log.Error(er)
}
} else {
log.Info("accprovisioninput CR already present")
log.Debug("Reading current acc-provision-operator-config ConfigMap")
rawACCSpec, errACCSpec := c.ReadConfigMap(Acc_provision_config_path)
if errACCSpec != nil {
log.Error(errACCSpec)
} else {
obj := c.CreateAccProvisionInputObj()
log.Debug("Unmarshalling the ConfigMap...")
err = json.Unmarshal(rawACCSpec, &obj.Spec)
if err != nil {
log.Error(err)
}
if accprovisioninput.Spec.Config != obj.Spec.Config {
accprovisioninput.Spec.Config = obj.Spec.Config
log.Info("New Configuration detected...applying changes")
_, er := c.AccProvisionInput_Clientset.AciV1().AccProvisionInputs(os.Getenv("SYSTEM_NAMESPACE")).Update(context.TODO(), accprovisioninput, metav1.UpdateOptions{})
if er != nil {
log.Error(er)
}
}
}
}
}
// Run informer to start watching and listening
go c.Informer_Operator.Run(stopCh)
go c.Informer_Deployment.Run(stopCh)
go c.Informer_Daemonset.Run(stopCh)
go c.Informer_Node.Run(stopCh)
go c.Informer_Config.Run(stopCh)
// Sync the current resources
if !cache.WaitForCacheSync(stopCh, c.Informer_Operator.HasSynced,
c.Informer_Deployment.HasSynced, c.Informer_Daemonset.HasSynced, c.Informer_Node.HasSynced,
c.Informer_Config.HasSynced) {
utilruntime.HandleError(fmt.Errorf("Controller.Sync: Error syncing the cache"))
}
c.Logger.Info("Controller.Sync: Cache sync complete")
// Run queue for each Informer
go c.processQueue(c.Operator_Queue, c.Informer_Operator.GetIndexer(),
func(obj interface{}) bool {
return c.handleOperatorCreate(obj)
},
func(obj interface{}) bool {
return c.handleOperatorDelete(obj)
},
stopCh)
go c.processQueue(c.Deployment_Queue, c.Informer_Deployment.GetIndexer(),
func(obj interface{}) bool {
return c.handleDeploymentCreate(obj)
}, func(obj interface{}) bool {
return c.handleDeploymentDelete(obj)
},
stopCh)
go c.processQueue(c.Daemonset_Queue, c.Informer_Daemonset.GetIndexer(),
func(obj interface{}) bool {
return c.handleDaemonsetCreate(obj)
}, func(obj interface{}) bool {
return c.handleDaemonsetDelete(obj)
},
stopCh)
go c.processQueue(c.Node_Queue, c.Informer_Node.GetIndexer(),
func(obj interface{}) bool {
return c.handleNodeCreate(obj)
}, func(obj interface{}) bool {
return c.handleNodeDelete(obj)
},
stopCh)
go c.processQueue(c.Config_Map_Queue, c.Informer_Config.GetIndexer(),
func(obj interface{}) bool {
return c.handleConfigMapCreate(obj)
}, func(obj interface{}) bool {
return c.handleConfigMapDelete(obj)
},
stopCh)
if c.Openshiftflavor {
c.enableRouteInformer(stopCh)
}
}
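// processQueue drains the given workqueue: string keys are looked up in the
// informer store and dispatched to the create handler while the object still
// exists, or to the delete handler once it is gone; a handler returning true
// causes the key to be rate-limited and requeued.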
func (c *Controller) processQueue(queue workqueue.RateLimitingInterface,
store cache.Store, createhandler func(interface{}) bool,
deletehandler func(interface{}) bool,
stopCh <-chan struct{}) {
go wait.Until(func() {
log.Info("Starting the handlers....")
for {
key, quit := queue.Get()
if quit {
break
}
var requeue bool
switch key := key.(type) {
case chan struct{}:
close(key)
case string:
obj, exists, err := store.GetByKey(key)
if err == nil && exists {
log.Info("Controller.processNextItem: object Creation detected:", key)
requeue = createhandler(obj)
}
if !exists {
log.Info("Controller.processNextItem: object deletion detected:", key)
deletehandler(key)
}
}
if requeue {
log.Info("Adding the key back to the queue ", key)
queue.AddRateLimited(key)
} else {
queue.Forget(key)
}
queue.Done(key)
}
}, time.Second, stopCh)
<-stopCh
queue.ShutDown()
}
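// CheckOwnerReference reports whether the given owner references already
// contain an AciContainersOperator reference.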
func (c *Controller) CheckOwnerReference(reference []metav1.OwnerReference) bool {
for _, ownerRef := range reference {
if ownerRef.Kind == "AciContainersOperator" {
log.Debug("OwnerReference Already Present")
return true
}
}
return false
}
func (c *Controller) UpdateDeploymentOwnerReference(acicontainersoperator *operators.AciContainersOperator) bool {
deploymentsClient := c.K8s_Clientset.AppsV1().Deployments(os.Getenv("SYSTEM_NAMESPACE"))
if deploymentsClient == nil {
log.Info("Error in Fetching deploymentsClient...")
return true
}
c.Resources.Deployment, _ = deploymentsClient.Get(context.TODO(), aciContainersController, metav1.GetOptions{})
if c.Resources.Deployment == nil {
log.Infof("%s deployment is nil..returning", aciContainersController)
return false
}
if !c.CheckOwnerReference(c.Resources.Deployment.ObjectMeta.OwnerReferences) {
c.Resources.Deployment.OwnerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(acicontainersoperator, operators.SchemeGroupVersion.WithKind("AciContainersOperator")),
}
_, err := deploymentsClient.Update(context.TODO(), c.Resources.Deployment, metav1.UpdateOptions{})
if err != nil {
log.Error(err.Error())
return false
}
log.Infof("Successfully updated owner reference to the %s deployment", aciContainersController)
} else {
log.Infof("Owner reference is unchanged for %s", aciContainersController)
}
return true
}
func (c *Controller) UpdateHostDaemonsetOwnerReference(acicontainersoperator *operators.AciContainersOperator) bool {
hostdaemonsetclient := c.K8s_Clientset.AppsV1().DaemonSets(os.Getenv("SYSTEM_NAMESPACE"))
if hostdaemonsetclient == nil {
log.Info("Error in Fetching hostdaemonsetclient...")
return true
}
c.Resources.HostDaemonset, _ = hostdaemonsetclient.Get(context.TODO(), aciContainersHostDaemonset, metav1.GetOptions{})
if c.Resources.HostDaemonset == nil {
log.Infof("%s daemonset is nil.....returning", aciContainersHostDaemonset)
return false
}
if !c.CheckOwnerReference(c.Resources.HostDaemonset.OwnerReferences) {
c.Resources.HostDaemonset.OwnerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(acicontainersoperator, operators.SchemeGroupVersion.WithKind("AciContainersOperator")),
}
_, err := hostdaemonsetclient.Update(context.TODO(), c.Resources.HostDaemonset, metav1.UpdateOptions{})
if err != nil {
log.Error(err.Error())
return false
}
log.Infof("Successfully updated owner reference to the %s daemonset", aciContainersHostDaemonset)
} else {
log.Infof("Owner reference is unchanged for %s", aciContainersHostDaemonset)
}
return true
}
func (c *Controller) UpdateOvsDaemonsetOwnerReference(acicontainersoperator *operators.AciContainersOperator) bool {
ovsdaemonsetclient := c.K8s_Clientset.AppsV1().DaemonSets(os.Getenv("SYSTEM_NAMESPACE"))
if ovsdaemonsetclient == nil {
log.Infof("Error in Fetching ovsdaemonsetclient...")
return true
}
c.Resources.OvsDaemonset, _ = ovsdaemonsetclient.Get(context.TODO(), aciContainersOvsDaemonset, metav1.GetOptions{})
if c.Resources.OvsDaemonset == nil {
log.Infof("%s daemonset is nil.....returning", aciContainersOvsDaemonset)
return false
}
if !c.CheckOwnerReference(c.Resources.OvsDaemonset.OwnerReferences) {
c.Resources.OvsDaemonset.OwnerReferences = []metav1.OwnerReference{
*metav1.NewControllerRef(acicontainersoperator, operators.SchemeGroupVersion.WithKind("AciContainersOperator")),
}
_, err := ovsdaemonsetclient.Update(context.TODO(), c.Resources.OvsDaemonset, metav1.UpdateOptions{})
if err != nil {
log.Error(err.Error())
return false
}
log.Infof("Successfully updated owner reference to the %s daemonset", aciContainersOvsDaemonset)
} else {
log.Infof("Owner reference is unchanged for %s", aciContainersOvsDaemonset)
}
return true
}
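// handleOperatorCreate decodes the base64 configuration carried by the
// AciContainersOperator CR, writes it to aci-deployment.yaml, applies it with
// kubectl and then makes the operator the owner of the managed deployment and
// daemonsets. It returns true when the event should be requeued.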
func (c *Controller) handleOperatorCreate(obj interface{}) bool {
log.Info("OperatorHandler.ObjectCreated")
acicontainersoperator := obj.(*operators.AciContainersOperator)
log.Debug(acicontainersoperator.Spec.Config)
if acicontainersoperator.Spec.Config == "" {
log.Info("acicnioperator CR config is Nil...Exiting")
acicontainersoperator.Status.Status = false
_, er := c.Operator_Clientset.AciV1().AciContainersOperators(os.Getenv("SYSTEM_NAMESPACE")).Update(context.TODO(), acicontainersoperator, metav1.UpdateOptions{})
if er != nil {
log.Error(er)
}
return false
}
dec, err := base64.StdEncoding.DecodeString(acicontainersoperator.Spec.Config)
if err != nil {
log.Error(err)
return true
}
f, err := os.Create("aci-deployment.yaml")
if err != nil {
log.Error(err)
return true
}
if _, err := f.Write(dec); err != nil {
log.Error(err)
return true
}
if err := f.Sync(); err != nil {
log.Error(err)
return true
}
if err := f.Close(); err != nil {
log.Error(err)
return true
}
log.Info("Platform flavor is ", acicontainersoperator.Spec.Flavor)
if Version[acicontainersoperator.Spec.Flavor] {
clusterConfig := &configv1.Network{
TypeMeta: metav1.TypeMeta{APIVersion: configv1.GroupVersion.String(), Kind: "Network"},
ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
}
cfg, _ := config.GetConfig()
scheme := runtime.NewScheme()
err = configv1.Install(scheme)
if err != nil {
log.Error(err)
return true
}
rclient, err := client.New(cfg, client.Options{Scheme: scheme})
if err != nil {
log.Errorf("Failed to create client, err: %v", err)
return true
}
err = rclient.Get(context.TODO(), types.NamespacedName{
Name: "cluster",
}, clusterConfig)
if err != nil {
log.Info(err)
return true
}
log.Info("Current Configuration Spec of type Network is ", clusterConfig.Spec)
log.Info("Current status of type Network is ", clusterConfig.Status)
if !reflect.DeepEqual(clusterConfig.Status.ClusterNetwork, clusterConfig.Spec.ClusterNetwork) ||
!reflect.DeepEqual(clusterConfig.Status.NetworkType, clusterConfig.Spec.NetworkType) ||
!reflect.DeepEqual(clusterConfig.Status.ServiceNetwork, clusterConfig.Spec.ServiceNetwork) {
log.Info("Updating status field of openshift resource of type network ....")
clusterConfig.Status.ClusterNetwork = clusterConfig.Spec.ClusterNetwork
clusterConfig.Status.NetworkType = clusterConfig.Spec.NetworkType
clusterConfig.Status.ServiceNetwork = clusterConfig.Spec.ServiceNetwork
log.Info("Updated clusterConfig.Status is ", clusterConfig.Status)
ctx := context.TODO()
err = rclient.Update(ctx, clusterConfig)
if err != nil {
log.Info(err)
return true
}
}
}
log.Info("Applying Aci Deployment")
//Currently the kubectl version is v1.14. This will be updated by acc-provision according
//to the platform specification
cmd := exec.Command("kubectl", "apply", "-f", "aci-deployment.yaml")
log.Debug(cmd)
_, err = cmd.Output()
if err != nil {
log.Error(err)
return true
}
log.Info("Adding ACI Operator OwnerReference to resources ....")
c.indexMutex.Lock()
if !(c.UpdateDeploymentOwnerReference(acicontainersoperator)) {
log.Info("Error Updating Deployment Owner Reference")
c.indexMutex.Unlock()
return true
}
if !(c.UpdateHostDaemonsetOwnerReference(acicontainersoperator)) {
log.Info("Error Updating HostAgent Daemonset Owner Reference")
c.indexMutex.Unlock()
return true
}
if !(c.UpdateOvsDaemonsetOwnerReference(acicontainersoperator)) {
log.Info("Error Updating Ovs Daemonset Owner Reference")
c.indexMutex.Unlock()
return true
}
c.indexMutex.Unlock()
return false
}
func (c *Controller) handleOperatorDelete(obj interface{}) bool {
log.Info("ACI CNI OperatorHandler.ObjectDeleted")
return false
}
func (c *Controller) handleDeploymentCreate(obj interface{}) bool {
acicontainersoperator, err := c.GetAciContainersOperatorCR()
if err != nil {
log.Info("Not Present ..Creating acicnioperator CR")
return true
}
c.indexMutex.Lock()
if !(c.UpdateDeploymentOwnerReference(acicontainersoperator)) {
log.Info("Error Updating Deployment Owner Reference")
c.indexMutex.Unlock()
return true
}
c.indexMutex.Unlock()
return false
}
func (c *Controller) handleDeploymentDelete(obj interface{}) bool {
log.Infof("%s Deployment Deleted", aciContainersController)
return false
}
func (c *Controller) handleDaemonsetCreate(obj interface{}) bool {
daemonset := obj.(*appsv1.DaemonSet)
acicontainersoperator, err := c.GetAciContainersOperatorCR()
if err != nil {
log.Info("Not Present ..Creating acicnioperator CR")
return true
}
c.indexMutex.Lock()
if daemonset.Name == aciContainersHostDaemonset {
if !(c.UpdateHostDaemonsetOwnerReference(acicontainersoperator)) {
log.Info("Error Updating HostDaemonset Owner Reference")
c.indexMutex.Unlock()
return true
}
} else {
if !(c.UpdateOvsDaemonsetOwnerReference(acicontainersoperator)) {
log.Info("Error Updating OvsDaemonset Owner Reference")
c.indexMutex.Unlock()
return true
}
}
c.indexMutex.Unlock()
return false
}
func (c *Controller) handleDaemonsetDelete(obj interface{}) bool {
log.Infof("aci-containers Daemonset Deleted")
return false
}
// updatednsOperator initializes the dnsoperator client,
// computes the dnsSpec and updates the local cache for all the routes.
// If there is a change in the dns spec, it triggers the update.
func (c *Controller) updatednsOperator() error {
log.Info("Update dnsoperator cr")
dnsInfo := &operatorv1.DNS{
TypeMeta: metav1.TypeMeta{APIVersion: operatorv1.GroupVersion.String(), Kind: "DNS"},
ObjectMeta: metav1.ObjectMeta{Name: "default"},
}
if c.DnsOperatorClient == nil {
cfg, _ := config.GetConfig()
scheme := runtime.NewScheme()
err := operatorv1.Install(scheme)
if err != nil {
log.Debugf("Failed to create operator, err: %v", err)
return err
}
c.DnsOperatorClient, err = client.New(cfg, client.Options{Scheme: scheme})
if err != nil {
log.Debugf("Failed to get client, err: %v", err)
return err
}
}
err := c.DnsOperatorClient.Get(context.TODO(), types.NamespacedName{
Name: "default"}, dnsInfo)
if err != nil {
log.Debugf("Failed to get dnsInfo, err: %v", err)
return err
}
if c.RoutesClient == nil {
log.Info("Route client is nil")
return nil
}
var options metav1.ListOptions
routes, err := c.RoutesClient.RouteV1().Routes(metav1.NamespaceAll).List(context.TODO(), options)
if err != nil {
log.Debugf("Failed to list Routes, err: %v", err)
return err
}
if len(routes.Items) == 0 {
return nil
}
var nodeAddress []string
nodeAddress, err = c.getNodeAddress()
if err != nil {
log.Debugf("Failed to get nodeAddress, err: %v", err)
return err
}
if len(nodeAddress) == 0 {
return nil
}
log.Info("NodeAddress: ", nodeAddress)
// compute the dns servers info
var servers []operatorv1.Server
for _, route := range routes.Items {
var server operatorv1.Server
key := route.ObjectMeta.Namespace + "/" + route.ObjectMeta.Name
server.Name = key
server.Zones = append(server.Zones, route.Spec.Host)
server.ForwardPlugin.Upstreams = nodeAddress
servers = append(servers, server)
}
if !reflect.DeepEqual(dnsInfo.Spec.Servers, servers) {
dnsInfo.Spec.Servers = servers
err = c.DnsOperatorClient.Update(context.TODO(), dnsInfo)
if err != nil {
log.Debugf("Failed to update dnsInfo, err: %v", err)
return err
}
}
c.indexMutex.Lock()
for _, route := range routes.Items {
key := route.ObjectMeta.Namespace + "/" + route.ObjectMeta.Name
log.Infof("Route added to cache: %s", key)
c.routes[key] = true
}
c.indexMutex.Unlock()
log.Infof("Updated dnsInfo: %+v", dnsInfo)
return nil
}
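// getNodeAddress returns the internal IP addresses of all worker nodes,
// skipping master nodes and nodes that are being deleted.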
func (c *Controller) getNodeAddress() ([]string, error) {
var options metav1.ListOptions
nodelist, err := c.K8s_Clientset.CoreV1().Nodes().List(context.TODO(), options)
if err != nil {
log.Info("Failed to List the nodes: ", err)
return []string{}, err
}
var nodeAddress []string
for _, node := range nodelist.Items {
if node.DeletionTimestamp != nil {
continue
}
if _, ok := node.ObjectMeta.Labels["node-role.kubernetes.io/master"]; ok {
continue
}
address := node.Status.Addresses
for _, val := range address {
if val.Type == v1.NodeInternalIP {
nodeAddress = append(nodeAddress, val.Address)
}
}
}
return nodeAddress, nil
}
func (c *Controller) getDnsInfo() (*operatorv1.DNS, error) {
dnsInfo := &operatorv1.DNS{
TypeMeta: metav1.TypeMeta{APIVersion: operatorv1.GroupVersion.String(), Kind: "DNS"},
ObjectMeta: metav1.ObjectMeta{Name: "default"},
}
err := c.DnsOperatorClient.Get(context.TODO(), types.NamespacedName{
Name: "default"}, dnsInfo)
if err != nil {
log.Info(err)
return nil, err
}
return dnsInfo, nil
}
// it reads all the node ip address.
// updates if there is any changes in the address computed
func (c *Controller) updateDnsOperatorSpec(add bool) bool {
if c.DnsOperatorClient == nil || !c.Openshiftflavor {
return false
}
dnsInfo, err := c.getDnsInfo()
if err != nil {
return true
}
// Add and no servers present compute for all the routes
if add && len(dnsInfo.Spec.Servers) == 0 {
err = c.updatednsOperator()
if err != nil {
log.Info("Failed to update the dnsOperatorCr: ", err)
return true
}
return false
}
// Nothing to reconcile yet if no servers have been computed.
if len(dnsInfo.Spec.Servers) == 0 {
return false
}
var nodeAddress []string
nodeAddress, err = c.getNodeAddress()
if err != nil {
log.Debugf("Failed to get nodeAddress, err: %v", err)
return true
}
if !reflect.DeepEqual(dnsInfo.Spec.Servers[0].ForwardPlugin.Upstreams, nodeAddress) {
// This is node delete case when there is no worker nodes present
// set the spec to nil
if !add && len(nodeAddress) == 0 {
dnsInfo.Spec = operatorv1.DNSSpec{}
} else {
for i := range dnsInfo.Spec.Servers {
dnsInfo.Spec.Servers[i].ForwardPlugin.Upstreams = nodeAddress
}
}
err = c.DnsOperatorClient.Update(context.TODO(), dnsInfo)
if err != nil {
log.Info("Failed to update the dnsInfo: ", err)
return true
}
}
log.Infof("Updated dnsInfo: %+v", dnsInfo)
return false
}
// handle node create to update the dnsOperatorSpec
func (c *Controller) handleNodeCreate(obj interface{}) bool {
log.Infof("node created")
return c.updateDnsOperatorSpec(true)
}
// handle node delete
func (c *Controller) handleNodeDelete(obj interface{}) bool {
log.Infof("node Deleted")
return c.updateDnsOperatorSpec(false)
}
// handle route create
// the local route cache will be updated
// if the route is already present it is ignored silently, since the update happened in operator create
func (c *Controller) handleRouteCreate(obj interface{}) bool {
route := obj.(*routesv1.Route)
log.Infof("route created: %s", route.ObjectMeta.Name)
if c.DnsOperatorClient == nil {
return false
}
key, _ := cache.MetaNamespaceKeyFunc(obj)
c.indexMutex.Lock()
_, ok := c.routes[key]
c.indexMutex.Unlock()
if ok {
return false
}
dnsInfo, err := c.getDnsInfo()
if err != nil {
log.Errorf("Failed to get dnsInfo, err: %v", err)
return true
}
// Check if already exists in dnsInfo then no need to update dnsinfo
for _, server := range dnsInfo.Spec.Servers {
if key == server.Name {
return false
}
}
var server operatorv1.Server
server.Name = key
server.Zones = append(server.Zones, route.Spec.Host)
// if already computed update the cache
if len(dnsInfo.Spec.Servers) > 0 {
server.ForwardPlugin.Upstreams = dnsInfo.Spec.Servers[0].ForwardPlugin.Upstreams
} else { // compute the node ip's fresh
nodeaddr, err := c.getNodeAddress()
if err != nil {
log.Errorf("Failed to get Node list, err: %v", err)
return true
}
if len(nodeaddr) == 0 {
return false
}
server.ForwardPlugin.Upstreams = nodeaddr
}
dnsInfo.Spec.Servers = append(dnsInfo.Spec.Servers, server)
err = c.DnsOperatorClient.Update(context.TODO(), dnsInfo)
if err != nil {
log.Info("Failed to update the dnsInfo: ", err)
return true
}
c.indexMutex.Lock()
c.routes[key] = true
c.indexMutex.Unlock()
log.Infof("Route added to cache:%s", key)
log.Infof("Updated dnsInfo: %+v", dnsInfo)
return false
}
// handle route delete
func (c *Controller) handleRouteDelete(obj interface{}) bool {
key := fmt.Sprintf("%v", obj)
log.Infof("route deleted: %s", key)
c.indexMutex.Lock()
_, ok := c.routes[key]
c.indexMutex.Unlock()
if !ok {
return false
}
if c.DnsOperatorClient == nil {
return false
}
dnsInfo, err := c.getDnsInfo()
if err != nil {
log.Errorf("Failed to get dnsInfo, err: %v", err)
return true
}
for i := range dnsInfo.Spec.Servers {
if dnsInfo.Spec.Servers[i].Name == key {
dnsInfo.Spec.Servers = append(dnsInfo.Spec.Servers[:i], dnsInfo.Spec.Servers[i+1:]...)
break
}
}
err = c.DnsOperatorClient.Update(context.TODO(), dnsInfo)
if err != nil {
log.Info("Failed to update the dnsInfo: ", err)
return true
}
c.indexMutex.Lock()
delete(c.routes, key)
c.indexMutex.Unlock()
log.Infof("Route deleted from cache:%s", key)
log.Infof("Updated dnsInfo: %+v", dnsInfo)
return false
}
func (c *Controller) enableRouteInformer(stopCh <-chan struct{}) {
go func() {
var options metav1.ListOptions
for {
Pods, err := c.K8s_Clientset.CoreV1().Pods("openshift-apiserver").List(context.TODO(), options)
if err == nil && (len(Pods.Items) > 0 && Pods.Items[0].Status.ContainerStatuses[0].Ready) {
log.Info("Openshift-apiserver Pod found start router informer")
err = c.updatednsOperator()
if err != nil {
log.Info("Failed to update the dnsOperatorCr: ", err)
}
go c.Informer_Route.Run(stopCh)
cache.WaitForCacheSync(stopCh,
c.Informer_Route.HasSynced)
go c.processQueue(c.Route_Queue, c.Informer_Route.GetIndexer(),
func(obj interface{}) bool {
return c.handleRouteCreate(obj)
}, func(obj interface{}) bool {
return c.handleRouteDelete(obj)
},
stopCh)
break
}
time.Sleep(time.Minute)
}
}()
}
func (c *Controller) handleNodeUpdate(oldobj interface{}, newobj interface{}, queue workqueue.RateLimitingInterface) {
old_node := oldobj.(*v1.Node)
new_node := newobj.(*v1.Node)
if !reflect.DeepEqual(old_node.Status.Addresses, new_node.Status.Addresses) {
key, err := cache.MetaNamespaceKeyFunc(newobj)
if err == nil {
queue.Add(key)
}
}
}
| [
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"ACC_PROVISION_FLAVOR\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\"",
"\"SYSTEM_NAMESPACE\""
] | [] | [
"ACC_PROVISION_FLAVOR",
"SYSTEM_NAMESPACE"
] | [] | ["ACC_PROVISION_FLAVOR", "SYSTEM_NAMESPACE"] | go | 2 | 0 | |
go/src/github.com/koding/kite/kontrol/kontrol_test.go | package kontrol
import (
"fmt"
"log"
"math/rand"
"net"
"net/url"
"os"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/dgrijalva/jwt-go"
"github.com/koding/kite"
"github.com/koding/kite/kitekey"
"github.com/koding/kite/protocol"
"github.com/koding/kite/testkeys"
"github.com/koding/kite/testutil"
uuid "github.com/satori/go.uuid"
)
var (
conf *Config
kon *Kontrol
)
func init() {
rand.Seed(time.Now().UTC().UnixNano())
kon, conf = startKontrol(testkeys.Private, testkeys.Public, 5500)
}
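// TestUpdateKeys verifies that kites registered against an old kontrol key
// pair receive an updated kite key and renewed tokens once kontrol is
// restarted with a new key pair.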
func TestUpdateKeys(t *testing.T) {
if storage := os.Getenv("KONTROL_STORAGE"); storage != "postgres" {
t.Skipf("skipping TestUpdateKeys for storage %q: not implemented", storage)
}
kon, conf := startKontrol(testkeys.PrivateThird, testkeys.PublicThird, 5501)
hk1, err := NewHelloKite("kite1", conf)
if err != nil {
t.Fatalf("error creating kite1: %s", err)
}
defer hk1.Close()
hk2, err := NewHelloKite("kite2", conf)
if err != nil {
t.Fatalf("error creating kite2: %s", err)
}
defer hk2.Close()
hk2.Token = true
calls := HelloKites{
hk1: hk2,
hk2: hk1,
}
if err := Call(calls); err != nil {
t.Fatal(err)
}
kon.Close()
if err := kon.DeleteKeyPair("", testkeys.PublicThird); err != nil {
t.Fatalf("error deleting key pair: %s", err)
}
kon, conf = startKontrol(testkeys.Private, testkeys.Public, 5501)
defer kon.Close()
reg, err := hk1.WaitRegister(15 * time.Second)
if err != nil {
t.Fatalf("kite1 register error: %s", err)
}
if reg.PublicKey != testkeys.Public {
t.Fatalf("kite1: got public key %q, want %q", reg.PublicKey, testkeys.Public)
}
if reg.KiteKey == "" {
t.Fatal("kite1: kite key was not updated")
}
reg, err = hk2.WaitRegister(15 * time.Second)
if err != nil {
t.Fatalf("kite2 register error: %s", err)
}
if reg.PublicKey != testkeys.Public {
t.Fatalf("kite2: got public key %q, want %q", reg.PublicKey, testkeys.Public)
}
if reg.KiteKey == "" {
t.Fatal("kite2: kite key was not updated")
}
pause("token renew")
calls = HelloKites{
hk1: hk2,
hk2: hk1,
}
if err := Call(calls); err == nil || !strings.Contains(err.Error(), "token is expired") {
t.Fatalf("got nil or unexpected error: %v", err)
}
if err := hk2.WaitTokenExpired(10 * time.Second); err != nil {
t.Fatalf("kite2: waiting for token expire: %s", err)
}
if err := hk2.WaitTokenRenew(10 * time.Second); err != nil {
t.Fatalf("kite2: waiting for token renew: %s", err)
}
calls = HelloKites{
hk1: hk2,
hk2: hk1,
}
if err := Call(calls); err != nil {
t.Fatal(err)
}
pause("started kontrol 2")
hk3, err := NewHelloKite("kite3", conf)
if err != nil {
t.Fatalf("error creating kite3: %s", err)
}
defer hk3.Close()
if err := WaitTillConnected(conf, 15*time.Second, hk1, hk2, hk3); err != nil {
t.Fatal(err)
}
calls = HelloKites{
hk3: hk1,
hk3: hk2,
hk2: hk1,
hk1: hk3,
hk1: hk2,
}
if err := Call(calls); err != nil {
t.Fatal(err)
}
pause("kite2 -> kite3 starting")
if err := Call(HelloKites{hk2: hk3}); err != nil {
t.Fatal(err)
}
}
func TestRegisterMachine(t *testing.T) {
key, err := kon.registerUser("foo", testkeys.Public, testkeys.Private)
if err != nil {
t.Error(err)
return
}
claims := &kitekey.KiteClaims{}
_, err = jwt.ParseWithClaims(key, claims, kitekey.GetKontrolKey)
if err != nil {
t.Fatal(err)
}
if claims.Subject != "foo" {
t.Fatalf("invalid username: %s", claims.Subject)
}
}
func TestRegisterDenyEvil(t *testing.T) {
// TODO(rjeczalik): use sentinel error value instead
const authErr = "no valid authentication key found"
legit, err := NewHelloKite("testuser", conf)
if err != nil {
t.Fatal(err)
}
if _, err = legit.Kite.GetToken(legit.Kite.Kite()); err != nil {
t.Fatal(err)
}
evil := kite.New("testuser", "1.0.0")
evil.Config = conf.Config.Copy()
evil.Config.Port = 6767
evil.Config.Username = "testuser"
evil.Config.KontrolUser = "testuser"
evil.Config.KontrolURL = conf.Config.KontrolURL
evil.Config.KiteKey = testutil.NewToken("testuser", testkeys.PrivateEvil, testkeys.PublicEvil).Raw
// KontrolKey can be easily extracted from existing kite.key
evil.Config.KontrolKey = testkeys.Public
evil.Config.ReadEnvironmentVariables()
evilURL := &url.URL{
Scheme: "http",
Host: "127.0.0.1:6767",
Path: "/kite",
}
_, err = evil.Register(evilURL)
if err == nil {
t.Errorf("expected kontrol to deny register request: %s", evil.Kite())
} else {
t.Logf("register denied: %s", err)
}
_, err = evil.GetToken(legit.Kite.Kite())
if err == nil {
t.Errorf("expected kontrol to deny token request: %s", evil.Kite())
} else {
t.Logf("token denied: %s", err)
}
_, err = evil.TellKontrolWithTimeout("registerMachine", 4*time.Second, map[string]interface{}{})
if err == nil {
t.Fatal("expected registerMachine to fail")
}
if !strings.Contains(err.Error(), authErr) {
t.Fatalf("got %q, want %q error", err, authErr)
}
}
func TestMultipleRegister(t *testing.T) {
confCopy := *conf
confCopy.RegisterFunc = func(hk *HelloKite) error {
hk.Kite.RegisterHTTPForever(hk.URL)
if _, err := hk.WaitRegister(15 * time.Second); err != nil {
hk.Kite.Close()
return err
}
return nil
}
hk, err := NewHelloKite("kite", &confCopy)
if err != nil {
t.Fatalf("error creating kite: %s", err)
}
defer hk.Close()
query := &protocol.KontrolQuery{
ID: hk.Kite.Kite().ID,
}
c, err := hk.Kite.GetKites(query)
if err != nil {
t.Fatalf("GetKites()=%s", err)
}
klose(c)
if len(c) != 1 {
t.Fatalf("want len(c) = 1; got %d", len(c))
}
if c[0].URL != hk.URL.String() {
t.Fatalf("want url = %q; got %q", hk.URL, c[0].URL)
}
urlCopy := *hk.URL
_, port, err := net.SplitHostPort(hk.URL.Host)
if err != nil {
t.Fatal(err)
}
urlCopy.Host = net.JoinHostPort("localhost", port)
if _, err := hk.Kite.RegisterHTTP(&urlCopy); err != nil {
t.Fatal(err)
}
if _, err := hk.WaitRegister(15 * time.Second); err != nil {
t.Fatal(err)
}
timeout := time.After(2 * time.Minute)
query = &protocol.KontrolQuery{
ID: hk.Kite.Kite().ID,
}
for {
select {
case <-timeout:
t.Fatal("timed out waiting for RegisterURL to update")
default:
c, err := hk.Kite.GetKites(query)
if err != nil {
t.Fatalf("GetKites()=%s", err)
}
klose(c)
if len(c) != 1 {
t.Fatalf("want len(c) = 1; got %d", len(c))
}
if c[0].URL == urlCopy.String() {
return
}
time.Sleep(10 * time.Second)
}
}
}
func TestTokenInvalidation(t *testing.T) {
oldval := TokenTTL
defer func() {
TokenTTL = oldval
}()
TokenTTL = time.Millisecond * 500
TokenLeeway = 0
testName := "mathworker6"
testVersion := "1.1.1"
m := kite.New(testName, testVersion)
m.Config = conf.Config.Copy()
m.Config.Port = 6666
defer m.Close()
kiteURL := &url.URL{Scheme: "http", Host: "localhost:6666", Path: "/mathworker6"}
_, err := m.Register(kiteURL)
if err != nil {
t.Error(err)
}
token, err := m.GetToken(m.Kite())
if err != nil {
t.Error(err)
}
time.Sleep(time.Millisecond * 700)
token2, err := m.GetToken(m.Kite())
if err != nil {
t.Error(err)
}
if token == token2 {
t.Error("token invalidation doesn't work")
}
TokenTTL = time.Second * 4
token3, err := m.GetToken(m.Kite())
if err != nil {
t.Error(err)
}
token4, err := m.GetToken(m.Kite())
if err != nil {
t.Error(err)
}
if token3 != token4 {
t.Error("tokens should be the same")
}
}
func TestMultiple(t *testing.T) {
testDuration := time.Second * 10
// number of example kites that will be registered; each client below
// queries a randomly chosen one of them. Increasing this number makes
// the test fail.
kiteNumber := runtime.GOMAXPROCS(0)
// number of clients that will query example kites
clientNumber := 10
for i := 0; i < kiteNumber; i++ {
m := kite.New("example"+strconv.Itoa(i), "0.1."+strconv.Itoa(i))
m.Config = conf.Config.Copy()
kiteURL := &url.URL{Scheme: "http", Host: "localhost:4444", Path: "/kite"}
err := m.RegisterForever(kiteURL)
if err != nil {
t.Error(err)
}
defer m.Close()
}
clients := make([]*kite.Kite, clientNumber)
for i := 0; i < clientNumber; i++ {
c := kite.New("client"+strconv.Itoa(i), "0.0.1")
c.Config = conf.Config.Copy()
c.SetupKontrolClient()
clients[i] = c
defer c.Close()
}
var wg sync.WaitGroup
timeout := time.After(testDuration)
// every one second
for {
select {
case <-time.Tick(time.Second):
for i := 0; i < clientNumber; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
time.Sleep(time.Millisecond * time.Duration(rand.Intn(500)))
query := &protocol.KontrolQuery{
Username: conf.Config.Username,
Environment: conf.Config.Environment,
Name: "example" + strconv.Itoa(rand.Intn(kiteNumber)),
}
start := time.Now()
k, err := clients[i].GetKites(query)
elapsedTime := time.Since(start)
if err != nil {
// we don't fail here otherwise pprof can't gather information
fmt.Printf("[%d] aborted, elapsed %f sec err: %s\n",
i, elapsedTime.Seconds(), err)
} else {
klose(k)
// fmt.Printf("[%d] finished, elapsed %f sec\n", i, elapsedTime.Seconds())
}
}(i)
wg.Wait()
}
case <-timeout:
return
}
}
}
func TestGetKites(t *testing.T) {
testName := "mathworker4"
testVersion := "1.1.1"
m := kite.New(testName, testVersion)
m.Config = conf.Config.Copy()
defer m.Close()
kiteURL := &url.URL{Scheme: "http", Host: "localhost:4444", Path: "/kite"}
_, err := m.Register(kiteURL)
if err != nil {
t.Error(err)
}
defer m.Close()
query := &protocol.KontrolQuery{
Username: conf.Config.Username,
Environment: conf.Config.Environment,
Name: testName,
Version: "1.1.1",
}
// exp3 queries for mathkite
exp3 := kite.New("exp3", "0.0.1")
exp3.Config = conf.Config.Copy()
defer exp3.Close()
kites, err := exp3.GetKites(query)
if err != nil {
t.Fatal(err)
}
defer klose(kites)
if len(kites) == 0 {
t.Fatal("No mathworker available")
}
if len(kites) != 1 {
t.Fatalf("Only one kite is registerd, we have %d", len(kites))
}
if kites[0].Name != testName {
t.Errorf("getkites got %s expected %s", kites[0].Name, testName)
}
if kites[0].Version != testVersion {
t.Errorf("getkites got %s expected %s", kites[0].Version, testVersion)
}
}
func TestGetToken(t *testing.T) {
testName := "mathworker5"
testVersion := "1.1.1"
m := kite.New(testName, testVersion)
m.Config = conf.Config.Copy()
m.Config.Port = 6666
defer m.Close()
kiteURL := &url.URL{Scheme: "http", Host: "localhost:6666", Path: "/kite"}
_, err := m.Register(kiteURL)
if err != nil {
t.Error(err)
}
_, err = m.GetToken(m.Kite())
if err != nil {
t.Error(err)
}
}
func TestRegisterKite(t *testing.T) {
kiteURL := &url.URL{Scheme: "http", Host: "localhost:4444", Path: "/kite"}
m := kite.New("mathworker3", "1.1.1")
m.Config = conf.Config.Copy()
res, err := m.Register(kiteURL)
if err != nil {
t.Fatal(err)
}
defer m.Close()
if kiteURL.String() != res.URL.String() {
t.Errorf("register: got %s expected %s", res.URL.String(), kiteURL.String())
}
}
func TestKontrol(t *testing.T) {
// Start mathworker
mathKite := kite.New("mathworker", "1.2.3")
mathKite.Config = conf.Config.Copy()
mathKite.Config.Port = 6161
mathKite.HandleFunc("square", Square)
go mathKite.Run()
<-mathKite.ServerReadyNotify()
defer mathKite.Close()
go mathKite.RegisterForever(&url.URL{Scheme: "http", Host: "127.0.0.1:" + strconv.Itoa(mathKite.Config.Port), Path: "/kite"})
<-mathKite.KontrolReadyNotify()
// exp2 kite is the mathworker client
exp2Kite := kite.New("exp2", "0.0.1")
exp2Kite.Config = conf.Config.Copy()
defer exp2Kite.Close()
query := &protocol.KontrolQuery{
Username: exp2Kite.Kite().Username,
Environment: exp2Kite.Kite().Environment,
Name: "mathworker",
Version: "~> 1.1",
}
// exp2 queries for mathkite
kites, err := exp2Kite.GetKites(query)
if err != nil {
t.Fatal(err)
}
defer klose(kites)
if len(kites) == 0 {
t.Fatal("No mathworker available")
}
// exp2 connects to mathworker
remoteMathWorker := kites[0]
err = remoteMathWorker.Dial()
if err != nil {
t.Fatal("Cannot connect to remote mathworker", err)
}
// Test Kontrol.GetToken
// TODO(rjeczalik): rework test to not touch Kontrol internals
kon.tokenCacheMu.Lock()
kon.tokenCache = make(map[string]string)
kon.tokenCacheMu.Unlock()
_, err = exp2Kite.GetToken(&remoteMathWorker.Kite)
if err != nil {
t.Error(err)
}
// Run "square" method
response, err := remoteMathWorker.TellWithTimeout("square", 4*time.Second, 2)
if err != nil {
t.Fatal(err)
}
var result int
err = response.Unmarshal(&result)
if err != nil {
t.Fatal(err)
}
// Result must be "4"
if result != 4 {
t.Fatalf("Invalid result: %d", result)
}
}
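// Square is the "square" method served by the test mathworker kites; it
// returns the square of its single numeric argument.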
func Square(r *kite.Request) (interface{}, error) {
a, err := r.Args.One().Float64()
if err != nil {
return nil, err
}
result := a * a
return result, nil
}
func TestGetQueryKey(t *testing.T) {
// This query is valid because there are no gaps between query fields.
q := &protocol.KontrolQuery{
Username: "cenk",
Environment: "production",
}
key, err := GetQueryKey(q)
if err != nil {
t.Error(err)
}
if key != "/cenk/production" {
t.Errorf("Unexpected key: %s", key)
}
// This is wrong because Environment field is empty.
// We can't make a query on etcd because wildcards are not allowed in paths.
q = &protocol.KontrolQuery{
Username: "cenk",
Name: "fs",
}
key, err = GetQueryKey(q)
if err == nil {
t.Errorf("Error is expected")
}
if key != "" {
t.Errorf("Key is not expected: %s", key)
}
// This is also wrong because each query must have a non-empty username field.
q = &protocol.KontrolQuery{
Environment: "production",
Name: "fs",
}
key, err = GetQueryKey(q)
if err == nil {
t.Errorf("Error is expected")
}
if key != "" {
t.Errorf("Key is not expected: %s", key)
}
}
func TestKontrolMultiKey(t *testing.T) {
if storage := os.Getenv("KONTROL_STORAGE"); storage != "postgres" {
t.Skipf("%q storage does not currently implement soft key pair deletes", storage)
}
i := uuid.NewV4()
secondID := i.String()
// add so we can use it as key
if err := kon.AddKeyPair(secondID, testkeys.PublicSecond, testkeys.PrivateSecond); err != nil {
t.Fatal(err)
}
// Start mathworker
mathKite := kite.New("mathworker2", "2.0.0")
mathKite.Config = conf.Config.Copy()
mathKite.Config.Port = 6162
mathKite.HandleFunc("square", Square)
go mathKite.Run()
<-mathKite.ServerReadyNotify()
defer mathKite.Close()
go mathKite.RegisterForever(&url.URL{Scheme: "http", Host: "127.0.0.1:" + strconv.Itoa(mathKite.Config.Port), Path: "/kite"})
<-mathKite.KontrolReadyNotify()
// exp3 kite is the mathworker client. However it uses a different public
// key
exp3Kite := kite.New("exp3", "0.0.1")
exp3Kite.Config = conf.Config.Copy()
exp3Kite.Config.KiteKey = testutil.NewKiteKeyWithKeyPair(testkeys.PrivateSecond, testkeys.PublicSecond).Raw
exp3Kite.Config.KontrolKey = testkeys.PublicSecond
defer exp3Kite.Close()
query := &protocol.KontrolQuery{
Username: exp3Kite.Kite().Username,
Environment: exp3Kite.Kite().Environment,
Name: "mathworker2",
Version: "2.0.0",
}
// exp3 queries for mathkite
kites, err := exp3Kite.GetKites(query)
if err != nil {
t.Fatal(err)
}
defer klose(kites)
if len(kites) == 0 {
t.Fatal("No mathworker available")
}
// exp3 connects to mathworker
remoteMathWorker := kites[0]
err = remoteMathWorker.Dial()
if err != nil {
t.Fatal("Cannot connect to remote mathworker", err)
}
// Test Kontrol.GetToken
// TODO(rjeczalik): rework test to not touch Kontrol internals
kon.tokenCacheMu.Lock()
kon.tokenCache = make(map[string]string) // empty it
kon.tokenCacheMu.Unlock()
newToken, err := exp3Kite.GetToken(&remoteMathWorker.Kite)
if err != nil {
t.Error(err)
}
if remoteMathWorker.Auth.Key == newToken {
t.Errorf("Token renew failed. Tokens should be different after renew")
}
// Run "square" method
response, err := remoteMathWorker.TellWithTimeout("square", 4*time.Second, 2)
if err != nil {
t.Fatal(err)
}
var result int
err = response.Unmarshal(&result)
if err != nil {
t.Fatal(err)
}
// Result must be "4"
if result != 4 {
t.Fatalf("Invalid result: %d", result)
}
// now invalidate the second key
log.Printf("Invalidating %s\n", secondID)
if err := kon.DeleteKeyPair(secondID, ""); err != nil {
t.Fatal(err)
}
// try to get a new key; this should replace exp3Kite.Config.KontrolKey
// with the new one (in our case, because PublicSecond is invalidated, it's
// going to use Public, the first key). It should also return the new key.
publicKey, err := exp3Kite.GetKey()
if err != nil {
t.Fatal(err)
}
if publicKey != testkeys.Public {
t.Errorf("Key renew failed\n\twant:%s\n\tgot :%s\n", testkeys.Public, publicKey)
}
if exp3Kite.Config.KontrolKey != publicKey {
t.Errorf("Key renew should replace config.KontrolKey\n\twant:%s\n\tgot :%s\n",
testkeys.Public, publicKey)
}
}
| [
"\"KONTROL_STORAGE\"",
"\"KONTROL_STORAGE\""
] | [] | [
"KONTROL_STORAGE"
] | [] | ["KONTROL_STORAGE"] | go | 1 | 0 | |
service/env_test.go | /*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package service
import (
"os"
"path/filepath"
"testing"
"github.com/google/go-cmp/cmp"
"sigs.k8s.io/etcdadm/apis"
"sigs.k8s.io/yaml"
)
func TestBuildEnv(t *testing.T) {
basedir := "testdata/buildenvironment"
dirs, err := os.ReadDir(basedir)
if err != nil {
t.Fatalf("failed to read directory %q: %v", basedir, err)
}
for _, f := range dirs {
dir := filepath.Join(basedir, f.Name())
if !f.IsDir() {
t.Errorf("expected directory %s", dir)
continue
}
t.Run(f.Name(), func(t *testing.T) {
testBuildEnvDir(t, dir)
})
}
}
func testBuildEnvDir(t *testing.T, dir string) {
inputPath := filepath.Join(dir, "in.yaml")
inputBytes, err := os.ReadFile(inputPath)
if err != nil {
t.Fatalf("failed to read file %q: %v", inputPath, err)
}
cfg := &apis.EtcdAdmConfig{}
if err := yaml.Unmarshal(inputBytes, cfg); err != nil {
t.Fatalf("failed to parse file %q: %v", inputPath, err)
}
got, err := BuildEnvironment(cfg)
if err != nil {
t.Fatalf("BuildEnvironment failed: %v", err)
}
wantPath := filepath.Join(dir, "want.txt")
checkGolden(t, wantPath, got)
}
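// checkGolden compares got with the golden file at wantPath; when the
// UPDATE_GOLDEN_OUTPUT environment variable is set, it also rewrites the
// golden file with the new output.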
func checkGolden(t *testing.T, wantPath string, got []byte) {
updateGoldenOutput := os.Getenv("UPDATE_GOLDEN_OUTPUT") != ""
want, err := os.ReadFile(wantPath)
if err != nil {
if os.IsNotExist(err) && updateGoldenOutput {
// ignore
} else {
t.Fatalf("failed to read file %q: %v", wantPath, err)
}
}
if diff := cmp.Diff(got, want); diff != "" {
t.Errorf("unexpected result; diff %s", diff)
}
if updateGoldenOutput {
if err := os.WriteFile(wantPath, got, 0644); err != nil {
t.Errorf("failed to write file %q: %v", wantPath, err)
}
}
}
| [
"\"UPDATE_GOLDEN_OUTPUT\""
] | [] | [
"UPDATE_GOLDEN_OUTPUT"
] | [] | ["UPDATE_GOLDEN_OUTPUT"] | go | 1 | 0 | |
python-daemon/tests/engine_base/serializers/test_keras_serializer.py | #!/usr/bin/env python
# coding=utf-8
# Copyright [2020] [Apache Software Foundation]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
import pytest
from tensorflow import keras
from marvin_python_daemon.engine_base import EngineBaseTraining
from marvin_python_daemon.engine_base.serializers.keras_serializer import KerasSerializer
@pytest.fixture
def engine():
class MyEngineAction(KerasSerializer, EngineBaseTraining):
def execute(self, **kwargs):
pass
return MyEngineAction(default_root_path="/tmp/.marvin")
class TestKerasSerializer(object):
def test__serializer_load_keras(self, engine):
mocked_path = os.path.join(os.environ['MARVIN_DATA_PATH'], 'model')
a = keras.layers.Input(shape=(2,))
x = keras.layers.Dense(3)(a)
b = keras.layers.Dense(1)(x)
model = keras.models.Model(a, b)
model.save(mocked_path)
obj = engine._serializer_load(object_file_path=mocked_path)
assert isinstance(obj, keras.models.Model)
@mock.patch('joblib.load')
def test__serializer_load_not_keras(self, mocked_load, engine):
mocked_path = "/tmp/engine/dataset"
mocked_load.return_value = {"me": "here"}
obj = engine._serializer_load(object_file_path=mocked_path)
mocked_load.assert_called_once_with(mocked_path)
assert obj == {"me": "here"}
def test__serializer_dump_keras(self, engine):
mocked_obj = mock.MagicMock()
mocked_path = "/tmp/engine/model"
engine._serializer_dump(mocked_obj, object_file_path=mocked_path)
mocked_obj.save.assert_called_once_with(mocked_path)
@mock.patch('marvin_python_daemon.engine_base.EngineBaseTraining._serializer_dump')
def test__serializer_dump_not_keras(self, mocked_dump, engine):
mocked_obj = mock.MagicMock()
mocked_path = "/tmp/engine/dataset"
engine._serializer_dump(mocked_obj, object_file_path=mocked_path)
mocked_dump.assert_called_once_with(mocked_obj, mocked_path)
| [] | [] | [
"MARVIN_DATA_PATH"
] | [] | ["MARVIN_DATA_PATH"] | python | 1 | 0 | |
cmd/imageproxy/main.go | // Copyright 2013 The imageproxy authors.
// SPDX-License-Identifier: Apache-2.0
// imageproxy starts an HTTP server that proxies requests for remote images.
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"strconv"
"strings"
"time"
"github.com/PaulARoy/azurestoragecache"
"github.com/die-net/lrucache"
"github.com/die-net/lrucache/twotier"
"github.com/gomodule/redigo/redis"
"github.com/gorilla/mux"
"github.com/gregjones/httpcache/diskcache"
rediscache "github.com/gregjones/httpcache/redis"
"github.com/jamiealquiza/envy"
"github.com/peterbourgon/diskv"
"github.com/admpub/imageproxy"
"github.com/admpub/imageproxy/internal/gcscache"
"github.com/admpub/imageproxy/internal/s3cache"
)
const defaultMemorySize = 100
var addr = flag.String("addr", "localhost:8080", "TCP address to listen on")
var allowHosts = flag.String("allowHosts", "", "comma separated list of allowed remote hosts")
var denyHosts = flag.String("denyHosts", "", "comma separated list of denied remote hosts")
var referrers = flag.String("referrers", "", "comma separated list of allowed referring hosts")
var includeReferer = flag.Bool("includeReferer", false, "include referer header in remote requests")
var followRedirects = flag.Bool("followRedirects", true, "follow redirects")
var baseURL = flag.String("baseURL", "", "default base URL for relative remote URLs")
var cache tieredCache
var signatureKeys signatureKeyList
var scaleUp = flag.Bool("scaleUp", false, "allow images to scale beyond their original dimensions")
var timeout = flag.Duration("timeout", 0, "time limit for requests served by this proxy")
var verbose = flag.Bool("verbose", false, "print verbose logging messages")
var _ = flag.Bool("version", false, "Deprecated: this flag does nothing")
var contentTypes = flag.String("contentTypes", "image/*", "comma separated list of allowed content types")
var userAgent = flag.String("userAgent", "willnorris/imageproxy", "specify the user-agent used by imageproxy when fetching images from origin website")
func init() {
flag.Var(&cache, "cache", "location to cache images (see https://github.com/willnorris/imageproxy#cache)")
flag.Var(&signatureKeys, "signatureKey", "HMAC key used in calculating request signatures")
}
func main() {
envy.Parse("IMAGEPROXY")
flag.Parse()
p := imageproxy.NewProxy(nil, cache.Cache)
if *allowHosts != "" {
p.AllowHosts = strings.Split(*allowHosts, ",")
}
if *denyHosts != "" {
p.DenyHosts = strings.Split(*denyHosts, ",")
}
if *referrers != "" {
p.Referrers = strings.Split(*referrers, ",")
}
if *contentTypes != "" {
p.ContentTypes = strings.Split(*contentTypes, ",")
}
p.SignatureKeys = signatureKeys
if *baseURL != "" {
var err error
p.DefaultBaseURL, err = url.Parse(*baseURL)
if err != nil {
log.Fatalf("error parsing baseURL: %v", err)
}
}
p.IncludeReferer = *includeReferer
p.FollowRedirects = *followRedirects
p.Timeout = *timeout
p.ScaleUp = *scaleUp
p.Verbose = *verbose
p.UserAgent = *userAgent
server := &http.Server{
Addr: *addr,
Handler: p,
}
r := mux.NewRouter().SkipClean(true).UseEncodedPath()
r.PathPrefix("/").Handler(p)
fmt.Printf("imageproxy listening on %s\n", server.Addr)
log.Fatal(http.ListenAndServe(*addr, r))
}
type signatureKeyList [][]byte
func (skl *signatureKeyList) String() string {
return fmt.Sprint(*skl)
}
func (skl *signatureKeyList) Set(value string) error {
for _, v := range strings.Fields(value) {
key := []byte(v)
if strings.HasPrefix(v, "@") {
file := strings.TrimPrefix(v, "@")
var err error
key, err = ioutil.ReadFile(file)
if err != nil {
log.Fatalf("error reading signature file: %v", err)
}
}
*skl = append(*skl, key)
}
return nil
}
// tieredCache allows specifying multiple caches via flags, which will create
// tiered caches using the twotier package.
type tieredCache struct {
imageproxy.Cache
}
func (tc *tieredCache) String() string {
return fmt.Sprint(*tc)
}
func (tc *tieredCache) Set(value string) error {
for _, v := range strings.Fields(value) {
c, err := parseCache(v)
if err != nil {
return err
}
if tc.Cache == nil {
tc.Cache = c
} else {
tc.Cache = twotier.New(tc.Cache, c)
}
}
return nil
}
// parseCache parses c returns the specified Cache implementation.
func parseCache(c string) (imageproxy.Cache, error) {
if c == "" {
return nil, nil
}
if c == "memory" {
c = fmt.Sprintf("memory:%d", defaultMemorySize)
}
u, err := url.Parse(c)
if err != nil {
return nil, fmt.Errorf("error parsing cache flag: %v", err)
}
switch u.Scheme {
case "azure":
return azurestoragecache.New("", "", u.Host)
case "gcs":
return gcscache.New(u.Host, strings.TrimPrefix(u.Path, "/"))
case "memory":
return lruCache(u.Opaque)
case "redis":
conn, err := redis.DialURL(u.String(), redis.DialPassword(os.Getenv("REDIS_PASSWORD")))
if err != nil {
return nil, err
}
return rediscache.NewWithClient(conn), nil
case "s3":
return s3cache.New(u.String())
case "file":
return diskCache(u.Path), nil
default:
return diskCache(c), nil
}
}
// lruCache creates an LRU Cache with the specified options of the form
// "maxSize:maxAge". maxSize is specified in megabytes, maxAge is a duration.
func lruCache(options string) (*lrucache.LruCache, error) {
parts := strings.SplitN(options, ":", 2)
size, err := strconv.ParseInt(parts[0], 10, 64)
if err != nil {
return nil, err
}
var age time.Duration
if len(parts) > 1 {
age, err = time.ParseDuration(parts[1])
if err != nil {
return nil, err
}
}
return lrucache.New(size*1e6, int64(age.Seconds())), nil
}
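// diskCache returns a disk-backed cache rooted at path, sharding entries into
// two levels of subdirectories derived from the key prefix.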
func diskCache(path string) *diskcache.Cache {
d := diskv.New(diskv.Options{
BasePath: path,
// For file "c0ffee", store file as "c0/ff/c0ffee"
Transform: func(s string) []string { return []string{s[0:2], s[2:4]} },
})
return diskcache.NewWithDiskv(d)
}
| [
"\"REDIS_PASSWORD\""
] | [] | [
"REDIS_PASSWORD"
] | [] | ["REDIS_PASSWORD"] | go | 1 | 0 | |
ds18b20Pusher.py | #!/usr/bin/env python
import pika
import sys
from threading import Thread
import threading
import time
import logging
import os
w1Devices = []
rabbitMqHost = os.environ['RABBIT_MQ_HOST']
rabbitMqExchange = os.environ['RABBIT_MQ_EXCHANGE']
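# Module-level connection handle; openConnection() replaces it with a real BlockingConnection.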
connection = pika.BlockingConnection
def checkDevices():
ct = threading.currentThread()
while getattr(ct, "do_checking", True):
logging.info("Checking for new One Wire Devices...")
global w1Devices
w1Devices = []
try:
listOfFiles = os.listdir('/sys/bus/w1/devices')
for entry in listOfFiles:
if entry != "w1_bus_master1":
logging.info("Found Device :" + entry)
w1Devices.append(entry)
except Exception as e:
logging.error(
"Error while trying to get OneWire device list: " + str(e))
finally:
logging.info("Check for new Sensors in 10 Minutes...")
time.sleep(600)
def openConnection():
global connection
global rabbitMqHost
connection = pika.BlockingConnection(
pika.ConnectionParameters(host=rabbitMqHost))
def closeConnection():
global connection
connection.close()
def getTemp(channel):
global w1Devices
global rabbitMqExchange
if len(w1Devices) == 0:
logging.info("Sorry, no Sensors found :-(")
try:
for device in w1Devices:
# Read the 1-wire slave file
file = open('/sys/bus/w1/devices/' + device + '/w1_slave')
filecontent = file.read()
file.close()
# Read and convert the temperature values
stringvalue = filecontent.split("\n")[1].split(" ")[9]
temp = float(stringvalue[2:]) / 1000
# Output the temperature
temp = '%6.2f' % temp
channel.basic_publish(
exchange=rabbitMqExchange, routing_key=device, body=temp)
logging.info("Publish Temp " + temp + " from Sensor:" + device)
except Exception as e:
logging.error("Error while reading OneWire devices: " + str(e))
time.sleep(1000)
def main():
logging.basicConfig(level=logging.INFO)
logging.info("---------------------------------------------")
logging.info('Started')
# -
checkDevicesThread = Thread(target=checkDevices)
checkDevicesThread.join
checkDevicesThread.daemon = True
checkDevicesThread.start()
# -
openConnection()
channel = connection.channel()
logging.info("Start Reading from Sensor in 10 Seconds...")
time.sleep(10)
while True:
getTemp(channel)
time.sleep(8)
logging.info('Finished')
closeConnection()
logging.info("---------------------------------------------")
if __name__ == '__main__':
try:
main()
except:
logging.info("---------------------------------------------")
logging.info("-- CRITICAL ERROR OCCURED...")
logging.info("---------------------------------------------")
time.sleep(5)
sys.exit(2)
| [] | [] | [
"RABBIT_MQ_HOST",
"RABBIT_MQ_EXCHANGE"
] | [] | ["RABBIT_MQ_HOST", "RABBIT_MQ_EXCHANGE"] | python | 2 | 0 | |
tests/unit/test_line_minimization.py | # This must be done BEFORE importing numpy or anything else.
# Therefore it must be in your main script.
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
import pandas as pd
from pyscf import lib, gto, scf
from pyqmc import default_sj, line_minimization, initial_guess, gradient_generator
import h5py
def test():
""" Optimize a Helium atom's wave function and check that it's
better than Hartree-Fock"""
mol = gto.M(atom="He 0. 0. 0.", basis="bfd_vdz", ecp="bfd", unit="bohr")
mf = scf.RHF(mol).run()
wf, to_opt = default_sj(mol, mf)
print(to_opt)
nconf = 500
wf, dfgrad = line_minimization(
wf, initial_guess(mol, nconf), gradient_generator(mol, wf, to_opt)
)
dfgrad = pd.DataFrame(dfgrad)
print(dfgrad)
mfen = mf.energy_tot()
enfinal = dfgrad["energy"].values[-1]
enfinal_err = dfgrad["energy_error"].values[-1]
assert mfen > enfinal
if __name__ == "__main__":
test()
| [] | [] | [
"MKL_NUM_THREADS",
"OMP_NUM_THREADS",
"NUMEXPR_NUM_THREADS"
] | [] | ["MKL_NUM_THREADS", "OMP_NUM_THREADS", "NUMEXPR_NUM_THREADS"] | python | 3 | 0 | |
src/main/java/com/mysynergis/soe/util/ServerUtil.java | package com.mysynergis.soe.util;
import java.io.File;
import java.io.IOException;
import java.util.Map;
import java.util.Map.Entry;
import com.esri.arcgis.server.SOIHelper;
import com.mysynergis.soe.log.impl.Log;
/**
* SOE/SOI specific helper util
*
* @author h.fleischer
* @since 08.01.2019
*
*/
public class ServerUtil {
public static final String UTF_8 = "UTF-8";
protected static final String ARCGISHOME_ENV = "AGSSERVER";
private ServerUtil() {
// no public instance
}
/**
* read the ArcGIS home directory path from the system's environment variables
*
* @return
*/
protected static String getArcGisHomeDir() {
String arcgisHome = null;
/* Check the system property first */
if (System.getProperty(ARCGISHOME_ENV) != null) {
arcgisHome = System.getProperty(ARCGISHOME_ENV);
}
if (arcgisHome == null) {
/* To make env lookup case insensitive */
Map<String, String> envs = System.getenv();
for (Entry<String, String> envEntry : envs.entrySet()) {
if (envEntry.getKey().equalsIgnoreCase(ARCGISHOME_ENV)) {
arcgisHome = envEntry.getValue();
}
}
}
if (arcgisHome != null && !arcgisHome.endsWith(File.separator)) {
arcgisHome += File.separator;
}
return arcgisHome;
}
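/**
* builds the path to the MapServer.wsdl file below the given ArcGIS home directory
*
* @param arcgisHome the ArcGIS server home directory (with trailing separator)
* @return absolute path to MapServer.wsdl
*/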
protected static String getMapServerWsdl(String arcgisHome) {
return String.format("%sXmlSchema%sMapServer.wsdl", arcgisHome, File.separator);
}
/**
* extracted from the ESRI stub implementation of the SOI;
* resolves the ArcGIS home directory and builds an instance of {@link SOIHelper}
*
* @return the {@link SOIHelper} instance
*/
public static SOIHelper createSoiHelper() {
String arcgisHome = getArcGisHomeDir();
if (arcgisHome != null) {
return new SOIHelper(getMapServerWsdl(arcgisHome));
} else {
IllegalStateException environmentMissingException = new IllegalStateException("failed to get ArcGIS home directory. please check if environment variable " + ARCGISHOME_ENV + " exists",
null);
Log.getInstance().severe(environmentMissingException);
throw environmentMissingException;
}
}
}
| [] | [] | [] | [] | [] | java | 0 | 0 | |
app.py | #!/usr/bin/env python3
import os
from flask_restful import Api, Resource, abort
from flask_apispec import FlaskApiSpec
from flask_jwt_extended import JWTManager
from flask_cors import CORS
from webargs.flaskparser import parser
from resources import *
from database import app, db_session, init_db, RevokedToken
# Enable Cross Origin Resource Sharing for all domains on all routes
CORS(app)
# Add JSON Web Token authorization
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
jwt = JWTManager(app)
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return RevokedToken.is_jti_blacklisted(jti)
@app.teardown_appcontext
def shutdown_session(exception=None):
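"""Remove the scoped database session when the application context tears down."""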
db_session.remove()
@app.after_request
def after_request(response):
"""Close the database connection after each request."""
db_session.commit()
return response
# Flask RESTful API
api = Api(app)
docs = FlaskApiSpec(app)
resources = {
# Account resources
'/api/account/api': ApiKey,
'/api/account/available_algorithms': AvailableAlgorithms,
'/api/account/available_exchanges': AvailableExchanges,
'/api/account/balances': AccountBalances,
'/api/account/save_email': SaveEmail,
'/api/account/save_password': SavePassword,
'/api/account/save_second_factor': SaveSecondFactor,
'/api/account/second_factor_secret': SecondFactorSecret,
'/api/account/save_setting': SaveUserSetting,
'/api/account/user': UserResource,
'/api/account/valuations': AccountValuations,
# Coincube API resources
'/api/v1/summary': ApiSummary,
'/api/v1/cube/summary': ApiCubeSummary,
'/api/v1/cube/details': ApiCubeDetails,
'/api/v1/get_portfolios': ApiPortfolios,
'/api/v1/post_allocations': ApiPostAllocations,
# Authorization resources
'/api/auth/login': Login,
'/api/auth/logout_access': LogoutAccess,
'/api/auth/logout_refresh': LogoutRefresh,
'/api/auth/validate_oauth/': OauthValidate,
'/api/auth/register': Register,
'/api/auth/refresh': TokenRefresh,
'/api/auth/reset_password_token': ResetPasswordToken,
'/api/auth/reset_password/<string:token>': ResetPassword,
'/api/auth/second_factor': SecondFactor,
# Front section API resources
'/api/chart/pie/<string:index_type>/<string:index_name>': PieChart,
'/api/chart/pie/<string:index_type>': PieCharts,
'/api/charts/pie': AllPieCharts,
'/api/cmc/id': CmcId,
'/api/cmc/ids': CmcIds,
'/api/indices': AllIndices,
'/api/supported_assets': SupportedAssets,
'/api/supported_exchanges': SupportedExchanges,
'/api/supported_exchange_assets': SupportedExchangeAssets,
'/api/supported_exchange_pairs': SupportedExchangePairs,
# Cube API resources
'/api/cube': CubeResource,
'/api/cube/allocations/current': AllocationsCurrent,
'/api/cube/allocations/target': AllocationsTarget,
'/api/cube/available_ex_assets': AvailableAssets,
'/api/cube/balances': Balances,
'/api/cube/connection': ConnectionResource,
'/api/cube/ex_pairs': ExPairResource,
'/api/cube/save_setting': SaveCubeSetting,
'/api/cube/transactions': Transactions,
'/api/cube/valuations': Valuations,
# Healthcheck
'/health': Healthcheck,
}
for key, value in resources.items():
# Add API resources
api.add_resource(value, key)
# Register documentation
docs.register(value)
# Build the database:
# This will create the database file using SQLAlchemy
try:
init_db()
print('DB INITIALIZED')
except Exception:
app.logger.exception('Empty database. Unable to run init_db().')
# This error handler is necessary for webargs usage with Flask-RESTful.
@parser.error_handler
def handle_request_parsing_error(err, req):
abort(422, errors=err.messages)
if __name__ == '__main__':
PORT = int(os.getenv('PORT'))
HOST = os.getenv('HOST')
print("Starting server..")
# Assumed entry point: run the Flask app on the configured host and port.
app.run(host=HOST, port=PORT)
| [] | [] | [
"PORT",
"HOST"
] | [] | ["PORT", "HOST"] | python | 2 | 0 | |
cmdtime.go | package main
import (
"os"
"time"
. "github.com/reujab/bronze/types"
)
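// cmdTimeSegment fills the segment with the duration of the last command,
// read from the $cmdtime environment variable, when it reaches
// $BRONZE_CMDTIME_THRESHOLD (or when no valid threshold is set).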
func cmdTimeSegment(segment *Segment) {
duration, err := time.ParseDuration(os.Getenv("cmdtime"))
if err != nil {
if shell == "bash" {
dief("cmdtime: bash is not supported")
} else {
dief("cmdtime: invalid $cmdtime: %q", os.Getenv("cmdtime"))
}
}
threshold, err := time.ParseDuration(os.Getenv("BRONZE_CMDTIME_THRESHOLD"))
if err != nil || duration >= threshold {
segment.Value = duration.String()
}
}
| [
"\"cmdtime\"",
"\"cmdtime\"",
"\"BRONZE_CMDTIME_THRESHOLD\""
] | [] | [
"BRONZE_CMDTIME_THRESHOLD",
"cmdtime"
] | [] | ["BRONZE_CMDTIME_THRESHOLD", "cmdtime"] | go | 2 | 0 | |
server/container_create_linux.go | // +build linux
package server
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"syscall"
"time"
"github.com/containers/buildah/pkg/secrets"
"github.com/containers/libpod/pkg/annotations"
"github.com/containers/libpod/pkg/rootless"
createconfig "github.com/containers/libpod/pkg/spec"
"github.com/containers/storage/pkg/mount"
"github.com/cri-o/cri-o/internal/lib"
"github.com/cri-o/cri-o/internal/lib/sandbox"
"github.com/cri-o/cri-o/internal/log"
oci "github.com/cri-o/cri-o/internal/oci"
"github.com/cri-o/cri-o/internal/storage"
libconfig "github.com/cri-o/cri-o/pkg/config"
"github.com/cri-o/cri-o/utils"
securejoin "github.com/cyphar/filepath-securejoin"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/devices"
rspec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/runtime-tools/generate"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"golang.org/x/net/context"
pb "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
)
// minMemoryLimit is the minimum memory that must be set for a container.
// A lower value would result in the container failing to start.
const minMemoryLimit = 12582912
// Copied from k8s.io/kubernetes/pkg/kubelet/kuberuntime/labels.go
const podTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
var (
_cgroupv1HasHugetlbOnce sync.Once
_cgroupv1HasHugetlb bool
_cgroupv1HasHugetlbErr error
_cgroupv2HasHugetlbOnce sync.Once
_cgroupv2HasHugetlb bool
_cgroupv2HasHugetlbErr error
_cgroupHasMemorySwapOnce sync.Once
_cgroupHasMemorySwap bool
)
// cgroupv1HasHugetlb returns whether the hugetlb controller is present on
// cgroup v1.
func cgroupv1HasHugetlb() (bool, error) {
_cgroupv1HasHugetlbOnce.Do(func() {
if _, err := ioutil.ReadDir("/sys/fs/cgroup/hugetlb"); err != nil {
_cgroupv1HasHugetlbErr = errors.Wrap(err, "readdir /sys/fs/cgroup/hugetlb")
_cgroupv1HasHugetlb = false
} else {
_cgroupv1HasHugetlbErr = nil
_cgroupv1HasHugetlb = true
}
})
return _cgroupv1HasHugetlb, _cgroupv1HasHugetlbErr
}
// cgroupv2HasHugetlb returns whether the hugetlb controller is present on
// cgroup v2.
func cgroupv2HasHugetlb() (bool, error) {
_cgroupv2HasHugetlbOnce.Do(func() {
controllers, err := ioutil.ReadFile("/sys/fs/cgroup/cgroup.controllers")
if err != nil {
_cgroupv2HasHugetlbErr = errors.Wrap(err, "read /sys/fs/cgroup/cgroup.controllers")
return
}
_cgroupv2HasHugetlb = strings.Contains(string(controllers), "hugetlb")
})
return _cgroupv2HasHugetlb, _cgroupv2HasHugetlbErr
}
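// cgroupHasMemorySwap reports whether a swap limit can be set; on cgroup v2 it
// is assumed to always be available, on v1 the memsw knob must exist.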
func cgroupHasMemorySwap() bool {
_cgroupHasMemorySwapOnce.Do(func() {
if cgroups.IsCgroup2UnifiedMode() {
_cgroupHasMemorySwap = true
return
}
_, err := os.Stat("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes")
_cgroupHasMemorySwap = err == nil
})
return _cgroupHasMemorySwap
}
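// configDevice pairs an OCI device node with the cgroup rule that grants
// access to it.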
type configDevice struct {
Device rspec.LinuxDevice
Resource rspec.LinuxDeviceCgroup
}
// findCgroupMountpoint returns nil if the named cgroup controller is mounted.
func findCgroupMountpoint(name string) error {
_, err := cgroups.FindCgroupMountpoint("", name)
return err
}
func addDevicesPlatform(ctx context.Context, sb *sandbox.Sandbox, containerConfig *pb.ContainerConfig, privilegedWithoutHostDevices bool, specgen *generate.Generator) error {
sp := specgen.Config
if containerConfig.GetLinux().GetSecurityContext().GetPrivileged() && !privilegedWithoutHostDevices {
hostDevices, err := devices.HostDevices()
if err != nil {
return err
}
for _, hostDevice := range hostDevices {
rd := rspec.LinuxDevice{
Path: hostDevice.Path,
Type: string(hostDevice.Type),
Major: hostDevice.Major,
Minor: hostDevice.Minor,
UID: &hostDevice.Uid,
GID: &hostDevice.Gid,
}
if hostDevice.Major == 0 && hostDevice.Minor == 0 {
// Invalid device, most likely a symbolic link, skip it.
continue
}
specgen.AddDevice(rd)
}
sp.Linux.Resources.Devices = []rspec.LinuxDeviceCgroup{
{
Allow: true,
Access: "rwm",
},
}
}
for _, device := range containerConfig.GetDevices() {
		// pin the device to avoid capturing the range variable `device`
		// in the function literal below
device := device
// If we are privileged, we have access to devices on the host.
// If the requested container path already exists on the host, the container won't see the expected host path.
// Therefore, we must error out if the container path already exists
privileged := containerConfig.GetLinux().GetSecurityContext() != nil && containerConfig.GetLinux().GetSecurityContext().GetPrivileged()
if privileged && device.ContainerPath != device.HostPath {
// we expect this to not exist
_, err := os.Stat(device.ContainerPath)
if err == nil {
return errors.Errorf("privileged container was configured with a device container path that already exists on the host.")
}
if !os.IsNotExist(err) {
return errors.Wrap(err, "error checking if container path exists on host")
}
}
path, err := resolveSymbolicLink(device.HostPath, "/")
if err != nil {
return err
}
dev, err := devices.DeviceFromPath(path, device.Permissions)
// if there was no error, return the device
if err == nil {
rd := rspec.LinuxDevice{
Path: device.ContainerPath,
Type: string(dev.Type),
Major: dev.Major,
Minor: dev.Minor,
UID: &dev.Uid,
GID: &dev.Gid,
}
specgen.AddDevice(rd)
sp.Linux.Resources.Devices = append(sp.Linux.Resources.Devices, rspec.LinuxDeviceCgroup{
Allow: true,
Type: string(dev.Type),
Major: &dev.Major,
Minor: &dev.Minor,
Access: dev.Permissions,
})
continue
}
// if the device is not a device node
// try to see if it's a directory holding many devices
if err == devices.ErrNotADevice {
// check if it is a directory
if e := utils.IsDirectory(path); e == nil {
// mount the internal devices recursively
// nolint: errcheck
filepath.Walk(path, func(dpath string, f os.FileInfo, e error) error {
if e != nil {
log.Debugf(ctx, "addDevice walk: %v", e)
}
childDevice, e := devices.DeviceFromPath(dpath, device.Permissions)
if e != nil {
// ignore the device
return nil
}
cPath := strings.Replace(dpath, path, device.ContainerPath, 1)
rd := rspec.LinuxDevice{
Path: cPath,
Type: string(childDevice.Type),
Major: childDevice.Major,
Minor: childDevice.Minor,
UID: &childDevice.Uid,
GID: &childDevice.Gid,
}
specgen.AddDevice(rd)
sp.Linux.Resources.Devices = append(sp.Linux.Resources.Devices, rspec.LinuxDeviceCgroup{
Allow: true,
Type: string(childDevice.Type),
Major: &childDevice.Major,
Minor: &childDevice.Minor,
Access: childDevice.Permissions,
})
return nil
})
}
}
}
return nil
}
// createContainerPlatform performs platform dependent intermediate steps before calling the container's oci.Runtime().CreateContainer()
func (s *Server) createContainerPlatform(container *oci.Container, cgroupParent string) error {
if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() {
rootPair := s.defaultIDMappings.RootPair()
for _, path := range []string{container.BundlePath(), container.MountPoint()} {
if err := os.Chown(path, rootPair.UID, rootPair.GID); err != nil {
return errors.Wrapf(err, "cannot chown %s to %d:%d", path, rootPair.UID, rootPair.GID)
}
if err := makeAccessible(path, rootPair.UID, rootPair.GID); err != nil {
return errors.Wrapf(err, "cannot make %s accessible to %d:%d", path, rootPair.UID, rootPair.GID)
}
}
}
return s.Runtime().CreateContainer(container, cgroupParent)
}
// makeAccessible changes the path permission and each parent directory to have --x--x--x
func makeAccessible(path string, uid, gid int) error {
for ; path != "/"; path = filepath.Dir(path) {
st, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
if int(st.Sys().(*syscall.Stat_t).Uid) == uid && int(st.Sys().(*syscall.Stat_t).Gid) == gid {
continue
}
if st.Mode()&0111 != 0111 {
if err := os.Chmod(path, st.Mode()|0111); err != nil {
return err
}
}
}
return nil
}
// nolint:gocyclo
func (s *Server) createSandboxContainer(ctx context.Context, containerID, containerName string, sb *sandbox.Sandbox, sandboxConfig *pb.PodSandboxConfig, containerConfig *pb.ContainerConfig) (cntr *oci.Container, errRet error) {
if sb == nil {
return nil, errors.New("createSandboxContainer needs a sandbox")
}
// TODO: simplify this function (cyclomatic complexity here is high)
// TODO: factor generating/updating the spec into something other projects can vendor
// creates a spec Generator with the default spec.
specgen, err := generate.New("linux")
if err != nil {
return nil, err
}
specgen.HostSpecific = true
specgen.ClearProcessRlimits()
ulimits, err := getUlimitsFromConfig(&s.config)
if err != nil {
return nil, err
}
for _, u := range ulimits {
specgen.AddProcessRlimits(u.name, u.hard, u.soft)
}
readOnlyRootfs := s.config.ReadOnly
var privileged bool
if containerConfig.GetLinux().GetSecurityContext() != nil {
if containerConfig.GetLinux().GetSecurityContext().GetPrivileged() {
privileged = true
}
if privileged {
if !sandboxConfig.GetLinux().GetSecurityContext().GetPrivileged() {
return nil, errors.New("no privileged container allowed in sandbox")
}
}
if containerConfig.GetLinux().GetSecurityContext().GetReadonlyRootfs() {
readOnlyRootfs = true
}
}
specgen.SetRootReadonly(readOnlyRootfs)
if s.config.ReadOnly {
// tmpcopyup is a runc extension and is not part of the OCI spec.
// WORK ON: Use "overlay" mounts as an alternative to tmpfs with tmpcopyup
// Look at https://github.com/cri-o/cri-o/pull/1434#discussion_r177200245 for more info on this
options := []string{"rw", "noexec", "nosuid", "nodev", "tmpcopyup"}
mounts := map[string]string{
"/run": "mode=0755",
"/tmp": "mode=1777",
"/var/tmp": "mode=1777",
}
for target, mode := range mounts {
if !isInCRIMounts(target, containerConfig.GetMounts()) {
mnt := rspec.Mount{
Destination: target,
Type: "tmpfs",
Source: "tmpfs",
Options: append(options, mode),
}
specgen.AddMount(mnt)
}
}
}
imageSpec := containerConfig.GetImage()
if imageSpec == nil {
return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image is nil")
}
image := imageSpec.Image
if image == "" {
return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Image.Image is empty")
}
images, err := s.StorageImageServer().ResolveNames(s.config.SystemContext, image)
if err != nil {
if err == storage.ErrCannotParseImageID {
images = append(images, image)
} else {
return nil, err
}
}
// Get imageName and imageRef that are later requested in container status
var (
imgResult *storage.ImageResult
imgResultErr error
)
for _, img := range images {
imgResult, imgResultErr = s.StorageImageServer().ImageStatus(s.config.SystemContext, img)
if imgResultErr == nil {
break
}
}
if imgResultErr != nil {
return nil, imgResultErr
}
imageName := imgResult.Name
imageRef := imgResult.ID
if len(imgResult.RepoDigests) > 0 {
imageRef = imgResult.RepoDigests[0]
}
specgen.AddAnnotation(annotations.Image, image)
specgen.AddAnnotation(annotations.ImageName, imageName)
specgen.AddAnnotation(annotations.ImageRef, imageRef)
selinuxConfig := containerConfig.GetLinux().GetSecurityContext().GetSelinuxOptions()
var labelOptions []string
if selinuxConfig == nil {
labelOptions, err = label.DupSecOpt(sb.ProcessLabel())
if err != nil {
return nil, err
}
} else {
labelOptions = getLabelOptions(selinuxConfig)
}
containerIDMappings := s.defaultIDMappings
metadata := containerConfig.GetMetadata()
containerInfo, err := s.StorageRuntimeServer().CreateContainer(s.config.SystemContext,
sb.Name(), sb.ID(),
image, imgResult.ID,
containerName, containerID,
metadata.Name,
metadata.Attempt,
containerIDMappings,
labelOptions)
if err != nil {
return nil, err
}
mountLabel := containerInfo.MountLabel
var processLabel string
if !privileged {
processLabel = containerInfo.ProcessLabel
}
hostIPC := containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetIpc() == pb.NamespaceMode_NODE
hostPID := containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE
hostNet := containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == pb.NamespaceMode_NODE
// Don't use SELinux separation with Host Pid or IPC Namespace or privileged.
if hostPID || hostIPC {
processLabel, mountLabel = "", ""
}
if hostNet {
processLabel = ""
}
defer func() {
if errRet != nil {
log.Infof(ctx, "createCtrLinux: deleting container %s from storage", containerInfo.ID)
err2 := s.StorageRuntimeServer().DeleteContainer(containerInfo.ID)
if err2 != nil {
log.Warnf(ctx, "Failed to cleanup container directory: %v", err2)
}
}
}()
specgen.SetLinuxMountLabel(mountLabel)
specgen.SetProcessSelinuxLabel(processLabel)
containerVolumes, ociMounts, err := addOCIBindMounts(ctx, mountLabel, containerConfig, &specgen, s.config.RuntimeConfig.BindMountPrefix)
if err != nil {
return nil, err
}
volumesJSON, err := json.Marshal(containerVolumes)
if err != nil {
return nil, err
}
specgen.AddAnnotation(annotations.Volumes, string(volumesJSON))
configuredDevices, err := getDevicesFromConfig(ctx, &s.config)
if err != nil {
return nil, err
}
for i := range configuredDevices {
d := &configuredDevices[i]
specgen.AddDevice(d.Device)
specgen.AddLinuxResourcesDevice(d.Resource.Allow, d.Resource.Type, d.Resource.Major, d.Resource.Minor, d.Resource.Access)
}
privilegedWithoutHostDevices, err := s.Runtime().PrivilegedWithoutHostDevices(sb.RuntimeHandler())
if err != nil {
return nil, err
}
if err := addDevices(ctx, sb, containerConfig, privilegedWithoutHostDevices, &specgen); err != nil {
return nil, err
}
labels := containerConfig.GetLabels()
if err := validateLabels(labels); err != nil {
return nil, err
}
kubeAnnotations := containerConfig.GetAnnotations()
for k, v := range kubeAnnotations {
specgen.AddAnnotation(k, v)
}
for k, v := range labels {
specgen.AddAnnotation(k, v)
}
// set this container's apparmor profile if it is set by sandbox
if s.Config().AppArmor().IsEnabled() && !privileged {
profile, err := s.Config().AppArmor().Apply(
containerConfig.GetLinux().GetSecurityContext().GetApparmorProfile(),
)
if err != nil {
return nil, errors.Wrapf(err, "applying apparmor profile to container %s", containerID)
}
log.Debugf(ctx, "Applied AppArmor profile %s to container %s", profile, containerID)
specgen.SetProcessApparmorProfile(profile)
}
sboxLogDir := sandboxConfig.GetLogDirectory()
if sboxLogDir == "" {
sboxLogDir = sb.LogDir()
}
logPath := containerConfig.GetLogPath()
if logPath == "" {
logPath = filepath.Join(sboxLogDir, containerID+".log")
} else {
logPath = filepath.Join(sboxLogDir, logPath)
}
// Handle https://issues.k8s.io/44043
if err := ensureSaneLogPath(logPath); err != nil {
return nil, err
}
log.Debugf(ctx, "setting container's log_path = %s, sbox.logdir = %s, ctr.logfile = %s",
sboxLogDir, containerConfig.GetLogPath(), logPath,
)
specgen.SetProcessTerminal(containerConfig.Tty)
if containerConfig.Tty {
specgen.AddProcessEnv("TERM", "xterm")
}
linux := containerConfig.GetLinux()
if linux != nil {
resources := linux.GetResources()
if resources != nil {
specgen.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod()))
specgen.SetLinuxResourcesCPUQuota(resources.GetCpuQuota())
specgen.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares()))
memoryLimit := resources.GetMemoryLimitInBytes()
if memoryLimit != 0 {
if memoryLimit < minMemoryLimit {
return nil, fmt.Errorf("set memory limit %v too low; should be at least %v", memoryLimit, minMemoryLimit)
}
specgen.SetLinuxResourcesMemoryLimit(memoryLimit)
if cgroupHasMemorySwap() {
specgen.SetLinuxResourcesMemorySwap(memoryLimit)
}
}
specgen.SetProcessOOMScoreAdj(int(resources.GetOomScoreAdj()))
specgen.SetLinuxResourcesCPUCpus(resources.GetCpusetCpus())
specgen.SetLinuxResourcesCPUMems(resources.GetCpusetMems())
supportsHugetlb := false
if cgroups.IsCgroup2UnifiedMode() {
supportsHugetlb, err = cgroupv2HasHugetlb()
if err != nil {
return nil, err
}
} else {
supportsHugetlb, err = cgroupv1HasHugetlb()
if err != nil {
return nil, err
}
}
// If the kernel has no support for hugetlb, silently ignore the limits
if supportsHugetlb {
hugepageLimits := resources.GetHugepageLimits()
for _, limit := range hugepageLimits {
specgen.AddLinuxResourcesHugepageLimit(limit.PageSize, limit.Limit)
}
}
}
var cgPath string
parent := defaultCgroupfsParent
useSystemd := s.config.CgroupManager == oci.SystemdCgroupsManager
if useSystemd {
parent = defaultSystemdParent
}
if sb.CgroupParent() != "" {
parent = sb.CgroupParent()
}
if useSystemd {
cgPath = parent + ":" + scopePrefix + ":" + containerID
} else {
cgPath = filepath.Join(parent, scopePrefix+"-"+containerID)
}
specgen.SetLinuxCgroupsPath(cgPath)
if t, ok := kubeAnnotations[podTerminationGracePeriodLabel]; ok {
// currently only supported by systemd, see
// https://github.com/opencontainers/runc/pull/2224
if useSystemd {
specgen.AddAnnotation("org.systemd.property.TimeoutStopUSec",
"uint64 "+t+"000000") // sec to usec
}
}
if privileged {
specgen.SetupPrivileged(true)
} else {
capabilities := linux.GetSecurityContext().GetCapabilities()
// Ensure we don't get a nil pointer error if the config
// doesn't set any capabilities
if capabilities == nil {
capabilities = &pb.Capability{}
}
// Clear default capabilities from spec
specgen.ClearProcessCapabilities()
capabilities.AddCapabilities = append(capabilities.AddCapabilities, s.config.DefaultCapabilities...)
err = setupCapabilities(&specgen, capabilities)
if err != nil {
return nil, err
}
}
specgen.SetProcessNoNewPrivileges(linux.GetSecurityContext().GetNoNewPrivs())
if containerConfig.GetLinux().GetSecurityContext() != nil &&
!containerConfig.GetLinux().GetSecurityContext().Privileged {
// TODO(runcom): have just one of this var at the top of the function
securityContext := containerConfig.GetLinux().GetSecurityContext()
for _, mp := range []string{
"/proc/acpi",
"/proc/kcore",
"/proc/keys",
"/proc/latency_stats",
"/proc/timer_list",
"/proc/timer_stats",
"/proc/sched_debug",
"/proc/scsi",
"/sys/firmware",
} {
specgen.AddLinuxMaskedPaths(mp)
}
if securityContext.GetMaskedPaths() != nil {
specgen.Config.Linux.MaskedPaths = nil
for _, path := range securityContext.GetMaskedPaths() {
specgen.AddLinuxMaskedPaths(path)
}
}
for _, rp := range []string{
"/proc/asound",
"/proc/bus",
"/proc/fs",
"/proc/irq",
"/proc/sys",
"/proc/sysrq-trigger",
} {
specgen.AddLinuxReadonlyPaths(rp)
}
if securityContext.GetReadonlyPaths() != nil {
specgen.Config.Linux.ReadonlyPaths = nil
for _, path := range securityContext.GetReadonlyPaths() {
specgen.AddLinuxReadonlyPaths(path)
}
}
}
}
// Join the namespace paths for the pod sandbox container.
if err := configureGeneratorGivenNamespacePaths(sb.NamespacePaths(), specgen); err != nil {
return nil, errors.Wrap(err, "failed to configure namespaces in container create")
}
if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid() == pb.NamespaceMode_NODE {
// kubernetes PodSpec specify to use Host PID namespace
if err := specgen.RemoveLinuxNamespace(string(rspec.PIDNamespace)); err != nil {
return nil, err
}
} else if containerConfig.GetLinux().GetSecurityContext().GetNamespaceOptions().GetPid() == pb.NamespaceMode_POD {
infra := sb.InfraContainer()
if infra == nil {
return nil, errors.New("PID namespace requested, but sandbox has no infra container")
}
// share Pod PID namespace
// SEE NOTE ABOVE
pidNsPath := fmt.Sprintf("/proc/%d/ns/pid", infra.State().Pid)
if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.PIDNamespace), pidNsPath); err != nil {
return nil, err
}
}
// If the sandbox is configured to run in the host network, do not create a new network namespace
if sb.HostNetwork() {
if err := specgen.RemoveLinuxNamespace(string(rspec.NetworkNamespace)); err != nil {
return nil, err
}
if !isInCRIMounts("/sys", containerConfig.GetMounts()) {
specgen.RemoveMount("/sys")
specgen.RemoveMount("/sys/fs/cgroup")
sysMnt := rspec.Mount{
Destination: "/sys",
Type: "sysfs",
Source: "sysfs",
Options: []string{"nosuid", "noexec", "nodev", "ro"},
}
specgen.AddMount(sysMnt)
}
}
if privileged {
specgen.RemoveMount("/sys")
specgen.RemoveMount("/sys/fs/cgroup")
sysMnt := rspec.Mount{
Destination: "/sys",
Type: "sysfs",
Source: "sysfs",
Options: []string{"nosuid", "noexec", "nodev", "rw"},
}
specgen.AddMount(sysMnt)
cgroupMnt := rspec.Mount{
Destination: "/sys/fs/cgroup",
Type: "cgroup",
Source: "cgroup",
Options: []string{"nosuid", "noexec", "nodev", "rw", "relatime"},
}
specgen.AddMount(cgroupMnt)
}
containerImageConfig := containerInfo.Config
if containerImageConfig == nil {
err = fmt.Errorf("empty image config for %s", image)
return nil, err
}
processArgs, err := buildOCIProcessArgs(ctx, containerConfig, containerImageConfig)
if err != nil {
return nil, err
}
specgen.SetProcessArgs(processArgs)
if strings.Contains(processArgs[0], "/sbin/init") || (filepath.Base(processArgs[0]) == oci.SystemdCgroupsManager) {
setupSystemd(specgen.Mounts(), specgen)
}
// When running on cgroupv2, automatically add a cgroup namespace for not privileged containers.
if !privileged && cgroups.IsCgroup2UnifiedMode() {
if err := specgen.AddOrReplaceLinuxNamespace(string(rspec.CgroupNamespace), ""); err != nil {
return nil, err
}
}
for idx, ip := range sb.IPs() {
specgen.AddAnnotation(fmt.Sprintf("%s.%d", annotations.IP, idx), ip)
}
// Remove the default /dev/shm mount to ensure we overwrite it
specgen.RemoveMount("/dev/shm")
mnt := rspec.Mount{
Type: "bind",
Source: sb.ShmPath(),
Destination: "/dev/shm",
Options: []string{"rw", "bind"},
}
// bind mount the pod shm
specgen.AddMount(mnt)
options := []string{"rw"}
if readOnlyRootfs {
options = []string{"ro"}
}
if sb.ResolvPath() != "" {
if err := securityLabel(sb.ResolvPath(), mountLabel, false); err != nil {
return nil, err
}
mnt = rspec.Mount{
Type: "bind",
Source: sb.ResolvPath(),
Destination: "/etc/resolv.conf",
Options: []string{"bind", "nodev", "nosuid", "noexec"},
}
// bind mount the pod resolver file
specgen.AddMount(mnt)
}
if sb.HostnamePath() != "" {
if err := securityLabel(sb.HostnamePath(), mountLabel, false); err != nil {
return nil, err
}
mnt = rspec.Mount{
Type: "bind",
Source: sb.HostnamePath(),
Destination: "/etc/hostname",
Options: append(options, "bind"),
}
specgen.AddMount(mnt)
}
if !isInCRIMounts("/etc/hosts", containerConfig.GetMounts()) && hostNetwork(containerConfig) {
// Only bind mount for host netns and when CRI does not give us any hosts file
mnt = rspec.Mount{
Type: "bind",
Source: "/etc/hosts",
Destination: "/etc/hosts",
Options: append(options, "bind"),
}
specgen.AddMount(mnt)
}
if privileged {
setOCIBindMountsPrivileged(&specgen)
}
// Set hostname and add env for hostname
specgen.SetHostname(sb.Hostname())
specgen.AddProcessEnv("HOSTNAME", sb.Hostname())
specgen.AddAnnotation(annotations.Name, containerName)
specgen.AddAnnotation(annotations.ContainerID, containerID)
specgen.AddAnnotation(annotations.SandboxID, sb.ID())
specgen.AddAnnotation(annotations.SandboxName, sb.Name())
specgen.AddAnnotation(annotations.ContainerType, annotations.ContainerTypeContainer)
specgen.AddAnnotation(annotations.LogPath, logPath)
specgen.AddAnnotation(annotations.TTY, fmt.Sprintf("%v", containerConfig.Tty))
specgen.AddAnnotation(annotations.Stdin, fmt.Sprintf("%v", containerConfig.Stdin))
specgen.AddAnnotation(annotations.StdinOnce, fmt.Sprintf("%v", containerConfig.StdinOnce))
specgen.AddAnnotation(annotations.ResolvPath, sb.ResolvPath())
specgen.AddAnnotation(annotations.ContainerManager, lib.ContainerManagerCRIO)
created := time.Now()
specgen.AddAnnotation(annotations.Created, created.Format(time.RFC3339Nano))
metadataJSON, err := json.Marshal(metadata)
if err != nil {
return nil, err
}
specgen.AddAnnotation(annotations.Metadata, string(metadataJSON))
labelsJSON, err := json.Marshal(labels)
if err != nil {
return nil, err
}
specgen.AddAnnotation(annotations.Labels, string(labelsJSON))
kubeAnnotationsJSON, err := json.Marshal(kubeAnnotations)
if err != nil {
return nil, err
}
specgen.AddAnnotation(annotations.Annotations, string(kubeAnnotationsJSON))
spp := containerConfig.GetLinux().GetSecurityContext().GetSeccompProfilePath()
if !privileged {
if err := s.setupSeccomp(ctx, &specgen, spp); err != nil {
return nil, err
}
}
specgen.AddAnnotation(annotations.SeccompProfilePath, spp)
mountPoint, err := s.StorageRuntimeServer().StartContainer(containerID)
if err != nil {
return nil, fmt.Errorf("failed to mount container %s(%s): %v", containerName, containerID, err)
}
defer func() {
if errRet != nil {
log.Infof(ctx, "createCtrLinux: stopping storage container %s", containerID)
if err := s.StorageRuntimeServer().StopContainer(containerID); err != nil {
log.Warnf(ctx, "couldn't stop storage container: %v: %v", containerID, err)
}
}
}()
specgen.AddAnnotation(annotations.MountPoint, mountPoint)
if containerImageConfig.Config.StopSignal != "" {
// this key is defined in image-spec conversion document at https://github.com/opencontainers/image-spec/pull/492/files#diff-8aafbe2c3690162540381b8cdb157112R57
specgen.AddAnnotation("org.opencontainers.image.stopSignal", containerImageConfig.Config.StopSignal)
}
// First add any configured environment variables from crio config.
// They will get overridden if specified in the image or container config.
specgen.AddMultipleProcessEnv(s.Config().DefaultEnv)
	// Add environment variables from the image and the CRI configuration
envs := mergeEnvs(containerImageConfig, containerConfig.GetEnvs())
for _, e := range envs {
parts := strings.SplitN(e, "=", 2)
specgen.AddProcessEnv(parts[0], parts[1])
}
// Setup user and groups
if linux != nil {
if err := setupContainerUser(ctx, &specgen, mountPoint, mountLabel, containerInfo.RunDir, linux.GetSecurityContext(), containerImageConfig); err != nil {
return nil, err
}
}
// Add image volumes
volumeMounts, err := addImageVolumes(ctx, mountPoint, s, &containerInfo, mountLabel, &specgen)
if err != nil {
return nil, err
}
// Set working directory
// Pick it up from image config first and override if specified in CRI
containerCwd := "/"
imageCwd := containerImageConfig.Config.WorkingDir
if imageCwd != "" {
containerCwd = imageCwd
}
runtimeCwd := containerConfig.WorkingDir
if runtimeCwd != "" {
containerCwd = runtimeCwd
}
specgen.SetProcessCwd(containerCwd)
if err := setupWorkingDirectory(mountPoint, mountLabel, containerCwd); err != nil {
return nil, err
}
var secretMounts []rspec.Mount
if len(s.config.DefaultMounts) > 0 {
// This option has been deprecated, once it is removed in the later versions, delete the server/secrets.go file as well
log.Warnf(ctx, "--default-mounts has been deprecated and will be removed in future versions. Add mounts to either %q or %q", secrets.DefaultMountsFile, secrets.OverrideMountsFile)
var err error
secretMounts, err = addSecretsBindMounts(ctx, mountLabel, containerInfo.RunDir, s.config.DefaultMounts, specgen)
if err != nil {
return nil, fmt.Errorf("failed to mount secrets: %v", err)
}
}
// Check for FIPS_DISABLE label in the pod config
disableFips := false
if value, ok := sandboxConfig.GetLabels()["FIPS_DISABLE"]; ok && value == "true" {
disableFips = true
}
// Add secrets from the default and override mounts.conf files
secretMounts = append(secretMounts, secrets.SecretMounts(mountLabel, containerInfo.RunDir, s.config.DefaultMountsFile, rootless.IsRootless(), disableFips)...)
mounts := []rspec.Mount{}
mounts = append(mounts, ociMounts...)
mounts = append(mounts, volumeMounts...)
mounts = append(mounts, secretMounts...)
sort.Sort(orderedMounts(mounts))
for _, m := range mounts {
mnt = rspec.Mount{
Type: "bind",
Source: m.Source,
Destination: m.Destination,
Options: append(m.Options, "bind"),
}
specgen.AddMount(mnt)
}
newAnnotations := map[string]string{}
for key, value := range containerConfig.GetAnnotations() {
newAnnotations[key] = value
}
for key, value := range sb.Annotations() {
newAnnotations[key] = value
}
if s.ContainerServer.Hooks != nil {
if _, err := s.ContainerServer.Hooks.Hooks(specgen.Config, newAnnotations, len(containerConfig.GetMounts()) > 0); err != nil {
return nil, err
}
}
// Set up pids limit if pids cgroup is mounted
if findCgroupMountpoint("pids") == nil {
specgen.SetLinuxResourcesPidsLimit(s.config.PidsLimit)
}
// by default, the root path is an empty string. set it now.
specgen.SetRootPath(mountPoint)
crioAnnotations := specgen.Config.Annotations
container, err := oci.NewContainer(containerID, containerName, containerInfo.RunDir, logPath, labels, crioAnnotations, kubeAnnotations, image, imageName, imageRef, metadata, sb.ID(), containerConfig.Tty, containerConfig.Stdin, containerConfig.StdinOnce, sb.Privileged(), sb.RuntimeHandler(), containerInfo.Dir, created, containerImageConfig.Config.StopSignal)
if err != nil {
return nil, err
}
container.SetIDMappings(containerIDMappings)
if s.defaultIDMappings != nil && !s.defaultIDMappings.Empty() {
for _, uidmap := range s.defaultIDMappings.UIDs() {
specgen.AddLinuxUIDMapping(uint32(uidmap.HostID), uint32(uidmap.ContainerID), uint32(uidmap.Size))
}
for _, gidmap := range s.defaultIDMappings.GIDs() {
specgen.AddLinuxGIDMapping(uint32(gidmap.HostID), uint32(gidmap.ContainerID), uint32(gidmap.Size))
}
} else if err := specgen.RemoveLinuxNamespace(string(rspec.UserNamespace)); err != nil {
return nil, err
}
if os.Getenv("_CRIO_ROOTLESS") != "" {
makeOCIConfigurationRootless(&specgen)
}
saveOptions := generate.ExportOptions{}
if err := specgen.SaveToFile(filepath.Join(containerInfo.Dir, "config.json"), saveOptions); err != nil {
return nil, err
}
if err := specgen.SaveToFile(filepath.Join(containerInfo.RunDir, "config.json"), saveOptions); err != nil {
return nil, err
}
container.SetSpec(specgen.Config)
container.SetMountPoint(mountPoint)
container.SetSeccompProfilePath(spp)
for _, cv := range containerVolumes {
container.AddVolume(cv)
}
return container, nil
}
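// setupWorkingDirectory creates the container's working directory under the
// rootfs using a symlink-safe join and applies the mount label to it.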
func setupWorkingDirectory(rootfs, mountLabel, containerCwd string) error {
fp, err := securejoin.SecureJoin(rootfs, containerCwd)
if err != nil {
return err
}
if err := os.MkdirAll(fp, 0755); err != nil {
return err
}
if mountLabel != "" {
if err1 := securityLabel(fp, mountLabel, false); err1 != nil {
return err1
}
}
return nil
}
func setOCIBindMountsPrivileged(g *generate.Generator) {
spec := g.Config
// clear readonly for /sys and cgroup
for i := range spec.Mounts {
clearReadOnly(&spec.Mounts[i])
}
spec.Linux.ReadonlyPaths = nil
spec.Linux.MaskedPaths = nil
}
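// clearReadOnly rewrites a mount's options so the mount is read-write, leaving
// all other options untouched.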
func clearReadOnly(m *rspec.Mount) {
var opt []string
for _, o := range m.Options {
if o == "rw" {
return
} else if o != "ro" {
opt = append(opt, o)
}
}
m.Options = opt
m.Options = append(m.Options, "rw")
}
func addOCIBindMounts(ctx context.Context, mountLabel string, containerConfig *pb.ContainerConfig, specgen *generate.Generator, bindMountPrefix string) ([]oci.ContainerVolume, []rspec.Mount, error) {
volumes := []oci.ContainerVolume{}
ociMounts := []rspec.Mount{}
mounts := containerConfig.GetMounts()
// Sort mounts in number of parts. This ensures that high level mounts don't
// shadow other mounts.
sort.Sort(criOrderedMounts(mounts))
// Copy all mounts from default mounts, except for
// - mounts overridden by supplied mount;
// - all mounts under /dev if a supplied /dev is present.
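	// (for example, a CRI-supplied mount at /dev also hides default mounts
	// such as /dev/pts and /dev/shm)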
mountSet := make(map[string]struct{})
for _, m := range mounts {
mountSet[filepath.Clean(m.ContainerPath)] = struct{}{}
}
defaultMounts := specgen.Mounts()
specgen.ClearMounts()
for _, m := range defaultMounts {
dst := filepath.Clean(m.Destination)
if _, ok := mountSet[dst]; ok {
// filter out mount overridden by a supplied mount
continue
}
if _, mountDev := mountSet["/dev"]; mountDev && strings.HasPrefix(dst, "/dev/") {
// filter out everything under /dev if /dev is a supplied mount
continue
}
if _, mountSys := mountSet["/sys"]; mountSys && strings.HasPrefix(dst, "/sys/") {
// filter out everything under /sys if /sys is a supplied mount
continue
}
specgen.AddMount(m)
}
for _, m := range mounts {
dest := m.GetContainerPath()
if dest == "" {
return nil, nil, fmt.Errorf("mount.ContainerPath is empty")
}
if m.HostPath == "" {
return nil, nil, fmt.Errorf("mount.HostPath is empty")
}
src := filepath.Join(bindMountPrefix, m.GetHostPath())
resolvedSrc, err := resolveSymbolicLink(src, bindMountPrefix)
if err == nil {
src = resolvedSrc
} else {
if !os.IsNotExist(err) {
return nil, nil, fmt.Errorf("failed to resolve symlink %q: %v", src, err)
} else if err = os.MkdirAll(src, 0755); err != nil {
return nil, nil, fmt.Errorf("failed to mkdir %s: %s", src, err)
}
}
options := []string{"rw"}
if m.Readonly {
options = []string{"ro"}
}
options = append(options, "rbind")
// mount propagation
mountInfos, err := mount.GetMounts()
if err != nil {
return nil, nil, err
}
switch m.GetPropagation() {
case pb.MountPropagation_PROPAGATION_PRIVATE:
options = append(options, "rprivate")
// Since default root propagation in runc is rprivate ignore
// setting the root propagation
case pb.MountPropagation_PROPAGATION_BIDIRECTIONAL:
if err := ensureShared(src, mountInfos); err != nil {
return nil, nil, err
}
options = append(options, "rshared")
if err := specgen.SetLinuxRootPropagation("rshared"); err != nil {
return nil, nil, err
}
case pb.MountPropagation_PROPAGATION_HOST_TO_CONTAINER:
if err := ensureSharedOrSlave(src, mountInfos); err != nil {
return nil, nil, err
}
options = append(options, "rslave")
if specgen.Config.Linux.RootfsPropagation != "rshared" &&
specgen.Config.Linux.RootfsPropagation != "rslave" {
if err := specgen.SetLinuxRootPropagation("rslave"); err != nil {
return nil, nil, err
}
}
default:
log.Warnf(ctx, "unknown propagation mode for hostPath %q", m.HostPath)
options = append(options, "rprivate")
}
if m.SelinuxRelabel {
if err := securityLabel(src, mountLabel, false); err != nil {
return nil, nil, err
}
}
volumes = append(volumes, oci.ContainerVolume{
ContainerPath: dest,
HostPath: src,
Readonly: m.Readonly,
})
ociMounts = append(ociMounts, rspec.Mount{
Source: src,
Destination: dest,
Options: options,
})
}
if _, mountSys := mountSet["/sys"]; !mountSys {
m := rspec.Mount{
Destination: "/sys/fs/cgroup",
Type: "cgroup",
Source: "cgroup",
Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"},
}
specgen.AddMount(m)
}
return volumes, ociMounts, nil
}
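// getDevicesFromConfig converts each configured additional device into an OCI
// device node plus a matching cgroup allow rule.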
func getDevicesFromConfig(ctx context.Context, config *libconfig.Config) ([]configDevice, error) {
linuxdevs := make([]configDevice, 0, len(config.RuntimeConfig.AdditionalDevices))
for _, d := range config.RuntimeConfig.AdditionalDevices {
src, dst, permissions, err := createconfig.ParseDevice(d)
if err != nil {
return nil, err
}
log.Debugf(ctx, "adding device src=%s dst=%s mode=%s", src, dst, permissions)
dev, err := devices.DeviceFromPath(src, permissions)
if err != nil {
return nil, errors.Wrapf(err, "%s is not a valid device", src)
}
dev.Path = dst
linuxdevs = append(linuxdevs,
configDevice{
Device: rspec.LinuxDevice{
Path: dev.Path,
Type: string(dev.Type),
Major: dev.Major,
Minor: dev.Minor,
FileMode: &dev.FileMode,
UID: &dev.Uid,
GID: &dev.Gid,
},
Resource: rspec.LinuxDeviceCgroup{
Allow: true,
Type: string(dev.Type),
Major: &dev.Major,
Minor: &dev.Minor,
Access: permissions,
},
})
}
return linuxdevs, nil
}
// mountExists returns true if dest exists in the list of mounts
func mountExists(specMounts []rspec.Mount, dest string) bool {
for _, m := range specMounts {
if m.Destination == dest {
return true
}
}
return false
}
// systemd expects to have /run, /run/lock and /tmp on tmpfs
// It also expects to be able to write to /sys/fs/cgroup/systemd and /var/log/journal
func setupSystemd(mounts []rspec.Mount, g generate.Generator) {
options := []string{"rw", "rprivate", "noexec", "nosuid", "nodev"}
for _, dest := range []string{"/run", "/run/lock"} {
if mountExists(mounts, dest) {
continue
}
tmpfsMnt := rspec.Mount{
Destination: dest,
Type: "tmpfs",
Source: "tmpfs",
Options: append(options, "tmpcopyup", "size=65536k"),
}
g.AddMount(tmpfsMnt)
}
for _, dest := range []string{"/tmp", "/var/log/journal"} {
if mountExists(mounts, dest) {
continue
}
tmpfsMnt := rspec.Mount{
Destination: dest,
Type: "tmpfs",
Source: "tmpfs",
Options: append(options, "tmpcopyup"),
}
g.AddMount(tmpfsMnt)
}
if cgroups.IsCgroup2UnifiedMode() {
g.RemoveMount("/sys/fs/cgroup")
systemdMnt := rspec.Mount{
Destination: "/sys/fs/cgroup",
Type: "cgroup",
Source: "cgroup",
Options: []string{"private", "rw"},
}
g.AddMount(systemdMnt)
} else {
systemdMnt := rspec.Mount{
Destination: "/sys/fs/cgroup/systemd",
Type: "bind",
Source: "/sys/fs/cgroup/systemd",
Options: []string{"bind", "nodev", "noexec", "nosuid"},
}
g.AddMount(systemdMnt)
g.AddLinuxMaskedPaths("/sys/fs/cgroup/systemd/release_agent")
}
}
| [
"\"_CRIO_ROOTLESS\""
] | [] | [
"_CRIO_ROOTLESS"
] | [] | ["_CRIO_ROOTLESS"] | go | 1 | 0 | |
step-functions/sfn-sqs/lambdas/sfn_trigger.py | import json
import boto3
import os
sfn = boto3.client('stepfunctions', region_name='us-east-1')
def handler(event, context):
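    # Lambda entry point: forward the first SQS record to a new Step Functions execution.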
state_machine_arn = os.getenv('STATE_MACHINE_ARN')
    print('SFN Trigger received event: ' + json.dumps(event, indent=2))
# for the test case assume only a single message here
msg = event['Records'][0]
new_event = {
'parameters': {
'event': msg['body']
},
'eventSourceARN': msg['eventSourceARN']
}
response = sfn.start_execution(
stateMachineArn=state_machine_arn,
input=json.dumps(new_event)
)
print(f"SFN Execution ARN {response.get('executionArn')}")
print(f"SFN Execution Message {json.dumps(new_event, indent=2)}")
return event
| [] | [] | [
"STATE_MACHINE_ARN"
] | [] | ["STATE_MACHINE_ARN"] | python | 1 | 0 | |
config/redis.go | package config
import (
"os"
"gopkg.in/redis.v5"
)
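// InitRedis builds a Redis client from the REDIS_ADDR and REDIS_PASSWORD
// environment variables, using database 0. The connection is not verified
// here; a caller might sanity-check it, for example:
//
//	if err := InitRedis().Ping().Err(); err != nil {
//		log.Fatal(err)
//	}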
func InitRedis() *redis.Client {
client := redis.NewClient(&redis.Options{
Addr: os.Getenv("REDIS_ADDR"),
Password: os.Getenv("REDIS_PASSWORD"),
DB: 0,
})
return client
}
| [
"\"REDIS_ADDR\"",
"\"REDIS_PASSWORD\""
] | [] | [
"REDIS_PASSWORD",
"REDIS_ADDR"
] | [] | ["REDIS_PASSWORD", "REDIS_ADDR"] | go | 2 | 0 | |
build/releases/release-0.577/src/lisp-itr.py | # -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp-itr.py
#
# This file performs LISP Ingress Tunnel Router (ITR) functionality.
#
# -----------------------------------------------------------------------------
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
import lisp
import lispconfig
import socket
import select
import threading
import time
import os
from subprocess import getoutput
import struct
#------------------------------------------------------------------------------
#
# Global data structures relative to the lisp-itr process.
#
lisp_send_sockets = [None, None, None]
lisp_ipc_listen_socket = None
lisp_ipc_punt_socket = None
lisp_ephem_listen_socket = None
lisp_ephem_nat_socket = None
lisp_rloc_probe_socket = None
lisp_ephem_port = lisp.lisp_get_ephemeral_port()
lisp_ephem_nat_port = lisp.lisp_get_ephemeral_port()
lisp_raw_socket = None
lisp_raw_v6_socket = None
lisp_periodic_timer = None
lisp_itr_info_timer = None
#
# This is for testing sending from one local EID-prefix to another EID-prefix
# on the same system. Rather than natively forwarding a packet, the mapping
# system is used.
#
lisp_xtr_loopback = False
#
# Used to start pcap threads concurrently.
#
lisp_pcap_lock = threading.Lock()
#------------------------------------------------------------------------------
#
# lisp_itr_show_command
#
# Display state in an ITR.
#
def lisp_itr_show_command(parameter):
return(lispconfig.lisp_itr_rtr_show_command(parameter, "ITR", []))
#enddef
#
# lisp_itr_show_keys_command
#
# Call lispconfig.lisp_show_crypto_list().
#
def lisp_itr_show_keys_command(parameter):
return(lispconfig.lisp_show_crypto_list("ITR"))
#enddef
#
# lisp_itr_show_rloc_probe_command
#
# Display RLOC-probe list state in an ITR.
#
def lisp_itr_show_rloc_probe_command(parameter):
return(lispconfig.lisp_itr_rtr_show_rloc_probe_command("ITR"))
#enddef
#
# lisp_itr_process_timer
#
# This is the ITR's 60-second periodic timer routine. We typically use it
# to time-out map-cache entries. But the one case where we are acting as
# a L2-overlay ITR, we will send Map-Requests to retrieve the broadcast
# entry so we have the latest replication-list before we need it.
#
def lisp_itr_process_timer(lisp_sockets, lisp_ephem_port):
lisp.lisp_set_exception()
#
# Remove nonce entries from crypto-list.
#
for keys in list(lisp.lisp_crypto_keys_by_nonce.values()):
for key in keys: del(key)
#endfor
lisp.lisp_crypto_keys_by_nonce = {}
#
# If doing L2-overlays, get map-cache entry from (0000-0000-0000/0,
# ffff-ffff-ffff/48).
#
if (lisp.lisp_l2_overlay):
afi = lisp.LISP_AFI_MAC
iid = lisp.lisp_default_iid
s = lisp.lisp_address(afi, "0000-0000-0000", 0, iid)
s.mask_len = 0
d = lisp.lisp_address(afi, "ffff-ffff-ffff", 48, iid)
lisp.lisp_send_map_request(lisp_sockets, lisp_ephem_port, s, d, None)
#endif
#
# Timeout Map-Cache entries.
#
lisp.lisp_timeout_map_cache(lisp.lisp_map_cache)
#
# Restart periodic timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_itr_process_timer,
[lisp_sockets, lisp_ephem_port])
lisp_periodic_timer.start()
return
#enddef
#
# lisp_itr_timeout_dynamic_eids
#
# Check to see if dynamic-EIDs have stopped sending data. If so, remove the
# state and stop registering them.
#
def lisp_itr_timeout_dynamic_eids(lisp_socket):
lisp.lisp_set_exception()
now = lisp.lisp_get_timestamp()
for db in lisp.lisp_db_list:
if (db.dynamic_eid_configured() == False): continue
delete_list = []
for dyn_eid in list(db.dynamic_eids.values()):
ts = dyn_eid.last_packet
if (ts == None): continue
if (ts + dyn_eid.timeout > now): continue
#
# Check hardware if dyn-EID has had packets SENT to. We want the
# opposite but this is all we get from Arista.
#
if (lisp.lisp_program_hardware):
prefix = dyn_eid.dynamic_eid.print_prefix_no_iid()
if (lisp.lisp_arista_is_alive(prefix)):
lisp.lprint(("Hardware indicates dynamic-EID {} " + \
"still active").format(lisp.green(prefix, False)))
continue
#endif
#endif
#
# Tell ETR process so it can register dynamic-EID.
#
eid_str = dyn_eid.dynamic_eid.print_address()
ipc = "learn%{}%None".format(eid_str)
ipc = lisp.lisp_command_ipc(ipc, "lisp-itr")
lisp.lisp_ipc(ipc, lisp_socket, "lisp-etr")
lisp.lprint("Dynamic-EID {}".format( \
lisp.bold(lisp.green(eid_str, False) + " activity timeout",
False)))
delete_list.append(eid_str)
#endfor
#
# Remove the timed out entries from db.dynamic_eids{}.
#
for eid_str in delete_list: db.dynamic_eids.pop(eid_str)
#endfor
#
# Restart periodic timer.
#
threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
lisp_itr_timeout_dynamic_eids, [lisp_socket]).start()
return
#enddef
#
# lisp_get_active_interfaces
#
# Get interfaces that are plugged in. Including loopback interfaces.
#
# We need to test these 3 types of lines from "ifconfig" output:
#
# aten2 Link encap:Ethernet HWaddr 00:1F:A0:07:0C:04
# eth7: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
# en0: flags=8863<UP,BROADCAST,SMART,RUNNING,SIMPLEX,MULTICAST> mtu 1500
#
def lisp_get_active_interfaces():
#
    # Choose only actively connected physical interfaces, plus loopback. This
    # is needed for a roaming Mac that does telemetry measurements and wants to
    # connect to an ethernet. Each dongle vendor comes in with a different
# interface/device name.
#
if (lisp.lisp_is_macos()):
lines = getoutput("netstat -rn | egrep default | egrep UGS")
interfaces = ["lo0"]
for line in lines.split("\n"):
intf = line.split()[-1]
interfaces.append(intf)
#endfor
return(interfaces)
#endif
#
# Linux distributions have different ifconfig output format.
#
gs = "Link encap"
interfaces = getoutput("ifconfig | egrep '{}'".format(gs))
if (interfaces == ""):
gs = ": flags="
interfaces = getoutput("ifconfig | egrep '{}'".format(gs))
#endif
interfaces = interfaces.split("\n")
return_interfaces = []
for interface in interfaces:
ifname = interface.split(gs)[0].replace(" ", "")
return_interfaces.append(ifname)
#endfor
return(return_interfaces)
#enddef
#
# lisp_itr_startup
#
# Initialize this LISP ITR process. This function returns no values.
#
def lisp_itr_startup():
global lisp_send_sockets
global lisp_ipc_listen_socket
global lisp_ipc_punt_socket
global lisp_ephem_listen_socket
global lisp_ephem_nat_socket
global lisp_raw_socket, lisp_raw_v6_socket
global lisp_rloc_probe_socket
lisp.lisp_i_am("itr")
lisp.lisp_set_exception()
lisp.lisp_print_banner("ITR starting up")
#
# Get local address for source RLOC for encapsulation.
#
lisp.lisp_get_local_interfaces()
lisp.lisp_get_local_macs()
if (lisp.lisp_get_local_addresses() == False): return(False)
#
# Open send socket.
#
lisp_send_sockets[0] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
lisp_send_sockets[1] = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV6)
lisp_ipc_listen_socket = lisp.lisp_open_listen_socket("", "lisp-itr")
lisp_ipc_punt_socket = lisp.lisp_open_listen_socket("", "lispers.net-itr")
lisp_send_sockets[2] = lisp_ipc_listen_socket
address = "0.0.0.0" if lisp.lisp_is_raspbian() else "0::0"
lisp_ephem_listen_socket = lisp.lisp_open_listen_socket(address,
str(lisp_ephem_port))
#
    # Set multicast TTL to LISP_RLOC_PROBE_TTL so we can send RLOC-probes
# to multicast RLOCs.
#
try:
s = lisp.lisp_open_send_socket("", lisp.LISP_AFI_IPV4)
ttl = lisp.LISP_RLOC_PROBE_TTL
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
lisp_rloc_probe_socket = s
except socket.error as e:
lisp.lprint("socket.setsockopt() failed for RLOC-probe ttl: {}". \
format(e))
#endtry
#
    # Used for listening for Info-Replies for NAT-traversal support.
#
lisp_ephem_nat_socket = lisp.lisp_open_listen_socket("0.0.0.0",
str(lisp_ephem_nat_port))
#
# Open up raw socket so we can send with IP headers after decapsulation.
#
lisp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.IPPROTO_RAW)
lisp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
if (lisp.lisp_is_raspbian() == False):
lisp_raw_v6_socket = socket.socket(socket.AF_INET6, socket.SOCK_RAW,
socket.IPPROTO_UDP)
#endif
#
# This is used by the ITR to send RTR status change information to the
# ETR. Since RLOC-probing runs inside the lisp library, when state changes
# occur, an IPC will have to be sent from the timer thread. This is the
# only use-case for lisp.lisp_ipc_socket.
#
lisp.lisp_ipc_socket = lisp_ipc_listen_socket
#
    # Start thread that waits for database-mapping config and sets up packet capture.
#
threading.Thread(target=lisp_itr_get_capture_info).start()
#
# Load map-cache from checkpoint file before we start writing to it.
#
lisp.lisp_load_checkpoint()
#
# Should we load-split pings?
#
lisp.lisp_load_split_pings = (os.getenv("LISP_LOAD_SPLIT_PINGS") != None)
#
# Start map-cache timeout timer.
#
lisp_periodic_timer = threading.Timer(60, lisp_itr_process_timer,
[lisp_send_sockets, lisp_ephem_port])
lisp_periodic_timer.start()
#
# Start dynamic-EID timeout timer.
#
threading.Timer(lisp.LISP_DEFAULT_DYN_EID_TIMEOUT,
lisp_itr_timeout_dynamic_eids, [lisp_ipc_listen_socket]).start()
return(True)
#enddef
#
# lisp_itr_count_eid_prefixes
#
# Count the number of "prefix" sub-commands inside of each "lisp database-
# mapping" command.
#
def lisp_itr_count_eid_prefixes():
f = open("./lisp.config", "r")
within = False
count = 0
for line in f:
if (line == "lisp database-mapping {\n"): within = True
if (line == "}\n"): within = False
if (within == False): continue
if (line[0] == " " and line.find("prefix {") != -1): count += 1
#endif
f.close()
return(count)
#enddef
#
# lisp_itr_get_local_eid_prefixes
#
# Check the number of "lisp database-mapping" commands we will process. Wait
# for them to be processed and only return when all are processed.
#
# Return array of static EID-prefixes and an array of dynamic EID-prefixes.
#
def lisp_itr_get_local_eid_prefixes():
#
# Count the number of "prefix" sub-commands within a "lisp database-
# mapping" command clause in the lisp.config file.
#
count = lisp_itr_count_eid_prefixes()
#
    # Does the user want us to wait longer than a second to check whether
    # commands are done? If the CPU is going to be busy during startup, the
    # wait-time should be made longer.
#
wait_time = os.getenv("LISP_ITR_WAIT_TIME")
wait_time = 1 if (wait_time == None) else int(wait_time)
#
# Wait for database-mapping commands to execute. We need to retrieve
# EID-prefixes we need to listen on.
#
while (count != len(lisp.lisp_db_list)):
lisp.lprint(("Waiting {} second(s) for {} database-mapping EID-" + \
"prefixes, {} processed so far ...").format(wait_time, count,
len(lisp.lisp_db_list)))
time.sleep(wait_time)
#endwhile
#
# Return each IPv4, IPv6, or MAC EIDs. These are the ones we need to
# pass to pcap.
#
sources = []
dyn_eids = []
for db in lisp.lisp_db_list:
if (db.group.is_null() == False): continue
if (db.eid.is_ipv4() or db.eid.is_ipv6() or db.eid.is_mac()):
eid_str = db.eid.print_prefix_no_iid()
if (db.dynamic_eid_configured()): dyn_eids.append(eid_str)
sources.append(eid_str)
#endif
#endfor
return(sources, dyn_eids)
#enddef
#
# lisp_itr_get_capture_info
#
# Thread to wait for database-mapping commands to finish processing so we can
# get local EID-prefixes to be source filters for packet capture.
#
def lisp_itr_get_capture_info():
global lisp_pcap_lock
lisp.lisp_set_exception()
#
# Wait for database-mapping commands to execute. We need to retrieve
# EID-prefixes we need to listen on.
#
sources, dyn_eids = lisp_itr_get_local_eid_prefixes()
#
# If "ipc-data-plane = yes" is configured, we do not need to do any
# data-plane forwarding. There is another module running with the
# lispers.net control-plane that is doing data-plane forwarding. We'll
# get punts via the lispers.net-itr named socket. But we do have to
# packet capture RLOC-probe replies. Also capture multicast Map-Register
# messages for LISP-Decent.
#
cp_pfilter = None
if (lisp.lisp_ipc_data_plane):
lisp.lprint(lisp.bold("Data-plane packet capture disabled", False))
cp_pfilter = "(udp src port 4342 and ip[28] == 0x28)" + \
" or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
lisp.lprint("Control-plane capture: '{}'".format(cp_pfilter))
else:
lisp.lprint("Capturing packets for source-EIDs {}".format( \
lisp.green(str(sources), False)))
#endif
if (lisp.lisp_pitr): lisp.lprint("Configured for PITR functionality")
#
    # We want the kernel to handle any packets whose source AND destination
    # match local EID-prefixes for the site. In any other case, we want
# the pcap filters to get the packet to this lisp-itr process.
#
l2_overlay = lisp.lisp_l2_overlay
if (l2_overlay == False):
if (lisp.lisp_is_linux()): lisp_itr_kernel_filter(sources, dyn_eids)
#endif
#
# Build packet capture filter so we get packets for configured source EID-
# prefixes.
#
if (cp_pfilter == None):
if (lisp.lisp_pitr):
pfilter = lisp_itr_build_pcap_filter(sources, [], False, True)
else:
pfilter = lisp_itr_build_pcap_filter(sources, dyn_eids, l2_overlay,
False)
#endif
else:
pfilter = cp_pfilter
#endif
#
# User can select which interfaces to pcap on.
#
interfaces = lisp_get_active_interfaces()
pcap_list = os.getenv("LISP_PCAP_LIST")
lisp.lprint("User pcap-list: {}, active-interfaces: {}".format(pcap_list,
interfaces))
if (pcap_list == None):
us = ""
rloc_interfaces = []
else:
eid_interfaces = list(set(pcap_list.split()) & set(interfaces))
rloc_interfaces = list(set(pcap_list.split()) ^ set(interfaces))
us = "user-selected "
interfaces = eid_interfaces
#endif
#
# Start a pcap thread so we can receive packets from applications on this
# system. But make sure the device is up on A10 devices. If ethernet MAC
# capturing, do not listen on non ethernet interfaces.
#
mac_capturing = (pfilter.find("ether host") != -1)
for device in interfaces:
if (device in ["lo", "lispers.net"] and mac_capturing):
lisp.lprint(("Capturing suppressed on interface {}, " + \
"MAC filters configured").format(device))
continue
#endif
args = [device, pfilter, lisp_pcap_lock]
lisp.lprint("Capturing packets on {}interface {}".format(us, device))
threading.Thread(target=lisp_itr_pcap_thread, args=args).start()
#endfor
if (cp_pfilter): return
#
# Start a pcap thread so we can receive RLOC-probe Map-Replies packets on
# RLOC interfaces. This is only called when LISP_PCAP_LIST is set.
#
probe_pfilter = "(udp src port 4342 and ip[28] == 0x28)"
for device in rloc_interfaces:
args = [device, probe_pfilter, lisp_pcap_lock]
lisp.lprint("Capture RLOC-probe replies on RLOC interface {}".format( \
device))
threading.Thread(target=lisp_itr_pcap_thread, args=args).start()
#endfor
return
#enddef
#
# lisp_itr_shutdown
#
# Shut down this process.
#
def lisp_itr_shutdown():
#
# Cancel periodic Info timer threads.
#
if (lisp_itr_info_timer): lisp_itr_info_timer.cancel()
#
# Close sockets.
#
lisp.lisp_close_socket(lisp_send_sockets[0], "")
lisp.lisp_close_socket(lisp_send_sockets[1], "")
lisp.lisp_close_socket(lisp_ephem_listen_socket, "")
lisp.lisp_close_socket(lisp_rloc_probe_socket, "")
lisp.lisp_close_socket(lisp_ephem_nat_socket, "")
lisp.lisp_close_socket(lisp_ipc_listen_socket, "lisp-itr")
lisp.lisp_close_socket(lisp_ipc_punt_socket, "lispers.net-itr")
return
#enddef
#
# lisp_itr_data_plane
#
# Do map-cache lookup and encapsulate packet.
#
def lisp_itr_data_plane(packet, device, input_interface, macs, my_sa):
global lisp_send_sockets
global lisp_ephem_port
global lisp_raw_socket, lisp_raw_v6_socket
global lisp_ipc_listen_socket
#
# Check RLOC-probe Map-Reply. We need to grab the TTL from IP header.
#
orig_packet = packet
packet, source, port, ttl = lisp.lisp_is_rloc_probe(packet, 1)
if (orig_packet != packet):
if (source == None): return
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port, ttl)
return
#endif
packet = lisp.lisp_packet(packet)
if (packet.decode(False, None, None) == None): return
#
    # For locally sourced packets from this system, the MAC address may
    # be the default router's. Check the source to see if it is assigned to this system,
# and if so, accept on interface "device".
#
if (my_sa): input_interface = device
#
# Get instance-ID for incoming interface.
#
source_eid = packet.inner_source
iid = lisp.lisp_get_interface_instance_id(input_interface, source_eid)
packet.inner_dest.instance_id = iid
packet.inner_source.instance_id = iid
#
# Print some useful header fields and strip outer headers..
#
if (macs != ""): macs = ", MACs: " + macs + ","
packet.print_packet("Receive {}{}".format(device, macs), False)
#
# Drop packet if input interface not found based on MAC address used.
#
if (device != input_interface and device != "lispers.net"):
lisp.dprint("Not our MAC address on interface {}, pcap interface {}". \
format(input_interface, device))
return
#endif
lisp_decent = lisp.lisp_decent_push_configured
if (lisp_decent):
multicast = packet.inner_dest.is_multicast_address()
local = packet.inner_source.is_local()
lisp_decent = (local and multicast)
#endif
if (lisp_decent == False):
#
# Only forward packets from source-EIDs.
#
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_source, False)
if (db == None):
lisp.dprint("Packet received from non-EID source")
return
#endif
#
# Check to see if we are doing dynamic-EID discovery.
#
if (db.dynamic_eid_configured()):
i = lisp.lisp_allow_dynamic_eid(input_interface,
packet.inner_source)
if (i):
lisp.lisp_itr_discover_eid(db, packet.inner_source,
input_interface, i, lisp_ipc_listen_socket)
else:
e = lisp.green(packet.inner_source.print_address(), False)
lisp.dprint("Disallow dynamic-EID {} on interface {}".format(e,
input_interface))
return
#endif
#endif
if (packet.inner_source.is_local() and
packet.udp_dport == lisp.LISP_CTRL_PORT): return
#endif
#
# Do input processing for currently supported packet types..
#
igmp = False
if (packet.inner_version == 4):
igmp, packet.packet = lisp.lisp_ipv4_input(packet.packet)
if (packet.packet == None): return
packet.inner_ttl -= 1
elif (packet.inner_version == 6):
packet.packet = lisp.lisp_ipv6_input(packet)
if (packet.packet == None): return
packet.inner_ttl -= 1
else:
packet.packet = lisp.lisp_mac_input(packet.packet)
if (packet.packet == None): return
packet.encap_port = lisp.LISP_L2_DATA_PORT
#endif
#
# First check if destination is to any local EID-prefixes from database-
# mapping commands. In this case, we need to natively forward.
#
if (lisp_xtr_loopback == False):
db = lisp.lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db and db.dynamic_eid_configured() == False):
lisp.dprint(("Packet destined to local EID-prefix {}, " + \
"natively forwarding").format(db.print_eid_tuple()))
packet.send_packet(lisp_raw_socket, packet.inner_dest)
return
#endif
#endif
#
# Do map-cache lookup.
#
mc = lisp.lisp_map_cache_lookup(packet.inner_source, packet.inner_dest)
if (mc): mc.add_recent_source(packet.inner_source)
#
# If "secondary-iid" is configured, we want to check the secondary
    # map-cache if a lookup miss occurred in the default IID for this source
# EID-prefix. If destination EID found in secondary map-cache, use it.
# Otherwise, send Map-Request for EID in default IID.
#
secondary_iid = db.secondary_iid if (db != None) else None
if (secondary_iid and mc and mc.action == lisp.LISP_NATIVE_FORWARD_ACTION):
dest_eid = packet.inner_dest
dest_eid.instance_id = secondary_iid
mc = lisp.lisp_map_cache_lookup(packet.inner_source, dest_eid)
if (mc): mc.add_recent_source(packet.inner_source)
#endif
#
# Map-cache lookup miss.
#
if (mc == None or lisp.lisp_mr_or_pubsub(mc.action)):
if (lisp.lisp_rate_limit_map_request(packet.inner_dest)): return
pubsub = (mc and mc.action == lisp.LISP_SEND_PUBSUB_ACTION)
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None, pubsub)
if (packet.is_trace()):
lisp.lisp_trace_append(packet, reason="map-cache miss")
#endif
return
#endif
#
# Send Map-Request to see if there is an RLOC change or to refresh an
# entry that is about to time out.
#
if (mc and mc.is_active() and mc.has_ttl_elapsed()):
if (lisp.lisp_rate_limit_map_request(packet.inner_dest) == False):
lisp.lprint("Refresh map-cache entry {}".format( \
lisp.green(mc.print_eid_tuple(), False)))
lisp.lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
packet.inner_source, packet.inner_dest, None)
#endif
#endif
#
# Update stats for entry. Stats per RLOC is done in lisp_mapping.select_
# rloc().
#
mc.last_refresh_time = time.time()
mc.stats.increment(len(packet.packet))
#
# Encapsulate, natively forward, or encapsulate-and-replicate the packet.
#
dest_rloc, dest_port, nonce, action, rle, rloc_entry = \
mc.select_rloc(packet, lisp_ipc_listen_socket)
if (dest_rloc == None and rle == None):
if (action == lisp.LISP_NATIVE_FORWARD_ACTION):
lisp.dprint("Natively forwarding")
packet.send_packet(lisp_raw_socket, packet.inner_dest)
if (packet.is_trace()):
lisp.lisp_trace_append(packet, reason="not an EID")
#endif
return
#endif
r = "No reachable RLOCs found"
lisp.dprint(r)
if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=r)
return
#endif
if (dest_rloc and dest_rloc.is_null()):
r = "Drop action RLOC found"
lisp.dprint(r)
if (packet.is_trace()): lisp.lisp_trace_append(packet, reason=r)
return
#endif
#
# Setup outer header for either unicast or multicast transmission..
#
packet.outer_tos = packet.inner_tos
packet.outer_ttl = 32 if (igmp) else packet.inner_ttl
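#
# Note: IGMP packets arrive with an inner TTL of 1, so the encapsulated
# copy gets a fixed outer TTL of 32; everything else reuses the already
# decremented inner TTL.
#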
#
# Do unicast encapsulation.
#
if (dest_rloc):
packet.outer_dest.copy_address(dest_rloc)
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet, rloc_entry=rloc_entry) \
== False): return
#endif
#
# Encode new LISP, UDP, and outer header.
#
if (packet.encode(nonce) == None): return
if (len(packet.packet) <= 1500): packet.print_packet("Send", True)
#
# Send out on raw socket.
#
raw_socket = lisp_raw_v6_socket if version == 6 else lisp_raw_socket
packet.send_packet(raw_socket, packet.outer_dest)
elif (rle):
#
# Do replication if an RLE is returned. Since we are an ITR, replicate to
# level-0 RTRs (or ETRs) only (or first-level boxes only)..
#
level = rle.rle_nodes[0].level
orig_len = len(packet.packet)
for node in rle.rle_forwarding_list:
if (node.level != level): return
packet.outer_dest.copy_address(node.address)
if (lisp_decent): packet.inner_dest.instance_id = 0xffffff
version = packet.outer_dest.afi_to_version()
packet.outer_version = version
source_rloc = lisp.lisp_myrlocs[0] if (version == 4) else \
lisp.lisp_myrlocs[1]
packet.outer_source.copy_address(source_rloc)
if (packet.is_trace()):
if (lisp.lisp_trace_append(packet) == False): return
#endif
if (packet.encode(None) == None): return
#
# Replicate out on raw socket.
#
packet.print_packet("Replicate-to-L{}".format(node.level), True)
packet.send_packet(lisp_raw_socket, packet.outer_dest)
#
# We need to strip the encapsulation header so we can add a new
# one for the next replication.
#
strip_len = len(packet.packet) - orig_len
packet.packet = packet.packet[strip_len::]
#endfor
#endif
#
# Don't need packet structure anymore.
#
del(packet)
return
#enddef
#
# lisp_itr_pcap_process_packet
#
# Receive LISP encapsulated packet from pcap.loop().
#
def lisp_itr_pcap_process_packet(device, not_used, packet):
offset = 4 if device == "lo0" else 0 if device == "lispers.net" else 14
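#
# Link-header lengths: loopback (lo0) frames carry a 4-byte family header,
# the lispers.net tap device hands up raw IP packets with no link header,
# and Ethernet devices have a 14-byte MAC header.
#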
if (lisp.lisp_frame_logging):
title = lisp.bold("Received frame on interface '{}'".format(device),
False)
frame = lisp.lisp_format_packet(packet[0:64])
lisp.lprint("{}: {}".format(title, frame))
#endif
#
# Get input interface based on source MAC address.
#
macs = ""
my_sa = False
interface = device
if (offset == 14):
interfaces, sa, da, my_sa = lisp.lisp_get_input_interface(packet)
interface = device if (device in interfaces) else interfaces[0]
macs = lisp.lisp_format_macs(sa, da)
if (interface.find("vlan") != -1): offset += 4
#
# If destination MAC address is multicast, set my_sa. Examine low-order
# bit of first byte by grabbing the second nibble and testing low-order
# bit after converting to integer.
#
if (int(da[1], 16) & 1): my_sa = True
#endif
#
# Check for VLAN encapsulation.
#
if (offset != 0):
ethertype = struct.unpack("H", packet[offset-2:offset])[0]
ethertype = socket.ntohs(ethertype)
if (ethertype == 0x8100):
vlan = struct.unpack("I", packet[offset:offset+4])[0]
vlan = socket.ntohl(vlan)
interface = "vlan" + str(vlan >> 16)
offset += 4
elif (ethertype == 0x806):
lisp.dprint("Dropping ARP packets, host should have default route")
return
#endif
#endif
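#
# For L2 overlays the entire frame, including the MAC header, is the
# payload to be encapsulated, so don't strip any link header.
#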
if (lisp.lisp_l2_overlay): offset = 0
lisp_itr_data_plane(packet[offset::], device, interface, macs, my_sa)
return
#enddef
#
# lisp_itr_kernel_filter
#
# The supplied 'sources' array holds the EID-prefixes we want the kernel to drop
# packets for. We will use iptables for Linux and ipfw for MacOS.
#
# We need this address combination support (notation S -> D):
#
# site-EID -> remote-EID processed by ITR
# site-EID -> non-EID processed by ITR
# site-EID -> site-EID processed by kernel
# non-EID -> non-EID processed by kernel
# non-EID -> remote-EID processed by kernel
# non-EID -> site-EID processed by kernel
#
# The pcap filters reflect the ITR processing combos and can be found in
# lisp_itr_build_pcap_filter(). This routine programs iptables to do the
# kernel processing combos.
#
# (1) iptables -t raw -A lisp -j ACCEPT -d <special-addresses>
# (2) iptables -t raw -A lisp -j ACCEPT -d <local-address> ...
# (3) iptables -t raw -A lisp -j ACCEPT -s <site-eid> -d <site-eid> ...
# (4) iptables -t raw -A lisp -j DROP -s <site-eid> ...
#
# (1) and (2), we want kernel to route packets. This allows loopback and
# multicast to be processed by kernel.
#
# For (3), we want the kernel to do local routing of packets inside of a site
# in this ITR.
#
# For (4), we want kernel to not touch any packets sourced from locally
# configured EIDs. That is each EID-prefix from a "lisp database-mapping"
# command. Because those EID-prefixes are pcap'ed and processed by the lisp-itr
# process.
#
def lisp_itr_kernel_filter(sources, dyn_eids):
if (os.getenv("LISP_NO_IPTABLES") != None):
lisp.lprint("User selected to suppress installing iptables rules")
return
#endif
os.system("sudo iptables -t raw -N lisp")
os.system("sudo iptables -t raw -A PREROUTING -j lisp")
os.system("sudo ip6tables -t raw -N lisp")
os.system("sudo ip6tables -t raw -A PREROUTING -j lisp")
#
# Have kernel process packets for local addresses when sourced from site
# EIDs. We do not want the lisp-itr process to process such packets.
# We want the kernel to deliver packets to and from local applications.
# And we want the kernel to forward decapsulated packets out interfaces
# leading the EIDs.
#
add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -d {}"
addr_set = ["127.0.0.1", "::1", "224.0.0.0/4 -p igmp", "ff00::/8",
"fe80::/16"]
addr_set += sources + lisp.lisp_get_all_addresses()
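#
# Accept in the kernel: loopback, IGMP to the IPv4 multicast range, IPv6
# multicast and link-local, the configured source EIDs, and every address
# assigned to this system.
#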
for addr in addr_set:
if (lisp.lisp_is_mac_string(addr)): continue
six = "" if addr.find(":") == -1 else "6"
os.system(add.format(six, addr))
#endfor
#
# When source and destination addresses are EIDs for this LISP site,
# we want the kernel to do local routing. But as a PITR, we don't want
# the kernel to route everything (EID-prefix 0.0.0.0/0) or we can't have
# this process encapsulate for any source address to a destination EID.
#
if (lisp.lisp_pitr == False):
add = "sudo ip{}tables -t raw -A lisp -j ACCEPT -s {} -d {}"
check = "sudo ip{}tables -t raw -C lisp -j ACCEPT -s {} -d {}"
for source in sources:
if (lisp.lisp_is_mac_string(source)): continue
if (source in dyn_eids): continue
six = "" if source.find(":") == -1 else "6"
for s in sources:
if (lisp.lisp_is_mac_string(s)): continue
if (s in dyn_eids): continue
if (s.find(".") != -1 and source.find(".") == -1): continue
if (s.find(":") != -1 and source.find(":") == -1): continue
if (getoutput(check.format(six, source, s)) == ""):
continue
#endif
os.system(add.format(six, source, s))
#endfor
#endfor
#endif
#
# Now put in drop rules for each "lisp database-mapping" EID-prefix.
#
drop = "sudo ip{}tables -t raw -A lisp -j DROP -s {}"
for source in sources:
if (lisp.lisp_is_mac_string(source)): continue
six = "" if source.find(":") == -1 else "6"
os.system(drop.format(six, source))
#endif
#
# Print out rules we just configured.
#
rules = getoutput("sudo iptables -t raw -S lisp").split("\n")
rules += getoutput("sudo ip6tables -t raw -S lisp").split("\n")
lisp.lprint("Using kernel filters: {}".format(rules))
#
# Check if we need to put in an iptables rule workaround for the virtio TCP
# checksum corruption problem for KVM guest OSes. Check environment
# variable LISP_VIRTIO_BUG.
#
# Note a debian host system that runs docker will need the following
# command so ip6tables works inside of the docker container:
#
# sudo modprobe ip6table_filter
#
if (os.getenv("LISP_VIRTIO_BUG") != None):
c = ("sudo iptables -A POSTROUTING -t mangle -p tcp -j " + \
"CHECKSUM --checksum-fill; ")
c += ("sudo iptables -A POSTROUTING -t mangle -p udp -j " + \
"CHECKSUM --checksum-fill; ")
c += ("sudo ip6tables -A POSTROUTING -t mangle -p tcp -j " + \
"CHECKSUM --checksum-fill; ")
c += ("sudo ip6tables -A POSTROUTING -t mangle -p udp -j " + \
"CHECKSUM --checksum-fill")
os.system(c)
virtio = lisp.bold("virtio", False)
lisp.lprint("{} bug workaround, configure '{}'".format(virtio, c))
#endif
return
#enddef
#
# lisp_itr_build_pcap_filter
#
# Build pcap filter and return string to caller.
#
def lisp_itr_build_pcap_filter(sources, dyn_eids, l2_overlay, pitr):
if (l2_overlay):
pfilter = "ether[6:4] >= 0 and ether[10:2] >= 0"
lisp.lprint("Using pcap filter: '{}'".format(pfilter))
return(pfilter)
#endif
ether_pfilter = "(not ether proto 0x806)"
probe_pfilter = " or (udp src port 4342 and ip[28] == 0x28)"
decent_pfilter = \
" or (ip[16] >= 224 and ip[16] < 240 and (ip[28] & 0xf0) == 0x30)"
src_pfilter = ""
dst_pfilter = ""
for source in sources:
insert_source = source
if (lisp.lisp_is_mac_string(source)):
insert_source = source.split("/")[0]
insert_source = insert_source.replace("-", "")
mac_str = []
for i in range(0, 12, 2): mac_str.append(insert_source[i:i+2])
insert_source = "ether host " + ":".join(mac_str)
#endif
src_pfilter += "{}".format(insert_source)
if (source not in dyn_eids): dst_pfilter += "{}".format(insert_source)
if (sources[-1] == source): break
src_pfilter += " or "
if (source not in dyn_eids): dst_pfilter += " or "
#endfor
if (dst_pfilter[-4::] == " or "): dst_pfilter = dst_pfilter[0:-4]
#
# If "lisp-nat = yes" is configured, then we are a PETR and we need
# to accept packets for local EIDs (assigned to loopback interfaces).
# So allow the first one to be accepted.
#
lisp_nat = getoutput("egrep 'lisp-nat = yes' ./lisp.config")
lisp_nat = (lisp_nat != "" and lisp_nat[0] == " ")
loopback = lisp.lisp_get_loopback_address() if (lisp_nat) else None
addr_pfilter = ""
addresses = lisp.lisp_get_all_addresses()
for addr in addresses:
if (addr == loopback): continue
addr_pfilter += "{}".format(addr)
if (addresses[-1] == addr): break
addr_pfilter += " or "
#endif
if (src_pfilter != ""):
src_pfilter = " and (src net {})".format(src_pfilter)
#endif
if (dst_pfilter != ""):
dst_pfilter = " and not (dst net {})".format(dst_pfilter)
#endif
if (addr_pfilter != ""):
addr_pfilter = " and not (dst host {})".format(addr_pfilter)
#endif
#
# A PITR wants to see packets from anywhere so it can encap to possible
# LISP sites. But we want the kernel to route and consume packets for this
# system's own RLOCs.
#
if (pitr):
dst_pfilter = ""
addr_pfilter = addr_pfilter.replace("dst ", "")
#endif
#
# Concatenate all the filters.
#
pfilter = ether_pfilter + src_pfilter + dst_pfilter + addr_pfilter
pfilter += probe_pfilter
pfilter += decent_pfilter
lisp.lprint("Using pcap filter: '{}'".format(pfilter))
return(pfilter)
#enddef
#
# lisp_itr_pcap_thread
#
# Receive LISP encapsulated packet from pcap.
#
def lisp_itr_pcap_thread(device, pfilter, pcap_lock):
lisp.lisp_set_exception()
if (lisp.lisp_is_python2()):
import pcappy
pcap_lock.acquire()
pcap = pcappy.open_live(device, 9000, 0, 100)
pcap_lock.release()
pcap.filter = pfilter
pcap.loop(-1, lisp_itr_pcap_process_packet, device)
#endif
if (lisp.lisp_is_python3()):
import pcapy
pcap_lock.acquire()
pcap = pcapy.open_live(device, 9000, 0, 100)
pcap_lock.release()
pcap.setfilter(pfilter)
while(True):
header, packet = pcap.next()
if (len(packet) == 0): continue
lisp_itr_pcap_process_packet(device, None, packet)
#endwhile
#endif
return
#enddef
#
# lisp_itr_process_info_timer
#
# Time to send a periodic Info-Request message. This must be done less often
# than sending periodic Map-Registers as well as less than the NAT timeout
# value, which is usually one minute.
#
def lisp_itr_process_info_timer():
global lisp_itr_info_timer
global lisp_ephem_nat_socket
global lisp_send_sockets
lisp.lisp_set_exception()
#
# Build Info-Request messages if we have any private RLOCs in database-
# mappings.
#
sockets = [lisp_ephem_nat_socket, lisp_ephem_nat_socket,
lisp_ipc_listen_socket]
lisp.lisp_build_info_requests(sockets, None, lisp.LISP_CTRL_PORT)
#
# Restart periodic timer.
#
lisp_itr_info_timer.cancel()
lisp_itr_info_timer = threading.Timer(lisp.LISP_INFO_INTERVAL,
lisp_itr_process_info_timer, [])
lisp_itr_info_timer.start()
return
#enddef
#
# lisp_itr_map_resolver_command
#
# Call lispconfig.lisp_map_resolver_command and set "test-mr" timer.
#
def lisp_itr_map_resolver_command(kv_pair):
global lisp_send_sockets
global lisp_ephem_port
global lisp_itr_info_timer
lispconfig.lisp_map_resolver_command(kv_pair)
if (lisp.lisp_test_mr_timer == None or
lisp.lisp_test_mr_timer.is_alive() == False):
lisp.lisp_test_mr_timer = threading.Timer(2, lisp.lisp_test_mr,
[lisp_send_sockets, lisp_ephem_port])
lisp.lisp_test_mr_timer.start()
#endif
#
# Trigger a Info-Request if we are doing NAT-traversal.
#
lisp_itr_info_timer = threading.Timer(0, lisp_itr_process_info_timer, [])
lisp_itr_info_timer.start()
return
#enddef
#
# lisp_itr_database_mapping_command
#
# Add database-mapping entry so the ITR can packet-capture only packets from
# sources in the *first* database-mapping configured.
#
def lisp_itr_database_mapping_command(kv_pair):
lispconfig.lisp_database_mapping_command(kv_pair)
return
#enddef
#
# lisp_itr_xtr_command
#
# Call lispconfig.lisp_xtr_command() but pass socket parameters to start
# the RLOC-probing timer if "rloc-probing = yes".
#
def lisp_itr_xtr_command(kv_pair):
global lisp_ephem_listen_socket
global lisp_rloc_probe_socket
#
# Cache current state for nat-traversal and rloc-probing so we know if
# we should trigger..
#
nat_traversal = lisp.lisp_nat_traversal
rloc_probing = lisp.lisp_rloc_probing
#
# Execute command.
#
lispconfig.lisp_xtr_command(kv_pair)
#
# Did "nat-traversal = yes" or "rloc-probing = yes" just happen?
#
nat_now_on = (nat_traversal == False and lisp.lisp_nat_traversal and \
lisp.lisp_rloc_probing)
rloc_probing_now_on = (rloc_probing == False and lisp.lisp_rloc_probing)
interval = 0
if (rloc_probing_now_on): interval = 1
if (nat_now_on): interval = 5
if (interval != 0):
lisp_sockets = [lisp_rloc_probe_socket, lisp_ephem_listen_socket]
lisp.lisp_start_rloc_probe_timer(interval, lisp_sockets)
#endif
#
# If nat-traversal=yes and data-plane-security=yes on an ITR, then we
# need to set the source port in RLOC-probe requests and encapsulated data
# packets to be the same value.
#
if (lisp.lisp_crypto_ephem_port == None and lisp.lisp_data_plane_security):
port = lisp_ephem_listen_socket.getsockname()[1]
lisp.lisp_crypto_ephem_port = port
lisp.lprint("Use port {} for lisp-crypto packets".format(port))
entry = { "type" : "itr-crypto-port", "port" : port }
lisp.lisp_write_to_dp_socket(entry)
#endif
#
# Write to external data-plane if enabled.
#
lisp.lisp_ipc_write_xtr_parameters(lisp.lisp_debug_logging,
lisp.lisp_data_plane_logging)
return
#enddef
#
# lisp_itr_process_nonce_ipc
#
# Process a nonce IPC message from the ETR. It wants to tell us that a
# request-nonce was received and we need to echo it or when this ITR requested
# a nonce to be echoed, the ETR is telling us it has been echoed.
#
# Variable "ipc" is a string and not a byte string. Caller converts.
#
def lisp_itr_process_nonce_ipc(ipc):
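#
# The IPC string has the form "nonce%<opcode>%<rloc-string>%<hex-nonce>",
# where opcode "R" means the ETR received a request-nonce and "E" means it
# received an echoed nonce.
#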
x, opcode, rloc_str, nonce = ipc.split("%")
nonce = int(nonce, 16)
echo_nonce = lisp.lisp_get_echo_nonce(None, rloc_str)
if (echo_nonce == None): echo_nonce = lisp.lisp_echo_nonce(rloc_str)
#
# If we are in request-nonce mode, exit it, so we can echo the nonce the
# other side is requesting.
#
if (opcode == "R"):
echo_nonce.request_nonce_rcvd = nonce
echo_nonce.last_request_nonce_rcvd = lisp.lisp_get_timestamp()
echo_nonce.echo_nonce_sent = nonce
echo_nonce.last_new_echo_nonce_sent = lisp.lisp_get_timestamp()
lisp.lprint("Start echo-nonce mode for {}, nonce 0x{}".format( \
lisp.red(echo_nonce.rloc_str, False), lisp.lisp_hex_string(nonce)))
#endif
if (opcode == "E"):
echo_nonce.echo_nonce_rcvd = nonce
echo_nonce.last_echo_nonce_rcvd = lisp.lisp_get_timestamp()
if (echo_nonce.request_nonce_sent == nonce):
en = lisp.bold("echoed nonce", False)
lisp.lprint("Received {} {} from {}".format(en,
lisp.lisp_hex_string(nonce),
lisp.red(echo_nonce.rloc_str, False)))
echo_nonce.request_nonce_sent = None
lisp.lprint("Stop request-nonce mode for {}".format( \
lisp.red(echo_nonce.rloc_str, False)))
echo_nonce.last_good_echo_nonce_rcvd = lisp.lisp_get_timestamp()
else:
rns = "none"
if (echo_nonce.request_nonce_sent):
rns = lisp.lisp_hex_string(echo_nonce.request_nonce_sent)
#endif
lisp.lprint(("Received echo-nonce 0x{} from {}, but request-" + \
"nonce is {}").format(lisp.lisp_hex_string(nonce),
lisp.red(echo_nonce.rloc_str, False), rns))
#endif
#endif
return
#enddef
#
# ITR commands processed by this process.
#
lisp_itr_commands = {
"lisp xtr-parameters" : [lisp_itr_xtr_command, {
"rloc-probing" : [True, "yes", "no"],
"nonce-echoing" : [True, "yes", "no"],
"data-plane-security" : [True, "yes", "no"],
"data-plane-logging" : [True, "yes", "no"],
"frame-logging" : [True, "yes", "no"],
"flow-logging" : [True, "yes", "no"],
"nat-traversal" : [True, "yes", "no"],
"checkpoint-map-cache" : [True, "yes", "no"],
"ipc-data-plane" : [True, "yes", "no"],
"decentralized-push-xtr" : [True, "yes", "no"],
"decentralized-pull-xtr-modulus" : [True, 1, 0xff],
"decentralized-pull-xtr-dns-suffix" : [True],
"register-reachable-rtrs" : [True, "yes", "no"],
"program-hardware" : [True, "yes", "no"] }],
"lisp interface" : [lispconfig.lisp_interface_command, {
"interface-name" : [True],
"device" : [True],
"instance-id" : [True, 0, 0xffffffff],
"dynamic-eid" : [True],
"multi-tenant-eid" : [True],
"lisp-nat" : [True, "yes", "no"],
"dynamic-eid-device" : [True],
"dynamic-eid-timeout" : [True, 0, 0xff] }],
"lisp map-resolver" : [lisp_itr_map_resolver_command, {
"mr-name" : [True],
"ms-name" : [True],
"dns-name" : [True],
"address" : [True] }],
"lisp map-server" : [lispconfig.lisp_map_server_command, {
"ms-name" : [True],
"address" : [True],
"dns-name" : [True],
"authentication-type" : [False, "sha1", "sha2"],
"authentication-key" : [False],
"encryption-key" : [False],
"proxy-reply" : [False, "yes", "no"],
"want-map-notify" : [False, "yes", "no"],
"merge-registrations" : [False, "yes", "no"],
"refresh-registrations" : [False, "yes", "no"],
"site-id" : [False, 1, 0xffffffffffffffff] }],
"lisp database-mapping" : [lisp_itr_database_mapping_command, {
"prefix" : [],
"mr-name" : [True],
"ms-name" : [True],
"instance-id" : [True, 0, 0xffffffff],
"secondary-instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"dynamic-eid" : [True, "yes", "no"],
"signature-eid" : [True, "yes", "no"],
"register-ttl" : [True, 1, 0xffffffff],
"rloc" : [],
"rloc-record-name" : [True],
"elp-name" : [True],
"geo-name" : [True],
"rle-name" : [True],
"json-name" : [True],
"address" : [True],
"interface" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"send-map-request" : [True, "yes", "no"],
"subscribe-request" : [True, "yes", "no"],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp itr-map-cache" : [lispconfig.lisp_map_cache_command, {
"prefix" : [],
"instance-id" : [True, 0, 0xffffffff],
"eid-prefix" : [True],
"group-prefix" : [True],
"rloc" : [],
"rloc-record-name" : [True],
"rle-name" : [True],
"elp-name" : [True],
"address" : [True],
"priority" : [True, 0, 255],
"weight" : [True, 0, 100] }],
"lisp explicit-locator-path" : [lispconfig.lisp_elp_command, {
"elp-name" : [False],
"elp-node" : [],
"address" : [True],
"probe" : [True, "yes", "no"],
"strict" : [True, "yes", "no"],
"eid" : [True, "yes", "no"] }],
"lisp replication-list-entry" : [lispconfig.lisp_rle_command, {
"rle-name" : [False],
"rle-node" : [],
"address" : [True],
"level" : [True, 0, 255] }],
"lisp geo-coordinates" : [lispconfig.lisp_geo_command, {
"geo-name" : [False],
"geo-tag" : [False] }],
"lisp json" : [lispconfig.lisp_json_command, {
"json-name" : [False],
"json-string" : [False] }],
"show itr-map-cache" : [lisp_itr_show_command, { }],
"show itr-rloc-probing" : [lisp_itr_show_rloc_probe_command, { }],
"show itr-keys" : [lisp_itr_show_keys_command, {}],
"show itr-dynamic-eid" : [lispconfig.lisp_show_dynamic_eid_command, { }]
}
#------------------------------------------------------------------------------
#
# Main entry point for process.
#
if (lisp_itr_startup() == False):
lisp.lprint("lisp_itr_startup() failed")
lisp.lisp_print_banner("ITR abnormal exit")
exit(1)
#endif
socket_list = [lisp_ephem_listen_socket, lisp_ipc_listen_socket,
lisp_ephem_nat_socket, lisp_ipc_punt_socket]
#
# Should we listen to the map-cache/punt IPC socket if it exists.
#
listen_on_ipc_socket = True
ephem_sockets = [lisp_ephem_listen_socket] * 3
ephem_nat_sockets = [lisp_ephem_nat_socket] * 3
while (True):
try: ready_list, w, x = select.select(socket_list, [], [])
except: break
#
# Process Punt signal message from another data-plane (snabb).
#
if (lisp.lisp_ipc_data_plane and lisp_ipc_punt_socket in ready_list):
lisp.lisp_process_punt(lisp_ipc_punt_socket, lisp_send_sockets,
lisp_ephem_port)
#endif
#
# Process Map-Reply messages received on ephemeral port.
#
if (lisp_ephem_listen_socket in ready_list):
opcode, source, port, packet = lisp.lisp_receive(ephem_sockets[0],
False)
if (source == ""): break
if (lisp.lisp_is_rloc_probe_reply(packet[0:1])):
lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
continue
#endif
lisp.lisp_parse_packet(ephem_sockets, packet, source, port)
#endif
#
# Process Info-Reply messages received on NAT ephemeral port.
#
if (lisp_ephem_nat_socket in ready_list):
opcode, source, port, packet = lisp.lisp_receive(ephem_nat_sockets[0],
False)
if (source == ""): break
if (lisp.lisp_is_rloc_probe_reply(packet[0:1])):
lisp.lprint("ITR ignoring RLOC-probe reply, using pcap")
continue
#endif
probe = lisp.lisp_parse_packet(ephem_nat_sockets, packet, source, port)
#
# Info-Reply has new RTR-list, RLOC-probe the RTR RLOCs so we can
# lisp-crypto faster.
#
if (probe):
lisp_sockets = [lisp_ephem_listen_socket, lisp_ephem_listen_socket]
lisp.lisp_start_rloc_probe_timer(0, lisp_sockets)
#endif
#endif
#
# Process either commands, an IPC data-packet (for testing), or any
# protocol message on the IPC listen socket.
#
if (lisp_ipc_listen_socket in ready_list):
opcode, source, port, packet = \
lisp.lisp_receive(lisp_ipc_listen_socket, True)
if (source == ""): break
if (opcode == "command"):
packet = packet.decode()
if (packet == "clear"):
lisp.lisp_clear_map_cache()
continue
#endif
if (packet.find("nonce%") != -1):
lisp_itr_process_nonce_ipc(packet)
continue
#endif
lispconfig.lisp_process_command(lisp_ipc_listen_socket, opcode,
packet, "lisp-itr", [lisp_itr_commands])
elif (opcode == "api"):
packet = packet.decode()
lisp.lisp_process_api("lisp-itr", lisp_ipc_listen_socket, packet)
elif (opcode == "data-packet"):
lisp_itr_data_plane(packet, "ipc")
else:
if (lisp.lisp_is_rloc_probe_reply(packet[0:1])):
lisp.lprint("ITR ignoring RLOC-probe request, using pcap")
continue
#endif
lisp.lisp_parse_packet(lisp_send_sockets, packet, source, port)
#endif
#endif
#endwhile
lisp_itr_shutdown()
lisp.lisp_print_banner("ITR normal exit")
exit(0)
#------------------------------------------------------------------------------
| [] | [] | [
"LISP_ITR_WAIT_TIME",
"LISP_NO_IPTABLES",
"LISP_VIRTIO_BUG",
"LISP_PCAP_LIST",
"LISP_LOAD_SPLIT_PINGS"
] | [] | ["LISP_ITR_WAIT_TIME", "LISP_NO_IPTABLES", "LISP_VIRTIO_BUG", "LISP_PCAP_LIST", "LISP_LOAD_SPLIT_PINGS"] | python | 5 | 0 | |
detection_demo.py | #================================================================
#
# File name : detection_demo.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : object detection image and video example
#
#================================================================
import os
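# Expose only GPU 0 to CUDA/TensorFlow.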
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
from yolov3.utils import detect_image, detect_realtime, detect_video, Load_Yolo_model, detect_video_realtime_mp
from yolov3.configs import *
image_path = "./IMAGES/kite.jpg"
video_path = "./IMAGES/test.mp4"
yolo = Load_Yolo_model()
#detect_image(yolo, image_path, "./IMAGES/kite_pred.jpg", input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255,0,0))
#detect_video(yolo, video_path, "", input_size=YOLO_INPUT_SIZE, show=False, rectangle_colors=(255,0,0))
detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255, 0, 0))
#detect_video_realtime_mp(video_path, "Output.mp4", input_size=YOLO_INPUT_SIZE, show=False, rectangle_colors=(255,0,0), realtime=False)
| [] | [] | [
"CUDA_VISIBLE_DEVICES"
] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
src/flask/cli.py | import ast
import inspect
import os
import platform
import re
import sys
import traceback
import warnings
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
try:
import ssl
except ImportError:
ssl = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
"Detected multiple Flask applications in module"
f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
f" to specify the correct one."
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
f"Detected factory {attr_name!r} in module {module.__name__!r},"
" but could not call it without arguments. Use"
f" \"FLASK_APP='{module.__name__}:{attr_name}(args)'\""
" to specify arguments."
)
raise NoAppException(
"Failed to find Flask application or factory in module"
f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
" to specify one."
)
def call_factory(script_info, app_factory, args=None, kwargs=None):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
sig = inspect.signature(app_factory)
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
if "script_info" in sig.parameters:
warnings.warn(
"The 'script_info' argument is deprecated and will not be"
" passed to the app factory function in Flask 2.1.",
DeprecationWarning,
)
kwargs["script_info"] = script_info
if (
not args
and len(sig.parameters) == 1
and next(iter(sig.parameters.values())).default is inspect.Parameter.empty
):
warnings.warn(
"Script info is deprecated and will not be passed as the"
" single argument to the app factory function in Flask"
" 2.1.",
DeprecationWarning,
)
args.append(script_info)
return app_factory(*args, **kwargs)
def _called_with_wrong_args(f):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param f: The function that was called.
:return: ``True`` if the call failed.
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is f.__code__:
# In the function, it was called successfully.
return False
tb = tb.tb_next
# Didn't reach the function.
return True
finally:
# Delete tb to break a circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(script_info, module, app_name):
"""Check if the given string is a variable name or a function. Call
a function to get the app instance, or return the variable directly.
"""
from . import Flask
# Parse app_name as a single expression to determine if it's a valid
# attribute name or function call.
try:
expr = ast.parse(app_name.strip(), mode="eval").body
except SyntaxError:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
)
if isinstance(expr, ast.Name):
name = expr.id
args = kwargs = None
elif isinstance(expr, ast.Call):
# Ensure the function name is an attribute name only.
if not isinstance(expr.func, ast.Name):
raise NoAppException(
f"Function reference must be a simple name: {app_name!r}."
)
name = expr.func.id
# Parse the positional and keyword arguments as literals.
try:
args = [ast.literal_eval(arg) for arg in expr.args]
kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}
except ValueError:
# literal_eval gives cryptic error messages, show a generic
# message with the full expression instead.
raise NoAppException(
f"Failed to parse arguments as literal values: {app_name!r}."
)
else:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
)
try:
attr = getattr(module, name)
except AttributeError:
raise NoAppException(
f"Failed to find attribute {name!r} in {module.__name__!r}."
)
# If the attribute is a function, call it with any args and kwargs
# to get the real application.
if inspect.isfunction(attr):
try:
app = call_factory(script_info, attr, args, kwargs)
except TypeError:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
f"The factory {app_name!r} in module"
f" {module.__name__!r} could not be called with the"
" specified arguments."
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from"
f" '{module.__name__}:{app_name}'."
)
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True # noqa: F841
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[2].tb_next:
raise NoAppException(
f"While importing {module_name!r}, an ImportError was"
f" raised:\n\n{traceback.format_exc()}"
)
elif raise_if_not_found:
raise NoAppException(f"Could not import {module_name!r}.")
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
import werkzeug
from . import __version__
click.echo(
f"Python {platform.python_version()}\n"
f"Flask {__version__}\n"
f"Werkzeug {werkzeug.__version__}",
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp:
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=None):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
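# Werkzeug sets WERKZEUG_RUN_MAIN in the reloader child process; default
# to lazy background loading there so import errors show up in the
# debugger instead of taking down the reloader.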
if use_eager_loading is None:
use_eager_loading = os.environ.get("WERKZEUG_RUN_MAIN") != "true"
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True # noqa: F841
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True # noqa: F841
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
raise exc_info[1].with_traceback(exc_info[2])
def _load_unlocked(self):
__traceback_hide__ = True # noqa: F841
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True # noqa: F841
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo:
"""Helper object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True # noqa: F841
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
)[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(self, import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this. See :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
set_debug_flag=True,
**extra,
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# Look up built-in and plugin commands, which should be
# available even if the app fails to load.
rv = super().get_command(ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
# Look up commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
return info.load_app().cli.get_command(ctx, name)
except NoAppException as e:
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
def list_commands(self, ctx):
self._load_plugin_commands()
# Start with the built-in and plugin commands.
rv = set(super().list_commands(ctx))
info = ctx.ensure_object(ScriptInfo)
# Add commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
rv.update(info.load_app().cli.list_commands(ctx))
except NoAppException as e:
# When an app couldn't be loaded, show the error message
# without the traceback.
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
except Exception:
# When any other errors occurred during loading, show the
# full traceback.
click.secho(f"{traceback.format_exc()}\n", err=True, fg="red")
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(
create_app=self.create_app, set_debug_flag=self.set_debug_flag
)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super().main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionchanged:: 2.0
When loading the env files, set the default encoding to UTF-8.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
err=True,
)
return False
# if the given path specifies the actual file then return True,
# else False
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path, encoding="utf-8")
return False
new_dir = None
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path, encoding="utf-8")
return new_dir is not None # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = f" * Serving Flask app {app_import_path!r}"
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(f" * Environment: {env}")
if env == "production":
click.secho(
" WARNING: This is a development server. Do not use it in"
" a production deployment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(f" * Debug mode: {'on' if debug else 'off'}")
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
if ssl is None:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
)
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import cryptography # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires the cryptography library.",
ctx,
param,
)
return value
obj = import_string(value, silent=True)
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
is_context = ssl and isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
class SeparatedPathType(click.Path):
"""Click option type that accepts a list of values separated by the
OS's path separator (``:``, ``;`` on Windows). Each value is
validated as a :class:`click.Path` type.
"""
def convert(self, value, param, ctx):
items = self.split_envvar_value(value)
super_convert = super().convert
return [super_convert(item, param, ctx) for item in items]
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loading",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
f" are separated by {os.path.pathsep!r}."
),
)
@pass_script_info
def run_command(
info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command():
"""Run an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from .globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = (
f"Python {sys.version} on {sys.platform}\n"
f"App: {app.import_name} [{app.env}]\n"
f"Instance: {app.instance_path}"
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup) as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
# Site, customize, or startup script can set a hook to call when
# entering interactive mode. The default one sets up readline with
# tab and history completion.
interactive_hook = getattr(sys, "__interactivehook__", None)
if interactive_hook is not None:
try:
import readline
from rlcompleter import Completer
except ImportError:
pass
else:
# rlcompleter uses __main__.__dict__ by default, which is
# flask.__main__. Use the shell context instead.
readline.set_completer(Completer(ctx).complete)
interactive_hook()
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [", ".join(sorted(rule.methods - ignored_methods)) for rule in rules]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main():
# TODO omit sys.argv once https://github.com/pallets/click/issues/536 is fixed
cli.main(args=sys.argv[1:])
if __name__ == "__main__":
main()
| [] | [] | [
"FLASK_RUN_FROM_CLI",
"PYTHONSTARTUP",
"FLASK_APP",
"WERKZEUG_RUN_MAIN"
] | [] | ["FLASK_RUN_FROM_CLI", "PYTHONSTARTUP", "FLASK_APP", "WERKZEUG_RUN_MAIN"] | python | 4 | 0 | |
pkg/database/psql_test.go | package database_test
import (
"github.com/bedag/kubernetes-dbaas/pkg/database"
. "github.com/bedag/kubernetes-dbaas/pkg/test"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"os"
)
var _ = Describe(FormatTestDesc(Integration, "Postgres CreateDb"), func() {
// Setting up connection to DBMS
dsn, err := database.Dsn(os.Getenv("POSTGRES_DSN")).GenPostgres()
Expect(err).ToNot(HaveOccurred())
conn, err := database.NewPsqlConn(dsn)
Expect(err).ToNot(HaveOccurred())
// Prepare assertion data
opResultAssertion := database.OpOutput{
Result: map[string]string{
"username": "testuser",
"password": "testpassword",
"dbName": "my-test-db",
"fqdn": "localhost",
"port": "5432",
"lastRotation": "",
},
Err: nil,
}
Context("when Operation is defined correctly", func() {
// Prepare test data
createOperation := database.Operation{
Name: PostgresCreateOpName,
Inputs: map[string]string{
"k8sName": "my-test-db",
},
}
// Execute tested operation
var result database.OpOutput
result = conn.CreateDb(createOperation)
It("should not return an error", func() {
Expect(result.Err).ToNot(HaveOccurred())
})
It("should return a non-nil stored procedure Result", func() {
Expect(result.Result).ToNot(BeNil())
})
It("should return a rowset as specified in the stored procedure", func() {
Expect(result).To(Equal(opResultAssertion))
})
})
Context("when Operation is defined wrongly", func() {
// Prepare test data
createOperation := database.Operation{
Name: "fake_sp_name",
Inputs: map[string]string{
"k8sName": "my-test-db",
},
}
// Execute tested operation
var result database.OpOutput
result = conn.CreateDb(createOperation)
It("should return an error", func() {
Expect(result.Err).To(HaveOccurred())
})
})
})
| [
"\"POSTGRES_DSN\""
] | [] | [
"POSTGRES_DSN"
] | [] | ["POSTGRES_DSN"] | go | 1 | 0 | |
kubernetestodoapp/todo/handler.go | package function
import (
"database/sql"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"
"github.com/openfaas/openfaas-cloud/sdk"
_ "github.com/lib/pq"
)
var db *sql.DB
type Todo struct {
ID int `json:"id"`
Description string `json:"description"`
CreatedDate *time.Time `json:"created_date"`
CompletedDate *time.Time `json:"completed_date,omitempty"`
}
// init establishes a persistent connection to the remote database
// the function will panic if it cannot establish a link and the
// container will restart / go into a crash/back-off loop
func init() {
if _, err := os.Stat("/var/openfaas/secrets/password"); err == nil {
password, _ := sdk.ReadSecret("password")
user, _ := sdk.ReadSecret("username")
host, _ := sdk.ReadSecret("host")
dbName := os.Getenv("postgres_db")
port := os.Getenv("postgres_port")
sslmode := os.Getenv("postgres_sslmode")
connStr := "postgres://" + user + ":" + password + "@" + host + ":" + port + "/" + dbName + "?sslmode=" + sslmode
var err error
db, err = sql.Open("postgres", connStr)
if err != nil {
panic(err.Error())
}
err = db.Ping()
if err != nil {
panic(err.Error())
}
} else {
panic(err.Error())
}
}
func Handle(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodPost && r.URL.Path == "/create" {
defer r.Body.Close()
body, _ := ioutil.ReadAll(r.Body)
if err := insert(string(body)); err != nil {
http.Error(w, fmt.Sprintf("unable to insert todo: %s", err.Error()), http.StatusInternalServerError)
}
} else if r.Method == http.MethodGet && r.URL.Path == "/list" {
todos, err := selectTodos()
if err != nil {
http.Error(w, fmt.Sprintf("unable to get todos: %s", err.Error()), http.StatusInternalServerError)
}
out, _ := json.Marshal(todos)
w.Header().Set("Content-Type", "application/json")
w.Write(out)
}
}
// insert into todo table:
func insert(description string) error {
res, err := db.Query(`insert into todo (id, description, created_date) values (DEFAULT, $1, now());`, description)
if err != nil {
return err
}
defer res.Close()
return nil
}
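// selectTodos reads every row from the todo table and returns them as a
// slice of Todo values.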
func selectTodos() ([]Todo, error) {
rows, getErr := db.Query(`select id, description, created_date, completed_date from todo;`)
if getErr != nil {
return []Todo{}, fmt.Errorf("unable to get from todo table: %v", getErr)
}
todos := []Todo{}
defer rows.Close()
for rows.Next() {
result := Todo{}
scanErr := rows.Scan(&result.ID, &result.Description, &result.CreatedDate, &result.CompletedDate)
if scanErr != nil {
log.Println("scan err:", scanErr)
}
todos = append(todos, result)
}
return todos, nil
}
| ["\"postgres_db\"", "\"postgres_port\"", "\"postgres_sslmode\""] | [] | ["postgres_db", "postgres_port", "postgres_sslmode"] | [] | ["postgres_db", "postgres_port", "postgres_sslmode"] | go | 3 | 0 |
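A minimal sketch of how the Handle function above could be exercised with the standard net/http/httptest package. It assumes the package-level init() succeeded, i.e. the OpenFaaS secrets and postgres_* environment variables point at a reachable database; the todo text is an illustrative value, not part of the original file.

// handler_sketch_test.go (illustrative usage of Handle; requires the database connection from init)
package function

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

func TestHandleCreateThenList(t *testing.T) {
	// Create a todo via POST /create.
	createReq := httptest.NewRequest(http.MethodPost, "/create", strings.NewReader("write release notes"))
	createRec := httptest.NewRecorder()
	Handle(createRec, createReq)
	if createRec.Code != http.StatusOK {
		t.Fatalf("create returned status %d", createRec.Code)
	}

	// Read the todos back via GET /list and check the JSON payload.
	listReq := httptest.NewRequest(http.MethodGet, "/list", nil)
	listRec := httptest.NewRecorder()
	Handle(listRec, listReq)
	if listRec.Code != http.StatusOK {
		t.Fatalf("list returned status %d", listRec.Code)
	}
	if !strings.Contains(listRec.Body.String(), "write release notes") {
		t.Errorf("expected the created todo in the list response, got %q", listRec.Body.String())
	}
}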
example/ewallet/main.go | package main
import (
"fmt"
"log"
"os"
"time"
"github.com/joho/godotenv"
"github.com/xendit/xendit-go"
"github.com/xendit/xendit-go/ewallet"
)
func main() {
godotenvErr := godotenv.Load()
if godotenvErr != nil {
log.Fatal(godotenvErr)
}
xendit.Opt.SecretKey = os.Getenv("SECRET_KEY")
createPaymentData := ewallet.CreatePaymentParams{
ExternalID: "dana-" + time.Now().String(),
Amount: 20000,
Phone: "08123123123",
EWalletType: xendit.EWalletTypeDANA,
CallbackURL: "mystore.com/callback",
RedirectURL: "mystore.com/redirect",
}
resp, err := ewallet.CreatePayment(&createPaymentData)
if err != nil {
log.Fatal(err)
}
fmt.Printf("created payment: %+v\n", resp)
getPaymentStatusData := ewallet.GetPaymentStatusParams{
ExternalID: resp.ExternalID,
EWalletType: resp.EWalletType,
}
resp, err = ewallet.GetPaymentStatus(&getPaymentStatusData)
if err != nil {
log.Fatal(err)
}
fmt.Printf("retrieved payment: %+v\n", resp)
metadata := map[string]interface{}{
"meta": "data",
}
ewalletBasketItem := xendit.EWalletBasketItem{
ReferenceID: "basket-product-ref-id",
Name: "product name",
Category: "mechanics",
Currency: "IDR",
Price: 50000,
Quantity: 5,
Type: "type",
SubCategory: "subcategory",
Metadata: metadata,
}
createEWalletChargeData := ewallet.CreateEWalletChargeParams{
ReferenceID: "test-reference-id",
Currency: "IDR",
Amount: 1688,
CheckoutMethod: "ONE_TIME_PAYMENT",
ChannelCode: "ID_SHOPEEPAY",
ChannelProperties: map[string]string{
"success_redirect_url": "https://yourwebsite.com/order/123",
},
Basket: []xendit.EWalletBasketItem{
ewalletBasketItem,
},
Metadata: metadata,
}
charge, chargeErr := ewallet.CreateEWalletCharge(&createEWalletChargeData)
if chargeErr != nil {
log.Fatal(chargeErr)
}
fmt.Printf("created e-wallet charge: %+v\n", charge)
getEWalletChargeStatusData := ewallet.GetEWalletChargeStatusParams{
ChargeID: charge.ID,
}
charge, chargeErr = ewallet.GetEWalletChargeStatus(&getEWalletChargeStatusData)
if chargeErr != nil {
log.Fatal(chargeErr)
}
fmt.Printf("retrieved e-wallet charge: %+v\n", charge)
}
| ["\"SECRET_KEY\""] | [] | ["SECRET_KEY"] | [] | ["SECRET_KEY"] | go | 1 | 0 |
store/shared/db/dbtest/dbtest.go | // Copyright 2019 Drone IO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package dbtest
import (
"os"
"strconv"
"github.com/drone/drone/store/shared/db"
// blank imports are used to load database drivers
// for unit tests. Only unit tests should be importing
// this package.
_ "github.com/go-sql-driver/mysql"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
)
// Connect opens a new test database connection.
func Connect() (*db.DB, error) {
var (
driver = "sqlite3"
config = ":memory:?_foreign_keys=1"
maxConnections = 0
)
if os.Getenv("DRONE_DATABASE_DRIVER") != "" {
driver = os.Getenv("DRONE_DATABASE_DRIVER")
config = os.Getenv("DRONE_DATABASE_DATASOURCE")
maxConnectionsString := os.Getenv("DRONE_DATABASE_MAX_CONNECTIONS")
maxConnections, _ = strconv.Atoi(maxConnectionsString)
}
return db.Connect(driver, config, maxConnections)
}
// Reset resets the database state.
func Reset(d *db.DB) {
d.Lock(func(tx db.Execer, _ db.Binder) error {
tx.Exec("DELETE FROM cron")
tx.Exec("DELETE FROM cards")
tx.Exec("DELETE FROM logs")
tx.Exec("DELETE FROM steps")
tx.Exec("DELETE FROM stages")
tx.Exec("DELETE FROM latest")
tx.Exec("DELETE FROM builds")
tx.Exec("DELETE FROM perms")
tx.Exec("DELETE FROM repos")
tx.Exec("DELETE FROM users")
tx.Exec("DELETE FROM templates")
tx.Exec("DELETE FROM orgsecrets")
return nil
})
}
// Disconnect closes the database connection.
func Disconnect(d *db.DB) error {
return d.Close()
}
| ["\"DRONE_DATABASE_DRIVER\"", "\"DRONE_DATABASE_DRIVER\"", "\"DRONE_DATABASE_DATASOURCE\"", "\"DRONE_DATABASE_MAX_CONNECTIONS\""] | [] | ["DRONE_DATABASE_MAX_CONNECTIONS", "DRONE_DATABASE_DATASOURCE", "DRONE_DATABASE_DRIVER"] | [] | ["DRONE_DATABASE_MAX_CONNECTIONS", "DRONE_DATABASE_DATASOURCE", "DRONE_DATABASE_DRIVER"] | go | 3 | 0 |
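A minimal sketch of how Connect, Reset and Disconnect above are typically combined in a store test. The store being exercised is deliberately left as a placeholder comment, since that part is not shown in the file.

// dbtest_usage_test.go (illustrative)
package dbtest_test

import (
	"testing"

	"github.com/drone/drone/store/shared/db/dbtest"
)

func TestWithDatabase(t *testing.T) {
	conn, err := dbtest.Connect()
	if err != nil {
		t.Fatalf("cannot connect to test database: %v", err)
	}
	// Clear all tables and close the connection when the test finishes
	// (deferred calls run in reverse order: Reset first, then Disconnect).
	defer dbtest.Disconnect(conn)
	defer dbtest.Reset(conn)

	// Exercise the store under test with conn here, for example:
	//   users := user.New(conn)   // placeholder store constructor, not part of the file above
	//   _, err = users.Find(ctx, 1)
}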
testdata/e2e/e2e_test.go | package test
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
cadvisor "github.com/google/cadvisor/info/v1"
v1 "k8s.io/api/core/v1"
statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
)
func TestMetricSample(t *testing.T) {
const stress = "stress"
wd := os.Getenv("WORKING_DIR")
kv := os.Getenv("KUBERNETES_VERSION")
versionParts := strings.Split(kv, ".")
minorVersion, err := strconv.Atoi(versionParts[1])
if err != nil {
t.Errorf("Unable to determine kubernetes minor version: %s", err)
}
parsedK8sLists := &ParsedK8sLists{
NodeSummaries: make(map[string]statsapi.Summary),
BaselineNodeSummaries: make(map[string]statsapi.Summary),
NodeContainers: make(map[string]map[string]cadvisor.ContainerInfo),
BaselineNodeContainers: make(map[string]map[string]cadvisor.ContainerInfo),
CadvisorPrometheus: make(map[string]map[string]cadvisor.ContainerInfo),
BaselineCadvisorPrometheus: make(map[string]map[string]cadvisor.ContainerInfo),
}
t.Parallel()
t.Run("ensure that a metrics sample has expected files for cluster version", func(t *testing.T) {
seen := make(map[string]bool, len(knownFileTypes))
err := filepath.Walk(wd, func(path string, info os.FileInfo, e error) error {
if e != nil {
return e
}
// check if it is a regular file (not dir)
if info.Mode().IsRegular() {
n := info.Name()
ft := toAgentFileType(n)
seen[toAgentFileType(ft)] = true
if unmarshalFn, ok := knownFileTypes[ft]; ok {
t.Logf("Processing: %v", n)
f, err := ioutil.ReadFile(path)
if err != nil {
return err
}
if err := unmarshalFn(path, f, parsedK8sLists); err != nil {
return err
}
}
}
return nil
})
if err != nil {
t.Fatalf("Failed: %v", err)
}
err = checkForRequiredFiles(seen, minorVersion)
if err != nil {
t.Fatalf("Failed: %v", err)
}
})
t.Run("ensure that a metrics sample contains the cloudability namespace", func(t *testing.T) {
for _, ns := range parsedK8sLists.Namespaces.Items {
if ns.Name == "cloudability" {
return
}
}
t.Error("Namespace cloudability not found in metric sample")
})
t.Run("ensure that a metrics sample has expected pod data", func(t *testing.T) {
for _, po := range parsedK8sLists.Pods.Items {
if strings.HasPrefix(po.Name, stress) && po.Status.QOSClass == v1.PodQOSBestEffort {
return
}
}
t.Error("pod stress not found in metric sample")
})
t.Run("ensure that a metrics sample has expected containers summary data", func(t *testing.T) {
for _, ns := range parsedK8sLists.NodeSummaries {
for _, pf := range ns.Pods {
if strings.HasPrefix(pf.PodRef.Name, stress) && pf.PodRef.Namespace == stress && pf.CPU.UsageNanoCores != nil {
return
}
}
}
t.Error("pod summary data not found in metric sample")
})
// 2020.9.10 - TODO: Remove this test once we stop supporting minor versions below 18
t.Run("ensure that a metrics sample has expected containers stat data", func(t *testing.T) {
if minorVersion < 18 {
for _, nc := range parsedK8sLists.NodeContainers {
for _, s := range nc {
if strings.HasPrefix(s.Name, "/kubepods/besteffort/pod") && s.Namespace == "containerd" && strings.HasPrefix(
s.Spec.Labels["io.kubernetes.pod.name"], stress) {
return
}
}
}
t.Error("pod container stat data not found in metric sample")
}
return
})
t.Run("ensure that a metrics sample has expected cadvisor prometheus data", func(t *testing.T) {
for _, containerInfos := range parsedK8sLists.CadvisorPrometheus {
for _, containerInfo := range containerInfos {
if minorVersion >= 21 {
if strings.HasPrefix(containerInfo.Name, "/kubelet/kubepods/besteffort/pod") &&
containerInfo.Namespace == stress &&
strings.HasPrefix(containerInfo.Spec.Labels["io.kubernetes.pod.name"], stress) {
return
}
} else {
if strings.HasPrefix(containerInfo.Name, "/kubepods/besteffort/pod") &&
containerInfo.Namespace == stress &&
strings.HasPrefix(containerInfo.Spec.Labels["io.kubernetes.pod.name"], stress) {
return
}
}
}
}
t.Error("pod cadvisor prometheus data not found in metric sample")
})
}
| ["\"WORKING_DIR\"", "\"KUBERNETES_VERSION\""] | [] | ["WORKING_DIR", "KUBERNETES_VERSION"] | [] | ["WORKING_DIR", "KUBERNETES_VERSION"] | go | 2 | 0 |
pkg/observability/observability.go | // Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package observability provides tools for working with open census.
package observability
import (
"context"
"os"
"strconv"
"time"
"github.com/google/exposure-notifications-server/pkg/logging"
"github.com/google/exposure-notifications-verification-server/pkg/buildinfo"
"go.opencensus.io/stats"
"go.opencensus.io/tag"
)
const (
MetricRoot = "en-verification-server"
)
var (
BuildIDTagKey = tag.MustNewKey("build_id")
BuildTagTagKey = tag.MustNewKey("build_tag")
KnativeServiceTagKey = tag.MustNewKey("k_service")
KnativeRevisionTagKey = tag.MustNewKey("k_revision")
KnativeConfigurationTagKey = tag.MustNewKey("k_configuration")
RealmTagKey = tag.MustNewKey("realm")
knativeService = os.Getenv("K_SERVICE")
knativeRevision = os.Getenv("K_REVISION")
knativeConfiguration = os.Getenv("K_CONFIGURATION")
	// blameTagKey indicates who to blame for the API request failure.
// NONE: no failure
// CLIENT: the client is at fault (e.g. invalid request)
// SERVER: the server is at fault
// EXTERNAL: some third party is at fault
// UNKNOWN: for everything else
blameTagKey = tag.MustNewKey("blame")
	// ResultTagKey contains free-format text describing the result of the
	// request, preferably ALL CAPS WITH UNDERSCORES.
	// OK indicates a successful request.
	// You can loosely base this string on
// https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
// but feel free to use any text as long as it's easy to filter.
ResultTagKey = tag.MustNewKey("result")
)
var (
	// BlameNone indicates no API failure
	BlameNone = tag.Upsert(blameTagKey, "NONE")
	// BlameClient indicates the client is at fault (e.g. invalid request)
	BlameClient = tag.Upsert(blameTagKey, "CLIENT")
	// BlameServer indicates the server is at fault
	BlameServer = tag.Upsert(blameTagKey, "SERVER")
	// BlameExternal indicates some third party is at fault
	BlameExternal = tag.Upsert(blameTagKey, "EXTERNAL")
// BlameUnknown can be used for everything else
BlameUnknown = tag.Upsert(blameTagKey, "UNKNOWN")
)
// ResultOK add a tag indicating the API call is a success.
func ResultOK() tag.Mutator {
return tag.Upsert(ResultTagKey, "OK")
}
// ResultNotOK add a tag indicating the API call is a failure.
func ResultNotOK() tag.Mutator {
return ResultError("NOT_OK")
}
// ResultError add a tag with the given string as the result.
func ResultError(result string) tag.Mutator {
return tag.Upsert(ResultTagKey, result)
}
// CommonTagKeys returns the slice of common tag keys that should be used in all
// views.
func CommonTagKeys() []tag.Key {
return []tag.Key{
BuildIDTagKey,
BuildTagTagKey,
RealmTagKey,
}
}
// APITagKeys returns a slice of tag.Key with the common tag keys plus additional
// API-specific tag keys.
func APITagKeys() []tag.Key {
return append(CommonTagKeys(), blameTagKey, ResultTagKey)
}
// WithRealmID creates a new context with the realm id attached to the
// observability context.
func WithRealmID(octx context.Context, realmID uint) context.Context {
realmIDStr := strconv.FormatUint(uint64(realmID), 10)
ctx, err := tag.New(octx, tag.Upsert(RealmTagKey, realmIDStr))
if err != nil {
logger := logging.FromContext(octx).Named("observability.WithRealmID")
logger.Errorw("failed to upsert realm on observability context",
"error", err,
"realm_id", realmIDStr)
return octx
}
return ctx
}
// WithBuildInfo creates a new context with the build and revision info attached
// to the observability context.
func WithBuildInfo(octx context.Context) context.Context {
tags := make([]tag.Mutator, 0, 5)
tags = append(tags, tag.Upsert(BuildIDTagKey, buildinfo.BuildID))
tags = append(tags, tag.Upsert(BuildTagTagKey, buildinfo.BuildTag))
if knativeService != "" {
tags = append(tags, tag.Upsert(KnativeServiceTagKey, knativeService))
}
if knativeRevision != "" {
tags = append(tags, tag.Upsert(KnativeRevisionTagKey, knativeRevision))
}
if knativeConfiguration != "" {
tags = append(tags, tag.Upsert(KnativeConfigurationTagKey, knativeConfiguration))
}
ctx, err := tag.New(octx, tags...)
if err != nil {
logger := logging.FromContext(octx).Named("observability.WithBuildInfo")
logger.Errorw("failed to upsert buildinfo on observability context", "error", err)
return octx
}
return ctx
}
// RecordLatency calculates and records the latency.
// Usage example:
// func foo() {
// defer RecordLatency(&ctx, time.Now(), metric, tag1, tag2)
// // remaining of the function body.
// }
func RecordLatency(ctx *context.Context, start time.Time, m *stats.Float64Measure, mutators ...*tag.Mutator) {
var additionalMutators []tag.Mutator
for _, t := range mutators {
additionalMutators = append(additionalMutators, *t)
}
	// Calculate the millisecond value as a float64, since
	// time.Duration.Milliseconds() returns an integer.
latency := float64(time.Since(start)) / float64(time.Millisecond)
stats.RecordWithTags(*ctx, additionalMutators, m.M(latency))
}
| ["\"K_SERVICE\"", "\"K_REVISION\"", "\"K_CONFIGURATION\""] | [] | ["K_REVISION", "K_CONFIGURATION", "K_SERVICE"] | [] | ["K_REVISION", "K_CONFIGURATION", "K_SERVICE"] | go | 3 | 0 |
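A minimal sketch of how a caller might combine WithBuildInfo, WithRealmID and RecordLatency from the package above. The measure name, realm ID and handler are illustrative, and the opencensus view registration that normally accompanies a new measure is omitted for brevity.

// observability_sketch.go (illustrative)
package main

import (
	"context"
	"time"

	"go.opencensus.io/stats"

	"github.com/google/exposure-notifications-verification-server/pkg/observability"
)

// requestLatency is an illustrative measure; real code also registers a view for it.
var requestLatency = stats.Float64(
	observability.MetricRoot+"/example/request_latency",
	"Latency of the example request in milliseconds",
	stats.UnitMilliseconds,
)

func handleRequest(ctx context.Context, realmID uint) {
	// Attach build and realm tags so recorded metrics can be filtered later.
	ctx = observability.WithBuildInfo(ctx)
	ctx = observability.WithRealmID(ctx, realmID)

	// Record how long the rest of the function takes, tagged with the result.
	result := observability.ResultOK()
	defer observability.RecordLatency(&ctx, time.Now(), requestLatency, &result)

	// ... request handling would go here ...
	time.Sleep(10 * time.Millisecond)
}

func main() {
	handleRequest(context.Background(), 1)
}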
airflow/providers/sendgrid/utils/emailer.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Airflow module for emailer using sendgrid
"""
import base64
import logging
import mimetypes
import os
from typing import Dict, Iterable, Optional, Union
import sendgrid
from sendgrid.helpers.mail import (
Attachment,
Category,
Content,
CustomArg,
Email,
Mail,
MailSettings,
Personalization,
SandBoxMode,
)
from airflow.utils.email import get_email_address_list
log = logging.getLogger(__name__)
AddressesType = Union[str, Iterable[str]]
def send_email(
to: AddressesType,
subject: str,
html_content: str,
files: Optional[AddressesType] = None,
cc: Optional[AddressesType] = None,
bcc: Optional[AddressesType] = None,
sandbox_mode: bool = False,
**kwargs,
) -> None:
"""
Send an email with html content using `Sendgrid <https://sendgrid.com/>`__.
.. note::
For more information, see :ref:`email-configuration-sendgrid`
"""
if files is None:
files = []
mail = Mail()
from_email = kwargs.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM')
from_name = kwargs.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER')
mail.from_email = Email(from_email, from_name)
mail.subject = subject
mail.mail_settings = MailSettings()
if sandbox_mode:
mail.mail_settings.sandbox_mode = SandBoxMode(enable=True)
# Add the recipient list of to emails.
personalization = Personalization()
to = get_email_address_list(to)
for to_address in to:
personalization.add_to(Email(to_address))
if cc:
cc = get_email_address_list(cc)
for cc_address in cc:
personalization.add_cc(Email(cc_address))
if bcc:
bcc = get_email_address_list(bcc)
for bcc_address in bcc:
personalization.add_bcc(Email(bcc_address))
# Add custom_args to personalization if present
pers_custom_args = kwargs.get('personalization_custom_args', None)
if isinstance(pers_custom_args, dict):
for key in pers_custom_args.keys():
personalization.add_custom_arg(CustomArg(key, pers_custom_args[key]))
mail.add_personalization(personalization)
mail.add_content(Content('text/html', html_content))
categories = kwargs.get('categories', [])
for cat in categories:
mail.add_category(Category(cat))
# Add email attachment.
for fname in files:
basename = os.path.basename(fname)
with open(fname, "rb") as file:
content = base64.b64encode(file.read()).decode('utf-8')
attachment = Attachment(
file_content=content,
file_type=mimetypes.guess_type(basename)[0],
file_name=basename,
disposition="attachment",
content_id=f"<{basename}>",
)
mail.add_attachment(attachment)
_post_sendgrid_mail(mail.get())
def _post_sendgrid_mail(mail_data: Dict) -> None:
sendgrid_client = sendgrid.SendGridAPIClient(api_key=os.environ.get('SENDGRID_API_KEY'))
response = sendgrid_client.client.mail.send.post(request_body=mail_data)
# 2xx status code.
if 200 <= response.status_code < 300:
log.info(
'Email with subject %s is successfully sent to recipients: %s',
mail_data['subject'],
mail_data['personalizations'],
)
else:
log.error(
'Failed to send out email with subject %s, status code: %s',
mail_data['subject'],
response.status_code,
)
| [] | [] | ["SENDGRID_MAIL_FROM", "SENDGRID_API_KEY", "SENDGRID_MAIL_SENDER"] | [] | ["SENDGRID_MAIL_FROM", "SENDGRID_API_KEY", "SENDGRID_MAIL_SENDER"] | python | 3 | 0 |
cmd/sauron/main.go | package main
import (
"log"
"os"
"github.com/Bowbaq/belt"
"github.com/Bowbaq/sauron"
"github.com/Bowbaq/sauron/flagx"
"github.com/Bowbaq/sauron/model"
)
var (
// Version of the CLI, filled in at compile time
Version string
opts struct {
sauron.Options
model.WatchOptions `group:"github" namespace:"github"`
}
)
func init() {
flagx.MustParse(&opts)
if os.Getenv("DEBUG") != "" {
belt.Verbose = true
}
}
func main() {
s := sauron.New(opts.Options)
if err := s.Watch(opts.WatchOptions); err != nil {
log.Fatalf("sauron-cli: Error retrieving latest update: %v", err)
}
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 |
control-plane/pkg/reconciler/broker/controller.go | /*
* Copyright 2020 The Knative Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package broker
import (
"context"
"fmt"
"github.com/Shopify/sarama"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
eventing "knative.dev/eventing/pkg/apis/eventing/v1beta1"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/resolver"
brokerinformer "knative.dev/eventing/pkg/client/injection/informers/eventing/v1beta1/broker"
brokerreconciler "knative.dev/eventing/pkg/client/injection/reconciler/eventing/v1beta1/broker"
podinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod"
"knative.dev/eventing-kafka-broker/control-plane/pkg/reconciler/base"
"knative.dev/eventing-kafka-broker/control-plane/pkg/reconciler/kafka"
)
const (
DefaultTopicNumPartitionConfigMapKey = "default.topic.partitions"
DefaultTopicReplicationFactorConfigMapKey = "default.topic.replication.factor"
BootstrapServersConfigMapKey = "bootstrap.servers"
DefaultNumPartitions = 10
DefaultReplicationFactor = 1
)
var NewClusterAdmin = sarama.NewClusterAdmin
func NewController(ctx context.Context, watcher configmap.Watcher, configs *Configs) *controller.Impl {
eventing.RegisterAlternateBrokerConditionSet(ConditionSet)
reconciler := &Reconciler{
Reconciler: &base.Reconciler{
KubeClient: kubeclient.Get(ctx),
PodLister: podinformer.Get(ctx).Lister(),
DataPlaneConfigMapNamespace: configs.DataPlaneConfigMapNamespace,
DataPlaneConfigMapName: configs.DataPlaneConfigMapName,
DataPlaneConfigFormat: configs.DataPlaneConfigFormat,
SystemNamespace: configs.SystemNamespace,
},
KafkaDefaultTopicDetails: sarama.TopicDetail{
NumPartitions: DefaultNumPartitions,
ReplicationFactor: DefaultReplicationFactor,
},
Configs: configs,
}
if configs.BootstrapServers != "" {
_ = reconciler.SetBootstrapServers(configs.BootstrapServers)
}
impl := brokerreconciler.NewImpl(ctx, reconciler, kafka.BrokerClass)
reconciler.Resolver = resolver.NewURIResolver(ctx, impl.EnqueueKey)
brokerInformer := brokerinformer.Get(ctx)
brokerInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
FilterFunc: kafka.BrokerClassFilter(),
Handler: controller.HandleAll(impl.Enqueue),
})
cm, err := reconciler.KubeClient.CoreV1().ConfigMaps(configs.SystemNamespace).Get(configs.GeneralConfigMapName, metav1.GetOptions{})
if err != nil {
panic(fmt.Errorf("failed to get config map %s/%s: %w", configs.SystemNamespace, configs.GeneralConfigMapName, err))
}
reconciler.ConfigMapUpdated(ctx)(cm)
watcher.Watch(configs.GeneralConfigMapName, reconciler.ConfigMapUpdated(ctx))
return impl
}
| [] | [] | [] | [] | [] | go | null | null | null |
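A minimal sketch of how NewController above might be wired into a Knative shared main. The component name and the empty Configs value are placeholders; a real binary populates Configs from flags or the environment before starting the controller.

// main_sketch.go (illustrative)
package main

import (
	"context"

	"knative.dev/pkg/configmap"
	"knative.dev/pkg/controller"
	"knative.dev/pkg/injection/sharedmain"

	"knative.dev/eventing-kafka-broker/control-plane/pkg/reconciler/broker"
)

func main() {
	// Placeholder configuration; real deployments fill these fields from flags.
	configs := &broker.Configs{}

	sharedmain.Main("kafka-broker-controller",
		func(ctx context.Context, watcher configmap.Watcher) *controller.Impl {
			return broker.NewController(ctx, watcher, configs)
		},
	)
}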