filename (string, length 4-198) | content (string, length 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 distinct value) | constargjson (string, length 2-3.9k) | lang (string, 3 distinct values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (string, 1 distinct value) |
---|---|---|---|---|---|---|---|---|---|---|
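Each row below pairs a source file (`content`) with the environment variables detected in it: `constarg`/`constargjson` hold literal variable names, `variablearg`/`variableargjson` hold names reached through variables, and `lang` records the file's language. As a minimal sketch of how those JSON-encoded columns can be read back (the dataset's loading path is not given here, so a single row is hard-coded from the first `conf.py` entry below):

```python
import json

# Hypothetical row dict, shaped like the table header above; values are copied
# from the docs/releasenotes/conf.py row. A real loader would supply these
# fields per record.
row = {
    "filename": "docs/releasenotes/conf.py",
    "lang": "python",
    "constargjson": '["DOCS_USE_LOCAL_RBWEBSITE"]',  # JSON list of constant env-var names
    "variableargjson": "[]",                         # env vars referenced via variables (none here)
    "constargcount": 1.0,
    "variableargcount": 0.0,
}

const_args = json.loads(row["constargjson"])
variable_args = json.loads(row["variableargjson"])

# The count columns should agree with the decoded lists.
assert len(const_args) == int(row["constargcount"])
assert len(variable_args) == int(row["variableargcount"])

print(f"{row['filename']} ({row['lang']}): constant env vars = {const_args}, "
      f"variable env vars = {variable_args}")
```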
docs/releasenotes/conf.py
|
# -*- coding: utf-8 -*-
#
# Release Notes build configuration file, created by
# sphinx-quickstart on Thu Feb 12 02:10:34 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
import os
import sys
from datetime import datetime
sys.path.append(os.path.abspath('_ext'))
# Set this up to parse Django-driven code.
sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..', '..', '..')))
sys.path.insert(0, os.path.abspath(os.path.join(__file__, '..', '..', '..',
'..', 'djblets')))
sys.path.insert(0, os.path.dirname(__file__))
import reviewboard
from reviewboard.dependencies import django_doc_major_version
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
'beanbag_docutils.sphinx.ext.django_utils',
'beanbag_docutils.sphinx.ext.extlinks',
'beanbag_docutils.sphinx.ext.http_role',
'beanbag_docutils.sphinx.ext.intersphinx_utils',
'beanbag_docutils.sphinx.ext.retina_images',
'extralinks',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Release Notes'
copyright = '2009-%s, Beanbag, Inc.' % datetime.now().year
bugtracker_url = 'https://www.reviewboard.org/bugs/%s'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join([str(i) for i in reviewboard.__version_info__[:2]])
# The full version, including alpha/beta/rc tags.
release = reviewboard.get_version_string()
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
suppress_warnings = ['ref.option']
# Options for HTML output
# -----------------------
html_theme = 'classic'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'classic.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Release Notes"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ReleaseNotes'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ReleaseNotes.tex', 'Release Notes', 'Beanbag, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Check whether reviewboard.org intersphinx lookups should use the local
# server.
if os.getenv('DOCS_USE_LOCAL_RBWEBSITE') == '1':
rbwebsite_url = 'http://localhost:8081'
else:
rbwebsite_url = 'https://www.reviewboard.org'
# Add references for intersphinx and custom roles.
django_doc_base_url_fmt = 'http://django.readthedocs.io/en/%s.x/'
django_doc_base_url = django_doc_base_url_fmt % django_doc_major_version
github_djblets_src_base_url = 'https://github.com/djblets/djblets/blob/'
github_rb_src_base_url = 'https://github.com/reviewboard/reviewboard/blob/'
intersphinx_mapping = {
'django': (django_doc_base_url, None),
'django1.6': (django_doc_base_url_fmt % '1.6', None),
'django1.11': (django_doc_base_url_fmt % '1.11', None),
'djblets0.9': ('%s/docs/djblets/0.9/' % rbwebsite_url, None),
'djblets0.10': ('%s/docs/djblets/1.0/' % rbwebsite_url, None),
'djblets1.0': ('%s/docs/djblets/1.0/' % rbwebsite_url, None),
'djblets2.0': ('%s/docs/djblets/2.0/' % rbwebsite_url, None),
'djblets2.x': ('%s/docs/djblets/2.x/' % rbwebsite_url, None),
'python': ('https://docs.python.org/2.7', None),
'python2': ('https://docs.python.org/2.7', None),
'python3': ('https://docs.python.org/3', None),
'rbt0.6': ('%s/docs/rbtools/0.6/' % rbwebsite_url, None),
'rbt0.7': ('%s/docs/rbtools/0.7/' % rbwebsite_url, None),
'rbt-latest': ('%s/docs/rbtools/latest/' % rbwebsite_url, None),
'rb1.7': ('%s/docs/manual/1.7/' % rbwebsite_url, None),
'rb2.0': ('%s/docs/manual/2.0/' % rbwebsite_url, None),
'rb2.5': ('%s/docs/manual/2.5/' % rbwebsite_url, None),
'rb3.0': ('%s/docs/manual/3.0/' % rbwebsite_url, None),
'rb4.0': ('%s/docs/manual/4.0/' % rbwebsite_url, None),
'rb-dev': ('%s/docs/manual/dev/' % rbwebsite_url, None),
}
extlinks = {
'djangodoc': ('%s%%s.html' % django_doc_base_url, None),
'backbonejs': ('http://backbonejs.org/#%s', 'Backbone.'),
'pypi': ('https://pypi.org/project/%s/', ''),
'rbintegration': ('%s/integrations/%%s' % rbwebsite_url, ''),
'rbintegrations-relnotes': (
'%s/docs/releasenotes/rbintegrations/%%s/' % rbwebsite_url,
'rbintegrations '),
'rbsrc-4.0.x': ('%srelease-4.0.x/%%s' % github_rb_src_base_url, ''),
'djbletssrc-2.0.x': ('%srelease-2.0.x/%%s' % github_djblets_src_base_url,
''),
}
| [] | [] | ["DOCS_USE_LOCAL_RBWEBSITE"] | [] | ["DOCS_USE_LOCAL_RBWEBSITE"] | python | 1 | 0 | |
setup.py
|
#! /usr/bin/env python3
import os
import re
import sys
import sysconfig
import platform
import subprocess
from distutils.version import LooseVersion
from setuptools import setup, Extension, find_packages
from setuptools.command.build_ext import build_ext
from setuptools.command.test import test as TestCommand
from shutil import copyfile, copymode
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable]
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
cfg.upper(),
extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args,
cwd=self.build_temp)
# Copy *_test file to tests directory
#test_bin = os.path.join(self.build_temp, 'python_cpp_example_test')
#self.copy_test_file(test_bin)
print() # Add an empty line for cleaner output
def copy_test_file(self, src_file):
'''
Copy ``src_file`` to ``dest_file`` ensuring parent directory exists.
By default, messages like `creating directory /path/to/package` and
`copying directory /src/path/to/package -> path/to/package` are displayed on standard output. Adapted from scikit-build.
'''
# Create directory if needed
dest_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'tests', 'bin')
if dest_dir != "" and not os.path.exists(dest_dir):
print("creating directory {}".format(dest_dir))
os.makedirs(dest_dir)
# Copy file
dest_file = os.path.join(dest_dir, os.path.basename(src_file))
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
requirements = [ 'cmake>=2.8.12', ]
setup(
name='pyransac',
version='0.1',
author='Ondra Chum, Dmytro Mishkin',
author_email='[email protected]',
description='RANSAC with bells and whistles for H and F estimation',
long_description='',
packages=find_packages('src'),
package_dir={'':'src'},
ext_modules=[CMakeExtension('pyransac/pyransac')],
cmdclass=dict(build_ext=CMakeBuild),
#test_suite='tests',
zip_safe=False,
install_requires=requirements,
)
| [] | [] | [] | [] | [] | python | 0 | 0 | |
project/settings.py
|
# -*- coding:utf-8 -*-
import os
import environ
env = environ.Env(
DEBUG=(bool, False),
CACHE_URL=(str, 'locmemcache://'),
EMAIL_URL=(str, 'consolemail://')
)
environ.Env.read_env()
DEBUG = env('DEBUG')
SECRET_KEY = env('SECRET_KEY')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ADMINS = (
('Demo Classified Admin', os.environ.get('ADMIN_EMAIL', '[email protected]')),
)
MANAGERS = ADMINS
# Expected comma separated string with the ALLOWED_HOSTS list
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', '127.0.0.1,.herokuapp.com').split(',')
DATABASES = {
'default': env.db(),
}
CACHES = {
'default': env.cache()
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: '/home/media/media.lawrence.com/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: 'http://media.lawrence.com/media/', 'http://example.com/media/'
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' 'static/' subdirectories and in STATICFILES_DIRS.
# Example: '/home/media/media.lawrence.com/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# URL prefix for static files.
# Example: 'http://media.lawrence.com/static/'
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: 'http://foo.com/static/admin/', '/static/admin/'.
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like '/home/html/static' or 'C:/www/django/static'.
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
MIDDLEWARE = (
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
AUTHENTICATION_BACKENDS = (
'social_core.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
# Social Auth context processors
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
# Django Classified context processors
'django_classified.context_processors.common_values'
],
'debug': True
},
},
]
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sitemaps',
'django.contrib.sites',
'django.contrib.staticfiles',
'bootstrapform',
'sorl.thumbnail',
'django_classified',
'social_django',
'storages',
'demo',
]
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login/'
LOGOUT_REDIRECT_URL = '/'
DCF_SITE_NAME = 'Django Classified Demo'
# You need to obtain Facebook Keys
# Check docs for more info here:
# https://python-social-auth.readthedocs.io/en/latest/backends/facebook.html
SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get('SOCIAL_AUTH_FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get('SOCIAL_AUTH_FACEBOOK_SECRET')
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id, name, email'
}
SOCIAL_AUTH_EMAIL_FORM_HTML = 'demo/email_signup.html'
SOCIAL_AUTH_EMAIL_VALIDATION_URL = '/email-sent/'
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.mail.mail_validation',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.debug.debug',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
'social_core.pipeline.debug.debug'
)
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL', '[email protected]')
# Configure email Backend via EMAIL_URL
vars().update(env.email_url())
DCF_CURRENCY = 'GBP'
DCF_DISPLAY_EMPTY_GROUPS = True
GOOGLE_ANALYTICS_PROPERTY_ID = os.environ.get('GOOGLE_ANALYTICS_PROPERTY_ID')
AWS_S3_ENDPOINT_URL = os.environ.get('AWS_S3_ENDPOINT_URL')
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_QUERYSTRING_AUTH = False
AWS_DEFAULT_ACL = 'public-read'
| [] | [] | ["ALLOWED_HOSTS", "SOCIAL_AUTH_FACEBOOK_SECRET", "AWS_SECRET_ACCESS_KEY", "GOOGLE_ANALYTICS_PROPERTY_ID", "AWS_S3_ENDPOINT_URL", "ADMIN_EMAIL", "AWS_STORAGE_BUCKET_NAME", "DEFAULT_FROM_EMAIL", "AWS_ACCESS_KEY_ID", "SOCIAL_AUTH_FACEBOOK_KEY"] | [] | ["ALLOWED_HOSTS", "SOCIAL_AUTH_FACEBOOK_SECRET", "AWS_SECRET_ACCESS_KEY", "GOOGLE_ANALYTICS_PROPERTY_ID", "AWS_S3_ENDPOINT_URL", "ADMIN_EMAIL", "AWS_STORAGE_BUCKET_NAME", "DEFAULT_FROM_EMAIL", "AWS_ACCESS_KEY_ID", "SOCIAL_AUTH_FACEBOOK_KEY"] | python | 10 | 0 | |
main.go
|
package main
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"os/signal"
"path/filepath"
goruntime "runtime"
"syscall"
"time"
"github.com/ghouscht/metrics-server-exporter/internal/exporter"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
// load all auth plugins
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
//nolint:gochecknoglobals
var (
// set by goreleaser on build
version, date, commit string = "master", "?", "?"
// name of the binary
binaryName = filepath.Base(os.Args[0])
)
//nolint:gochecknoglobals
var (
inCluster = flag.Bool("in-cluster", true, "Run with kubernetes in-cluster config")
debug = flag.Bool("debug", false, "Enable debug logging")
listen = flag.String("listen", ":8080", "Address where the server should listen for requests")
ver = flag.Bool("version", false, "Print version and exit")
nodeScrapeInterval = flag.Duration("node-scrape-interval", time.Hour*1, "Interval at which to scrape node capacity/allocatable resources")
metricsScrapeInterval = flag.Duration("metrics-scrape-interval", time.Second*30, "Interval at which to scrape metrics-server metrics (node and pod metrics)")
)
func main() {
var (
// default to info loglevel
logLevel zapcore.Level = zap.InfoLevel
)
flag.Parse()
if *debug {
logLevel = zap.DebugLevel
}
if *ver {
printVersion()
return
}
// zap setup
atom := zap.NewAtomicLevelAt(logLevel)
config := zap.NewProductionConfig()
config.DisableStacktrace = true
config.Sampling = nil
config.Encoding = "console"
config.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
config.EncoderConfig.EncodeDuration = zapcore.StringDurationEncoder
config.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
config.Level = atom
zl, err := config.Build()
if err != nil {
panic(fmt.Sprintf("zap logger creation: %s", err))
}
l := zl.Sugar()
l.Infow("starting server", "version", version, "listen", *listen, "loglevel", atom.Level())
runServer(l, k8sRestConfig(l))
}
func k8sRestConfig(l *zap.SugaredLogger) *rest.Config {
var cfg *rest.Config
if *inCluster {
var err error
cfg, err = rest.InClusterConfig()
if err != nil {
l.Fatalf("in-cluster config creation: %s", err)
}
} else {
var err error
cfg, err = clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config"))
if err != nil {
l.Fatalf("config creation: %s", err)
}
}
cfg.UserAgent = fmt.Sprintf("%s/%s (%s/%s) ", binaryName, version, goruntime.GOOS, goruntime.GOARCH)
return cfg
}
func runServer(l *zap.SugaredLogger, cfg *rest.Config) {
stop := make(chan os.Signal, 1)
signal.Notify(stop,
syscall.SIGINT,
syscall.SIGTERM,
)
ctx, cancel := context.WithCancel(context.Background())
handler, err := exporter.New(ctx, cfg,
exporter.WithNodeScrapeInterval(*nodeScrapeInterval),
exporter.WithMetricsScrapeInterval(*metricsScrapeInterval),
exporter.WithLogger(l),
)
if err != nil {
l.Fatalf("creating exporter: %s", err)
}
server := http.Server{
Addr: *listen,
Handler: handler,
}
// as soon as a signal is received cancel the context to allow a graceful stop of all other
// components
go func() {
sig := <-stop
cancel()
l.Infof("stopping execution, received signal %q", sig)
ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
defer cancel()
if err := server.Shutdown(ctx); err != nil {
l.Fatalf("shutdown server: %s", err)
}
l.Debug("gracefully stopped server")
}()
if err := server.ListenAndServe(); err != nil {
if err != http.ErrServerClosed {
l.Fatalf("server: %s", err)
}
}
}
func printVersion() {
fmt.Printf("%s, version %s (revision: %s)\n\tbuild date: %s\n\tgo version: %s\n",
binaryName,
version,
commit,
date,
goruntime.Version(),
)
}
| ["\"HOME\""] | [] | ["HOME"] | [] | ["HOME"] | go | 1 | 0 | |
py/toolkit/print_repo_status.py
|
#!/usr/bin/env python3
#
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Print the status of factory-related repositories.
This is used to generate the file containing the status of each repository
in the factory toolkit.
"""
import argparse
import os
from cros.factory.utils.cros_board_utils import BuildBoard
from cros.factory.utils.process_utils import CheckOutput
NUM_COMMITS_PER_REPO = 50
SRC = os.path.join(os.environ['CROS_WORKON_SRCROOT'], 'src')
MERGED_MSG = ['Reviewed-on', 'Marking set of ebuilds as stable']
def GitLog(repo, skip=0, max_count=NUM_COMMITS_PER_REPO, extra_args=None):
cmd = ['git', 'log', '--max-count', '%d' % max_count, '--skip', '%d' % skip]
if extra_args:
cmd.extend(extra_args)
return CheckOutput(cmd, cwd=repo).strip()
def GetCommitList(repo, skip=0, max_count=NUM_COMMITS_PER_REPO):
if not max_count:
return []
return GitLog(repo, skip=skip, max_count=max_count,
extra_args=['--oneline']).split('\n')
def GetUncommittedFiles(repo):
files = CheckOutput(['git', 'status', '--porcelain'], cwd=repo)
if not files:
return []
return files.strip().split('\n')
def GetUnmergedCommits(repo):
for idx in range(NUM_COMMITS_PER_REPO):
commit_log = GitLog(repo, skip=idx, max_count=1)
for msg in MERGED_MSG:
if msg in commit_log:
return GetCommitList(repo, skip=0, max_count=idx)
return GetCommitList(repo, skip=0, max_count=NUM_COMMITS_PER_REPO)
def main():
parser = argparse.ArgumentParser(
description='Prints the status of factory-related repositories.')
parser.add_argument('--board', '-b', required=True,
help='The board to check overlay repositories for.')
args = parser.parse_args()
repos = ['platform/factory', BuildBoard(args.board).factory_board_files]
for repo_path in repos:
if not repo_path:
raise ValueError(
'No overlay available for %s! Please check if the board is correct '
'and you have done `setup_board --board %s`.'
% (args.board, args.board))
print('Repository %s' % repo_path)
repo_full_path = os.path.join(SRC, repo_path)
if not os.path.exists(repo_full_path):
print(' >>> Repository does not exist')
continue
uncommitted = GetUncommittedFiles(repo_full_path)
if uncommitted:
print(' >>> Repository contains uncommitted changes:')
for changed_file in uncommitted:
print('\t%s' % changed_file)
unmerged = GetUnmergedCommits(repo_full_path)
if unmerged:
print(' >>> Repository contains %d unmerged commits:' % len(unmerged))
for commit in unmerged:
print('\t%s' % commit)
commit_list = GetCommitList(repo_full_path)
print(' >>> Last %d commits in the repository:' % len(commit_list))
for commit in commit_list:
print('\t%s' % commit)
print('\n')
if __name__ == '__main__':
main()
| [] | [] | ["CROS_WORKON_SRCROOT"] | [] | ["CROS_WORKON_SRCROOT"] | python | 1 | 0 | |
pybay/prod_settings.py
|
import os
from pybay.settings import *
DEBUG = False
TEMPLATE_DEBUG = DEBUG
EMAIL_DEBUG = DEBUG
DATABASES = {
"default": {
"ENGINE": "django.db.backends.mysql",
'OPTIONS': {
'read_default_file': '/home/pybay/pybay_prod.cnf',
'charset': 'utf8mb4',
},
}
}
ALLOWED_HOSTS = ['pybay.com']
TIME_ZONE = "US/Pacific"
MEDIA_ROOT = '/data/websites/prod_site_media/media'
MEDIA_URL = "/site_media/media/"
STATIC_ROOT = '/data/websites/prod_site_media/static'
STATIC_URL = "/site_media/static/"
SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
# Add a filehandler for logging
LOGGING['handlers']['filelog'] = {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': '/data/websites/logs/prod.log',
'backupCount': 5,
'maxBytes': 1024 * 1024 * 3,
'formatter': 'verbose',
}
# Hook filehandler to django.request so we see 500 server errors in the file
LOGGING['loggers']['django.request']['handlers'].append('filelog')
# And also turn on for log.debug and up calls in pybay.* code
LOGGING['loggers']['pybay'] = {
'handlers': ['filelog'],
'level': 'DEBUG',
'propagate': True,
}
| [] | [] | ["DJANGO_SECRET_KEY"] | [] | ["DJANGO_SECRET_KEY"] | python | 1 | 0 | |
config.py
|
import os
# Bot token
BOT_TOKEN = os.getenv('BOT_TOKEN')
# Web application setup
WEBAPP_HOST = '0.0.0.0'
WEBAPP_PORT = int(os.getenv('PORT'))
# Webhook setup
WEBHOOK_HOST = 'https://neural-painter-bot.herokuapp.com'
WEBHOOK_PATH = f'/webhook/{BOT_TOKEN}'
WEBHOOK_URL = f'{WEBHOOK_HOST}{WEBHOOK_PATH}'
| [] | [] | ["PORT", "BOT_TOKEN"] | [] | ["PORT", "BOT_TOKEN"] | python | 2 | 0 | |
oh_app_demo/settings.py
|
"""
Django settings for oh_app_demo project.
"""
import os
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Customization for Heroku-specific settings
ON_HEROKU = os.getenv('ON_HEROKU', 'false').lower() == 'true'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', 'whopsthereshouldbeone')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True if os.getenv('DEBUG', '').lower() == 'true' else False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'openhumans',
'main.apps.Main',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'oh_app_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'oh_app_demo.wsgi.application'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
},
'main': {
'handlers': ['console'],
'level': 'DEBUG' if DEBUG else 'INFO',
},
},
}
##################################################
# Custom app settings
#
# Configure these with .env/environment variables
# After log in, send users to the overview page.
LOGIN_REDIRECT_URL = 'overview'
# Project's page on Open Humans
OH_PROJ_PAGE = os.getenv(
'OH_PROJ_PAGE', '')
# Open Humans settings
OPENHUMANS_APP_BASE_URL = os.getenv(
'OPENHUMANS_APP_BASE_URL', 'http://localhost:5000')
OPENHUMANS_CLIENT_ID = os.getenv('OPENHUMANS_CLIENT_ID', 'your_client_id')
OPENHUMANS_CLIENT_SECRET = os.getenv(
'OPENHUMANS_CLIENT_SECRET', 'your_client_secret')
# Admin account password for configuration.
ADMIN_PASSWORD = os.getenv('ADMIN_PASSWORD', '')
if ON_HEROKU:
SECURE_SSL_REDIRECT = True
django_heroku.settings(locals())
| [] | [] | ["OPENHUMANS_CLIENT_ID", "OPENHUMANS_CLIENT_SECRET", "OPENHUMANS_APP_BASE_URL", "OH_PROJ_PAGE", "DJANGO_LOG_LEVEL", "SECRET_KEY", "DEBUG", "ON_HEROKU", "ADMIN_PASSWORD"] | [] | ["OPENHUMANS_CLIENT_ID", "OPENHUMANS_CLIENT_SECRET", "OPENHUMANS_APP_BASE_URL", "OH_PROJ_PAGE", "DJANGO_LOG_LEVEL", "SECRET_KEY", "DEBUG", "ON_HEROKU", "ADMIN_PASSWORD"] | python | 9 | 0 | |
datastore/nox.py
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
@nox.session
def default(session):
"""Default unit test session.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
Python corresponding to the ``nox`` binary on the ``PATH`` can
run the tests.
"""
# Install all test dependencies, then install local packages in-place.
session.install('mock', 'pytest', 'pytest-cov')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.datastore',
'--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
os.path.join('tests', 'unit'),
*session.posargs
)
@nox.session
@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7'])
def unit(session, py):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'unit-' + py
default(session)
@nox.session
@nox.parametrize('py', ['2.7', '3.6'])
def system(session, py):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Run the system tests against latest Python 2 and Python 3 only.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'sys-' + py
# Use pre-release gRPC for system tests.
session.install('--pre', 'grpcio')
# Install all test dependencies, then install local packages in-place.
session.install('mock', 'pytest')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
session.install('-e', '../test_utils/')
session.install('-e', '.')
# Run py.test against the system tests.
session.run('py.test', '--quiet', 'tests/system', *session.posargs)
@nox.session
def doctests(session):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Doctests run against Python 3.6 only.
# It is difficult to make doctests run against both Python 2 and Python 3
# because they test string output equivalence, which is difficult to
# make match (e.g. unicode literals starting with "u").
session.interpreter = 'python3.6'
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install('mock', 'pytest', 'sphinx')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
session.install('-e', '../test_utils/')
session.install('-e', '.')
# Run py.test against the system tests.
session.run('py.test', '--quiet', 'tests/doctests.py')
@nox.session
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.interpreter = 'python3.6'
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
# Set the virtualenv dirname.
session.virtualenv_dirname = 'setup'
session.install('docutils', 'Pygments')
session.run(
'python', 'setup.py', 'check', '--restructuredtext', '--strict')
@nox.session
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.interpreter = 'python3.6'
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
| [] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | python | 1 | 0 | |
main_test.go
|
package main
import (
"os"
"testing"
"github.com/bitrise-io/go-utils/fileutil"
"github.com/bitrise-io/go-utils/pathutil"
"github.com/stretchr/testify/require"
)
var workDir = os.Getenv("BITRISE_SOURCE_DIR")
func Test_runScriptBash(t *testing.T) {
require.NoError(t, pathutil.EnsureDirExist("_tmp"))
t.Log("Successful execution")
{
require.NoError(t, fileutil.WriteStringToFile("_tmp/test.sh", "echo 'This is a Bash script'"))
exitCode, err := runScript("bash", "_tmp/test.sh", workDir)
require.NoError(t, err)
require.Equal(t, 0, exitCode)
}
t.Log("Exit with code 222")
{
require.NoError(t, fileutil.WriteStringToFile("_tmp/test_failing.sh", "exit 222"))
exitCode, err := runScript("bash", "_tmp/test_failing.sh", workDir)
require.Equal(t, 222, exitCode)
require.Error(t, err)
}
}
func Test_runScriptRuby(t *testing.T) {
require.NoError(t, pathutil.EnsureDirExist("_tmp"))
t.Log("Successful Ruby execution")
{
require.NoError(t, fileutil.WriteStringToFile("_tmp/test.rb", "puts 'This is a Ruby script'"))
exitCode, err := runScript("ruby", "_tmp/test.rb", workDir)
require.NoError(t, err)
require.Equal(t, 0, exitCode)
}
t.Log("Check if working_dir is set properly")
{
require.NoError(t, fileutil.WriteStringToFile("_tmp/test_workdir.rb", "puts IO.read(\".gitignore\")"))
exitCode, err := runScript("ruby", "_tmp/test_workdir.rb", workDir)
require.NoError(t, err)
require.Equal(t, 0, exitCode)
}
}
func Test_runScriptGo(t *testing.T) {
require.NoError(t, pathutil.EnsureDirExist("_tmp"))
goScript := `package main
import (
"fmt"
)
func main() {
fmt.Println("This is a Go script")
}
`
require.NoError(t, fileutil.WriteStringToFile("_tmp/test.go", goScript))
exitCode, err := runScript("go run", "_tmp/test.go", workDir)
require.NoError(t, err)
require.Equal(t, 0, exitCode)
}
| ["\"BITRISE_SOURCE_DIR\""] | [] | ["BITRISE_SOURCE_DIR"] | [] | ["BITRISE_SOURCE_DIR"] | go | 1 | 0 | |
tfx/examples/iris/iris_pipeline.py
|
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Iris flowers example using TFX."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from typing import Text
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.beam.beam_dag_runner import BeamDagRunner
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
_pipeline_name = 'iris'
# This example assumes that Iris flowers data is stored in ~/iris/data and the
# utility function is in ~/iris. Feel free to customize as needed.
_iris_root = os.path.join(os.environ['HOME'], 'iris')
_data_root = os.path.join(_iris_root, 'data')
# Python module file to inject customized logic into the TFX components. The
# Transform and Trainer both require user-defined functions to run successfully.
_module_file = os.path.join(_iris_root, 'iris_utils.py')
# Path which can be listened to by the model server. Pusher will output the
# trained model here.
_serving_model_dir = os.path.join(_iris_root, 'serving_model', _pipeline_name)
# Directory and data locations. This example assumes all of the flowers
# example code and metadata library is relative to $HOME, but you can store
# these files anywhere on your local filesystem.
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
def _create_pipeline(pipeline_name: Text, pipeline_root: Text, data_root: Text,
module_file: Text, serving_model_dir: Text,
metadata_path: Text) -> pipeline.Pipeline:
"""Implements the Iris flowers pipeline with TFX."""
examples = external_input(data_root)
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input_base=examples)
# Computes statistics over data for visualization and example validation.
statistics_gen = StatisticsGen(input_data=example_gen.outputs['examples'])
# Generates schema based on statistics files.
infer_schema = SchemaGen(
stats=statistics_gen.outputs['output'], infer_feature_shape=True)
# Performs anomaly detection based on statistics and data schema.
validate_stats = ExampleValidator(
stats=statistics_gen.outputs['output'],
schema=infer_schema.outputs['output'])
# Uses a user-provided Python function that implements a model using TF-Learn.
trainer = Trainer(
module_file=module_file,
examples=example_gen.outputs['examples'],
schema=infer_schema.outputs['output'],
train_args=trainer_pb2.TrainArgs(num_steps=10000),
eval_args=trainer_pb2.EvalArgs(num_steps=5000))
# Uses TFMA to compute evaluation statistics over features of a model.
model_analyzer = Evaluator(
examples=example_gen.outputs['examples'],
model_exports=trainer.outputs['output'])
# Performs quality validation of a candidate model (compared to a baseline).
model_validator = ModelValidator(
examples=example_gen.outputs['examples'], model=trainer.outputs['output'])
# Checks whether the model passed the validation steps and pushes the model
# to a file destination if check passed.
pusher = Pusher(
model_export=trainer.outputs['output'],
model_blessing=model_validator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=serving_model_dir)))
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
example_gen, statistics_gen, infer_schema, validate_stats, trainer,
model_analyzer, model_validator, pusher
],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
additional_pipeline_args={},
)
# To run this pipeline from the python CLI:
# $python iris_pipeline.py
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
BeamDagRunner().run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
module_file=_module_file,
serving_model_dir=_serving_model_dir,
metadata_path=_metadata_path))
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 | |
tests/integration/contracts/utils.go
|
package contracts
import (
"encoding/json"
"fmt"
"go/parser"
"go/token"
"os"
"path"
"reflect"
"sort"
"testing"
"github.com/gofed/symbols-extractor/pkg/analyzers/type/runner"
allocglobal "github.com/gofed/symbols-extractor/pkg/parser/alloctable/global"
"github.com/gofed/symbols-extractor/pkg/parser/contracts"
fileparser "github.com/gofed/symbols-extractor/pkg/parser/file"
"github.com/gofed/symbols-extractor/pkg/parser/types"
gotypes "github.com/gofed/symbols-extractor/pkg/types"
"github.com/gofed/symbols-extractor/tests/integration/utils"
)
func makePayload(t *testing.T, packageName, filename string) *fileparser.Payload {
gofile := path.Join(os.Getenv("GOPATH"), "src", packageName, filename)
f, err := parser.ParseFile(token.NewFileSet(), gofile, nil, 0)
if err != nil {
t.Fatalf("Unable to parse file %v, AST Parse error: %v", gofile, err)
}
payload, err := fileparser.MakePayload(f)
if err != nil {
t.Errorf("Unable to parse file %v, unable to make a payload due to: %v", gofile, err)
}
return payload
}
func storePackage(config *types.Config) {
table, err := config.SymbolTable.Table(0)
if err != nil {
panic(err)
}
config.GlobalSymbolTable.Add(config.PackageName, table, false)
}
func ParsePackage(t *testing.T, config *types.Config, fileParser *fileparser.FileParser, packageName, filename, pkg string) error {
config.PackageName = pkg
config.SymbolsAccessor.SetCurrentTable(pkg, config.SymbolTable)
payload := makePayload(t, packageName, filename)
if e := fileParser.Parse(payload); e != nil {
return fmt.Errorf("Unable to parse file %v: %v", filename, e)
}
storePackage(config)
if len(payload.DataTypes) > 0 {
return fmt.Errorf("Payload not fully consumed, missing %v DataTypes", len(payload.DataTypes))
}
if len(payload.Variables) > 0 {
return fmt.Errorf("Payload not fully consumed, missing %v Variables", len(payload.Variables))
}
if len(payload.Functions) > 0 {
return fmt.Errorf("Payload not fully consumed, missing %v Functions", len(payload.Functions))
}
table, err := config.SymbolTable.Table(0)
if err != nil {
panic(err)
}
table.PackageQID = pkg
fmt.Printf("Global storing %v\n", pkg)
config.GlobalSymbolTable.Add(pkg, table, false)
// reset symbol table stack
config.SymbolTable.Pop()
config.SymbolTable.Push()
return nil
}
func ParseAndCompareContracts(t *testing.T, gopkg, filename string, tests []contracts.Contract) {
fileParser, config, err := utils.InitFileParser(gopkg)
if err != nil {
t.Error(err)
}
if err := ParsePackage(t, config, fileParser, gopkg, filename, gopkg); err != nil {
t.Error(err)
return
}
var genContracts []contracts.Contract
cs := config.ContractTable.List()
var keys []string
for fncName := range cs {
keys = append(keys, fncName)
}
sort.Strings(keys)
for _, key := range keys {
genContracts = append(genContracts, cs[key]...)
}
CompareContracts(t, genContracts, tests)
}
func CompareContracts(t *testing.T, contractsList, tests []contracts.Contract) {
testsTotal := len(tests)
if len(contractsList) < testsTotal {
t.Errorf("Expected at least %v contracts, got %v instead", testsTotal, len(contractsList))
}
t.Logf("Got %v tests, %v contracts", testsTotal, len(contractsList))
c := 0
for j, exp := range tests {
t.Logf("Checking %v-th contract: %v\n", j, contracts.Contract2String(contractsList[c]))
utils.CompareContracts(t, exp, contractsList[c])
c++
}
for i := c; i < len(contractsList); i++ {
t.Logf("\n\n\n\nAbout to check %v-th contract: %v\n", i, contracts.Contract2String(contractsList[i]))
//t.Errorf("\n\n\n\nUnprocessed %v-th contract: %v\n", i, contracts.Contract2String(contractsList[i]))
}
}
type VarTableTest struct {
Name string
DataType gotypes.DataType
}
func CompareVarTable(t *testing.T, expected []VarTableTest, testedVarTable *runner.VarTable) {
t.Logf("#### Checking variables\n")
for _, e := range expected {
tested, exists := testedVarTable.GetVariable(e.Name)
if !exists {
t.Errorf("Variable %v does not exist", e.Name)
continue
}
t.Logf("Checking %q variable...\n", e.Name)
if !reflect.DeepEqual(tested.DataType(), e.DataType) {
tByteSlice, _ := json.Marshal(tested.DataType())
eByteSlice, _ := json.Marshal(e.DataType)
t.Errorf("%v: got\n%v, expected\n%v", e.Name, string(tByteSlice), string(eByteSlice))
}
}
names := testedVarTable.Names()
if len(names) > len(expected) {
var eNames []string
eNamesMap := map[string]struct{}{}
for _, n := range expected {
eNames = append(eNames, n.Name)
eNamesMap[n.Name] = struct{}{}
}
sort.Strings(eNames)
sort.Strings(names)
t.Logf("\n#### Once all expected variables are set, both columns will be equal\n")
for i := 0; i < len(names); i++ {
eName := ""
if _, exists := eNamesMap[names[i]]; exists {
eName = names[i]
}
fmt.Printf("test.name: %v\t\te.name: %v\n", names[i], eName)
}
if len(names)-len(expected) > 0 {
t.Logf("\n#### There is %v variables not yet checked\n", len(names)-len(expected))
for _, name := range names {
if _, exists := eNamesMap[name]; !exists {
t.Errorf("%v variables not yet checked", name)
}
}
}
}
// sort.Strings(names)
// for _, name := range names {
// fmt.Printf("Name: %v\tDataType: %#v\n", name, varTable.GetVariable(name).DataType())
// }
}
func ParseAndCompareVarTable(t *testing.T, gopkg, filename string, expected []VarTableTest) {
fileParser, config, err := utils.InitFileParser(gopkg)
if err != nil {
t.Error(err)
}
if err := ParsePackage(t, config, fileParser, config.PackageName, filename, config.PackageName); err != nil {
t.Error(err)
return
}
r := runner.New(config.PackageName, config.GlobalSymbolTable, allocglobal.New("", "", nil), config.ContractTable)
if err := r.Run(); err != nil {
t.Fatal(err)
}
r.VarTable().Dump()
CompareVarTable(t, expected, r.VarTable())
}
| ["\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
main.go
|
package main
import (
"context"
"fmt"
"io"
"log"
"net/http"
"os"
"strings"
"cloud.google.com/go/storage"
"google.golang.org/api/iterator"
)
// import (
// "log"
// "path/filepath"
// "runtime"
// "github.com/jhampac/pueblo/node"
// )
// var (
// _, b, _, _ = runtime.Caller(0)
// basePath = filepath.Dir(b)
// )
// func main() {
// if err := node.Run(basePath + "/database/.db"); err != nil {
// log.Fatalf("An error as occured: %v", err)
// }
// }
func main() {
ctx := context.Background()
client, err := storage.NewClient(ctx)
if err != nil {
log.Fatalf("Failed to create client: %v", err)
}
defer client.Close()
bucketName := os.Getenv("STORAGE_BUCKET")
bucket := client.Bucket(bucketName)
query := &storage.Query{}
it := bucket.Objects(ctx, query)
var names []string
for {
attrs, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
names = append(names, attrs.Name)
}
////////////////////////////////////////
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.NotFound(w, r)
return
}
indexHandler(w, names)
})
port := os.Getenv("PORT")
if port == "" {
port = "9000"
log.Printf("Defaulting to port %s", port)
}
log.Printf("Listening on port %s", port)
if err := http.ListenAndServe(":"+port, nil); err != nil {
log.Fatal(err)
}
}
func indexHandler(w io.Writer, fileNames []string) {
var sb strings.Builder
for _, n := range fileNames {
sb.WriteString(n)
sb.WriteString("\n")
}
fmt.Fprintf(w, "Files avaiable: %s\n", sb.String())
}
| ["\"STORAGE_BUCKET\"", "\"PORT\""] | [] | ["PORT", "STORAGE_BUCKET"] | [] | ["PORT", "STORAGE_BUCKET"] | go | 2 | 0 | |
python/ambassador_diag/diagd.py
|
#!python
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import copy
import subprocess
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, TYPE_CHECKING
import datetime
import functools
import json
import logging
import multiprocessing
import os
import queue
import re
import signal
import threading
import time
import uuid
import requests
import jsonpatch
from expiringdict import ExpiringDict
import concurrent.futures
from pkg_resources import Requirement, resource_filename
import clize
from clize import Parameter
from flask import Flask, render_template, send_from_directory, request, jsonify, Response
from flask import json as flask_json
import gunicorn.app.base
from gunicorn.six import iteritems
from ambassador import Config, IR, EnvoyConfig, Diagnostics, Scout, Version
from ambassador.utils import SystemInfo, PeriodicTrigger, SavedSecret, load_url_contents
from ambassador.utils import SecretHandler, KubewatchSecretHandler, FSSecretHandler
from ambassador.config.resourcefetcher import ResourceFetcher
from ambassador.diagnostics import EnvoyStats
from ambassador.constants import Constants
if TYPE_CHECKING:
from ambassador.ir.irtlscontext import IRTLSContext
__version__ = Version
boot_time = datetime.datetime.now()
tvars_cache = ExpiringDict(max_len=10, max_age_seconds=60)
logging.basicConfig(
level=logging.INFO,
format="%%(asctime)s diagd %s [P%%(process)dT%%(threadName)s] %%(levelname)s: %%(message)s" % __version__,
datefmt="%Y-%m-%d %H:%M:%S"
)
# Shut up Werkzeug's standard request logs -- they're just too noisy.
logging.getLogger("werkzeug").setLevel(logging.CRITICAL)
# Likewise make requests a bit quieter.
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
ambassador_targets = {
'mapping': 'https://www.getambassador.io/reference/configuration#mappings',
'module': 'https://www.getambassador.io/reference/configuration#modules',
}
# envoy_targets = {
# 'route': 'https://envoyproxy.github.io/envoy/configuration/http_conn_man/route_config/route.html',
# 'cluster': 'https://envoyproxy.github.io/envoy/configuration/cluster_manager/cluster.html',
# }
def number_of_workers():
return (multiprocessing.cpu_count() * 2) + 1
class DiagApp (Flask):
ambex_pid: int
kick: Optional[str]
estats: EnvoyStats
config_path: Optional[str]
snapshot_path: str
bootstrap_path: str
ads_path: str
health_checks: bool
no_envoy: bool
debugging: bool
allow_fs_commands: bool
report_action_keys: bool
verbose: bool
notice_path: str
logger: logging.Logger
aconf: Config
ir: Optional[IR]
econf: Optional[EnvoyConfig]
diag: Optional[Diagnostics]
notices: 'Notices'
scout: Scout
watcher: 'AmbassadorEventWatcher'
stats_updater: Optional[PeriodicTrigger]
scout_checker: Optional[PeriodicTrigger]
last_request_info: Dict[str, int]
last_request_time: Optional[datetime.datetime]
latest_snapshot: str
banner_endpoint: Optional[str]
def setup(self, snapshot_path: str, bootstrap_path: str, ads_path: str,
config_path: Optional[str], ambex_pid: int, kick: Optional[str], banner_endpoint: Optional[str],
k8s=False, do_checks=True, no_envoy=False, reload=False, debug=False, verbose=False,
notices=None, validation_retries=5, allow_fs_commands=False, local_scout=False,
report_action_keys=False):
self.estats = EnvoyStats()
self.health_checks = do_checks
self.no_envoy = no_envoy
self.debugging = reload
self.verbose = verbose
self.notice_path = notices
self.notices = Notices(self.notice_path)
self.notices.reset()
self.k8s = k8s
self.validation_retries = validation_retries
self.allow_fs_commands = allow_fs_commands
self.local_scout = local_scout
self.report_action_keys = report_action_keys
self.banner_endpoint = banner_endpoint
# This will raise an exception and crash if you pass it a string. That's intentional.
self.ambex_pid = int(ambex_pid)
self.kick = kick
# This feels like overkill.
self.logger = logging.getLogger("ambassador.diagd")
self.logger.setLevel(logging.INFO)
self.kubestatus = KubeStatus()
if debug:
self.logger.setLevel(logging.DEBUG)
logging.getLogger('ambassador').setLevel(logging.DEBUG)
self.config_path = config_path
self.bootstrap_path = bootstrap_path
self.ads_path = ads_path
self.snapshot_path = snapshot_path
self.ir = None
self.econf = None
self.diag = None
self.stats_updater = None
self.scout_checker = None
self.last_request_info = {}
self.last_request_time = None
# self.scout = Scout(update_frequency=datetime.timedelta(seconds=10))
self.scout = Scout(local_only=self.local_scout)
def check_scout(self, what: str) -> None:
self.watcher.post("SCOUT", (what, self.ir))
# Get the Flask app defined early. Setup happens later.
app = DiagApp(__name__,
template_folder=resource_filename(Requirement.parse("ambassador"), "templates"))
######## DECORATORS
def standard_handler(f):
func_name = getattr(f, '__name__', '<anonymous>')
@functools.wraps(f)
def wrapper(*args, **kwds):
reqid = str(uuid.uuid4()).upper()
prefix = "%s: %s \"%s %s\"" % (reqid, request.remote_addr, request.method, request.path)
app.logger.info("%s START" % prefix)
start = datetime.datetime.now()
app.logger.debug("%s handler %s" % (prefix, func_name))
# Default to the exception case
result_to_log = "server error"
status_to_log = 500
result_log_level = logging.ERROR
result = Response(result_to_log, status_to_log)
try:
result = f(*args, reqid=reqid, **kwds)
if not isinstance(result, Response):
result = Response(f"Invalid handler result {result}", status_to_log)
status_to_log = result.status_code
if (status_to_log // 100) == 2:
result_log_level = logging.INFO
result_to_log = "success"
else:
result_log_level = logging.ERROR
result_to_log = "failure"
except Exception as e:
app.logger.exception(e)
end = datetime.datetime.now()
ms = int(((end - start).total_seconds() * 1000) + .5)
app.logger.log(result_log_level, "%s %dms %d %s" % (prefix, ms, status_to_log, result_to_log))
return result
return wrapper
######## UTILITIES
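# Notices collects user-facing notice dicts (typically {'level': ..., 'message': ...}). reset()
# re-seeds the list from the local notices file, and post()/prepend()/extend() add further entries.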
class Notices:
def __init__(self, local_config_path: str) -> None:
self.local_path = local_config_path
self.notices: List[Dict[str, str]] = []
def reset(self):
local_notices: List[Dict[str, str]] = []
local_data = ''
try:
local_stream = open(self.local_path, "r")
local_data = local_stream.read()
local_notices = json.loads(local_data)
except OSError:
pass
except:
local_notices.append({ 'level': 'ERROR', 'message': 'bad local notices: %s' % local_data })
self.notices = local_notices
# app.logger.info("Notices: after RESET: %s" % json.dumps(self.notices))
def post(self, notice):
# app.logger.debug("Notices: POST %s" % notice)
self.notices.append(notice)
# app.logger.info("Notices: after POST: %s" % json.dumps(self.notices))
def prepend(self, notice):
# app.logger.debug("Notices: PREPEND %s" % notice)
self.notices.insert(0, notice)
# app.logger.info("Notices: after PREPEND: %s" % json.dumps(self.notices))
def extend(self, notices):
for notice in notices:
self.post(notice)
def td_format(td_object):
seconds = int(td_object.total_seconds())
periods = [
('year', 60*60*24*365),
('month', 60*60*24*30),
('day', 60*60*24),
('hour', 60*60),
('minute', 60),
('second', 1)
]
strings = []
for period_name, period_seconds in periods:
if seconds > period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
strings.append("%d %s%s" %
(period_value, period_name, "" if (period_value == 1) else "s"))
formatted = ", ".join(strings)
if not formatted:
formatted = "0s"
return formatted
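# Example: td_format(datetime.timedelta(seconds=3723)) returns '1 hour, 2 minutes, 3 seconds'.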
def interval_format(seconds, normal_format, now_message):
if seconds >= 1:
return normal_format % td_format(datetime.timedelta(seconds=seconds))
else:
return now_message
def system_info(app):
ir = app.ir
debug_mode = False
if ir:
amod = ir.ambassador_module
debug_mode = amod.get('debug_mode', False)
app.logger.info(f'DEBUG_MODE {debug_mode}')
status_dict = {'config failure': [False, 'no configuration loaded']}
env_status = getattr(app.watcher, 'env_status', None)
if env_status:
status_dict = env_status.to_dict()
print(f"status_dict {status_dict}")
return {
"version": __version__,
"hostname": SystemInfo.MyHostName,
"ambassador_id": Config.ambassador_id,
"ambassador_namespace": Config.ambassador_namespace,
"single_namespace": Config.single_namespace,
"knative_enabled": os.environ.get('AMBASSADOR_KNATIVE_SUPPORT', '').lower() == 'true',
"statsd_enabled": os.environ.get('STATSD_ENABLED', '').lower() == 'true',
"endpoints_enabled": Config.enable_endpoints,
"cluster_id": os.environ.get('AMBASSADOR_CLUSTER_ID',
os.environ.get('AMBASSADOR_SCOUT_ID', "00000000-0000-0000-0000-000000000000")),
"boot_time": boot_time,
"hr_uptime": td_format(datetime.datetime.now() - boot_time),
"latest_snapshot": app.latest_snapshot,
"env_good": getattr(app.watcher, 'env_good', False),
"env_failures": getattr(app.watcher, 'failure_list', [ 'no IR loaded' ]),
"env_status": status_dict,
"debug_mode": debug_mode
}
def envoy_status(estats):
since_boot = interval_format(estats.time_since_boot(), "%s", "less than a second")
since_update = "Never updated"
if estats.time_since_update():
since_update = interval_format(estats.time_since_update(), "%s ago", "within the last second")
return {
"alive": estats.is_alive(),
"ready": estats.is_ready(),
"uptime": since_boot,
"since_update": since_update
}
def drop_serializer_key(d: Dict[Any, Any]) -> Dict[Any, Any]:
"""
Delete the "serialization" key (if present) in any dictionary passed in and
return that dictionary. This function is intended to be used as the
object_hook value for json.load[s].
"""
_ = d.pop("serialization", None)
return d
@app.route('/_internal/v0/ping', methods=[ 'GET' ])
def handle_ping():
return "ACK\n", 200
@app.route('/_internal/v0/update', methods=[ 'POST' ])
def handle_kubewatch_update():
url = request.args.get('url', None)
if not url:
app.logger.error("error: update requested with no URL")
return "error: update requested with no URL\n", 400
app.logger.info("Update requested: kubewatch, %s" % url)
status, info = app.watcher.post('CONFIG', ( 'kw', url ))
return info, status
@app.route('/_internal/v0/watt', methods=[ 'POST' ])
def handle_watt_update():
url = request.args.get('url', None)
if not url:
app.logger.error("error: watt update requested with no URL")
return "error: watt update requested with no URL\n", 400
app.logger.info("Update requested: watt, %s" % url)
status, info = app.watcher.post('CONFIG', ( 'watt', url ))
return info, status
@app.route('/_internal/v0/fs', methods=[ 'POST' ])
def handle_fs():
path = request.args.get('path', None)
if not path:
app.logger.error("error: update requested with no PATH")
return "error: update requested with no PATH\n", 400
app.logger.info("Update requested from %s" % path)
status, info = app.watcher.post('CONFIG_FS', path)
return info, status
@app.route('/_internal/v0/events', methods=[ 'GET' ])
def handle_events():
if not app.local_scout:
return 'Local Scout is not enabled\n', 400
event_dump = [
( x['local_scout_timestamp'], x['mode'], x['action'], x ) for x in app.scout._scout.events
]
app.logger.info(f'Event dump {event_dump}')
return jsonify(event_dump)
@app.route('/ambassador/v0/favicon.ico', methods=[ 'GET' ])
def favicon():
template_path = resource_filename(Requirement.parse("ambassador"), "templates")
return send_from_directory(template_path, "favicon.ico")
@app.route('/ambassador/v0/check_alive', methods=[ 'GET' ])
def check_alive():
status = envoy_status(app.estats)
if status['alive']:
return "ambassador liveness check OK (%s)\n" % status['uptime'], 200
else:
return "ambassador seems to have died (%s)\n" % status['uptime'], 503
@app.route('/ambassador/v0/check_ready', methods=[ 'GET' ])
def check_ready():
if not (app.ir and app.diag):
return "ambassador waiting for config\n", 503
status = envoy_status(app.estats)
if status['ready']:
return "ambassador readiness check OK (%s)\n" % status['since_update'], 200
else:
return "ambassador not ready (%s)\n" % status['since_update'], 503
@app.route('/ambassador/v0/diag/', methods=[ 'GET' ])
@standard_handler
def show_overview(reqid=None):
app.logger.debug("OV %s - showing overview" % reqid)
diag = app.diag
if app.verbose:
app.logger.debug("OV %s: DIAG" % reqid)
app.logger.debug("%s" % json.dumps(diag.as_dict(), sort_keys=True, indent=4))
ov = diag.overview(request, app.estats)
if app.verbose:
app.logger.debug("OV %s: OV" % reqid)
app.logger.debug("%s" % json.dumps(ov, sort_keys=True, indent=4))
app.logger.debug("OV %s: collecting errors" % reqid)
ddict = collect_errors_and_notices(request, reqid, "overview", diag)
banner_content = None
if app.banner_endpoint and app.ir and app.ir.edge_stack_allowed:
try:
response = requests.get(app.banner_endpoint)
if response.status_code == 200:
banner_content = response.text
except Exception as e:
app.logger.error("could not get banner_content: %s" % e)
tvars = dict(system=system_info(app),
envoy_status=envoy_status(app.estats),
loginfo=app.estats.loginfo,
notices=app.notices.notices,
banner_content=banner_content,
**ov, **ddict)
patch_client = request.args.get('patch_client', None)
if request.args.get('json', None):
key = request.args.get('filter', None)
if key:
return jsonify(tvars.get(key, None))
elif patch_client:
# Assume this is the Admin UI. Recursively drop all "serialization"
# keys. This avoids leaking secrets and generally makes the
# snapshot a lot smaller without losing information that the Admin
# UI cares about. We do this below by setting the object_hook
# parameter of the json.loads(...) call.
# Get the previous full representation
cached_tvars_json = tvars_cache.get(patch_client, dict())
# Serialize the tvars into a json-string using the same jsonify Flask serializer, then load the json object
response_content = json.loads(flask_json.dumps(tvars), object_hook=drop_serializer_key)
# Diff between the previous representation and the current full representation (http://jsonpatch.com/)
patch = jsonpatch.make_patch(cached_tvars_json, response_content)
# Save the current full representation in memory
tvars_cache[patch_client] = response_content
# Return only the diff
return Response(patch.to_string(), mimetype="application/json")
else:
return jsonify(tvars)
else:
app.check_scout("overview")
return Response(render_template("overview.html", **tvars))
def collect_errors_and_notices(request, reqid, what: str, diag: Diagnostics) -> Dict:
loglevel = request.args.get('loglevel', None)
notice = None
if loglevel:
app.logger.debug("%s %s -- requesting loglevel %s" % (what, reqid, loglevel))
if not app.estats.update_log_levels(time.time(), level=loglevel):
notice = { 'level': 'WARNING', 'message': "Could not update log level!" }
# else:
# return redirect("/ambassador/v0/diag/", code=302)
# We need to grab errors and notices from diag.as_dict(), process the errors so
# they work for the HTML rendering, and post the notices to app.notices. Then we
# return the dict representation that our caller should work with.
ddict = diag.as_dict()
# app.logger.debug("ddict %s" % json.dumps(ddict, indent=4, sort_keys=True))
derrors = ddict.pop('errors', {})
errors = []
for err_key, err_list in derrors.items():
if err_key == "-global-":
err_key = ""
for err in err_list:
errors.append((err_key, err[ 'error' ]))
dnotices = ddict.pop('notices', {})
# Make sure that anything about the loglevel gets folded into this set.
if notice:
app.notices.prepend(notice)
for notice_key, notice_list in dnotices.items():
for notice in notice_list:
app.notices.post({'level': 'NOTICE', 'message': "%s: %s" % (notice_key, notice)})
ddict['errors'] = errors
return ddict
@app.route('/ambassador/v0/diag/<path:source>', methods=[ 'GET' ])
@standard_handler
def show_intermediate(source=None, reqid=None):
app.logger.debug("SRC %s - getting intermediate for '%s'" % (reqid, source))
diag = app.diag
method = request.args.get('method', None)
resource = request.args.get('resource', None)
result = diag.lookup(request, source, app.estats)
if app.verbose:
app.logger.debug("RESULT %s" % json.dumps(result, sort_keys=True, indent=4))
ddict = collect_errors_and_notices(request, reqid, "detail %s" % source, diag)
tvars = dict(system=system_info(app),
envoy_status=envoy_status(app.estats),
loginfo=app.estats.loginfo,
notices=app.notices.notices,
method=method, resource=resource,
**result, **ddict)
if request.args.get('json', None):
key = request.args.get('filter', None)
if key:
return jsonify(tvars.get(key, None))
else:
return jsonify(tvars)
else:
app.check_scout("detail: %s" % source)
return Response(render_template("diag.html", **tvars))
@app.template_filter('sort_by_key')
def sort_by_key(objects):
return sorted(objects, key=lambda x: x['key'])
@app.template_filter('pretty_json')
def pretty_json(obj):
if isinstance(obj, dict):
obj = dict(**obj)
keys_to_drop = [ key for key in obj.keys() if key.startswith('_') ]
for key in keys_to_drop:
del(obj[key])
return json.dumps(obj, indent=4, sort_keys=True)
@app.template_filter('sort_clusters_by_service')
def sort_clusters_by_service(clusters):
return sorted(clusters, key=lambda x: x['service'])
# return sorted([ c for c in clusters.values() ], key=lambda x: x['service'])
@app.template_filter('source_lookup')
def source_lookup(name, sources):
app.logger.info("%s => sources %s" % (name, sources))
source = sources.get(name, {})
app.logger.info("%s => source %s" % (name, source))
return source.get('_source', name)
@app.route('/metrics', methods=['GET'])
@standard_handler
def get_prometheus_metrics(*args, **kwargs):
return app.estats.get_prometheus_state()
def bool_fmt(b: bool) -> str:
return 'T' if b else 'F'
class StatusInfo:
def __init__(self) -> None:
self.status = True
self.specifics: List[Tuple[bool, str]] = []
def failure(self, message: str) -> None:
self.status = False
self.specifics.append((False, message))
def OK(self, message: str) -> None:
self.specifics.append((True, message))
def to_dict(self) -> Dict[str, Union[bool, List[Tuple[bool, str]]]]:
return {
'status': self.status,
'specifics': self.specifics
}
class SystemStatus:
def __init__(self) -> None:
self.status: Dict[str, StatusInfo] = {}
def failure(self, key: str, message: str) -> None:
self.info_for_key(key).failure(message)
def OK(self, key: str, message: str) -> None:
self.info_for_key(key).OK(message)
def info_for_key(self, key) -> StatusInfo:
if key not in self.status:
self.status[key] = StatusInfo()
return self.status[key]
def to_dict(self) -> Dict[str, Dict[str, Union[bool, List[Tuple[bool, str]]]]]:
return { key: info.to_dict() for key, info in self.status.items() }
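# KubeStatus batches Kubernetes status updates: mark_live()/prune() track which resources still
# exist between reconciles, and post() hands changed status text to the external 'kubestatus'
# command via a small process pool.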
class KubeStatus:
pool: concurrent.futures.ProcessPoolExecutor
def __init__(self) -> None:
self.live: Dict[str, bool] = {}
self.current_status: Dict[str, str] = {}
self.pool = concurrent.futures.ProcessPoolExecutor(max_workers=5)
def mark_live(self, kind: str, name: str, namespace: str) -> None:
key = f"{kind}/{name}.{namespace}"
print(f"KubeStatus MASTER {os.getpid()}: mark_live {key}")
self.live[key] = True
def prune(self) -> None:
drop: List[str] = []
for key in self.current_status.keys():
if not self.live.get(key, False):
drop.append(key)
for key in drop:
print(f"KubeStatus MASTER {os.getpid()}: prune {key}")
del(self.current_status[key])
self.live = {}
def post(self, kind: str, name: str, namespace: str, text: str) -> None:
key = f"{kind}/{name}.{namespace}"
extant = self.current_status.get(key, None)
if extant == text:
print(f"KubeStatus MASTER {os.getpid()}: {key} == {text}")
else:
print(f"KubeStatus MASTER {os.getpid()}: {key} needs {text}")
# For now we're going to assume that this works.
self.current_status[key] = text
f = self.pool.submit(kubestatus_update, kind, name, namespace, text)
f.add_done_callback(kubestatus_update_done)
def kubestatus_update(kind: str, name: str, namespace: str, text: str) -> str:
cmd = [ 'kubestatus', kind, '-f', f'metadata.name={name}', '-n', namespace, '-u', '/dev/fd/0' ]
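    # '/dev/fd/0' points kubestatus at stdin; the status document in `text` is piped in via
    # subprocess.run(..., input=...) below.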
print(f"KubeStatus UPDATE {os.getpid()}: running command: {cmd}")
try:
rc = subprocess.run(cmd, input=text.encode('utf-8'), timeout=5)
if rc.returncode == 0:
return f"{name}.{namespace}: update OK"
else:
return f"{name}.{namespace}: error {rc.returncode}"
except subprocess.TimeoutExpired as e:
return f"{name}.{namespace}: timed out"
def kubestatus_update_done(f: concurrent.futures.Future) -> None:
print(f"KubeStatus DONE {os.getpid()}: result {f.result()}")
class AmbassadorEventWatcher(threading.Thread):
    # The key for 'Actions' is chimed - last_chime - env_good, formatted as T/F by chime(). This
    # will make more sense if you read through the _load_ir method.
Actions = {
'F-F-F': ( 'unhealthy', True ), # make sure the first chime always gets out
'F-F-T': ( 'now-healthy', True ), # make sure the first chime always gets out
'F-T-F': ( 'now-unhealthy', True ), # this is actually impossible
'F-T-T': ( 'healthy', True ), # this is actually impossible
'T-F-F': ( 'unhealthy', False ),
'T-F-T': ( 'now-healthy', True ),
'T-T-F': ( 'now-unhealthy', True ),
'T-T-T': ( 'update', False ),
}
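    # e.g. the first successful reconcile has chimed=F, last_chime=F, env_good=T, giving key
    # 'F-F-T' and therefore a 'now-healthy' report sent with no_cache=True.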
def __init__(self, app: DiagApp) -> None:
super().__init__(name="AEW", daemon=True)
self.app = app
self.logger = self.app.logger
self.events: queue.Queue = queue.Queue()
self.chimed = False # Have we ever sent a chime about the environment?
self.last_chime = False # What was the status of our last chime? (starts as False)
self.env_good = False # Is our environment currently believed to be OK?
self.failure_list: List[str] = [ 'unhealthy at boot' ] # What's making our environment not OK?
def post(self, cmd: str, arg: Union[str, Tuple[str, Optional[IR]]]) -> Tuple[int, str]:
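        # Synchronous request/response over queues: enqueue (cmd, arg, private queue) for the
        # watcher thread, then block until _respond() puts back a (status, info) tuple.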
rqueue: queue.Queue = queue.Queue()
self.events.put((cmd, arg, rqueue))
return rqueue.get()
def update_estats(self) -> None:
self.post('ESTATS', '')
def run(self):
self.logger.info("starting Scout checker")
self.app.scout_checker = PeriodicTrigger(lambda: self.check_scout("checkin"), period=86400) # Yup, one day.
self.logger.info("starting event watcher")
while True:
cmd, arg, rqueue = self.events.get()
# self.logger.info("EVENT: %s" % cmd)
if cmd == 'ESTATS':
# self.logger.info("updating estats")
try:
self._respond(rqueue, 200, 'updating')
self.app.estats.update()
except Exception as e:
self.logger.error("could not update estats: %s" % e)
self.logger.exception(e)
self._respond(rqueue, 500, 'Envoy stats update failed')
elif cmd == 'CONFIG_FS':
try:
self.load_config_fs(rqueue, arg)
except Exception as e:
self.logger.error("could not reconfigure: %s" % e)
self.logger.exception(e)
self._respond(rqueue, 500, 'configuration from filesystem failed')
elif cmd == 'CONFIG':
version, url = arg
try:
if version == 'kw':
self.load_config_kubewatch(rqueue, url)
elif version == 'watt':
self.load_config_watt(rqueue, url)
else:
raise RuntimeError("config from %s not supported" % version)
except Exception as e:
self.logger.error("could not reconfigure: %s" % e)
self.logger.exception(e)
self._respond(rqueue, 500, 'configuration failed')
elif cmd == 'SCOUT':
try:
self._respond(rqueue, 200, 'checking Scout')
self.check_scout(*arg)
except Exception as e:
self.logger.error("could not reconfigure: %s" % e)
self.logger.exception(e)
self._respond(rqueue, 500, 'scout check failed')
else:
self.logger.error(f"unknown event type: '{cmd}' '{arg}'")
self._respond(rqueue, 400, f"unknown event type '{cmd}' '{arg}'")
def _respond(self, rqueue: queue.Queue, status: int, info='') -> None:
self.logger.debug("responding to query with %s %s" % (status, info))
rqueue.put((status, info))
def load_config_fs(self, rqueue: queue.Queue, path: str) -> None:
self.logger.info("loading configuration from disk: %s" % path)
# The "path" here can just be a path, but it can also be a command for testing,
# if the user has chosen to allow that.
if self.app.allow_fs_commands and (':' in path):
pfx, rest = path.split(':', 1)
if pfx.lower() == 'cmd':
fields = rest.split(':', 1)
cmd = fields[0].upper()
args = fields[1:] if (len(fields) > 1) else None
if cmd.upper() == 'CHIME':
self.logger.info('CMD: Chiming')
self.chime()
self._respond(rqueue, 200, 'Chimed')
elif cmd.upper() == 'CHIME_RESET':
self.chimed = False
self.last_chime = False
self.env_good = False
self.app.scout.reset_events()
self.app.scout.report(mode="boot", action="boot1", no_cache=True)
self.logger.info('CMD: Reset chime state')
self._respond(rqueue, 200, 'CMD: Reset chime state')
elif cmd.upper() == 'SCOUT_CACHE_RESET':
self.app.scout.reset_cache_time()
self.logger.info('CMD: Reset Scout cache time')
self._respond(rqueue, 200, 'CMD: Reset Scout cache time')
elif cmd.upper() == 'ENV_OK':
self.env_good = True
self.failure_list = []
self.logger.info('CMD: Marked environment good')
self._respond(rqueue, 200, 'CMD: Marked environment good')
elif cmd.upper() == 'ENV_BAD':
self.env_good = False
self.failure_list = [ 'failure forced' ]
self.logger.info('CMD: Marked environment bad')
self._respond(rqueue, 200, 'CMD: Marked environment bad')
else:
self.logger.info(f'CMD: no such command "{cmd}"')
self._respond(rqueue, 400, f'CMD: no such command "{cmd}"')
return
else:
self.logger.info(f'CONFIG_FS: invalid prefix "{pfx}"')
self._respond(rqueue, 400, f'CONFIG_FS: invalid prefix "{pfx}"')
return
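        # Normalize the path into a filesystem-safe snapshot name, e.g. '/ambassador/config'
        # becomes '_ambassador_config'.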
snapshot = re.sub(r'[^A-Za-z0-9_-]', '_', path)
scc = FSSecretHandler(app.logger, path, app.snapshot_path, "0")
aconf = Config()
fetcher = ResourceFetcher(app.logger, aconf)
fetcher.load_from_filesystem(path, k8s=app.k8s, recurse=True)
if not fetcher.elements:
self.logger.debug("no configuration resources found at %s" % path)
# self._respond(rqueue, 204, 'ignoring empty configuration')
# return
self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
def load_config_kubewatch(self, rqueue: queue.Queue, url: str):
snapshot = url.split('/')[-1]
ss_path = os.path.join(app.snapshot_path, "snapshot-tmp.yaml")
self.logger.info("copying configuration: kubewatch, %s to %s" % (url, ss_path))
# Grab the serialization, and save it to disk too.
elements: List[str] = []
serialization = load_url_contents(self.logger, "%s/services" % url, stream2=open(ss_path, "w"))
if serialization:
elements.append(serialization)
else:
self.logger.debug("no services loaded from snapshot %s" % snapshot)
if Config.enable_endpoints:
serialization = load_url_contents(self.logger, "%s/endpoints" % url, stream2=open(ss_path, "a"))
if serialization:
elements.append(serialization)
else:
self.logger.debug("no endpoints loaded from snapshot %s" % snapshot)
serialization = "---\n".join(elements)
if not serialization:
self.logger.debug("no data loaded from snapshot %s" % snapshot)
# We never used to return here. I'm not sure if that's really correct?
# self._respond(rqueue, 204, 'ignoring: no data loaded from snapshot %s' % snapshot)
# return
scc = KubewatchSecretHandler(app.logger, url, app.snapshot_path, snapshot)
aconf = Config()
fetcher = ResourceFetcher(app.logger, aconf)
fetcher.parse_yaml(serialization, k8s=True)
if not fetcher.elements:
self.logger.debug("no configuration found in snapshot %s" % snapshot)
# Don't actually bail here. If they send over a valid config that happens
# to have nothing for us, it's still a legit config.
# self._respond(rqueue, 204, 'ignoring: no configuration found in snapshot %s' % snapshot)
# return
self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
def load_config_watt(self, rqueue: queue.Queue, url: str):
snapshot = url.split('/')[-1]
ss_path = os.path.join(app.snapshot_path, "snapshot-tmp.yaml")
self.logger.info("copying configuration: watt, %s to %s" % (url, ss_path))
# Grab the serialization, and save it to disk too.
serialization = load_url_contents(self.logger, url, stream2=open(ss_path, "w"))
if not serialization:
self.logger.debug("no data loaded from snapshot %s" % snapshot)
# We never used to return here. I'm not sure if that's really correct?
# self._respond(rqueue, 204, 'ignoring: no data loaded from snapshot %s' % snapshot)
# return
# Weirdly, we don't need a special WattSecretHandler: parse_watt knows how to handle
# the secrets that watt sends.
scc = SecretHandler(app.logger, url, app.snapshot_path, snapshot)
aconf = Config()
fetcher = ResourceFetcher(app.logger, aconf)
if serialization:
fetcher.parse_watt(serialization)
if not fetcher.elements:
self.logger.debug("no configuration found in snapshot %s" % snapshot)
# Don't actually bail here. If they send over a valid config that happens
# to have nothing for us, it's still a legit config.
# self._respond(rqueue, 204, 'ignoring: no configuration found in snapshot %s' % snapshot)
# return
self._load_ir(rqueue, aconf, fetcher, scc, snapshot)
def _load_ir(self, rqueue: queue.Queue, aconf: Config, fetcher: ResourceFetcher,
secret_handler: SecretHandler, snapshot: str) -> None:
aconf.load_all(fetcher.sorted())
aconf_path = os.path.join(app.snapshot_path, "aconf-tmp.json")
open(aconf_path, "w").write(aconf.as_json())
ir = IR(aconf, secret_handler=secret_handler)
ir_path = os.path.join(app.snapshot_path, "ir-tmp.json")
open(ir_path, "w").write(ir.as_json())
econf = EnvoyConfig.generate(ir, "V2")
diag = Diagnostics(ir, econf)
bootstrap_config, ads_config = econf.split_config()
if not self.validate_envoy_config(config=ads_config, retries=self.app.validation_retries):
self.logger.info("no updates were performed due to invalid envoy configuration, continuing with current configuration...")
# Don't use app.check_scout; it will deadlock.
self.check_scout("attempted bad update")
self._respond(rqueue, 500, 'ignoring: invalid Envoy configuration in snapshot %s' % snapshot)
return
snapcount = int(os.environ.get('AMBASSADOR_SNAPSHOT_COUNT', "4"))
snaplist: List[Tuple[str, str]] = []
if snapcount > 0:
self.logger.debug("rotating snapshots for snapshot %s" % snapshot)
# If snapcount is 4, this range statement becomes range(-4, -1)
# which gives [ -4, -3, -2 ], which the list comprehension turns
# into [ ( "-3", "-4" ), ( "-2", "-3" ), ( "-1", "-2" ) ]...
# which is the list of suffixes to rename to rotate the snapshots.
snaplist += [ (str(x+1), str(x)) for x in range(-1 * snapcount, -1) ]
# After dealing with that, we need to rotate the current file into -1.
snaplist.append(( '', '-1' ))
# Whether or not we do any rotation, we need to cycle in the '-tmp' file.
snaplist.append(( '-tmp', '' ))
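        # With snapcount=4 the final snaplist is
        # [('-3', '-4'), ('-2', '-3'), ('-1', '-2'), ('', '-1'), ('-tmp', '')].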
for from_suffix, to_suffix in snaplist:
for fmt in [ "aconf{}.json", "econf{}.json", "ir{}.json", "snapshot{}.yaml" ]:
from_path = os.path.join(app.snapshot_path, fmt.format(from_suffix))
to_path = os.path.join(app.snapshot_path, fmt.format(to_suffix))
try:
self.logger.debug("rotate: %s -> %s" % (from_path, to_path))
os.rename(from_path, to_path)
except IOError as e:
self.logger.debug("skip %s -> %s: %s" % (from_path, to_path, e))
pass
except Exception as e:
self.logger.debug("could not rename %s -> %s: %s" % (from_path, to_path, e))
app.latest_snapshot = snapshot
self.logger.info("saving Envoy configuration for snapshot %s" % snapshot)
with open(app.bootstrap_path, "w") as output:
output.write(json.dumps(bootstrap_config, sort_keys=True, indent=4))
with open(app.ads_path, "w") as output:
output.write(json.dumps(ads_config, sort_keys=True, indent=4))
app.aconf = aconf
app.ir = ir
app.econf = econf
app.diag = diag
if app.kick:
self.logger.info("running '%s'" % app.kick)
os.system(app.kick)
elif app.ambex_pid != 0:
self.logger.info("notifying PID %d ambex" % app.ambex_pid)
os.kill(app.ambex_pid, signal.SIGHUP)
# don't worry about TCPMappings yet
mappings = app.aconf.get_config('mappings')
if mappings:
for mapping_name, mapping in mappings.items():
app.kubestatus.mark_live("Mapping", mapping_name, mapping.get('namespace', Config.ambassador_namespace))
app.kubestatus.prune()
if app.ir.k8s_status_updates:
update_count = 0
for name in app.ir.k8s_status_updates.keys():
update_count += 1
# Strip off any namespace in the name.
resource_name = name.split('.', 1)[0]
kind, namespace, update = app.ir.k8s_status_updates[name]
text = json.dumps(update)
self.logger.info(f"K8s status update: {kind} {resource_name}.{namespace}, {text}...")
app.kubestatus.post(kind, resource_name, namespace, text)
self.logger.info("configuration updated from snapshot %s" % snapshot)
self._respond(rqueue, 200, 'configuration updated from snapshot %s' % snapshot)
if app.health_checks and not app.stats_updater:
app.logger.info("starting Envoy status updater")
app.stats_updater = PeriodicTrigger(app.watcher.update_estats, period=5)
# Check our environment...
self.check_environment()
self.chime()
def chime(self):
# In general, our reports here should be action "update", and they should honor the
# Scout cache, but we need to tweak that depending on whether we've done this before
# and on whether the environment looks OK.
already_chimed = bool_fmt(self.chimed)
was_ok = bool_fmt(self.last_chime)
now_ok = bool_fmt(self.env_good)
# Poor man's state machine...
action_key = f'{already_chimed}-{was_ok}-{now_ok}'
action, no_cache = AmbassadorEventWatcher.Actions[action_key]
self.logger.debug(f'CHIME: {action_key}')
chime_args = {
'no_cache': no_cache,
'failures': self.failure_list
}
if self.app.report_action_keys:
chime_args['action_key'] = action_key
# Don't use app.check_scout; it will deadlock.
self.check_scout(action, **chime_args)
# Remember that we have now chimed...
self.chimed = True
# ...and remember what we sent for that chime.
self.last_chime = self.env_good
def check_environment(self, ir: Optional[IR]=None) -> None:
env_good = True
chime_failures = {}
env_status = SystemStatus()
error_count = 0
tls_count = 0
mapping_count = 0
if not ir:
ir = app.ir
if not ir:
chime_failures['no config loaded'] = True
env_good = False
else:
if not ir.aconf:
chime_failures['completely empty config'] = True
env_good = False
else:
for err_key, err_list in ir.aconf.errors.items():
if err_key == "-global-":
err_key = ""
for err in err_list:
error_count += 1
err_text = err['error']
self.app.logger.info(f'error {err_key} {err_text}')
if err_text.find('CRD') >= 0:
if err_text.find('core') >= 0:
chime_failures['core CRDs'] = True
env_status.failure("CRDs", "Core CRD type definitions are missing")
else:
chime_failures['other CRDs'] = True
env_status.failure("CRDs", "Resolver CRD type definitions are missing")
env_good = False
elif err_text.find('TLS') >= 0:
chime_failures['TLS errors'] = True
env_status.failure('TLS', err_text)
env_good = False
for context in ir.tls_contexts:
if context:
tls_count += 1
break
for group in ir.groups.values():
for mapping in group.mappings:
pfx = mapping.get('prefix', None)
name = mapping.get('name', None)
if pfx:
                    if not pfx.startswith('/ambassador/v0') or not (name or '').startswith('internal_'):
mapping_count += 1
if error_count:
env_status.failure('Error check', f'{error_count} total error{"" if (error_count == 1) else "s"} logged')
env_good = False
else:
env_status.OK('Error check', "No errors logged")
if tls_count:
env_status.OK('TLS', f'{tls_count} TLSContext{" is" if (tls_count == 1) else "s are"} active')
else:
chime_failures['no TLS contexts'] = True
env_status.failure('TLS', "No TLSContexts are active")
env_good = False
if mapping_count:
env_status.OK('Mappings', f'{mapping_count} Mapping{" is" if (mapping_count == 1) else "s are"} active')
else:
chime_failures['no Mappings'] = True
env_status.failure('Mappings', "No Mappings are active")
env_good = False
failure_list: List[str] = []
if not env_good:
failure_list = list(sorted(chime_failures.keys()))
self.env_good = env_good
self.env_status = env_status
self.failure_list = failure_list
def check_scout(self, what: str, no_cache: Optional[bool]=False,
ir: Optional[IR]=None, failures: Optional[List[str]]=None,
action_key: Optional[str]=None) -> None:
now = datetime.datetime.now()
uptime = now - boot_time
hr_uptime = td_format(uptime)
if not ir:
ir = app.ir
self.app.notices.reset()
scout_args = {
"uptime": int(uptime.total_seconds()),
"hr_uptime": hr_uptime
}
if failures:
scout_args['failures'] = failures
if action_key:
scout_args['action_key'] = action_key
if ir:
self.app.logger.debug("check_scout: we have an IR")
if not os.environ.get("AMBASSADOR_DISABLE_FEATURES", None):
self.app.logger.debug("check_scout: including features")
feat = ir.features()
request_data = app.estats.stats.get('requests', None)
if request_data:
self.app.logger.debug("check_scout: including requests")
for rkey in request_data.keys():
cur = request_data[rkey]
prev = app.last_request_info.get(rkey, 0)
feat[f'request_{rkey}_count'] = max(cur - prev, 0)
lrt = app.last_request_time or boot_time
since_lrt = now - lrt
elapsed = since_lrt.total_seconds()
hr_elapsed = td_format(since_lrt)
app.last_request_time = now
app.last_request_info = request_data
feat['request_elapsed'] = elapsed
feat['request_hr_elapsed'] = hr_elapsed
scout_args["features"] = feat
scout_result = self.app.scout.report(mode="diagd", action=what, no_cache=no_cache, **scout_args)
scout_notices = scout_result.pop('notices', [])
global_loglevel = self.app.logger.getEffectiveLevel()
self.app.logger.debug(f'Scout section: global loglevel {global_loglevel}')
for notice in scout_notices:
notice_level_name = notice.get('level') or 'INFO'
notice_level = logging.getLevelName(notice_level_name)
if notice_level >= global_loglevel:
self.app.logger.debug(f'Scout section: include {notice}')
self.app.notices.post(notice)
else:
self.app.logger.debug(f'Scout section: skip {notice}')
self.app.logger.info("Scout reports %s" % json.dumps(scout_result))
self.app.logger.info("Scout notices: %s" % json.dumps(scout_notices))
self.app.logger.debug("App notices after scout: %s" % json.dumps(app.notices.notices))
def validate_envoy_config(self, config, retries) -> bool:
if self.app.no_envoy:
self.app.logger.debug("Skipping validation")
return True
# We want to keep the original config untouched
validation_config = copy.deepcopy(config)
        # Envoy fails validation if the '@type' field is present in the config, so drop it if it's there.
        validation_config.pop('@type', None)
config_json = json.dumps(validation_config, sort_keys=True, indent=4)
econf_validation_path = os.path.join(app.snapshot_path, "econf-tmp.json")
with open(econf_validation_path, "w") as output:
output.write(config_json)
command = ['envoy', '--config-path', econf_validation_path, '--mode', 'validate']
odict = {
'exit_code': 0,
'output': ''
}
# Try to validate the Envoy config. Short circuit and fall through
# immediately on concrete success or failure, and retry (up to the
# limit) on timeout.
timeout = 5
for retry in range(retries):
try:
odict['output'] = subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
odict['exit_code'] = 0
break
except subprocess.CalledProcessError as e:
odict['exit_code'] = e.returncode
odict['output'] = e.output
break
except subprocess.TimeoutExpired as e:
odict['exit_code'] = 1
odict['output'] = e.output
                    self.logger.warning("envoy configuration validation timed out after {} seconds{}\n{}".format(
                        timeout, ', retrying...' if retry < retries - 1 else '', e.output))
continue
if odict['exit_code'] == 0:
self.logger.info("successfully validated the resulting envoy configuration, continuing...")
return True
try:
decoded_error = odict['output'].decode('utf-8')
odict['output'] = decoded_error
except:
pass
self.logger.error("{}\ncould not validate the envoy configuration above after {} retries, failed with error \n{}\nAborting update...".format(config_json, retries, odict['output']))
return False
class StandaloneApplication(gunicorn.app.base.BaseApplication):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(StandaloneApplication, self).__init__()
# Boot chime. This is basically the earliest point at which we can consider an Ambassador
# to be "running".
scout_result = self.application.scout.report(mode="boot", action="boot1", no_cache=True)
self.application.logger.info(f'BOOT: Scout result {json.dumps(scout_result)}')
def load_config(self):
config = dict([(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
# This is a little weird, but whatever.
self.application.watcher = AmbassadorEventWatcher(self.application)
self.application.watcher.start()
if self.application.config_path:
self.application.watcher.post("CONFIG_FS", self.application.config_path)
return self.application
def _main(snapshot_path=None, bootstrap_path=None, ads_path=None,
*, dev_magic=False, config_path=None, ambex_pid=0, kick=None,
banner_endpoint="http://127.0.0.1:8500/banner", k8s=False,
no_checks=False, no_envoy=False, reload=False, debug=False, verbose=False,
workers=None, port=Constants.DIAG_PORT, host='0.0.0.0', notices=None,
validation_retries=5, allow_fs_commands=False, local_scout=False,
report_action_keys=False):
"""
Run the diagnostic daemon.
:param snapshot_path: Path to directory in which to save configuration snapshots and dynamic secrets
:param bootstrap_path: Path to which to write bootstrap Envoy configuration
:param ads_path: Path to which to write ADS Envoy configuration
:param config_path: Optional configuration path to scan for Ambassador YAML files
:param k8s: If True, assume config_path contains Kubernetes resources (only relevant with config_path)
:param ambex_pid: Optional PID to signal with HUP after updating Envoy configuration
:param kick: Optional command to run after updating Envoy configuration
:param banner_endpoint: Optional endpoint of extra banner to include
:param no_checks: If True, don't do Envoy-cluster health checking
:param no_envoy: If True, don't interact with Envoy at all
:param reload: If True, run Flask in debug mode for live reloading
:param debug: If True, do debug logging
:param dev_magic: If True, override a bunch of things for Datawire dev-loop stuff
:param verbose: If True, do really verbose debug logging
:param workers: Number of workers; default is based on the number of CPUs present
:param host: Interface on which to listen
:param port: Port on which to listen
:param notices: Optional file to read for local notices
:param validation_retries: Number of times to retry Envoy configuration validation after a timeout
:param allow_fs_commands: If true, allow CONFIG_FS to support debug/testing commands
:param local_scout: Don't talk to remote Scout at all; keep everything purely local
:param report_action_keys: Report action keys when chiming
"""
if dev_magic:
# Override the world.
os.environ['SCOUT_HOST'] = '127.0.0.1:9999'
os.environ['SCOUT_HTTPS'] = 'no'
no_checks = True
no_envoy = True
os.makedirs('/tmp/snapshots', mode=0o755, exist_ok=True)
snapshot_path = '/tmp/snapshots'
bootstrap_path = '/tmp/boot.json'
ads_path = '/tmp/ads.json'
port = 9998
allow_fs_commands = True
local_scout = True
report_action_keys = True
if no_envoy:
no_checks = True
# Create the application itself.
app.setup(snapshot_path, bootstrap_path, ads_path, config_path, ambex_pid, kick, banner_endpoint,
k8s, not no_checks, no_envoy, reload, debug, verbose, notices,
validation_retries, allow_fs_commands, local_scout, report_action_keys)
if not workers:
workers = number_of_workers()
gunicorn_config = {
'bind': '%s:%s' % (host, port),
# 'workers': 1,
'threads': workers,
}
app.logger.info("thread count %d, listening on %s" % (gunicorn_config['threads'], gunicorn_config['bind']))
StandaloneApplication(app, gunicorn_config).run()
def main():
clize.run(_main)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"AMBASSADOR_SNAPSHOT_COUNT",
"SCOUT_HTTPS",
"AMBASSADOR_SCOUT_ID",
"STATSD_ENABLED",
"AMBASSADOR_DISABLE_FEATURES",
"SCOUT_HOST",
"AMBASSADOR_KNATIVE_SUPPORT",
"AMBASSADOR_CLUSTER_ID"
] |
[]
|
["AMBASSADOR_SNAPSHOT_COUNT", "SCOUT_HTTPS", "AMBASSADOR_SCOUT_ID", "STATSD_ENABLED", "AMBASSADOR_DISABLE_FEATURES", "SCOUT_HOST", "AMBASSADOR_KNATIVE_SUPPORT", "AMBASSADOR_CLUSTER_ID"]
|
python
| 8 | 0 | |
internal/main.go
|
/*
Copyright 2020 Joseph Davis
For license information (MIT), see the containing GitHub project.
As can be seen below, this version uses Gorilla Mux. It can be installed with:
go get github.com/gorilla/mux
And PostgreSQL with
go get -u github.com/lib/pq
I used https://dev.to/moficodes/build-your-first-rest-api-with-go-2gcj
as a starting point.
And added in https://semaphoreci.com/community/tutorials/building-and-testing-a-rest-api-in-go-with-gorilla-mux-and-postgresql
for database and testing.
Including running a basic postgres instance with
sudo docker run -it -p 5432:5432 -d postgres
*/
package main
import (
"fmt"
"log"
"net/http"
"os"
"strconv"
"github.com/gorilla/mux"
)
// WelcomeGet - A simple landing page response to give a few hints about usage
func WelcomeGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
// TODO: include more helpful information about the API
w.Write([]byte(`{"message": "Welcome to the Worst3D Printer Slicing Service!"}`))
}
// HealthCheckGet - simple health check endpoint, just returns 200
func HealthCheckGet(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"message": "Running"}`))
}
// Create a new user, if the request is authenticated
func UserCreate(w http.ResponseWriter, r *http.Request) {
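	// TODO: not implemented; the /user POST route in main() is currently wired to params instead.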
}
/* Basic functions */
func post(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusCreated)
w.Write([]byte(`{"message": "post called"}`))
}
func put(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusAccepted)
w.Write([]byte(`{"message": "put called"}`))
}
func deleteHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write([]byte(`{"message": "delete called"}`))
}
func params(w http.ResponseWriter, r *http.Request) {
pathParams := mux.Vars(r)
w.Header().Set("Content-Type", "application/json")
userID := -1
var err error
if val, ok := pathParams["userID"]; ok {
userID, err = strconv.Atoi(val)
if err != nil {
			w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(`{"message": "need a number"}`))
return
}
}
commentID := -1
if val, ok := pathParams["commentID"]; ok {
commentID, err = strconv.Atoi(val)
if err != nil {
			w.WriteHeader(http.StatusBadRequest)
w.Write([]byte(`{"message": "need a number"}`))
return
}
}
query := r.URL.Query()
location := query.Get("location")
w.Write([]byte(fmt.Sprintf(`{"userID": %d, "commentID": %d, "location": "%s" }`, userID, commentID, location)))
}
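// Example: GET /worst3d/v3/user/42/comment/7?location=lab responds with
// {"userID": 42, "commentID": 7, "location": "lab" }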
func main() {
/* quick site map
/worst3d/v3
/user
/job
/project
/healthcheck
*/
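	// Database credentials are read from the WORST_DB_* environment variables passed to Initialize below.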
//r := mux.NewRouter()
worstApp := Worst3DPSApp{}
worstApp.Initialize(
os.Getenv("WORST_DB_USERNAME"),
os.Getenv("WORST_DB_PASSWORD"),
os.Getenv("WORST_DB_NAME") )
//r := worstApp.Router
api := worstApp.Router.PathPrefix("/worst3d/v3").Subrouter()
api.HandleFunc("", WelcomeGet).Methods(http.MethodGet)
api.HandleFunc("/healthcheck", HealthCheckGet).Methods(http.MethodGet)
api.HandleFunc("/user", params).Methods(http.MethodPost) // user creation
api.HandleFunc("/user/{userID}", params).Methods(http.MethodGet)
api.HandleFunc("", post).Methods(http.MethodPost)
api.HandleFunc("", put).Methods(http.MethodPut)
api.HandleFunc("", delete).Methods(http.MethodDelete)
log.Print("Spinning up the Worst3D Printer Slicing Service...")
api.HandleFunc("/user/{userID}/comment/{commentID}", params).Methods(http.MethodGet)
log.Fatal(http.ListenAndServe(":8080", worstApp.Router))
}
|
[
"\"WORST_DB_USERNAME\"",
"\"WORST_DB_PASSWORD\"",
"\"WORST_DB_NAME\""
] |
[] |
[
"WORST_DB_PASSWORD",
"WORST_DB_USERNAME",
"WORST_DB_NAME"
] |
[]
|
["WORST_DB_PASSWORD", "WORST_DB_USERNAME", "WORST_DB_NAME"]
|
go
| 3 | 0 | |
cluster-operator/src/main/java/io/strimzi/operator/cluster/model/EntityTopicOperator.java
|
/*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.operator.cluster.model;
import io.fabric8.kubernetes.api.model.Container;
import io.fabric8.kubernetes.api.model.ContainerBuilder;
import io.fabric8.kubernetes.api.model.EnvVar;
import io.fabric8.kubernetes.api.model.Volume;
import io.fabric8.kubernetes.api.model.VolumeMount;
import io.fabric8.kubernetes.api.model.rbac.RoleBinding;
import io.fabric8.kubernetes.api.model.rbac.RoleBindingBuilder;
import io.fabric8.kubernetes.api.model.rbac.RoleRef;
import io.fabric8.kubernetes.api.model.rbac.RoleRefBuilder;
import io.fabric8.kubernetes.api.model.rbac.Subject;
import io.fabric8.kubernetes.api.model.rbac.SubjectBuilder;
import io.strimzi.api.kafka.model.ContainerEnvVar;
import io.strimzi.api.kafka.model.EntityOperatorSpec;
import io.strimzi.api.kafka.model.EntityTopicOperatorSpec;
import io.strimzi.api.kafka.model.Kafka;
import io.strimzi.api.kafka.model.Probe;
import io.strimzi.api.kafka.model.ProbeBuilder;
import io.strimzi.operator.cluster.ClusterOperatorConfig;
import io.strimzi.operator.common.model.Labels;
import java.util.ArrayList;
import java.util.List;
import static io.strimzi.operator.cluster.model.ModelUtils.createHttpProbe;
import static java.util.Arrays.asList;
import static java.util.Collections.singletonList;
/**
* Represents the Topic Operator deployment
*/
public class EntityTopicOperator extends AbstractModel {
protected static final String TOPIC_OPERATOR_CONTAINER_NAME = "topic-operator";
private static final String NAME_SUFFIX = "-entity-topic-operator";
protected static final String METRICS_AND_LOG_CONFIG_SUFFIX = NAME_SUFFIX + "-config";
// Port configuration
protected static final int HEALTHCHECK_PORT = 8080;
protected static final String HEALTHCHECK_PORT_NAME = "healthcheck";
// Topic Operator configuration keys
public static final String ENV_VAR_RESOURCE_LABELS = "STRIMZI_RESOURCE_LABELS";
public static final String ENV_VAR_KAFKA_BOOTSTRAP_SERVERS = "STRIMZI_KAFKA_BOOTSTRAP_SERVERS";
public static final String ENV_VAR_ZOOKEEPER_CONNECT = "STRIMZI_ZOOKEEPER_CONNECT";
public static final String ENV_VAR_WATCHED_NAMESPACE = "STRIMZI_NAMESPACE";
public static final String ENV_VAR_FULL_RECONCILIATION_INTERVAL_MS = "STRIMZI_FULL_RECONCILIATION_INTERVAL_MS";
public static final String ENV_VAR_ZOOKEEPER_SESSION_TIMEOUT_MS = "STRIMZI_ZOOKEEPER_SESSION_TIMEOUT_MS";
public static final String ENV_VAR_TOPIC_METADATA_MAX_ATTEMPTS = "STRIMZI_TOPIC_METADATA_MAX_ATTEMPTS";
public static final String ENV_VAR_TLS_ENABLED = "STRIMZI_TLS_ENABLED";
public static final Probe DEFAULT_HEALTHCHECK_OPTIONS = new ProbeBuilder()
.withInitialDelaySeconds(EntityTopicOperatorSpec.DEFAULT_HEALTHCHECK_DELAY)
.withTimeoutSeconds(EntityTopicOperatorSpec.DEFAULT_HEALTHCHECK_TIMEOUT).build();
// Kafka bootstrap servers and Zookeeper nodes can't be specified in the JSON
private String kafkaBootstrapServers;
private String zookeeperConnect;
private String watchedNamespace;
private int reconciliationIntervalMs;
private int zookeeperSessionTimeoutMs;
private String resourceLabels;
private int topicMetadataMaxAttempts;
protected List<ContainerEnvVar> templateContainerEnvVars;
/**
* @param namespace Kubernetes/OpenShift namespace where cluster resources are going to be created
* @param cluster overall cluster name
     * @param labels    Labels to apply to the resources created for the Topic Operator
*/
protected EntityTopicOperator(String namespace, String cluster, Labels labels) {
super(namespace, cluster, labels);
this.name = topicOperatorName(cluster);
this.readinessPath = "/";
this.readinessProbeOptions = DEFAULT_HEALTHCHECK_OPTIONS;
this.livenessPath = "/";
this.livenessProbeOptions = DEFAULT_HEALTHCHECK_OPTIONS;
// create a default configuration
this.kafkaBootstrapServers = defaultBootstrapServers(cluster);
this.zookeeperConnect = defaultZookeeperConnect(cluster);
this.watchedNamespace = namespace;
this.reconciliationIntervalMs = EntityTopicOperatorSpec.DEFAULT_FULL_RECONCILIATION_INTERVAL_SECONDS * 1_000;
this.zookeeperSessionTimeoutMs = EntityTopicOperatorSpec.DEFAULT_ZOOKEEPER_SESSION_TIMEOUT_SECONDS * 1_000;
this.resourceLabels = ModelUtils.defaultResourceLabels(cluster);
this.topicMetadataMaxAttempts = EntityTopicOperatorSpec.DEFAULT_TOPIC_METADATA_MAX_ATTEMPTS;
this.ancillaryConfigName = metricAndLogConfigsName(cluster);
this.logAndMetricsConfigVolumeName = "entity-topic-operator-metrics-and-logging";
this.logAndMetricsConfigMountPath = "/opt/topic-operator/custom-config/";
}
public void setWatchedNamespace(String watchedNamespace) {
this.watchedNamespace = watchedNamespace;
}
public String getWatchedNamespace() {
return watchedNamespace;
}
public void setResourceLabels(String resourceLabels) {
this.resourceLabels = resourceLabels;
}
public String getResourceLabels() {
return resourceLabels;
}
public void setReconciliationIntervalMs(int reconciliationIntervalMs) {
this.reconciliationIntervalMs = reconciliationIntervalMs;
}
public int getReconciliationIntervalMs() {
return reconciliationIntervalMs;
}
public void setZookeeperSessionTimeoutMs(int zookeeperSessionTimeoutMs) {
this.zookeeperSessionTimeoutMs = zookeeperSessionTimeoutMs;
}
public int getZookeeperSessionTimeoutMs() {
return zookeeperSessionTimeoutMs;
}
public void setKafkaBootstrapServers(String kafkaBootstrapServers) {
this.kafkaBootstrapServers = kafkaBootstrapServers;
}
public String getKafkaBootstrapServers() {
return kafkaBootstrapServers;
}
public void setTopicMetadataMaxAttempts(int topicMetadataMaxAttempts) {
this.topicMetadataMaxAttempts = topicMetadataMaxAttempts;
}
public int getTopicMetadataMaxAttempts() {
return topicMetadataMaxAttempts;
}
protected static String defaultZookeeperConnect(String cluster) {
return String.format("%s:%d", "localhost", EntityTopicOperatorSpec.DEFAULT_ZOOKEEPER_PORT);
}
public void setZookeeperConnect(String zookeeperConnect) {
this.zookeeperConnect = zookeeperConnect;
}
public String getZookeeperConnect() {
return zookeeperConnect;
}
protected static String defaultBootstrapServers(String cluster) {
return KafkaCluster.serviceName(cluster) + ":" + EntityTopicOperatorSpec.DEFAULT_BOOTSTRAP_SERVERS_PORT;
}
public static String topicOperatorName(String cluster) {
return cluster + NAME_SUFFIX;
}
public static String metricAndLogConfigsName(String cluster) {
return cluster + METRICS_AND_LOG_CONFIG_SUFFIX;
}
/**
* Get the name of the TO role binding given the name of the {@code cluster}.
* @param cluster The cluster name.
* @return The name of the role binding.
*/
public static String roleBindingName(String cluster) {
return "strimzi-" + cluster + "-entity-topic-operator";
}
@Override
protected String getDefaultLogConfigFileName() {
return "entityTopicOperatorDefaultLoggingProperties";
}
@Override
String getAncillaryConfigMapKeyLogConfig() {
return "log4j2.properties";
}
/**
* Create an Entity Topic Operator from given desired resource
*
* @param kafkaAssembly desired resource with cluster configuration containing the Entity Topic Operator one
* @return Entity Topic Operator instance, null if not configured in the ConfigMap
*/
public static EntityTopicOperator fromCrd(Kafka kafkaAssembly) {
EntityTopicOperator result = null;
EntityOperatorSpec entityOperatorSpec = kafkaAssembly.getSpec().getEntityOperator();
if (entityOperatorSpec != null) {
EntityTopicOperatorSpec topicOperatorSpec = entityOperatorSpec.getTopicOperator();
if (topicOperatorSpec != null) {
String namespace = kafkaAssembly.getMetadata().getNamespace();
result = new EntityTopicOperator(
namespace,
kafkaAssembly.getMetadata().getName(),
Labels.fromResource(kafkaAssembly).withKind(kafkaAssembly.getKind()));
result.setOwnerReference(kafkaAssembly);
String image = topicOperatorSpec.getImage();
if (image == null) {
image = System.getenv().getOrDefault(ClusterOperatorConfig.STRIMZI_DEFAULT_TOPIC_OPERATOR_IMAGE, "strimzi/operator:latest");
}
result.setImage(image);
result.setWatchedNamespace(topicOperatorSpec.getWatchedNamespace() != null ? topicOperatorSpec.getWatchedNamespace() : namespace);
result.setReconciliationIntervalMs(topicOperatorSpec.getReconciliationIntervalSeconds() * 1_000);
result.setZookeeperSessionTimeoutMs(topicOperatorSpec.getZookeeperSessionTimeoutSeconds() * 1_000);
result.setTopicMetadataMaxAttempts(topicOperatorSpec.getTopicMetadataMaxAttempts());
result.setLogging(topicOperatorSpec.getLogging());
result.setGcLoggingEnabled(topicOperatorSpec.getJvmOptions() == null ? DEFAULT_JVM_GC_LOGGING_ENABLED : topicOperatorSpec.getJvmOptions().isGcLoggingEnabled());
result.setResources(topicOperatorSpec.getResources());
if (topicOperatorSpec.getReadinessProbe() != null) {
result.setReadinessProbe(topicOperatorSpec.getReadinessProbe());
}
if (topicOperatorSpec.getLivenessProbe() != null) {
result.setLivenessProbe(topicOperatorSpec.getLivenessProbe());
}
}
}
return result;
}
@Override
protected List<Container> getContainers(ImagePullPolicy imagePullPolicy) {
return singletonList(new ContainerBuilder()
.withName(TOPIC_OPERATOR_CONTAINER_NAME)
.withImage(getImage())
.withArgs("/opt/strimzi/bin/topic_operator_run.sh")
.withEnv(getEnvVars())
.withPorts(singletonList(createContainerPort(HEALTHCHECK_PORT_NAME, HEALTHCHECK_PORT, "TCP")))
.withLivenessProbe(createHttpProbe(livenessPath + "healthy", HEALTHCHECK_PORT_NAME, livenessProbeOptions))
.withReadinessProbe(createHttpProbe(readinessPath + "ready", HEALTHCHECK_PORT_NAME, readinessProbeOptions))
.withResources(getResources())
.withVolumeMounts(getVolumeMounts())
.withImagePullPolicy(determineImagePullPolicy(imagePullPolicy, getImage()))
.build());
}
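    /**
     * Environment variables for the Topic Operator container; these populate the STRIMZI_* keys declared above.
     */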
@Override
protected List<EnvVar> getEnvVars() {
List<EnvVar> varList = new ArrayList<>();
varList.add(buildEnvVar(ENV_VAR_RESOURCE_LABELS, resourceLabels));
varList.add(buildEnvVar(ENV_VAR_KAFKA_BOOTSTRAP_SERVERS, kafkaBootstrapServers));
varList.add(buildEnvVar(ENV_VAR_ZOOKEEPER_CONNECT, zookeeperConnect));
varList.add(buildEnvVar(ENV_VAR_WATCHED_NAMESPACE, watchedNamespace));
varList.add(buildEnvVar(ENV_VAR_FULL_RECONCILIATION_INTERVAL_MS, Integer.toString(reconciliationIntervalMs)));
varList.add(buildEnvVar(ENV_VAR_ZOOKEEPER_SESSION_TIMEOUT_MS, Integer.toString(zookeeperSessionTimeoutMs)));
varList.add(buildEnvVar(ENV_VAR_TOPIC_METADATA_MAX_ATTEMPTS, String.valueOf(topicMetadataMaxAttempts)));
varList.add(buildEnvVar(ENV_VAR_TLS_ENABLED, Boolean.toString(true)));
varList.add(buildEnvVar(ENV_VAR_STRIMZI_GC_LOG_ENABLED, String.valueOf(gcLoggingEnabled)));
addContainerEnvsToExistingEnvs(varList, templateContainerEnvVars);
return varList;
}
public List<Volume> getVolumes() {
return singletonList(createConfigMapVolume(logAndMetricsConfigVolumeName, ancillaryConfigName));
}
private List<VolumeMount> getVolumeMounts() {
return asList(VolumeUtils.createVolumeMount(logAndMetricsConfigVolumeName, logAndMetricsConfigMountPath),
VolumeUtils.createVolumeMount(EntityOperator.TLS_SIDECAR_EO_CERTS_VOLUME_NAME, EntityOperator.TLS_SIDECAR_EO_CERTS_VOLUME_MOUNT),
VolumeUtils.createVolumeMount(EntityOperator.TLS_SIDECAR_CA_CERTS_VOLUME_NAME, EntityOperator.TLS_SIDECAR_CA_CERTS_VOLUME_MOUNT));
}
public RoleBinding generateRoleBinding(String namespace, String watchedNamespace) {
Subject ks = new SubjectBuilder()
.withKind("ServiceAccount")
.withName(EntityOperator.entityOperatorServiceAccountName(cluster))
.withNamespace(namespace)
.build();
RoleRef roleRef = new RoleRefBuilder()
.withName(EntityOperator.EO_CLUSTER_ROLE_NAME)
.withApiGroup("rbac.authorization.k8s.io")
.withKind("ClusterRole")
.build();
RoleBinding rb = new RoleBindingBuilder()
.withNewMetadata()
.withName(roleBindingName(cluster))
.withNamespace(watchedNamespace)
.withOwnerReferences(createOwnerReference())
.withLabels(labels.toMap())
.endMetadata()
.withRoleRef(roleRef)
.withSubjects(singletonList(ks))
.build();
return rb;
}
public void setContainerEnvVars(List<ContainerEnvVar> envVars) {
templateContainerEnvVars = envVars;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
testrunner/lib_python_tests.py
|
#!/usr/bin/env python
"""
This is what the buildbot runs to execute the lib-python tests
on top of pypy-c.
"""
import sys, os
import subprocess
rootdir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0])))
os.environ['PYTHONPATH'] = rootdir
os.environ['PYTEST_PLUGINS'] = ''
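# Point test_all.py at the freshly built PyPy3 binary under pypy/goal (Windows builds use the .exe suffix).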
if sys.platform == 'win32':
pypyopt = "--pypy=pypy/goal/pypy3-c.exe"
else:
pypyopt = "--pypy=pypy/goal/pypy3-c"
popen = subprocess.Popen(
[sys.executable, "pypy/test_all.py",
pypyopt,
"--timeout=3600",
"-rs",
"--duration=10",
"--resultlog=cpython.log", "lib-python",
] + sys.argv[1:],
cwd=rootdir)
try:
ret = popen.wait()
except KeyboardInterrupt:
popen.kill()
print "\ninterrupted"
ret = 1
sys.exit(ret)
|
[] |
[] |
[
"PYTEST_PLUGINS",
"PYTHONPATH"
] |
[]
|
["PYTEST_PLUGINS", "PYTHONPATH"]
|
python
| 2 | 0 | |
Contents/Code/interface/menu.py
|
# coding=utf-8
import locale
import logging
import os
import logger
from item_details import ItemDetailsMenu
from refresh_item import RefreshItem
from menu_helpers import add_ignore_options, dig_tree, set_refresh_menu_state, \
should_display_ignore, default_thumb, debounce, ObjectContainer, SubFolderObjectContainer, route
from main import fatality, IgnoreMenu
from advanced import DispatchRestart
from subzero.constants import ART, PREFIX, DEPENDENCY_MODULE_NAMES
from support.scheduler import scheduler
from support.config import config
from support.helpers import timestamp, df, display_language
from support.ignore import ignore_list
from support.items import get_all_items, get_items_info, \
get_item_kind_from_rating_key, get_item
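# Note: R(), DirectoryObject and Callback are globals injected by the Plex plugin framework, not imports.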
# init GUI
ObjectContainer.art = R(ART)
ObjectContainer.no_cache = True
# default thumb for DirectoryObjects
DirectoryObject.thumb = default_thumb
@route(PREFIX + '/section/firstLetter/key', deeper=bool)
def FirstLetterMetadataMenu(rating_key, key, title=None, base_title=None, display_items=False, previous_item_type=None,
previous_rating_key=None):
"""
displays the contents of a section filtered by the first letter
:param rating_key: actually is the section's key
:param key: the firstLetter wanted
:param title: the first letter, or #
    :param base_title: the calling menu's title, used as a prefix for this menu's title
:return:
"""
title = base_title + " > " + unicode(title)
oc = SubFolderObjectContainer(title2=title, no_cache=True, no_history=True)
items = get_all_items(key="first_character", value=[rating_key, key], base="library/sections", flat=False)
kind, deeper = get_items_info(items)
dig_tree(oc, items, MetadataMenu,
pass_kwargs={"base_title": title, "display_items": deeper, "previous_item_type": kind,
"previous_rating_key": rating_key})
return oc
@route(PREFIX + '/section/contents', display_items=bool)
def MetadataMenu(rating_key, title=None, base_title=None, display_items=False, previous_item_type=None,
previous_rating_key=None, randomize=None):
"""
displays the contents of a section based on whether it has a deeper tree or not (movies->movie (item) list; series->series list)
:param rating_key:
:param title:
:param base_title:
:param display_items:
:param previous_item_type:
:param previous_rating_key:
:return:
"""
title = unicode(title)
item_title = title
title = base_title + " > " + title
oc = SubFolderObjectContainer(title2=title, no_cache=True, no_history=True)
current_kind = get_item_kind_from_rating_key(rating_key)
if display_items:
timeout = 30
# add back to series for season
if current_kind == "season":
timeout = 360
show = get_item(previous_rating_key)
oc.add(DirectoryObject(
key=Callback(MetadataMenu, rating_key=show.rating_key, title=show.title, base_title=show.section.title,
previous_item_type="section", display_items=True, randomize=timestamp()),
title=u"< Back to %s" % show.title,
thumb=show.thumb or default_thumb
))
elif current_kind == "series":
timeout = 1800
items = get_all_items(key="children", value=rating_key, base="library/metadata")
kind, deeper = get_items_info(items)
dig_tree(oc, items, MetadataMenu,
pass_kwargs={"base_title": title, "display_items": deeper, "previous_item_type": kind,
"previous_rating_key": rating_key})
# we don't know exactly where we are here, only add ignore option to series
if should_display_ignore(items, previous=previous_item_type):
add_ignore_options(oc, "series", title=item_title, rating_key=rating_key, callback_menu=IgnoreMenu)
# add refresh
oc.add(DirectoryObject(
key=Callback(RefreshItem, rating_key=rating_key, item_title=title, refresh_kind=current_kind,
previous_rating_key=previous_rating_key, timeout=timeout * 1000, randomize=timestamp()),
title=u"Refresh: %s" % item_title,
summary="Refreshes the %s, possibly searching for missing and picking up new subtitles on disk" % current_kind
))
oc.add(DirectoryObject(
key=Callback(RefreshItem, rating_key=rating_key, item_title=title, force=True,
refresh_kind=current_kind, previous_rating_key=previous_rating_key, timeout=timeout * 1000,
randomize=timestamp()),
title=u"Auto-Find subtitles: %s" % item_title,
summary="Issues a forced refresh, ignoring known subtitles and searching for new ones"
))
else:
return ItemDetailsMenu(rating_key=rating_key, title=title, item_title=item_title)
return oc
@route(PREFIX + '/ignore_list')
def IgnoreListMenu():
oc = SubFolderObjectContainer(title2="Ignore list", replace_parent=True)
for key in ignore_list.key_order:
values = ignore_list[key]
for value in values:
add_ignore_options(oc, key, title=ignore_list.get_title(key, value), rating_key=value,
callback_menu=IgnoreMenu)
return oc
@route(PREFIX + '/history')
def HistoryMenu():
from support.history import get_history
history = get_history()
oc = SubFolderObjectContainer(title2="History", replace_parent=True)
for item in history.history_items:
possible_language = item.language
language_display = item.lang_name if not possible_language else display_language(possible_language)
oc.add(DirectoryObject(
key=Callback(ItemDetailsMenu, title=item.title, item_title=item.item_title,
rating_key=item.rating_key),
title=u"%s (%s)" % (item.item_title, item.mode_verbose),
summary=u"%s in %s (%s, score: %s), %s" % (language_display, item.section_title,
item.provider_name, item.score, df(item.time))
))
return oc
@route(PREFIX + '/missing/refresh')
@debounce
def RefreshMissing(randomize=None):
scheduler.dispatch_task("SearchAllRecentlyAddedMissing")
header = "Refresh of recently added items with missing subtitles triggered"
return fatality(header=header, replace_parent=True)
@route(PREFIX + '/ValidatePrefs', enforce_route=True)
def ValidatePrefs():
Core.log.setLevel(logging.DEBUG)
if Prefs["log_console"]:
Core.log.addHandler(logger.console_handler)
Log.Debug("Logging to console from now on")
else:
Core.log.removeHandler(logger.console_handler)
Log.Debug("Stop logging to console")
# cache the channel state
update_dict = False
restart = False
# reset pin
Dict["pin_correct_time"] = None
config.initialize()
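# Compare the persisted channel/PIN-mode state against the current prefs;
# if either changed, store the new values and restart the plugin so the
# change actually takes effect.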
if "channel_enabled" not in Dict:
update_dict = True
elif Dict["channel_enabled"] != config.enable_channel:
Log.Debug("Channel features %s, restarting plugin", "enabled" if config.enable_channel else "disabled")
update_dict = True
restart = True
if "plugin_pin_mode" not in Dict:
update_dict = True
elif Dict["plugin_pin_mode"] != Prefs["plugin_pin_mode"]:
update_dict = True
restart = True
if update_dict:
Dict["channel_enabled"] = config.enable_channel
Dict["plugin_pin_mode"] = Prefs["plugin_pin_mode"]
Dict.Save()
if restart:
scheduler.stop()
DispatchRestart()
return
scheduler.setup_tasks()
scheduler.clear_task_data("MissingSubtitles")
set_refresh_menu_state(None)
Log.Debug("Validate Prefs called.")
# SZ config debug
Log.Debug("--- SZ Config-Debug ---")
for attr in [
"app_support_path", "data_path", "data_items_path", "enable_agent",
"enable_channel", "permissions_ok", "missing_permissions", "fs_encoding",
"subtitle_destination_folder", "dbm_supported", "lang_list", "providers"]:
Log.Debug("config.%s: %s", attr, getattr(config, attr))
for attr in ["plugin_log_path", "server_log_path"]:
value = getattr(config, attr)
access = os.access(value, os.R_OK)
if Core.runtime.os == "Windows":
try:
f = open(value, "r")
f.read(1)
f.close()
except:
access = False
Log.Debug("config.%s: %s (accessible: %s)", attr, value, access)
for attr in [
"subtitles.save.filesystem", ]:
Log.Debug("Pref.%s: %s", attr, Prefs[attr])
# fixme: check existence of and OS access to logs
Log.Debug("Platform: %s", Core.runtime.platform)
Log.Debug("OS: %s", Core.runtime.os)
Log.Debug("----- Environment -----")
for key, value in os.environ.iteritems():
if key.startswith("PLEX") or key.startswith("SZ_"):
if "TOKEN" in key:
outval = "xxxxxxxxxxxxxxxxxxx"
else:
outval = value
Log.Debug("%s: %s", key, outval)
Log.Debug("Locale: %s", locale.getdefaultlocale())
Log.Debug("-----------------------")
Log.Debug("Setting log-level to %s", Prefs["log_level"])
logger.register_logging_handler(DEPENDENCY_MODULE_NAMES, level=Prefs["log_level"])
Core.log.setLevel(logging.getLevelName(Prefs["log_level"]))
os.environ['U1pfT01EQl9LRVk'] = '789CF30DAC2C8B0AF433F5C9AD34290A712DF30D7135F12D0FB3E502006FDE081E'
return
| [] | [] | ["U1pfT01EQl9LRVk"] | [] | ["U1pfT01EQl9LRVk"] | python | 1 | 0 | |
roles/lnls-ans-role-epics/molecule/default/tests/test_default.py
|
import os
import pytest
import testinfra.utils.ansible_runner
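# Molecule exports the path of its generated Ansible inventory via the
# MOLECULE_INVENTORY_FILE environment variable; testinfra uses it to target
# the hosts in the "default_group" group.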
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("default_group")
@pytest.mark.parametrize(
"pkg",
[
"epics-dev",
],
)
def test_default_pkgs(host, pkg):
package = host.package(pkg)
assert package.is_installed
@pytest.mark.parametrize(
"tool",
[
"caget",
"caput",
],
)
def test_default_tools(host, tool):
try:
cmd = host.find_command(tool)
print("{} tool found in {}".format(tool, cmd))
except ValueError:
raise AssertionError()
| [] | [] | ["MOLECULE_INVENTORY_FILE"] | [] | ["MOLECULE_INVENTORY_FILE"] | python | 1 | 0 | |
internal/wg/device.go
|
package wg
import (
"fmt"
"io"
"os"
"strings"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
"riasc.eu/wice/internal/util"
t "riasc.eu/wice/internal/util/terminal"
)
type Device wgtypes.Device
func (d *Device) DumpEnv(wr io.Writer) {
var color, hideKeys bool
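// WG_COLOR_MODE and WG_HIDE_KEYS mirror the environment variables honored by
// the upstream wg(8) utility: color defaults to TTY auto-detection and
// private keys are hidden unless explicitly requested.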
switch os.Getenv("WG_COLOR_MODE") {
case "always":
color = true
case "never":
color = false
case "auto":
fallthrough
default:
color = util.IsATTY()
}
switch os.Getenv("WG_HIDE_KEYS") {
case "never":
hideKeys = false
case "always":
fallthrough
default:
hideKeys = true
}
d.Dump(wr, color, hideKeys)
}
func (d *Device) Dump(wr io.Writer, color bool, hideKeys bool) {
var kv = map[string]interface{}{
"public key": d.PublicKey,
"private key": "(hidden)",
"listening port": d.ListenPort,
}
if !hideKeys {
kv["private key"] = d.PrivateKey
}
if d.FirewallMark > 0 {
kv["fwmark"] = fmt.Sprintf("%#x", d.FirewallMark)
}
t.FprintfColored(wr, color, t.Color("interface", t.Bold, t.FgGreen)+": "+t.Color("%s", t.FgGreen)+"\n", d.Name)
t.PrintKeyValues(wr, color, " ", kv)
// TODO: sort peer list
// https://github.com/WireGuard/wireguard-tools/blob/1fd95708391088742c139010cc6b821add941dec/src/show.c#L47
for _, peer := range d.Peers {
var kv = map[string]interface{}{
"allowed ips": "(none)",
}
if peer.Endpoint != nil {
kv["endpoint"] = peer.Endpoint
}
if peer.LastHandshakeTime.Second() > 0 {
kv["latest handshake"] = util.Ago(peer.LastHandshakeTime, color)
}
if len(peer.AllowedIPs) > 0 {
allowedIPs := []string{}
for _, allowedIP := range peer.AllowedIPs {
allowedIPs = append(allowedIPs, allowedIP.String())
}
kv["allowed ips"] = strings.Join(allowedIPs, ", ")
} else {
kv["allowed ips"] = "(none)"
}
if peer.ReceiveBytes > 0 || peer.TransmitBytes > 0 {
kv["transfer"] = fmt.Sprintf("%s received, %s sent\n",
util.PrettyBytes(peer.ReceiveBytes, color),
util.PrettyBytes(peer.TransmitBytes, color))
}
if peer.PersistentKeepaliveInterval > 0 {
kv["persistent keepalive"] = util.Every(peer.PersistentKeepaliveInterval, color)
}
fmt.Fprintln(wr)
t.FprintfColored(wr, color, t.Color("peer", t.Bold, t.FgYellow)+": "+t.Color("%s", t.FgYellow)+"\n", peer.PublicKey.String())
t.PrintKeyValues(wr, color, " ", kv)
}
}
| ["\"WG_COLOR_MODE\"", "\"WG_HIDE_KEYS\""] | [] | ["WG_HIDE_KEYS", "WG_COLOR_MODE"] | [] | ["WG_HIDE_KEYS", "WG_COLOR_MODE"] | go | 2 | 0 | |
daemon/daemon.go
|
// Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon // import "github.com/docker/docker/daemon"
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
"google.golang.org/grpc"
"github.com/containerd/containerd"
"github.com/containerd/containerd/defaults"
"github.com/containerd/containerd/pkg/dialer"
"github.com/containerd/containerd/remotes/docker"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/builder"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/discovery"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/docker/daemon/images"
"github.com/docker/docker/daemon/logger"
"github.com/docker/docker/daemon/network"
"github.com/docker/docker/errdefs"
"github.com/moby/buildkit/util/resolver"
"github.com/moby/buildkit/util/tracing"
"github.com/sirupsen/logrus"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
"github.com/docker/docker/daemon/stats"
dmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/locker"
"github.com/docker/docker/pkg/plugingetter"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/plugin"
pluginexec "github.com/docker/docker/plugin/executor/containerd"
refstore "github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
volumesservice "github.com/docker/docker/volume/service"
"github.com/docker/libnetwork"
"github.com/docker/libnetwork/cluster"
nwconfig "github.com/docker/libnetwork/config"
"github.com/pkg/errors"
"golang.org/x/sync/semaphore"
)
// ContainersNamespace is the name of the namespace used for users' containers
const ContainersNamespace = "moby"
var (
errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
)
// Daemon holds information about the Docker daemon.
type Daemon struct {
ID string
repository string
containers container.Store
containersReplica container.ViewDB
execCommands *exec.Store
imageService *images.ImageService
idIndex *truncindex.TruncIndex
configStore *config.Config
statsCollector *stats.Collector
defaultLogConfig containertypes.LogConfig
RegistryService registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
volumes *volumesservice.VolumesService
discoveryWatcher discovery.Reloader
root string
seccompEnabled bool
apparmorEnabled bool
shutdown bool
idMapping *idtools.IdentityMapping
// TODO: move graphDrivers field to an InfoService
graphDrivers map[string]string // By operating system
PluginStore *plugin.Store // todo: remove
pluginManager *plugin.Manager
linkIndex *linkIndex
containerdCli *containerd.Client
containerd libcontainerdtypes.Client
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
clusterProvider cluster.Provider
cluster Cluster
genericResources []swarm.GenericResource
metricsPluginListener net.Listener
machineMemory uint64
seccompProfile []byte
seccompProfilePath string
diskUsageRunning int32
pruneRunning int32
hosts map[string]bool // hosts stores the addresses the daemon is listening on
startupDone chan struct{}
attachmentStore network.AttachmentStore
attachableNetworkLock *locker.Locker
}
// StoreHosts stores the addresses the daemon is listening on
func (daemon *Daemon) StoreHosts(hosts []string) {
if daemon.hosts == nil {
daemon.hosts = make(map[string]bool)
}
for _, h := range hosts {
daemon.hosts[h] = true
}
}
// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
return daemon.configStore != nil && daemon.configStore.Experimental
}
// Features returns the features map from configStore
func (daemon *Daemon) Features() *map[string]bool {
return &daemon.configStore.Features
}
// NewResolveOptionsFunc returns a callback function to resolve "registry-mirrors" and
// "insecure-registries" for buildkit
func (daemon *Daemon) NewResolveOptionsFunc() resolver.ResolveOptionsFunc {
return func(ref string) docker.ResolverOptions {
var (
registryKey = "docker.io"
mirrors = make([]string, len(daemon.configStore.Mirrors))
m = map[string]resolver.RegistryConf{}
)
// must trim "https://" or "http://" prefix
for i, v := range daemon.configStore.Mirrors {
if uri, err := url.Parse(v); err == nil {
v = uri.Host
}
mirrors[i] = v
}
// set "registry-mirrors"
m[registryKey] = resolver.RegistryConf{Mirrors: mirrors}
// set "insecure-registries"
for _, v := range daemon.configStore.InsecureRegistries {
if uri, err := url.Parse(v); err == nil {
v = uri.Host
}
m[v] = resolver.RegistryConf{
PlainHTTP: true,
}
}
def := docker.ResolverOptions{
Client: tracing.DefaultClient,
}
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return def
}
host := reference.Domain(parsed)
c, ok := m[host]
if !ok {
return def
}
if len(c.Mirrors) > 0 {
def.Host = func(string) (string, error) {
return c.Mirrors[rand.Intn(len(c.Mirrors))], nil
}
}
def.PlainHTTP = c.PlainHTTP
return def
}
}
func (daemon *Daemon) restore() error {
var mapLock sync.Mutex
containers := make(map[string]*container.Container)
logrus.Info("Loading containers: start.")
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
// parallelLimit is the maximum number of parallel startup jobs that we
// allow (this is the limit used for all startup semaphores). The multiplier
// (128) was chosen after some fairly significant benchmarking -- don't change
// it unless you've tested it significantly (this value is adjusted if
// RLIMIT_NOFILE is small to avoid EMFILE).
parallelLimit := adjustParallelLimit(len(dir), 128*runtime.NumCPU())
// Re-used for all parallel startup jobs.
var group sync.WaitGroup
sem := semaphore.NewWeighted(int64(parallelLimit))
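// First pass: load each container's on-disk state in parallel (bounded by
// the semaphore) and keep only the ones whose graph driver matches the one
// the daemon is currently using.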
for _, v := range dir {
group.Add(1)
go func(id string) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
container, err := daemon.load(id)
if err != nil {
logrus.Errorf("Failed to load container %v: %v", id, err)
return
}
if !system.IsOSSupported(container.OS) {
logrus.Errorf("Failed to load container %v: %s (%q)", id, system.ErrNotSupportedOperatingSystem, container.OS)
return
}
// Ignore the container if it does not support the current driver being used by the graph
currentDriverForContainerOS := daemon.graphDrivers[container.OS]
if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
rwlayer, err := daemon.imageService.GetLayerByID(container.ID, container.OS)
if err != nil {
logrus.Errorf("Failed to load container mount %v: %v", id, err)
return
}
container.RWLayer = rwlayer
logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning())
mapLock.Lock()
containers[container.ID] = container
mapLock.Unlock()
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}(v.Name())
}
group.Wait()
removeContainers := make(map[string]*container.Container)
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container name %s: %s", c.ID, err)
mapLock.Lock()
delete(containers, c.ID)
mapLock.Unlock()
return
}
if err := daemon.Register(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
mapLock.Lock()
delete(containers, c.ID)
mapLock.Unlock()
return
}
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c.HostConfig.LogConfig.Type == "" {
if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
}
}
}(c)
}
group.Wait()
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
defer group.Done()
_ = sem.Acquire(context.Background(), 1)
defer sem.Release(1)
daemon.backportMountSpec(c)
if err := daemon.checkpointAndSave(c); err != nil {
logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
}
daemon.setStateCounter(c)
logrus.WithFields(logrus.Fields{
"container": c.ID,
"running": c.IsRunning(),
"paused": c.IsPaused(),
}).Debug("restoring container")
var (
err error
alive bool
ec uint32
exitedAt time.Time
process libcontainerdtypes.Process
)
alive, _, process, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
if err != nil && !errdefs.IsNotFound(err) {
logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err)
return
}
if !alive && process != nil {
ec, exitedAt, err = process.Delete(context.Background())
if err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
return
}
} else if !daemon.configStore.LiveRestoreEnabled {
if err := daemon.kill(c, c.StopSignal()); err != nil && !errdefs.IsNotFound(err) {
logrus.WithError(err).WithField("container", c.ID).Error("error shutting down container")
return
}
}
if c.IsRunning() || c.IsPaused() {
c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
if c.IsPaused() && alive {
s, err := daemon.containerd.Status(context.Background(), c.ID)
if err != nil {
logrus.WithError(err).WithField("container", c.ID).
Errorf("Failed to get container status")
} else {
logrus.WithField("container", c.ID).WithField("state", s).
Info("restored container paused")
switch s {
case containerd.Paused, containerd.Pausing:
// nothing to do
case containerd.Stopped:
alive = false
case containerd.Unknown:
logrus.WithField("container", c.ID).
Error("Unknown status for container during restore")
default:
// running
c.Lock()
c.Paused = false
daemon.setStateCounter(c)
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
logrus.WithError(err).WithField("container", c.ID).
Error("Failed to update stopped container state")
}
c.Unlock()
}
}
}
if !alive {
c.Lock()
c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
daemon.Cleanup(c)
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err)
}
c.Unlock()
}
// we call Mount and then Unmount to get BaseFs of the container
if err := daemon.Mount(c); err != nil {
// The mount is unlikely to fail. However, if it does fail, the
// container should still be allowed to restore here. Some functionality
// (like docker exec -u user) might be missing, but the container can
// still be stopped, restarted and removed.
// See #29365 for related information.
// The error is only logged here.
logrus.Warnf("Failed to mount container on getting BaseFs path %v: %v", c.ID, err)
} else {
if err := daemon.Unmount(c); err != nil {
logrus.Warnf("Failed to umount container on getting BaseFs path %v: %v", c.ID, err)
}
}
c.ResetRestartManager(false)
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(c)
if err != nil {
logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
}
mapLock.Lock()
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
}
// Get the list of containers we need to restart.
// Do not autostart containers which have endpoints in a swarm-scope
// network yet, since the cluster is not initialized. They will be
// started once the cluster is initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
} else if c.HostConfig != nil && c.HostConfig.AutoRemove {
mapLock.Lock()
removeContainers[c.ID] = c
mapLock.Unlock()
}
c.Lock()
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
c.RemovalInProgress = false
c.Dead = true
if err := c.CheckpointTo(daemon.containersReplica); err != nil {
logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err)
}
}
c.Unlock()
}(c)
}
group.Wait()
daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
if err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
// Now that all the containers are registered, register the links
for _, c := range containers {
group.Add(1)
go func(c *container.Container) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
}
sem.Release(1)
group.Done()
}(c)
}
group.Wait()
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
_ = sem.Acquire(context.Background(), 1)
logrus.Debugf("Starting container %s", c.ID)
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.After(5 * time.Second)
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout:
}
}
}
// Make sure networks are available before starting
daemon.waitForNetworks(c)
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Errorf("Failed to start container %s: %s", c.ID, err)
}
close(chNotify)
sem.Release(1)
group.Done()
}(c, notifier)
}
group.Wait()
for id := range removeContainers {
group.Add(1)
go func(cid string) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
logrus.Errorf("Failed to remove container %s: %s", cid, err)
}
sem.Release(1)
group.Done()
}(id)
}
group.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _, c := range containers {
// if the container has restart policy, do not
// prepare the mountpoints since it has been done on restarting.
// This is to speed up the daemon start when a restart container
// has a volume and the volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
} else if _, ok := removeContainers[c.ID]; ok {
// container is automatically removed, skip it.
continue
}
group.Add(1)
go func(c *container.Container) {
_ = sem.Acquire(context.Background(), 1)
if err := daemon.prepareMountPoints(c); err != nil {
logrus.Error(err)
}
sem.Release(1)
group.Done()
}(c)
}
group.Wait()
logrus.Info("Loading containers: done.")
return nil
}
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
ctx := context.Background()
// parallelLimit is the maximum number of parallel startup jobs that we
// allow (this is the limit used for all startup semaphores). The multiplier
// (128) was chosen after some fairly significant benchmarking -- don't change
// it unless you've tested it significantly (this value is adjusted if
// RLIMIT_NOFILE is small to avoid EMFILE).
parallelLimit := adjustParallelLimit(len(daemon.List()), 128*runtime.NumCPU())
var group sync.WaitGroup
sem := semaphore.NewWeighted(int64(parallelLimit))
for _, c := range daemon.List() {
if !c.IsRunning() && !c.IsPaused() {
// Autostart all the containers which have a
// swarm endpoint now that the cluster is
// initialized.
if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint && c.HasBeenStartedBefore {
group.Add(1)
go func(c *container.Container) {
if err := sem.Acquire(ctx, 1); err != nil {
// ctx is done.
group.Done()
return
}
if err := daemon.containerStart(c, "", "", true); err != nil {
logrus.Error(err)
}
sem.Release(1)
group.Done()
}(c)
}
}
}
group.Wait()
}
// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
if daemon.discoveryWatcher == nil {
return
}
// Make sure if the container has a network that requires discovery that the discovery service is available before starting
for netName := range c.NetworkSettings.Networks {
// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
if _, err := daemon.netController.NetworkByName(netName); err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
continue
}
// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
// FIXME: why is this slow???
logrus.Debugf("Container %s waiting for network to be ready", c.Name)
select {
case <-daemon.discoveryWatcher.ReadyCh():
case <-time.After(60 * time.Second):
}
return
}
}
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.children(c)
}
// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if err := daemon.containersReplica.ReserveName(fullName, child.ID); err != nil {
if err == container.ErrNameReserved {
logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
return nil
}
return err
}
daemon.linkIndex.link(parent, child, fullName)
return nil
}
// DaemonJoinsCluster informs the daemon that it has joined the cluster and provides
// the handler to query the cluster component
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
daemon.setClusterProvider(clusterProvider)
}
// DaemonLeavesCluster informs the daemon that it has left the cluster
func (daemon *Daemon) DaemonLeavesCluster() {
// Daemon is in charge of removing the attachable networks with
// connected containers when the node leaves the swarm
daemon.clearAttachableNetworks()
// We no longer need the cluster provider, stop it now so that
// the network agent will stop listening to cluster events.
daemon.setClusterProvider(nil)
// Wait for the networking cluster agent to stop
daemon.netController.AgentStopWait()
// Daemon is in charge of removing the ingress network when the
// node leaves the swarm. Wait for job to be done or timeout.
// This is called also on graceful daemon shutdown. We need to
// wait, because the ingress release has to happen before the
// network controller is stopped.
if done, err := daemon.ReleaseIngress(); err == nil {
select {
case <-done:
case <-time.After(5 * time.Second):
logrus.Warn("timeout while waiting for ingress network removal")
}
} else {
logrus.Warnf("failed to initiate ingress network removal: %v", err)
}
daemon.attachmentStore.ClearAttachments()
}
// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
daemon.clusterProvider = clusterProvider
daemon.netController.SetClusterProvider(clusterProvider)
daemon.attachableNetworkLock = locker.New()
}
// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
if daemon.configStore == nil {
return nil
}
return daemon.configStore.IsSwarmCompatible()
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.Store) (daemon *Daemon, err error) {
setDefaultMtu(config)
registryService, err := registry.NewService(config.ServiceOptions)
if err != nil {
return nil, err
}
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit(); err != nil {
logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
}
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Setup the resolv.conf
setupResolvConf(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
idMapping, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootIDs := idMapping.RootPair()
if err := setupDaemonProcess(config); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := prepareTempDir(config.Root, rootIDs)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := getRealPath(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
if runtime.GOOS == "windows" {
if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
if err := system.MkdirAll(realTmp, 0700, ""); err != nil {
return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
}
}
os.Setenv("TEMP", realTmp)
os.Setenv("TMP", realTmp)
} else {
os.Setenv("TMPDIR", realTmp)
}
d := &Daemon{
configStore: config,
PluginStore: pluginStore,
startupDone: make(chan struct{}),
}
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
if err := d.setGenericResources(config); err != nil {
return nil, err
}
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
stackDumpDir := config.Root
if execRoot := config.GetExecRoot(); execRoot != "" {
stackDumpDir = execRoot
}
d.setupDumpStackTrap(stackDumpDir)
if err := d.setupSeccompProfile(); err != nil {
return nil, err
}
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
// ensureDefaultAppArmorProfile does nothing if apparmor is disabled
if err := ensureDefaultAppArmorProfile(); err != nil {
logrus.Errorf(err.Error())
}
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil {
return nil, err
}
// Create the directory where we'll store the runtime scripts (i.e. in
// order to support runtimeArgs)
daemonRuntimes := filepath.Join(config.Root, "runtimes")
if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil {
return nil, err
}
if err := d.loadRuntimes(); err != nil {
return nil, err
}
if runtime.GOOS == "windows" {
if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil {
return nil, err
}
}
// On Windows we support neither the environment variable nor a user-supplied graphdriver,
// as Windows has no choice in terms of which graphdrivers to use: windowsfilter for
// running Windows containers on Windows, and lcow for running Linux containers on Windows.
// Unix platforms, however, run a single graphdriver for all containers, and it can be set
// through an environment variable, a daemon start parameter, or chosen through
// initialization of the layer store based on driver priority order, for example.
d.graphDrivers = make(map[string]string)
layerStores := make(map[string]layer.Store)
if runtime.GOOS == "windows" {
d.graphDrivers[runtime.GOOS] = "windowsfilter"
if system.LCOWSupported() {
d.graphDrivers["linux"] = "lcow"
}
} else {
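// On Unix-like platforms the storage driver may be chosen via the
// DOCKER_DRIVER environment variable, falling back to the daemon
// configuration; an empty value lets the layer store pick a driver by
// priority order.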
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
} else {
logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName)
}
d.graphDrivers[runtime.GOOS] = driverName // May still be empty. Layerstore init determines instead.
}
d.RegistryService = registryService
logger.RegisterPluginGetter(d.PluginStore)
metricsSockPath, err := d.listenMetricsSock()
if err != nil {
return nil, err
}
registerMetricsPluginCallback(d.PluginStore, metricsSockPath)
gopts := []grpc.DialOption{
// WithBlock makes sure that the following containerd request
// is reliable.
//
// NOTE: In one edge case, under high load pressure the kernel may
// OOM-kill dockerd, containerd and the containerd-shims. When both
// dockerd and containerd restart, containerd needs time to recover
// all the existing containers. Until containerd is serving again,
// dockerd fails with a gRPC error. Worse, the restore action ignores
// any non-NotFound errors and reports a running state for containers
// that have already stopped, which is unexpected behavior and forces
// a dockerd restart to get back to a consistent state.
//
// Adding WithBlock prevents this edge case, and in the common case
// containerd is serving again shortly, so blocking on the containerd
// connection does no harm.
grpc.WithBlock(),
grpc.WithInsecure(),
grpc.WithBackoffMaxDelay(3 * time.Second),
grpc.WithDialer(dialer.Dialer),
// TODO(stevvooe): We may need to allow configuration of this on the client.
grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)),
}
if config.ContainerdAddr != "" {
d.containerdCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
}
}
createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
var pluginCli *containerd.Client
// Windows is not currently using containerd, keep the
// client as nil
if config.ContainerdAddr != "" {
pluginCli, err = containerd.New(config.ContainerdAddr, containerd.WithDefaultNamespace(config.ContainerdPluginNamespace), containerd.WithDialOpts(gopts), containerd.WithTimeout(60*time.Second))
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q", config.ContainerdAddr)
}
}
return pluginexec.New(ctx, getPluginExecRoot(config.Root), pluginCli, config.ContainerdPluginNamespace, m)
}
// Plugin system initialization should happen before restore. Do not change order.
d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
Root: filepath.Join(config.Root, "plugins"),
ExecRoot: getPluginExecRoot(config.Root),
Store: d.PluginStore,
CreateExecutor: createPluginExec,
RegistryService: registryService,
LiveRestoreEnabled: config.LiveRestoreEnabled,
LogPluginEvent: d.LogPluginEvent, // todo: make private
AuthzMiddleware: config.AuthzMiddleware,
})
if err != nil {
return nil, errors.Wrap(err, "couldn't create plugin manager")
}
if err := d.setupDefaultLogConfig(); err != nil {
return nil, err
}
for operatingSystem, gd := range d.graphDrivers {
layerStores[operatingSystem], err = layer.NewStoreFromOptions(layer.StoreOptions{
Root: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: gd,
GraphDriverOptions: config.GraphOptions,
IDMapping: idMapping,
PluginGetter: d.PluginStore,
ExperimentalEnabled: config.Experimental,
OS: operatingSystem,
})
if err != nil {
return nil, err
}
// As layerstore initialization may set the driver
d.graphDrivers[operatingSystem] = layerStores[operatingSystem].DriverName()
}
// Configure and validate the kernels security support. Note this is a Linux/FreeBSD
// operation only, so it is safe to pass *just* the runtime OS graphdriver.
if err := configureKernelSecuritySupport(config, d.graphDrivers[runtime.GOOS]); err != nil {
return nil, err
}
imageRoot := filepath.Join(config.Root, "image", d.graphDrivers[runtime.GOOS])
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
lgrMap := make(map[string]image.LayerGetReleaser)
for os, ls := range layerStores {
lgrMap[os] = ls
}
imageStore, err := image.NewImageStore(ifs, lgrMap)
if err != nil {
return nil, err
}
d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d)
if err != nil {
return nil, err
}
trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700, ""); err != nil {
return nil, err
}
// We have a single tag/reference store for the daemon globally. However, it's
// stored under the graphdriver. On host platforms which only support a single
// container OS, but multiple selectable graphdrivers, this means depending on which
// graphdriver is chosen, the global reference store is under there. For
// platforms which support multiple container operating systems, this is slightly
// more problematic as where does the global ref store get located? Fortunately,
// for Windows, which is currently the only daemon supporting multiple container
// operating systems, the list of graphdrivers available isn't user configurable.
// For backwards compatibility, we just put it under the windowsfilter
// directory regardless.
refStoreLocation := filepath.Join(imageRoot, `repositories.json`)
rs, err := refstore.NewReferenceStore(refStoreLocation)
if err != nil {
return nil, fmt.Errorf("Couldn't create reference store repository: %s", err)
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as it's read-only
if err := d.initDiscovery(config); err != nil {
return nil, err
}
sysInfo := sysinfo.New(false)
// Check if Devices cgroup is mounted, it is hard requirement for container security,
// on Linux.
if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
return nil, errors.New("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
if d.containersReplica, err = container.NewViewDB(); err != nil {
return nil, err
}
d.execCommands = exec.NewStore()
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.EventsService = events.New()
d.root = config.Root
d.idMapping = idMapping
d.seccompEnabled = sysInfo.Seccomp
d.apparmorEnabled = sysInfo.AppArmor
d.linkIndex = newLinkIndex()
// TODO: imageStore, distributionMetadataStore, and ReferenceStore are only
// used above to run migration. They could be initialized in ImageService
// if migration is called from daemon/images. layerStore might move as well.
d.imageService = images.NewImageService(images.ImageServiceConfig{
ContainerStore: d.containers,
DistributionMetadataStore: distributionMetadataStore,
EventsService: d.EventsService,
ImageStore: imageStore,
LayerStores: layerStores,
MaxConcurrentDownloads: *config.MaxConcurrentDownloads,
MaxConcurrentUploads: *config.MaxConcurrentUploads,
ReferenceStore: rs,
RegistryService: registryService,
TrustKey: trustKey,
})
go d.execCommandGC()
d.containerd, err = libcontainerd.NewClient(ctx, d.containerdCli, filepath.Join(config.ExecRoot, "containerd"), config.ContainerdNamespace, d)
if err != nil {
return nil, err
}
if err := d.restore(); err != nil {
return nil, err
}
close(d.startupDone)
// FIXME: this method never returns an error
info, _ := d.SystemInfo()
engineInfo.WithValues(
dockerversion.Version,
dockerversion.GitCommit,
info.Architecture,
info.Driver,
info.KernelVersion,
info.OperatingSystem,
info.OSType,
info.ID,
).Set(1)
engineCpus.Set(float64(info.NCPU))
engineMemory.Set(float64(info.MemTotal))
gd := ""
for os, driver := range d.graphDrivers {
if len(gd) > 0 {
gd += ", "
}
gd += driver
if len(d.graphDrivers) > 1 {
gd = fmt.Sprintf("%s (%s)", gd, os)
}
}
logrus.WithFields(logrus.Fields{
"version": dockerversion.Version,
"commit": dockerversion.GitCommit,
"graphdriver(s)": gd,
}).Info("Docker daemon")
return d, nil
}
// DistributionServices returns services controlling daemon storage
func (daemon *Daemon) DistributionServices() images.DistributionServices {
return daemon.imageService.DistributionServices()
}
func (daemon *Daemon) waitForStartupDone() {
<-daemon.startupDone
}
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
stopTimeout := c.StopTimeout()
// If the container fails to exit within stopTimeout seconds of SIGTERM, force-kill it
if err := daemon.containerStop(c, stopTimeout); err != nil {
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
}
// Wait without timeout for the container to exit.
// Ignore the result.
<-c.Wait(context.Background(), container.WaitConditionNotRunning)
return nil
}
// ShutdownTimeout returns the timeout (in seconds) before containers are forcibly
// killed during shutdown. The default timeout can be configured both on the daemon
// and per container, and the longest timeout will be used. A grace-period of
// 5 seconds is added to the configured timeout.
//
// A negative (-1) timeout means "indefinitely", which means that containers
// are not forcibly killed, and the daemon shuts down after all containers exit.
func (daemon *Daemon) ShutdownTimeout() int {
shutdownTimeout := daemon.configStore.ShutdownTimeout
if shutdownTimeout < 0 {
return -1
}
if daemon.containers == nil {
return shutdownTimeout
}
graceTimeout := 5
for _, c := range daemon.containers.List() {
stopTimeout := c.StopTimeout()
if stopTimeout < 0 {
return -1
}
if stopTimeout+graceTimeout > shutdownTimeout {
shutdownTimeout = stopTimeout + graceTimeout
}
}
return shutdownTimeout
}
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
// check if there are any running containers, if none we should do some cleanup
if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
// metrics plugins still need some cleanup
daemon.cleanupMetricsPlugins()
return nil
}
}
if daemon.containers != nil {
logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
daemon.containers.ApplyAll(func(c *container.Container) {
if !c.IsRunning() {
return
}
logrus.Debugf("stopping %s", c.ID)
if err := daemon.shutdownContainer(c); err != nil {
logrus.Errorf("Stop container error: %v", err)
return
}
if mountid, err := daemon.imageService.GetLayerMountID(c.ID, c.OS); err == nil {
daemon.cleanupMountsByID(mountid)
}
logrus.Debugf("container stopped %s", c.ID)
})
}
if daemon.volumes != nil {
if err := daemon.volumes.Shutdown(); err != nil {
logrus.Errorf("Error shutting down volume store: %v", err)
}
}
if daemon.imageService != nil {
daemon.imageService.Cleanup()
}
// If we are part of a cluster, clean up cluster's stuff
if daemon.clusterProvider != nil {
logrus.Debugf("start clean shutdown of cluster resources...")
daemon.DaemonLeavesCluster()
}
daemon.cleanupMetricsPlugins()
// Shutdown plugins after containers and layerstore. Don't change the order.
daemon.pluginShutdown()
// trigger libnetwork Stop only if it's initialized
if daemon.netController != nil {
daemon.netController.Stop()
}
if daemon.containerdCli != nil {
daemon.containerdCli.Close()
}
return daemon.cleanupMounts()
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
if container.RWLayer == nil {
return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
}
dir, err := container.RWLayer.Mount(container.GetMountLabel())
if err != nil {
return err
}
logrus.Debugf("container mounted via layerStore: %v", dir)
if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
if runtime.GOOS != "windows" {
daemon.Unmount(container)
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
daemon.imageService.GraphDriverForOS(container.OS), container.ID, container.BaseFS, dir)
}
}
container.BaseFS = dir // TODO: combine these fields
return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
if container.RWLayer == nil {
return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")
}
if err := container.RWLayer.Unmount(); err != nil {
logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
return err
}
return nil
}
// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
var v4Subnets []net.IPNet
var v6Subnets []net.IPNet
managedNetworks := daemon.netController.Networks()
for _, managedNetwork := range managedNetworks {
v4infos, v6infos := managedNetwork.Info().IpamInfo()
for _, info := range v4infos {
if info.IPAMData.Pool != nil {
v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
}
}
for _, info := range v6infos {
if info.IPAMData.Pool != nil {
v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
}
}
}
return v4Subnets, v6Subnets
}
// prepareTempDir prepares and returns the default directory to use
// for temporary files.
// If it doesn't exist, it is created. If it exists, its content is removed.
func prepareTempDir(rootDir string, rootIdentity idtools.Identity) (string, error) {
var tmpDir string
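// DOCKER_TMPDIR lets the operator relocate the daemon's scratch space;
// otherwise a "tmp" directory under the daemon root is used, with any
// leftovers from a previous run removed in the background.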
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
newName := tmpDir + "-old"
if err := os.Rename(tmpDir, newName); err == nil {
go func() {
if err := os.RemoveAll(newName); err != nil {
logrus.Warnf("failed to delete old tmp directory: %s", newName)
}
}()
} else if !os.IsNotExist(err) {
logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
if err := os.RemoveAll(tmpDir); err != nil {
logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
}
}
}
// We don't remove the content of tmpdir if it's not the default,
// it may hold things that do not belong to us.
return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIdentity)
}
func (daemon *Daemon) setGenericResources(conf *config.Config) error {
genericResources, err := config.ParseGenericResources(conf.NodeGenericResources)
if err != nil {
return err
}
daemon.genericResources = genericResources
return nil
}
func setDefaultMtu(conf *config.Config) {
// do nothing if the config does not have the default 0 value.
if conf.Mtu != 0 {
return
}
conf.Mtu = config.DefaultNetworkMtu
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
if err != nil {
if err == discovery.ErrDiscoveryDisabled {
return nil
}
return err
}
conf.ClusterAdvertise = advertise
discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
return nil
}
func isBridgeNetworkDisabled(conf *config.Config) bool {
return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}
func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
if len(dconfig.NetworkConfig.DefaultAddressPools.Value()) > 0 {
options = append(options, nwconfig.OptionDefaultAddressPoolConfig(dconfig.NetworkConfig.DefaultAddressPools.Value()))
}
if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
}
if pg != nil {
options = append(options, nwconfig.OptionPluginGetter(pg))
}
options = append(options, nwconfig.OptionNetworkControlPlaneMTU(dconfig.NetworkControlPlaneMTU))
return options, nil
}
// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
return daemon.cluster
}
// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
daemon.cluster = cluster
}
func (daemon *Daemon) pluginShutdown() {
manager := daemon.pluginManager
// Check for a valid manager object. In error conditions, daemon init can fail
// and shutdown called, before plugin manager is initialized.
if manager != nil {
manager.Shutdown()
}
}
// PluginManager returns current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
return daemon.pluginManager
}
// PluginGetter returns current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
return daemon.PluginStore
}
// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
// get the canonical path to the Docker root directory
var realRoot string
if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
realRoot = config.Root
} else {
realRoot, err = getRealPath(config.Root)
if err != nil {
return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
}
}
idMapping, err := setupRemappedRoot(config)
if err != nil {
return err
}
return setupDaemonRoot(config, realRoot, idMapping.RootPair())
}
// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
container.Lock()
defer container.Unlock()
if err := container.CheckpointTo(daemon.containersReplica); err != nil {
return fmt.Errorf("Error saving container state: %v", err)
}
return nil
}
// because the CLI sends a -1 when it wants to unset the swappiness value
// we need to clear it on the server side
func fixMemorySwappiness(resources *containertypes.Resources) {
if resources.MemorySwappiness != nil && *resources.MemorySwappiness == -1 {
resources.MemorySwappiness = nil
}
}
// GetAttachmentStore returns current attachment store associated with the daemon
func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
return &daemon.attachmentStore
}
// IdentityMapping returns uid/gid mapping or a SID (in the case of Windows) for the builder
func (daemon *Daemon) IdentityMapping() *idtools.IdentityMapping {
return daemon.idMapping
}
// ImageService returns the Daemon's ImageService
func (daemon *Daemon) ImageService() *images.ImageService {
return daemon.imageService
}
// BuilderBackend returns the backend used by builder
func (daemon *Daemon) BuilderBackend() builder.Backend {
return struct {
*Daemon
*images.ImageService
}{daemon, daemon.imageService}
}
| ["\"DOCKER_DRIVER\"", "\"DOCKER_TMPDIR\""] | [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | go | 2 | 0 | |
fw/nodemcu-firmware/sdk/esp_iot_sdk_v1.4.0/tools/gen_appbin.py
|
#!/usr/bin/python
#
# File : gen_appbin.py
# This file is part of Espressif's generate bin script.
# Copyright (C) 2013 - 2016, Espressif Systems
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of version 3 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""This file is part of Espressif's generate bin script.
argv[1] is elf file name
argv[2] is version num"""
import string
import sys
import os
import re
import binascii
import struct
import zlib
TEXT_ADDRESS = 0x40100000
# app_entry = 0
# data_address = 0x3ffb0000
# data_end = 0x40000000
# text_end = 0x40120000
CHECKSUM_INIT = 0xEF
chk_sum = CHECKSUM_INIT
blocks = 0
def write_file(file_name,data):
if file_name is None:
print 'file_name cannot be none\n'
sys.exit(0)
fp = open(file_name,'ab')
if fp:
fp.seek(0,os.SEEK_END)
fp.write(data)
fp.close()
else:
print '%s write fail\n'%(file_name)
def combine_bin(file_name,dest_file_name,start_offset_addr,need_chk):
global chk_sum
global blocks
if dest_file_name is None:
print 'dest_file_name cannot be none\n'
sys.exit(0)
if file_name:
fp = open(file_name,'rb')
if fp:
########## write text ##########
fp.seek(0,os.SEEK_END)
data_len = fp.tell()
if data_len:
if need_chk:
tmp_len = (data_len + 3) & (~3)
else:
tmp_len = (data_len + 15) & (~15)
data_bin = struct.pack('<II',start_offset_addr,tmp_len)
write_file(dest_file_name,data_bin)
fp.seek(0,os.SEEK_SET)
data_bin = fp.read(data_len)
write_file(dest_file_name,data_bin)
if need_chk:
for loop in range(len(data_bin)):
chk_sum ^= ord(data_bin[loop])
# print '%s size is %d(0x%x),align 4 bytes,\nultimate size is %d(0x%x)'%(file_name,data_len,data_len,tmp_len,tmp_len)
tmp_len = tmp_len - data_len
if tmp_len:
data_str = ['00']*(tmp_len)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(dest_file_name,data_bin)
if need_chk:
for loop in range(len(data_bin)):
chk_sum ^= ord(data_bin[loop])
blocks = blocks + 1
fp.close()
else:
print '!!!Open %s fail!!!'%(file_name)
def getFileCRC(_path):
try:
blocksize = 1024 * 64
f = open(_path,"rb")
str = f.read(blocksize)
crc = 0
while(len(str) != 0):
crc = binascii.crc32(str, crc)
str = f.read(blocksize)
f.close()
except:
print 'get file crc error!'
return 0
return crc
def gen_appbin():
global chk_sum
global crc_sum
global blocks
if len(sys.argv) != 6:
print 'Usage: gen_appbin.py eagle.app.out boot_mode flash_mode flash_clk_div flash_size_map'
sys.exit(0)
elf_file = sys.argv[1]
boot_mode = sys.argv[2]
flash_mode = sys.argv[3]
flash_clk_div = sys.argv[4]
flash_size_map = sys.argv[5]
flash_data_line = 16
data_line_bits = 0xf
irom0text_bin_name = 'eagle.app.v6.irom0text.bin'
text_bin_name = 'eagle.app.v6.text.bin'
data_bin_name = 'eagle.app.v6.data.bin'
rodata_bin_name = 'eagle.app.v6.rodata.bin'
flash_bin_name ='eagle.app.flash.bin'
BIN_MAGIC_FLASH = 0xE9
BIN_MAGIC_IROM = 0xEA
data_str = ''
sum_size = 0
if os.getenv('COMPILE')=='gcc' :
cmd = 'xtensa-lx106-elf-nm -g ' + elf_file + ' > eagle.app.sym'
else :
cmd = 'xt-nm -g ' + elf_file + ' > eagle.app.sym'
os.system(cmd)
fp = file('./eagle.app.sym')
if fp is None:
print "open sym file error\n"
sys.exit(0)
lines = fp.readlines()
fp.close()
entry_addr = None
p = re.compile('(\w*)(\sT\s)(call_user_start)$')
for line in lines:
m = p.search(line)
if m != None:
entry_addr = m.group(1)
# print entry_addr
if entry_addr is None:
print 'no entry point!!'
sys.exit(0)
data_start_addr = '0'
p = re.compile('(\w*)(\sA\s)(_data_start)$')
for line in lines:
m = p.search(line)
if m != None:
data_start_addr = m.group(1)
# print data_start_addr
rodata_start_addr = '0'
p = re.compile('(\w*)(\sA\s)(_rodata_start)$')
for line in lines:
m = p.search(line)
if m != None:
rodata_start_addr = m.group(1)
# print rodata_start_addr
# write flash bin header
#============================
# SPI FLASH PARAMS
#-------------------
#flash_mode=
# 0: QIO
# 1: QOUT
# 2: DIO
# 3: DOUT
#-------------------
#flash_clk_div=
# 0 : 80m / 2
# 1 : 80m / 3
# 2 : 80m / 4
# 0xf: 80m / 1
#-------------------
#flash_size_map=
# 0 : 512 KB (256 KB + 256 KB)
# 1 : 256 KB
# 2 : 1024 KB (512 KB + 512 KB)
# 3 : 2048 KB (512 KB + 512 KB)
# 4 : 4096 KB (512 KB + 512 KB)
# 5 : 2048 KB (1024 KB + 1024 KB)
# 6 : 4096 KB (1024 KB + 1024 KB)
#-------------------
# END OF SPI FLASH PARAMS
#============================
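# Illustrative example (not in the original script): with flash_mode=0 (QIO),
# flash_clk_div=0xf (80 MHz) and flash_size_map=4 (4096 KB), the lines below
# compute byte2 = 0x00 and byte3 = (4 << 4) | 0xf = 0x4f.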
byte2=int(flash_mode)&0xff
byte3=(((int(flash_size_map)<<4)| int(flash_clk_div))&0xff)
if boot_mode == '2':
# write irom bin head
data_bin = struct.pack('<BBBBI',BIN_MAGIC_IROM,4,byte2,byte3,long(entry_addr,16))
sum_size = len(data_bin)
write_file(flash_bin_name,data_bin)
# irom0.text.bin
combine_bin(irom0text_bin_name,flash_bin_name,0x0,0)
data_bin = struct.pack('<BBBBI',BIN_MAGIC_FLASH,3,byte2,byte3,long(entry_addr,16))
sum_size = len(data_bin)
write_file(flash_bin_name,data_bin)
# text.bin
combine_bin(text_bin_name,flash_bin_name,TEXT_ADDRESS,1)
# data.bin
if data_start_addr:
combine_bin(data_bin_name,flash_bin_name,long(data_start_addr,16),1)
# rodata.bin
combine_bin(rodata_bin_name,flash_bin_name,long(rodata_start_addr,16),1)
# write checksum header
sum_size = os.path.getsize(flash_bin_name) + 1
sum_size = flash_data_line - (data_line_bits&sum_size)
if sum_size:
data_str = ['00']*(sum_size)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(flash_bin_name,data_bin)
write_file(flash_bin_name,chr(chk_sum & 0xFF))
if boot_mode == '1':
sum_size = os.path.getsize(flash_bin_name)
data_str = ['FF']*(0x10000-sum_size)
data_bin = binascii.a2b_hex(''.join(data_str))
write_file(flash_bin_name,data_bin)
fp = open(irom0text_bin_name,'rb')
if fp:
data_bin = fp.read()
write_file(flash_bin_name,data_bin)
fp.close()
else :
print '!!!Open %s fail!!!'%(irom0text_bin_name)
sys.exit(0)
if boot_mode == '1' or boot_mode == '2':
all_bin_crc = getFileCRC(flash_bin_name)
print all_bin_crc
if all_bin_crc < 0:
all_bin_crc = abs(all_bin_crc) - 1
else :
all_bin_crc = abs(all_bin_crc) + 1
print all_bin_crc
write_file(flash_bin_name,chr((all_bin_crc & 0x000000FF))+chr((all_bin_crc & 0x0000FF00) >> 8)+chr((all_bin_crc & 0x00FF0000) >> 16)+chr((all_bin_crc & 0xFF000000) >> 24))
cmd = 'rm eagle.app.sym'
os.system(cmd)
if __name__=='__main__':
gen_appbin()
| [] | [] | ["COMPILE"] | [] | ["COMPILE"] | python | 1 | 0 | |
pop_test.go
|
package pop
import (
stdlog "log"
"os"
"testing"
"time"
"github.com/gobuffalo/nulls"
"github.com/gobuffalo/validate/v3"
"github.com/gobuffalo/validate/v3/validators"
"github.com/gofrs/uuid"
"github.com/stretchr/testify/suite"
"github.com/gobuffalo/pop/v5/logging"
)
var PDB *Connection
type PostgreSQLSuite struct {
suite.Suite
}
type MySQLSuite struct {
suite.Suite
}
type SQLiteSuite struct {
suite.Suite
}
type CockroachSuite struct {
suite.Suite
}
func TestSpecificSuites(t *testing.T) {
switch os.Getenv("SODA_DIALECT") {
case "postgres":
suite.Run(t, &PostgreSQLSuite{})
case "mysql", "mysql_travis":
suite.Run(t, &MySQLSuite{})
case "sqlite":
suite.Run(t, &SQLiteSuite{})
case "cockroach":
suite.Run(t, &CockroachSuite{})
}
}
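// Illustrative usage (an assumption, not from the original file): running
//
//   SODA_DIALECT=postgres go test ./...
//
// selects PostgreSQLSuite above; with SODA_DIALECT unset, init() below skips the
// integration setup entirely.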
func init() {
Debug = false
AddLookupPaths("./")
dialect := os.Getenv("SODA_DIALECT")
if dialect != "" {
if err := LoadConfigFile(); err != nil {
stdlog.Panic(err)
}
var err error
PDB, err = Connect(dialect)
log(logging.Info, "Run test with dialect %v", dialect)
if err != nil {
stdlog.Panic(err)
}
} else {
log(logging.Info, "Skipping integration tests")
}
}
func transaction(fn func(tx *Connection)) {
err := PDB.Rollback(func(tx *Connection) {
fn(tx)
})
if err != nil {
stdlog.Fatal(err)
}
}
func ts(s string) string {
return PDB.Dialect.TranslateSQL(s)
}
type Client struct {
ClientID string `db:"id"`
}
func (c Client) TableName() string {
return "clients"
}
type User struct {
ID int `db:"id"`
UserName string `db:"user_name"`
Email string `db:"email"`
Name nulls.String `db:"name"`
Alive nulls.Bool `db:"alive"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
BirthDate nulls.Time `db:"birth_date"`
Bio nulls.String `db:"bio"`
Price nulls.Float64 `db:"price"`
FullName nulls.String `db:"full_name" select:"name as full_name"`
Books Books `has_many:"books" order_by:"title asc"`
FavoriteSong Song `has_one:"song" fk_id:"u_id"`
Houses Addresses `many_to_many:"users_addresses"`
}
// Validate gets run every time you call a "Validate*" (ValidateAndSave, ValidateAndCreate, ValidateAndUpdate) method.
// This method is not required and may be deleted.
func (u *User) Validate(tx *Connection) (*validate.Errors, error) {
return validate.Validate(
&validators.StringIsPresent{Field: u.Name.String, Name: "Name"},
), nil
}
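// Illustrative sketch (assumes a connected *Connection named tx and a users table):
//
//   verrs, err := tx.ValidateAndCreate(&User{Name: nulls.NewString("Mark")})
//   // verrs.HasAny() reports whether the StringIsPresent validator above failed.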
type Users []User
type UserAttribute struct {
ID int `db:"id"`
UserName string `db:"user_name"`
NickName string `db:"nick_name"`
User User `json:"user" belongs_to:"user" fk_id:"UserName" primary_id:"UserName"`
}
type Book struct {
ID int `db:"id"`
Title string `db:"title"`
Isbn string `db:"isbn"`
UserID nulls.Int `db:"user_id"`
User User `belongs_to:"user"`
Description string `db:"description"`
Writers Writers `has_many:"writers"`
TaxiID nulls.Int `db:"taxi_id"`
Taxi Taxi `belongs_to:"taxi"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Taxi struct {
ID int `db:"id"`
Model string `db:"model"`
UserID nulls.Int `db:"user_id"`
AddressID nulls.Int `db:"address_id"`
Driver *User `belongs_to:"user" fk_id:"user_id"`
Address Address `belongs_to:"address"`
ToAddressID *int `db:"to_address_id"`
ToAddress *Address `belongs_to:"address"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
// Validate gets run every time you call a "Validate*" (ValidateAndSave, ValidateAndCreate, ValidateAndUpdate) method.
// This method is not required and may be deleted.
func (b *Book) Validate(tx *Connection) (*validate.Errors, error) {
return validate.Validate(
&validators.StringIsPresent{Field: b.Description, Name: "Description"},
), nil
}
type Books []Book
type Writer struct {
ID int `db:"id"`
Name string `db:"name"`
BookID int `db:"book_id"`
Book Book `belongs_to:"book"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Writers []Writer
type Address struct {
ID int `db:"id"`
Street string `db:"street"`
HouseNumber int `db:"house_number"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Addresses []Address
type UsersAddress struct {
ID int `db:"id"`
UserID int `db:"user_id"`
AddressID int `db:"address_id"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type UsersAddressQuery struct {
ID int `db:"id"`
UserID int `db:"user_id"`
AddressID int `db:"address_id"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
UserName *string `db:"name" json:"user_name"`
UserEmail *string `db:"email" json:"user_email"`
}
func (UsersAddressQuery) TableName() string {
return "users_addresses"
}
type Friend struct {
ID int `db:"id"`
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
func (Friend) TableName() string {
return "good_friends"
}
type Family struct {
ID int `db:"id"`
FirstName string `db:"first_name"`
LastName string `db:"last_name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
func (Family) TableName() string {
// schema.table_name
return "family.members"
}
type Enemy struct {
A string
}
type Song struct {
ID uuid.UUID `db:"id"`
Title string `db:"title"`
UserID int `db:"u_id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
ComposedByID int `json:"composed_by_id" db:"composed_by_id"`
ComposedBy Composer `belongs_to:"composer"`
}
type Composer struct {
ID int `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type Course struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CourseCode struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
CourseID uuid.UUID `json:"course_id" db:"course_id"`
Course Course `json:"-" belongs_to:"course"`
// Course Course `belongs_to:"course"`
}
type ValidatableCar struct {
ID int64 `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
var validationLogs []string
func (v *ValidatableCar) Validate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "Validate")
verrs := validate.Validate(&validators.StringIsPresent{Field: v.Name, Name: "Name"})
return verrs, nil
}
func (v *ValidatableCar) ValidateSave(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateSave")
return nil, nil
}
func (v *ValidatableCar) ValidateUpdate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateUpdate")
return nil, nil
}
func (v *ValidatableCar) ValidateCreate(tx *Connection) (*validate.Errors, error) {
validationLogs = append(validationLogs, "ValidateCreate")
return nil, nil
}
type NotValidatableCar struct {
ID int `db:"id"`
Name string `db:"name"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CallbacksUser struct {
ID int `db:"id"`
BeforeS string `db:"before_s"`
BeforeC string `db:"before_c"`
BeforeU string `db:"before_u"`
BeforeD string `db:"before_d"`
BeforeV string `db:"before_v"`
AfterS string `db:"after_s"`
AfterC string `db:"after_c"`
AfterU string `db:"after_u"`
AfterD string `db:"after_d"`
AfterF string `db:"after_f"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
type CallbacksUsers []CallbacksUser
func (u *CallbacksUser) BeforeSave(tx *Connection) error {
u.BeforeS = "BeforeSave"
return nil
}
func (u *CallbacksUser) BeforeUpdate(tx *Connection) error {
u.BeforeU = "BeforeUpdate"
return nil
}
func (u *CallbacksUser) BeforeCreate(tx *Connection) error {
u.BeforeC = "BeforeCreate"
return nil
}
func (u *CallbacksUser) BeforeDestroy(tx *Connection) error {
u.BeforeD = "BeforeDestroy"
return nil
}
func (u *CallbacksUser) BeforeValidate(tx *Connection) error {
u.BeforeV = "BeforeValidate"
return nil
}
func (u *CallbacksUser) AfterSave(tx *Connection) error {
u.AfterS = "AfterSave"
return nil
}
func (u *CallbacksUser) AfterUpdate(tx *Connection) error {
u.AfterU = "AfterUpdate"
return nil
}
func (u *CallbacksUser) AfterCreate(tx *Connection) error {
u.AfterC = "AfterCreate"
return nil
}
func (u *CallbacksUser) AfterDestroy(tx *Connection) error {
u.AfterD = "AfterDestroy"
return nil
}
func (u *CallbacksUser) AfterFind(tx *Connection) error {
u.AfterF = "AfterFind"
return nil
}
type Label struct {
ID string `db:"id"`
}
type SingleID struct {
ID int `db:"id"`
}
type Body struct {
ID int `json:"id" db:"id"`
Head *Head `json:"head" has_one:"head"`
}
type Head struct {
ID int `json:"id,omitempty" db:"id"`
BodyID int `json:"-" db:"body_id"`
Body *Body `json:"body,omitempty" belongs_to:"body"`
}
type HeadPtr struct {
ID int `json:"id,omitempty" db:"id"`
BodyID *int `json:"-" db:"body_id"`
Body *Body `json:"body,omitempty" belongs_to:"body"`
}
type Student struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
}
// https://github.com/gobuffalo/pop/issues/302
type Parent struct {
ID uuid.UUID `json:"id" db:"id"`
CreatedAt time.Time `json:"created_at" db:"created_at"`
UpdatedAt time.Time `json:"updated_at" db:"updated_at"`
Students []*Student `many_to_many:"parents_students"`
}
type CrookedColour struct {
ID int `db:"pk"`
Name string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type CrookedSong struct {
ID string `db:"name"`
CreatedAt time.Time `db:"created_at"`
UpdatedAt time.Time `db:"updated_at"`
}
type NonStandardID struct {
ID int `db:"pk"`
OutfacingID string `db:"id"`
}
| ["\"SODA_DIALECT\"", "\"SODA_DIALECT\""] | [] | ["SODA_DIALECT"] | [] | ["SODA_DIALECT"] | go | 1 | 0 | |
tasks/system_probe.py
|
import datetime
import glob
import os
import getpass
import contextlib
import shutil
import tempfile
from invoke import task
from invoke.exceptions import Exit
from subprocess import check_output, CalledProcessError
from .utils import (
bin_name,
get_build_flags,
REPO_PATH,
get_version,
get_git_branch_name,
get_go_version,
get_git_commit,
)
from .build_tags import get_default_build_tags
BIN_DIR = os.path.join(".", "bin", "system-probe")
BIN_PATH = os.path.join(BIN_DIR, bin_name("system-probe", android=False))
EBPF_BUILDER_IMAGE = 'datadog/tracer-bpf-builder'
EBPF_BUILDER_FILE = os.path.join(".", "tools", "ebpf", "Dockerfiles", "Dockerfile-ebpf")
BPF_TAG = "linux_bpf"
BCC_TAG = "bcc"
GIMME_ENV_VARS = ['GOROOT', 'PATH']
@task
def build(
ctx,
race=False,
go_version=None,
incremental_build=False,
major_version='7',
python_runtimes='3',
with_bcc=True,
go_mod="vendor",
windows=False,
):
"""
Build the system_probe
"""
# Only build ebpf files on unix
if not windows:
build_object_files(ctx, install=True)
# TODO use pkg/version for this
main = "main."
ld_vars = {
"Version": get_version(ctx, major_version=major_version),
"GoVersion": get_go_version(),
"GitBranch": get_git_branch_name(),
"GitCommit": get_git_commit(),
"BuildDate": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
}
goenv = {}
if go_version:
lines = ctx.run("gimme {version}".format(version=go_version)).stdout.split("\n")
for line in lines:
for env_var in GIMME_ENV_VARS:
if env_var in line:
goenv[env_var] = line[line.find(env_var) + len(env_var) + 1 : -1].strip('\'\"')
ld_vars["GoVersion"] = go_version
ldflags, gcflags, env = get_build_flags(ctx, major_version=major_version, python_runtimes=python_runtimes)
# extend PATH from gimme with the one from get_build_flags
if "PATH" in os.environ and "PATH" in goenv:
goenv["PATH"] += ":" + os.environ["PATH"]
env.update(goenv)
# Add custom ld flags
ldflags += ' '.join(["-X '{name}={value}'".format(name=main + key, value=value) for key, value in ld_vars.items()])
if not windows:
build_tags = get_default_build_tags() + [BPF_TAG]
else:
build_tags = get_default_build_tags()
if with_bcc:
build_tags.append(BCC_TAG)
# TODO static option
cmd = 'go build -mod={go_mod} {race_opt} {build_type} -tags "{go_build_tags}" '
cmd += '-o {agent_bin} -gcflags="{gcflags}" -ldflags="{ldflags}" {REPO_PATH}/cmd/system-probe'
args = {
"go_mod": go_mod,
"race_opt": "-race" if race else "",
"build_type": "" if incremental_build else "-a",
"go_build_tags": " ".join(build_tags),
"agent_bin": BIN_PATH,
"gcflags": gcflags,
"ldflags": ldflags,
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args), env=env)
@task
def build_in_docker(ctx, rebuild_ebpf_builder=False, race=False, incremental_build=False, major_version='7'):
"""
Build the system_probe using a container
This can be used when the current OS don't have up to date linux headers
"""
if rebuild_ebpf_builder:
build_ebpf_builder(ctx)
docker_cmd = "docker run --rm \
-v {cwd}:/go/src/github.com/DataDog/datadog-agent \
--workdir=/go/src/github.com/DataDog/datadog-agent \
{builder} \
{cmd}"
if should_use_sudo(ctx):
docker_cmd = "sudo " + docker_cmd
cmd = "invoke -e system-probe.build --major-version {}".format(major_version)
if race:
cmd += " --race"
if incremental_build:
cmd += " --incremental-build"
ctx.run(docker_cmd.format(cwd=os.getcwd(), builder=EBPF_BUILDER_IMAGE, cmd=cmd))
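# Illustrative usage (an assumption, not part of the original file): these tasks are
# normally driven through the invoke CLI, e.g.
#   invoke -e system-probe.build --incremental-build
#   invoke -e system-probe.build-in-docker --race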
@task
def test(ctx, skip_object_files=False, only_check_bpf_bytes=False):
"""
Run tests on eBPF parts
If skip_object_files is set to True, this won't rebuild object files
If only_check_bpf_bytes is set to True this will only check that the assets bundled are
matching the currently generated object files
"""
if not skip_object_files:
build_object_files(ctx, install=False)
pkg = "./pkg/ebpf/... ./pkg/network/..."
# Pass along the PATH env variable to retrieve the go binary path
path = os.environ['PATH']
cmd = 'go test -mod={go_mod} -v -tags "{bpf_tag}" {pkg}'
if not is_root():
cmd = 'sudo -E PATH={path} ' + cmd
if only_check_bpf_bytes:
cmd += " -run=TestEbpfBytesCorrect"
else:
if getpass.getuser() != "root":
print("system-probe tests must be run as root")
raise Exit(code=1)
if os.getenv("GOPATH") is None:
print(
"GOPATH is not set, if you are running tests with sudo, you may need to use the -E option to preserve your environment"
)
raise Exit(code=1)
ctx.run(cmd.format(path=path, go_mod="vendor", bpf_tag=BPF_TAG, pkg=pkg))
@task
def nettop(ctx, incremental_build=False, go_mod="vendor"):
"""
Build and run the `nettop` utility for testing
"""
build_object_files(ctx, install=True)
cmd = 'go build -mod={go_mod} {build_type} -tags "linux_bpf" -o {bin_path} {path}'
bin_path = os.path.join(BIN_DIR, "nettop")
# Build
ctx.run(
cmd.format(
path=os.path.join(REPO_PATH, "pkg", "ebpf", "nettop"),
bin_path=bin_path,
go_mod=go_mod,
build_type="" if incremental_build else "-a",
)
)
# Run
if should_use_sudo(ctx):
ctx.sudo(bin_path)
else:
ctx.run(bin_path)
@task
def cfmt(ctx):
"""
Format C code using clang-format
"""
fmtCmd = "clang-format -i -style='{{BasedOnStyle: WebKit, BreakBeforeBraces: Attach}}' {file}"
# This only works with gnu sed
sedCmd = r"sed -i 's/__attribute__((always_inline)) /__attribute__((always_inline))\
/g' {file}"
files = glob.glob("pkg/ebpf/c/*.[c,h]")
for file in files:
ctx.run(fmtCmd.format(file=file))
ctx.run(sedCmd.format(file=file))
@task
def build_dev_docker_image(ctx, image_name, push=False):
"""
Build a system-probe-agent Docker image (development only)
if push is set to true the image will be pushed to the given registry
"""
dev_file = os.path.join(".", "tools", "ebpf", "Dockerfiles", "Dockerfile-tracer-dev")
cmd = "docker build {directory} -t {image_name} -f {file}"
push_cmd = "docker push {image_name}"
# Build in a temporary directory to make the docker build context small
with tempdir() as d:
shutil.copy(BIN_PATH, d)
ctx.run(cmd.format(directory=d, image_name=image_name, file=dev_file))
if push:
ctx.run(push_cmd.format(image_name=image_name))
@task
def object_files(ctx, install=True):
"""object_files builds the eBPF object files"""
build_object_files(ctx, install=install)
def build_object_files(ctx, install=True):
"""build_object_files builds only the eBPF object
set install to False to disable replacing the assets
"""
# if clang is missing, subsequent calls to ctx.run("clang ...") will fail silently, and result in us not building a
# new .o file
print("checking for clang executable...")
ctx.run("which clang")
print("found clang")
centos_headers_dir = "/usr/src/kernels"
debian_headers_dir = "/usr/src"
if os.path.isdir(centos_headers_dir):
linux_headers = [os.path.join(centos_headers_dir, d) for d in os.listdir(centos_headers_dir)]
else:
linux_headers = [
os.path.join(debian_headers_dir, d) for d in os.listdir(debian_headers_dir) if d.startswith("linux-")
]
bpf_dir = os.path.join(".", "pkg", "ebpf")
c_dir = os.path.join(bpf_dir, "c")
flags = [
'-D__KERNEL__',
'-DCONFIG_64BIT',
'-D__BPF_TRACING__',
'-Wno-unused-value',
'-Wno-pointer-sign',
'-Wno-compare-distinct-pointer-types',
'-Wunused',
'-Wall',
'-Werror',
'-O2',
'-emit-llvm',
'-c',
os.path.join(c_dir, "tracer-ebpf.c"),
]
# Mapping used by the kernel, from https://elixir.bootlin.com/linux/latest/source/scripts/subarch.include
arch = (
check_output(
'''uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ \
-e s/sun4u/sparc64/ \
-e s/arm.*/arm/ -e s/sa110/arm/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
-e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ \
-e s/riscv.*/riscv/''',
shell=True,
)
.decode('utf-8')
.strip()
)
subdirs = [
"include",
"include/uapi",
"include/generated/uapi",
"arch/{}/include".format(arch),
"arch/{}/include/uapi".format(arch),
"arch/{}/include/generated".format(arch),
]
for d in linux_headers:
for s in subdirs:
flags.extend(["-isystem", os.path.join(d, s)])
cmd = "clang {flags} -o - | llc -march=bpf -filetype=obj -o '{file}'"
commands = []
# Build both the standard and debug version
obj_file = os.path.join(c_dir, "tracer-ebpf.o")
commands.append(cmd.format(flags=" ".join(flags), file=obj_file))
debug_obj_file = os.path.join(c_dir, "tracer-ebpf-debug.o")
commands.append(cmd.format(flags=" ".join(flags + ["-DDEBUG=1"]), file=debug_obj_file))
if install:
assets_cmd = (
os.environ["GOPATH"]
+ "/bin/go-bindata -pkg bytecode -prefix '{c_dir}' -modtime 1 -o '{go_file}' '{obj_file}' '{debug_obj_file}' "
+ "'{tcp_queue_length_kern_c_file}' '{tcp_queue_length_kern_user_h_file}' '{oom_kill_kern_c_file}' '{oom_kill_kern_user_h_file}' "
+ "'{bpf_common_h_file}' '{test_asset_file}' '{test_h_file}'"
)
go_file = os.path.join(bpf_dir, "bytecode", "tracer-ebpf.go")
test_dir = os.path.join(bpf_dir, "testdata")
commands.append(
assets_cmd.format(
c_dir=c_dir,
go_file=go_file,
obj_file=obj_file,
debug_obj_file=debug_obj_file,
tcp_queue_length_kern_c_file=os.path.join(c_dir, "tcp-queue-length-kern.c"),
tcp_queue_length_kern_user_h_file=os.path.join(c_dir, "tcp-queue-length-kern-user.h"),
oom_kill_kern_c_file=os.path.join(c_dir, "oom-kill-kern.c"),
oom_kill_kern_user_h_file=os.path.join(c_dir, "oom-kill-kern-user.h"),
bpf_common_h_file=os.path.join(c_dir, "bpf-common.h"),
test_asset_file=os.path.join(test_dir, "test-asset.c"),
test_h_file=os.path.join(test_dir, "test-header.h"),
)
)
commands.append("gofmt -w -s {go_file}".format(go_file=go_file))
for cmd in commands:
ctx.run(cmd)
def build_ebpf_builder(ctx):
"""build_ebpf_builder builds the docker image for the ebpf builder
"""
cmd = "docker build -t {image} -f {file} ."
if should_use_sudo(ctx):
cmd = "sudo " + cmd
ctx.run(cmd.format(image=EBPF_BUILDER_IMAGE, file=EBPF_BUILDER_FILE))
def is_root():
return os.getuid() == 0
def should_use_sudo(ctx):
# We are already root
if is_root():
return False
with open(os.devnull, 'w') as FNULL:
try:
check_output(['docker', 'info'], stderr=FNULL)
except CalledProcessError:
return True
return False
@contextlib.contextmanager
def tempdir():
"""
Helper to create a temp directory and clean it
"""
dirpath = tempfile.mkdtemp()
try:
yield dirpath
finally:
shutil.rmtree(dirpath)
| [] | [] | ["GOPATH", "PATH"] | [] | ["GOPATH", "PATH"] | python | 2 | 0 | |
consume_test.go
|
package main
import (
"context"
"encoding/binary"
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"sort"
"strings"
"testing"
"time"
"github.com/Shopify/sarama"
qt "github.com/frankban/quicktest"
"github.com/google/go-cmp/cmp"
"github.com/heetch/avro"
"github.com/heetch/avro/avroregistry"
"gopkg.in/retry.v1"
)
func TestParseOffsets(t *testing.T) {
data := []struct {
testName string
input string
expected map[int32]interval
expectedErr string
}{{
testName: "empty",
input: "",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "single-comma",
input: ",",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "all",
input: "all",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "oldest",
input: "oldest",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "all-with-space",
input: " all ",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "all-with-zero-initial-offset",
input: "all=+0:",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "several-partitions",
input: "1,2,4",
expected: map[int32]interval{
1: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
2: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
4: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "one-partition,empty-offsets",
input: "0=",
expected: map[int32]interval{
0: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "one-partition,one-offset",
input: "0=1",
expected: map[int32]interval{
0: interval{
start: positionAtOffset(1),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "one-partition,empty-after-colon",
input: "0=1:",
expected: map[int32]interval{
0: interval{
start: positionAtOffset(1),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "multiple-partitions",
input: "0=4:,2=1:10,6",
expected: map[int32]interval{
0: interval{
start: positionAtOffset(4),
end: positionAtOffset(maxOffset),
},
2: interval{
start: positionAtOffset(1),
end: positionAtOffset(10),
},
6: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "newest-relative",
input: "0=-1",
expected: map[int32]interval{
0: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetNewest),
diff: anchorDiff{offset: -1},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "newest-relative,empty-after-colon",
input: "0=-1:",
expected: map[int32]interval{
0: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetNewest),
diff: anchorDiff{offset: -1},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "oldest-relative",
input: "0=+1",
expected: map[int32]interval{
0: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetOldest),
diff: anchorDiff{offset: 1},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "oldest-relative,empty-after-colon",
input: "0=+1:",
expected: map[int32]interval{
0: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetOldest),
diff: anchorDiff{offset: 1},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "oldest-relative-to-newest-relative",
input: "0=+1:-1",
expected: map[int32]interval{
0: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetOldest),
diff: anchorDiff{offset: 1},
},
end: position{
anchor: anchorAtOffset(sarama.OffsetNewest),
diff: anchorDiff{offset: -1},
},
},
},
}, {
testName: "specific-partition-with-all-partitions",
input: "0=+1:-1,all=1:10",
expected: map[int32]interval{
0: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetOldest),
diff: anchorDiff{offset: 1},
},
end: position{
anchor: anchorAtOffset(sarama.OffsetNewest),
diff: anchorDiff{offset: -1},
},
},
-1: interval{
start: positionAtOffset(1),
end: positionAtOffset(10),
},
},
}, {
testName: "oldest-to-newest",
input: "0=oldest:newest",
expected: map[int32]interval{
0: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(sarama.OffsetNewest),
},
},
}, {
testName: "oldest-to-newest-with-offsets",
input: "0=oldest+10:newest-10",
expected: map[int32]interval{
0: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetOldest),
diff: anchorDiff{offset: 10},
},
end: position{
anchor: anchorAtOffset(sarama.OffsetNewest),
diff: anchorDiff{offset: -10},
},
},
},
}, {
testName: "newest",
input: "newest",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(sarama.OffsetNewest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "single-partition",
input: "10",
expected: map[int32]interval{
10: interval{
start: positionAtOffset(sarama.OffsetOldest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "single-range,all-partitions",
input: "10:20",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(10),
end: positionAtOffset(20),
},
},
}, {
testName: "single-range,all-partitions,open-end",
input: "10:",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(10),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "all-newest",
input: "all=newest:",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(sarama.OffsetNewest),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "implicit-all-newest-with-offset",
input: "newest-10:",
expected: map[int32]interval{
-1: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetNewest),
diff: anchorDiff{offset: -10},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "implicit-all-oldest-with-offset",
input: "oldest+10:",
expected: map[int32]interval{
-1: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetOldest),
diff: anchorDiff{offset: 10},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "implicit-all-neg-offset-empty-colon",
input: "-10:",
expected: map[int32]interval{
-1: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetNewest),
diff: anchorDiff{offset: -10},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "implicit-all-pos-offset-empty-colon",
input: "+10:",
expected: map[int32]interval{
-1: interval{
start: position{
anchor: anchorAtOffset(sarama.OffsetOldest),
diff: anchorDiff{offset: 10},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "start-offset-combines-with-diff-offset",
input: "1000+3",
expected: map[int32]interval{
-1: interval{
start: positionAtOffset(1003),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "invalid-partition",
input: "bogus",
expectedErr: `invalid anchor position "bogus"`,
}, {
testName: "several-colons",
input: ":::",
expectedErr: `invalid position ":::"`,
}, {
testName: "bad-relative-offset-start",
input: "foo+20",
expectedErr: `invalid anchor position "foo"`,
}, {
testName: "bad-relative-offset-diff",
input: "oldest+bad",
expectedErr: `invalid relative position "\+bad"`,
}, {
testName: "bad-relative-offset-diff-at-start",
input: "+bad",
expectedErr: `invalid relative position "\+bad"`,
}, {
testName: "relative-offset-too-big",
input: "+9223372036854775808",
expectedErr: `offset "\+9223372036854775808" is too large`,
}, {
testName: "starting-offset-too-big",
input: "9223372036854775808:newest",
expectedErr: `anchor offset "9223372036854775808" is too large`,
}, {
testName: "ending-offset-too-big",
input: "oldest:9223372036854775808",
expectedErr: `anchor offset "9223372036854775808" is too large`,
}, {
testName: "partition-too-big",
input: "2147483648=oldest",
expectedErr: `partition number "2147483648" is too large`,
}, {
testName: "time-anchor-rfc3339",
input: "[2019-08-31T13:06:08.234Z]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2019-08-31T13:06:08.234Z")),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "time-anchor-rfc3339-not-utc",
input: "[2019-08-31T13:06:08.234-04:00]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2019-08-31T17:06:08.234Z")),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "time-anchor-date",
input: "[2019-08-31]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2019-08-31T00:00:00Z")),
end: positionAtTime(T("2019-09-01T00:00:00Z")),
},
},
}, {
testName: "time-anchor-imprecise-explicit-colon",
input: "[2019-08-31]:",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2019-08-31T00:00:00Z")),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "time-anchor-date-explicit-end",
input: "[2019-08-31]:[2019-09-04]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2019-08-31T00:00:00Z")),
end: positionAtTime(T("2019-09-05T00:00:00Z")),
},
},
}, {
testName: "time-anchor-month",
input: "[2019-08]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2019-08-01T00:00:00Z")),
end: positionAtTime(T("2019-09-01T00:00:00Z")),
},
},
}, {
testName: "time-anchor-year",
input: "[2019]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2019-01-01T00:00:00Z")),
end: positionAtTime(T("2020-01-01T00:00:00Z")),
},
},
}, {
testName: "time-anchor-minute",
input: "[13:45]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2011-02-03T13:45:00Z")),
end: positionAtTime(T("2011-02-03T13:46:00Z")),
},
},
}, {
testName: "time-anchor-second",
input: "[13:45:12.345]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2011-02-03T13:45:12.345Z")),
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "time-anchor-hour",
input: "[4pm]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2011-02-03T16:00:00Z")),
end: positionAtTime(T("2011-02-03T17:00:00Z")),
},
},
}, {
testName: "time-range",
input: "[2019-08-31T13:06:08.234Z]:[2023-02-05T12:01:02.6789Z]",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2019-08-31T13:06:08.234Z")),
end: positionAtTime(T("2023-02-05T12:01:02.6789Z")),
},
},
}, {
testName: "time-anchor-with-diff-offset",
input: "[4pm]-123",
expected: map[int32]interval{
-1: {
start: position{
anchor: anchorAtTime(T("2011-02-03T16:00:00Z")),
diff: anchorDiff{offset: -123},
},
end: position{
anchor: anchorAtTime(T("2011-02-03T17:00:00Z")),
diff: anchorDiff{offset: -123},
},
},
},
}, {
testName: "offset-anchor-with-negative-time-rel",
input: "1234-1h3s",
expected: map[int32]interval{
-1: {
start: position{
anchor: anchorAtOffset(1234),
diff: anchorDiff{
isDuration: true,
duration: -(time.Hour + 3*time.Second),
},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "offset-anchor-with-positive-time-rel",
input: "1234+555ms",
expected: map[int32]interval{
-1: {
start: position{
anchor: anchorAtOffset(1234),
diff: anchorDiff{
isDuration: true,
duration: 555 * time.Millisecond,
},
},
end: positionAtOffset(maxOffset),
},
},
}, {
testName: "time-anchor-combined-with-time-rel",
input: "[3pm]+5s",
expected: map[int32]interval{
-1: {
start: positionAtTime(T("2011-02-03T15:00:05Z")),
end: positionAtTime(T("2011-02-03T16:00:05Z")),
},
},
},
// TODO error cases
// TODO local time resolution
}
c := qt.New(t)
// Choose a reference date that's not UTC, so we can ensure
// that the timezone-dependent logic works correctly.
now := T("2011-02-03T16:05:06.500Z").In(time.FixedZone("UTC-8", -8*60*60))
for _, d := range data {
c.Run(d.testName, func(c *qt.C) {
actual, err := parseOffsets(d.input, now)
if d.expectedErr != "" {
c.Assert(err, qt.ErrorMatches, d.expectedErr)
return
}
c.Assert(err, qt.Equals, nil)
c.Assert(actual, deepEquals, d.expected)
})
}
}
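// Illustrative call (mirrors the table above): parseOffsets("0=oldest+10:newest-10", now)
// yields a map where partition 0 ranges from the oldest offset plus 10 to the newest
// offset minus 10.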
func BenchmarkMerge(b *testing.B) {
c := qt.New(b)
const npart = 1000
nmsgs := b.N / npart
cs := make([]<-chan *sarama.ConsumerMessage, npart)
epoch := time.Date(2019, 10, 7, 12, 0, 0, 0, time.UTC)
for i := range cs {
c := make(chan *sarama.ConsumerMessage, 10)
cs[i] = c
go func() {
defer close(c)
t := epoch
for i := 0; i < nmsgs; i++ {
c <- &sarama.ConsumerMessage{
Timestamp: t,
}
t = t.Add(time.Second)
}
}()
}
b.ResetTimer()
total := 0
for range mergeConsumers(cs...) {
total++
}
c.Assert(total, qt.Equals, npart*nmsgs)
}
func BenchmarkNoMerge(b *testing.B) {
const npart = 1000
nmsgs := b.N / npart
epoch := time.Date(2019, 10, 7, 12, 0, 0, 0, time.UTC)
c := make(chan *sarama.ConsumerMessage, 10)
for i := 0; i < npart; i++ {
go func() {
t := epoch
for i := 0; i < nmsgs; i++ {
c <- &sarama.ConsumerMessage{
Timestamp: t,
}
t = t.Add(time.Second)
}
}()
}
b.ResetTimer()
for i := 0; i < npart*nmsgs; i++ {
<-c
}
}
func TestMerge(t *testing.T) {
c := qt.New(t)
epoch := time.Date(2019, 10, 7, 12, 0, 0, 0, time.UTC)
M := func(timestamp int) *sarama.ConsumerMessage {
return &sarama.ConsumerMessage{
Timestamp: epoch.Add(time.Duration(timestamp) * time.Hour),
}
}
partitionMsgs := map[int32][]*sarama.ConsumerMessage{
0: {M(0), M(2), M(3), M(10)},
1: {M(1), M(4), M(5), M(6)},
2: {M(7), M(8), M(9)},
3: {M(11), M(12)},
4: {},
}
var wantMsgs []*sarama.ConsumerMessage
for p, msgs := range partitionMsgs {
for i, m := range msgs {
m.Partition = p
m.Offset = int64(i)
}
wantMsgs = append(wantMsgs, msgs...)
}
sort.Slice(wantMsgs, func(i, j int) bool {
return wantMsgs[i].Timestamp.Before(wantMsgs[j].Timestamp)
})
// Start a consumer
chans := make([]<-chan *sarama.ConsumerMessage, 0, len(wantMsgs))
for _, msgs := range partitionMsgs {
msgs := msgs
c := make(chan *sarama.ConsumerMessage)
go func() {
defer close(c)
for _, m := range msgs {
c <- m
time.Sleep(time.Millisecond)
}
}()
chans = append(chans, c)
}
resultc := mergeConsumers(chans...)
var gotMsgs []*sarama.ConsumerMessage
loop:
for {
select {
case m, ok := <-resultc:
if !ok {
break loop
}
gotMsgs = append(gotMsgs, m)
case <-time.After(5 * time.Second):
c.Fatal("timed out waiting for messages")
}
}
c.Assert(gotMsgs, qt.HasLen, len(wantMsgs))
c.Assert(gotMsgs, qt.DeepEquals, wantMsgs)
}
func TestConsume(t *testing.T) {
c := qt.New(t)
closer := make(chan struct{})
messageChan := make(<-chan *sarama.ConsumerMessage)
calls := make(chan tConsumePartition)
consumer := tConsumer{
consumePartition: map[tConsumePartition]tPartitionConsumer{
tConsumePartition{"hans", 1, 1}: tPartitionConsumer{messages: messageChan},
tConsumePartition{"hans", 2, 1}: tPartitionConsumer{messages: messageChan},
},
calls: calls,
}
target := consumeCmd{consumer: consumer}
target.topic = "hans"
target.brokerStrs = []string{"localhost:9092"}
go target.consume(map[int32]resolvedInterval{
1: {1, 5},
2: {1, 5},
}, map[int32]int64{
1: 1,
2: 1,
})
defer close(closer)
var actual []tConsumePartition
expected := []tConsumePartition{
tConsumePartition{"hans", 1, 1},
tConsumePartition{"hans", 2, 1},
}
timeout := time.After(time.Second)
for {
select {
case call := <-calls:
actual = append(actual, call)
if len(actual) < len(expected) {
break
}
sort.Sort(ByPartitionOffset(actual))
c.Check(actual, qt.DeepEquals, expected)
return
case <-timeout:
c.Fatalf("Did not receive calls to consume partitions before timeout.")
}
}
}
type tConsumePartition struct {
Topic string
Partition int32
Offset int64
}
type ByPartitionOffset []tConsumePartition
func (a ByPartitionOffset) Len() int {
return len(a)
}
func (a ByPartitionOffset) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}
func (a ByPartitionOffset) Less(i, j int) bool {
if a[i].Partition != a[j].Partition {
return a[i].Partition < a[j].Partition
}
return a[i].Offset < a[j].Offset
}
type tPartitionConsumer struct {
closeErr error
highWaterMarkOffset int64
messages <-chan *sarama.ConsumerMessage
errors <-chan *sarama.ConsumerError
}
func (pc tPartitionConsumer) AsyncClose() {}
func (pc tPartitionConsumer) Close() error {
return pc.closeErr
}
func (pc tPartitionConsumer) HighWaterMarkOffset() int64 {
return pc.highWaterMarkOffset
}
func (pc tPartitionConsumer) Messages() <-chan *sarama.ConsumerMessage {
return pc.messages
}
func (pc tPartitionConsumer) Errors() <-chan *sarama.ConsumerError {
return pc.errors
}
type tConsumer struct {
topics []string
topicsErr error
partitions map[string][]int32
partitionsErr map[string]error
consumePartition map[tConsumePartition]tPartitionConsumer
consumePartitionErr map[tConsumePartition]error
closeErr error
calls chan tConsumePartition
}
func (c tConsumer) Topics() ([]string, error) {
return c.topics, c.topicsErr
}
func (c tConsumer) Partitions(topic string) ([]int32, error) {
return c.partitions[topic], c.partitionsErr[topic]
}
func (c tConsumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) {
cp := tConsumePartition{topic, partition, offset}
c.calls <- cp
return c.consumePartition[cp], c.consumePartitionErr[cp]
}
func (c tConsumer) Close() error {
return c.closeErr
}
func (c tConsumer) HighWaterMarks() map[string]map[int32]int64 {
return nil
}
func TestConsumeParseArgsUsesEnvVar(t *testing.T) {
c := qt.New(t)
defer c.Done()
registry := "localhost:8084"
broker := "hans:2000"
c.Setenv("KT_BROKERS", broker)
c.Setenv("KT_REGISTRY", registry)
cmd0, _, err := parseCmd("hkt", "consume")
c.Assert(err, qt.Equals, nil)
cmd := cmd0.(*consumeCmd)
c.Assert(cmd.brokers(), qt.DeepEquals, []string{broker})
c.Assert(cmd.registryURL, qt.Equals, registry)
}
// brokers default to localhost:9092
func TestConsumeParseArgsDefault(t *testing.T) {
c := qt.New(t)
defer c.Done()
c.Setenv("KT_BROKERS", "")
c.Setenv("KT_REGISTRY", "")
cmd0, _, err := parseCmd("hkt", "consume")
c.Assert(err, qt.Equals, nil)
cmd := cmd0.(*consumeCmd)
c.Assert(cmd.brokers(), qt.DeepEquals, []string{"localhost:9092"})
c.Assert(cmd.registryURL, qt.Equals, "")
}
func TestConsumeParseArgsFlagsOverrideEnv(t *testing.T) {
c := qt.New(t)
defer c.Done()
registry := "localhost:8084"
broker := "hans:2000"
// command line arg wins
c.Setenv("KT_BROKERS", "BLABB")
c.Setenv("KT_REGISTRY", "BLABB")
cmd0, _, err := parseCmd("hkt", "consume", "-brokers", broker, "-registry", registry)
c.Assert(err, qt.Equals, nil)
cmd := cmd0.(*consumeCmd)
c.Assert(cmd.brokers(), qt.DeepEquals, []string{broker})
c.Assert(cmd.registryURL, qt.Equals, registry)
}
func TestConsumeAvroMessage(t *testing.T) {
c := qt.New(t)
defer c.Done()
type record struct {
A int
B int
}
// In the byte slice below:
// 80: A=40
// 40: B=20
rec := record{A: 40, B: 20}
data := []byte{80, 40}
_, wType, err := avro.Marshal(rec)
c.Assert(err, qt.IsNil)
reg := newTestRegistry(c)
schemaID := reg.register(c, wType)
cmd := consumeCmd{registry: reg.registry}
enc, err := cmd.encoderForType("string")
c.Assert(err, qt.IsNil)
cmd.encodeKey = enc
enc, err = cmd.encoderForType("avro")
c.Assert(err, qt.IsNil)
cmd.encodeValue = enc
msg := &sarama.ConsumerMessage{
Key: []byte("foo"),
Value: createAvroMessage(schemaID, data),
Partition: 1,
Offset: 0,
}
consumed, err := cmd.newConsumedMessage(msg)
c.Assert(err, qt.IsNil)
var got record
err = json.Unmarshal(consumed.Value, &got)
c.Assert(err, qt.IsNil)
c.Assert(got, qt.DeepEquals, rec)
}
func T(s string) time.Time {
t, err := time.Parse(time.RFC3339, s)
if err != nil {
panic(err)
}
return t
}
// deepEquals allows comparison of the unexported fields inside the
// struct types that we test internally.
var deepEquals = qt.CmpEquals(cmp.AllowUnexported(
interval{},
position{},
anchor{},
anchorDiff{},
producerMessage{},
))
func positionAtOffset(off int64) position {
return position{
anchor: anchorAtOffset(off),
}
}
func positionAtTime(t time.Time) position {
return position{
anchor: anchorAtTime(t),
}
}
type testRegistry struct {
registry *avroregistry.Registry
srv *httptest.Server
faked bool
schema string
sub string
url string
}
func newTestRegistry(c *qt.C) *testRegistry {
ctx := context.Background()
reg := &testRegistry{
sub: randomString(10),
url: os.Getenv("KT_REGISTRY"),
}
// If KT_REGISTRY is not explicitly set, we use a fake server.
if reg.url == "" {
reg.faked = true
reg.srv = httptest.NewServer(http.HandlerFunc(reg.fakeServerHandler))
reg.url = reg.srv.URL
}
var err error
reg.registry, err = avroregistry.New(avroregistry.Params{
ServerURL: reg.url,
RetryStrategy: retry.Regular{},
})
c.Assert(err, qt.IsNil)
c.Defer(func() {
err := reg.registry.DeleteSubject(ctx, reg.sub)
c.Check(err, qt.IsNil)
if reg.srv != nil {
reg.srv.Close()
}
})
return reg
}
func (reg *testRegistry) register(c *qt.C, schema *avro.Type) int64 {
if reg.faked {
reg.schema = schema.String()
return 1
}
id, err := reg.registry.Register(context.Background(), reg.sub, schema)
c.Assert(err, qt.IsNil)
return id
}
func (reg *testRegistry) fakeServerHandler(w http.ResponseWriter, r *http.Request) {
var body []byte
if r.Method == http.MethodGet && strings.HasPrefix(r.RequestURI, "/schemas/ids") {
var err error
body, err = json.Marshal(struct {
Schema string `json:"schema"`
}{reg.schema})
if err != nil {
panic(err)
}
}
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/vnd.schemaregistry.v1+json")
w.Write(body)
}
// createAvroMessage is a helper to create Avro message.
// See https://docs.confluent.io/current/schema-registry/serializer-formatter.html#wire-format.
func createAvroMessage(schemaID int64, data []byte) []byte {
b := []byte{0} // magic byte
id := make([]byte, 4) // 4-byte schema id
binary.BigEndian.PutUint32(id, uint32(schemaID))
b = append(b, id...)
return append(b, data...)
}
| ["\"KT_REGISTRY\""] | [] | ["KT_REGISTRY"] | [] | ["KT_REGISTRY"] | go | 1 | 0 | |
radiopadre_kernel/__init__.py
|
import os, traceback, atexit, logging
import iglesia
from iglesia.utils import message, warning, error
# these are set up in init
ROOTDIR = None
# SERVER_BASEDIR is set up in iglesia (as e.g. /home/user/path)
SHADOW_URL_PREFIX = None # URL prefix for HTTP server serving shadow tree (e.g. http://localhost:port/{SESSION_ID})
FILE_URL_ROOT = None # root URL for accessing files through Jupyter (e.g. /files/to)
NOTEBOOK_URL_ROOT = None # root URL for accessing notebooks through Jupyter (e.g. /notebooks/to)
CACHE_URL_BASE = None # base URL for cache, e.g. http://localhost:port/{SESSION_ID}/home/user/path
CACHE_URL_ROOT = None # URL for cache of root dir, e.g. http://localhost:port/{SESSION_ID}/home/user/path/to
NBCONVERT = None # set to True if running in notebook-convert mode (i.e. non-interactive)
casacore_tables = None
class PadreLogHandler(logging.Handler):
def __init__(self):
super(PadreLogHandler, self).__init__()
self.records = []
def emit(self, record):
self.records.append(record)
def get_records(self, min_level=logging.INFO):
"""Returns accumulated records from the specified level (or higher)"""
if type(min_level) is str:
min_level = getattr(logging, min_level)
return [(logging.getLevelName(rec.levelno), rec.msg) for rec in self.records if rec.levelno >= min_level]
log_handler = PadreLogHandler()
def _strip_slash(path):
return path if path == "/" or path is None else path.rstrip("/")
def _is_subdir(subdir, parent):
return subdir == parent or subdir.startswith(parent+"/")
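# Illustrative examples (not in the original source):
#   _is_subdir("/home/user/path/to", "/home/user/path")   -> True
#   _is_subdir("/home/user/pathology", "/home/user/path") -> False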
def _make_symlink(source, link_name):
try:
if os.path.lexists(link_name):
if os.path.exists(link_name) and os.path.samefile(link_name, source):
return
else:
os.unlink(link_name)
os.symlink(source, link_name)
except Exception as exc:
traceback.print_exc()
raise
def init():
"""Initializes radiopadre kernel"""
iglesia.init()
global FILE_URL_ROOT, NOTEBOOK_URL_ROOT, CACHE_URL_BASE, CACHE_URL_ROOT, \
SHADOW_URL_PREFIX
global \
ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, SHADOW_BASEDIR, \
SHADOW_ROOTDIR, SESSION_DIR, SESSION_URL, SESSION_ID, \
VERBOSE, HOSTNAME, SNOOP_MODE
from iglesia import \
ABSROOTDIR, ROOTDIR, DISPLAY_ROOTDIR, SHADOW_HOME, SERVER_BASEDIR, SHADOW_BASEDIR, \
SHADOW_ROOTDIR, SESSION_DIR, SESSION_URL, SESSION_ID, \
VERBOSE, HOSTNAME, SNOOP_MODE
# setup for snoop mode. Browsing /home/other/path/to,
if SNOOP_MODE:
# for a Jupyter basedir of ~/.radiopadre/home/other/path, this becomes /home/other/path
unshadowed_server_base = SERVER_BASEDIR[len(SHADOW_HOME):]
# Otherwise it'd better have been /home/other/path/to to begin with!
if not _is_subdir(ABSROOTDIR, unshadowed_server_base):
error(f"""The requested directory {ABSROOTDIR} is not under {unshadowed_server_base}.
This is probably a bug! """)
# Since Jupyter is running under ~/.radiopadre/home/other/path, we can serve other's files from
# /home/other/path/to as /files/to/.content
subdir = SHADOW_ROOTDIR[len(SERVER_BASEDIR):] # this becomes "/to" (or "" if paths are the same)
# but do make sure that the .content symlink is in place!
_make_symlink(ABSROOTDIR, SHADOW_ROOTDIR + "/.radiopadre.content")
# else running in native mode
else:
if not _is_subdir(ABSROOTDIR, SERVER_BASEDIR):
warning(f"""The requested directory {ABSROOTDIR} is not under {SERVER_BASEDIR}.
This is probably a bug! """)
# for a server dir of /home/user/path, and an ABSROOTDIR of /home/user/path/to, get the subdir
subdir = ABSROOTDIR[len(SERVER_BASEDIR):] # this becomes "/to" (or "" if paths are the same)
os.chdir(ABSROOTDIR)
ROOTDIR = '.'
## check casacore availability
global casacore_tables
try:
import casacore.tables as casacore_tables
except Exception as exc:
casacore_tables = None
warning("casacore.tables failed to import. Table browsing functionality will not be available.")
radiopadre_base = os.path.dirname(os.path.dirname(__file__))
# # pre-init JS9 stuff and run JS9 helper
# js9.preinit_js9(in_container, helper_port, userside_helper_port, http_rewrites)
iglesia.init_helpers(radiopadre_base)
# now a port is available (set up in init_helpers()), form up URLs
SHADOW_URL_PREFIX = f"http://localhost:{iglesia.HTTPSERVER_PORT}/{SESSION_ID}"
CACHE_URL_ROOT = SHADOW_URL_PREFIX + ABSROOTDIR
CACHE_URL_BASE = CACHE_URL_ROOT[:-len(subdir)] if subdir else CACHE_URL_ROOT
# when running nbconvert, it doesn't know about the magic "/files" URL, and just needs a local filename
global NBCONVERT
NBCONVERT = bool(os.environ.get("RADIOPADRE_NBCONVERT"))
files_prefix = "." if NBCONVERT else "/files"
if SNOOP_MODE:
FILE_URL_ROOT = f"{files_prefix}{subdir}/.radiopadre.content/"
NOTEBOOK_URL_ROOT = f"/notebooks{subdir}/.radiopadre.content/"
else:
FILE_URL_ROOT = f"{files_prefix}{subdir}/"
NOTEBOOK_URL_ROOT = f"/notebooks{subdir}/"
# init JS9 sources
from . import js9
js9.preinit_js9()
if ROOTDIR is None:
from iglesia import logger
# enable logging
log = logger.init("radiopadre.kernel") #, use_formatter=False)
log.setLevel(logging.DEBUG)
log.addHandler(log_handler)
LOGFILE = logger.enable_logfile("kernel")
logger.disable_printing()
message("initializing radiopadre_kernel")
init()
| [] | [] | ["RADIOPADRE_NBCONVERT"] | [] | ["RADIOPADRE_NBCONVERT"] | python | 1 | 0 | |
nessus/nessus.go
|
package nessus
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"log"
"net/http"
"net/http/httputil"
"os"
"reflect"
"strings"
)
// ClientWithResponses builds on ClientInterface to offer response payloads
type APIClient struct {
*ClientWithResponses
Vulnerabilities VulnerabilitiesInterface
AgentExclusions AgentExclusionsInterface
Assets AssetsInterface
Policies PoliciesInterface
AuditLog AuditLogInterface
Editor EditorInterface
AgentConfig AgentConfigInterface
AgentGroup AgentGroupInterface
IoScans IoScansInterface
Exclusions ExclusionsInterface
IoScanner IoScannerInterface
IoAgent IoAgentInterface
Scanners ScannersInterface
Scans ScansInterface
TargetGroups TargetGroupsInterface
Workbenches WorkbenchesInterface
ExportsVulns ExportsVulnsInterface
Folders FoldersInterface
Tags TagsInterface
Permissions PermissionsInterface
Scanner ScannerInterface
Bulk BulkInterface
IoNetworks IoNetworksInterface
Networks NetworksInterface
Credentials CredentialsInterface
IoExportsCompliance IoExportsComplianceInterface
IoFilters IoFiltersInterface
IoPlugins IoPluginsInterface
IoV1 IoV1Interface
IoV2 IoV2Interface
}
// do sets the common headers on every outgoing request; it is registered as a request editor
func (c *Client) do(ctx context.Context, req *http.Request) error {
// Headers for all request
req.Header.Set("User-Agent", c.userAgent)
if c.token != "" {
req.Header.Set("X-Cookie", "token="+c.token)
}
return nil
}
// NewAPIClient creates a new API client (yes, naming is awkward)
func NewAPIClient(baseURL string, opts ...ClientOption) (*APIClient, error) {
cl, err := NewClient(baseURL, opts...)
if err != nil {
return nil, err
}
cl.RequestEditors = append(cl.RequestEditors, cl.do)
clientWithResponses := &ClientWithResponses{cl}
return &APIClient{
ClientWithResponses: clientWithResponses,
Credentials: &Credentials{clientWithResponses},
IoExportsCompliance: &IoExportsCompliance{clientWithResponses},
IoNetworks: &IoNetworks{clientWithResponses},
Networks: &Networks{clientWithResponses},
AgentExclusions: &AgentExclusions{clientWithResponses},
Assets: &Assets{clientWithResponses},
Vulnerabilities: &Vulnerabilities{clientWithResponses},
Policies: &Policies{clientWithResponses},
AgentConfig: &AgentConfig{clientWithResponses},
AgentGroup: &AgentGroup{clientWithResponses},
AuditLog: &AuditLog{clientWithResponses},
Editor: &Editor{clientWithResponses},
IoFilters: &IoFilters{clientWithResponses},
IoPlugins: &IoPlugins{clientWithResponses},
IoV2: &IoV2{clientWithResponses},
Exclusions: &Exclusions{clientWithResponses},
IoScanner: &IoScanner{clientWithResponses},
IoScans: &IoScans{clientWithResponses},
IoV1: &IoV1{clientWithResponses},
ExportsVulns: &ExportsVulns{clientWithResponses},
Folders: &Folders{clientWithResponses},
IoAgent: &IoAgent{clientWithResponses},
Scanners: &Scanners{clientWithResponses},
Scans: &Scans{clientWithResponses},
TargetGroups: &TargetGroups{clientWithResponses},
Workbenches: &Workbenches{clientWithResponses},
Permissions: &Permissions{clientWithResponses},
Scanner: &Scanner{clientWithResponses},
Tags: &Tags{clientWithResponses},
Bulk: &Bulk{clientWithResponses},
}, nil
}
// RequestEditorFn is the function signature for the RequestEditor callback function
type RequestEditorFn func(ctx context.Context, req *http.Request) error
// HttpRequestDoer performs HTTP requests.
//
// The standard http.Client implements this interface.
type HttpRequestDoer interface {
Do(req *http.Request) (*http.Response, error)
}
// Client which conforms to the OpenAPI3 specification for this service.
type Client struct {
// The endpoint of the server conforming to this interface, with scheme,
// https://api.deepmap.com for example. This can contain a path relative
// to the server, such as https://api.deepmap.com/dev-test, and all the
// paths in the swagger spec will be appended to the server.
Server string
// Doer for performing requests, typically a *http.Client with any
// customized settings, such as certificate chains.
Client HttpRequestDoer
innerClient HttpRequestDoer
// A list of callbacks for modifying requests which are generated before sending over
// the network.
RequestEditors []RequestEditorFn
ctx context.Context
userAgent string
token string
user string
password string
insecure bool
trace bool
}
// ClientOption allows setting custom parameters during construction
type ClientOption func(*Client) error
// NewClientFromEnvironment creates a new client from default environment variables
func NewClientFromEnvironment(opts ...ClientOption) (*APIClient, error) {
baseURL := os.Getenv("NESSUS_URL")
user := os.Getenv("NESSUS_USER")
password := os.Getenv("NESSUS_PASSWORD")
if os.Getenv("NESSUS_INSECURE") == "true" {
opts = append(opts, WithInsecure(true))
}
opts = append(opts, WithLogin(user, password))
c, err := NewAPIClient(baseURL, opts...)
if err != nil {
return nil, err
}
return c, nil
}
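// Illustrative sketch (values are placeholders): the variables read above can
// be exported before calling NewClientFromEnvironment, for example:
//
//	export NESSUS_URL=https://nessus.example.com:8834/
//	export NESSUS_USER=admin
//	export NESSUS_PASSWORD=secret
//	export NESSUS_INSECURE=true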
// NewClient creates a new Client with reasonable defaults
func NewClient(server string, opts ...ClientOption) (*Client, error) {
// create a client with sane default values
c := Client{
Server: server,
userAgent: "go-nessus",
}
// mutate client and add all optional params
for _, o := range opts {
if err := o(&c); err != nil {
return nil, err
}
}
// ensure the server URL always has a trailing slash
if !strings.HasSuffix(c.Server, "/") {
c.Server += "/"
}
if c.ctx == nil {
c.ctx = context.Background()
}
// create httpClient, if not already present
if c.Client == nil {
c.Client = &http.Client{}
}
// the inner client always uses its own transport so the insecure option can
// be applied regardless of any caller-supplied Doer
c.innerClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: c.insecure, // skip verification when the server certificate is not trusted
},
},
}
return &c, nil
}
// WithHTTPClient allows overriding the default Doer, which is
// automatically created using http.Client. This is useful for tests.
func WithHTTPClient(doer HttpRequestDoer) ClientOption {
return func(c *Client) error {
c.Client = doer
return nil
}
}
// WithTrace enables dumping of all requests and responses to the log
func WithTrace(trace bool) ClientOption {
return func(c *Client) error {
c.trace = trace
return nil
}
}
// WithUserAgent specifies a user agent string to identify the client
func WithUserAgent(userAgent string) ClientOption {
return func(c *Client) error {
c.userAgent = userAgent
return nil
}
}
// WithLogin specifies the username and password used to authenticate
func WithLogin(user string, password string) ClientOption {
return func(c *Client) error {
c.user = user
c.password = password
return nil
}
}
// WithContext specifies the context used for requests
func WithContext(ctx context.Context) ClientOption {
return func(c *Client) error {
c.ctx = ctx
return nil
}
}
// WithInsecure accepts all certificates by skipping TLS verification
func WithInsecure(insecure bool) ClientOption {
return func(c *Client) error {
c.insecure = insecure
return nil
}
}
// WithRequestEditorFn allows setting up a callback function, which will be
// called right before sending the request. This can be used to mutate the request.
func WithRequestEditorFn(fn RequestEditorFn) ClientOption {
return func(c *Client) error {
c.RequestEditors = append(c.RequestEditors, fn)
return nil
}
}
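// The options above can be combined when constructing a client; the following
// is an illustrative sketch only (URL and credentials are placeholders):
//
//	client, err := NewAPIClient(
//		"https://nessus.example.com:8834/",
//		WithLogin("admin", "secret"),
//		WithInsecure(true), // accept self-signed certificates
//		WithTrace(true),    // dump requests/responses to the log
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = client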
// Do wraps the inner Doer and, when tracing is enabled, dumps the request and response to the log
func (c *Client) Do(req *http.Request) (*http.Response, error) {
r, e := c.innerClient.Do(req)
if c.trace {
var reqStr = ""
dump, err := httputil.DumpRequestOut(req, true)
if err == nil {
reqStr = strings.ReplaceAll(strings.TrimRight(string(dump), "\r\n"), "\n", "\n ")
}
if r == nil {
dump = nil
err = nil
} else {
dump, err = httputil.DumpResponse(r, true)
}
if err == nil {
c.Tracef("%s\n\n %s\n", reqStr, strings.ReplaceAll(strings.TrimRight(string(dump), "\r\n"), "\n", "\n "))
}
}
return r, e
}
// Errorf logs errors
func (c *Client) Errorf(format string, v ...interface{}) {
log.Printf("[ERROR] %s", fmt.Sprintf(format, v...))
}
// Warnf logs warnings
func (c *Client) Warnf(format string, v ...interface{}) {
log.Printf("[WARN] %s", fmt.Sprintf(format, v...))
}
// Debugf logs debug info
func (c *Client) Debugf(format string, v ...interface{}) {
log.Printf("[DEBUG] %s", fmt.Sprintf(format, v...))
}
// Tracef logs trace info
func (c *Client) Tracef(format string, v ...interface{}) {
log.Printf("[TRACE] %s", fmt.Sprintf(format, v...))
}
// RawAPIResponse is a generic response wrapper
type RawAPIResponse interface {
Status() string
StatusCode() int
}
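// getResponseObject uses reflection to extract the typed payload from a
// generated response struct: it looks for a JSON<status-code> field and, if
// present, an inner Data field; status codes above 399 are additionally
// returned as errors alongside whatever payload was found.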
func getResponseObject(sr RawAPIResponse) (interface{}, error) {
fldForCode := fmt.Sprintf("JSON%d", sr.StatusCode())
v := reflect.ValueOf(sr).Elem()
if _, ok := v.Type().FieldByName(fldForCode); ok {
s := v.FieldByName(fldForCode).Interface()
v := reflect.ValueOf(s).Elem()
if _, ok := v.Type().FieldByName("Data"); ok {
d := v.FieldByName("Data").Interface()
return d, nil
}
if sr.StatusCode() > 399 {
return s, errors.New(sr.Status())
}
return s, nil
}
if sr.StatusCode() > 399 {
return sr, errors.New(sr.Status())
}
return sr, nil
}
func (c *ClientWithResponses) Authenticated() bool {
return c.ClientInterface.(*Client).token != ""
}
type AuthResponse struct {
MD5SumWizardTemplates string `json:"md5sum_wizard_templates,omitempty"`
Token string `json:"token,omitempty"`
MD5SumTenableLinks string `json:"md5sum_tenable_links,omitempty"`
}
// Authenticate logs in with the configured username and password to obtain a session token
func (c *ClientWithResponses) Authenticate() error {
// Authenticate
c.ClientInterface.(*Client).token = ""
var authJson = []byte(fmt.Sprintf("{\"username\":\"%s\",\"password\":\"%s\" }", c.ClientInterface.(*Client).user, c.ClientInterface.(*Client).password))
req, err := http.NewRequest("POST", c.ClientInterface.(*Client).Server+"session", bytes.NewBuffer(authJson))
if err != nil {
return err
}
req.Header.Set("Content-Type", "application/json")
res, err := c.ClientInterface.(*Client).Do(req)
if err != nil {
return err
}
if res == nil {
return fmt.Errorf("authentication failed")
}
if res.StatusCode > 399 {
return fmt.Errorf("%s returned %s", c.ClientInterface.(*Client).Server, res.Status)
}
if res.Body == nil {
return fmt.Errorf("%s returned empty result", c.ClientInterface.(*Client).Server)
}
decoder := json.NewDecoder(res.Body)
var authResponse AuthResponse
err = decoder.Decode(&authResponse)
if err != nil {
return fmt.Errorf("%s returned corrupt result", c.ClientInterface.(*Client).Server)
}
c.ClientInterface.(*Client).token = authResponse.Token
return nil
}
func (c *ClientWithResponses) evaluateResponse(response RawAPIResponse, err error) (interface{}, error) {
if err != nil {
return nil, err
}
return getResponseObject(response)
}
|
[
"\"NESSUS_URL\"",
"\"NESSUS_USER\"",
"\"NESSUS_PASSWORD\"",
"\"NESSUS_INSECURE\""
] |
[] |
[
"NESSUS_PASSWORD",
"NESSUS_URL",
"NESSUS_INSECURE",
"NESSUS_USER"
] |
[]
|
["NESSUS_PASSWORD", "NESSUS_URL", "NESSUS_INSECURE", "NESSUS_USER"]
|
go
| 4 | 0 | |
integration_test/integration_symlinks_test.go
|
/*
Sniperkit-Bot
- Status: analyzed
*/
// Copyright 2016 Palantir Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package integration_test
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"testing"
"github.com/nmiyake/pkg/dirs"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// * Symlink "test-go" -> $GOPATH
// * Set current directory to test project inside the symlink
// * Verify that "./godelw check" works in sym-linked path
func TestCheckInGoPathSymLink(t *testing.T) {
testProjectDir := setUpGödelTestAndDownload(t, testRootDir, gödelTGZ, version)
src := `package foo_test
import "testing"
func TestFoo(t *testing.T) {}`
err := ioutil.WriteFile(path.Join(testProjectDir, "foo_test.go"), []byte(src), 0644)
require.NoError(t, err)
symLinkParentDir, cleanup, err := dirs.TempDir("", "")
require.NoError(t, err)
defer cleanup()
symLinkPath := path.Join(symLinkParentDir, "test-go")
originalGoPath := os.Getenv("GOPATH")
err = os.Symlink(originalGoPath, symLinkPath)
require.NoError(t, err)
testProjectRelPath, err := filepath.Rel(originalGoPath, testProjectDir)
require.NoError(t, err)
// use script to set cd because setting wd on exec.Command does not work for symlinks
projectPathInSymLink := path.Join(symLinkPath, testProjectRelPath)
scriptTemplate := `#!/bin/bash
cd %v
pwd
`
scriptFilePath := path.Join(symLinkParentDir, "script.sh")
err = ioutil.WriteFile(scriptFilePath, []byte(fmt.Sprintf(scriptTemplate, projectPathInSymLink)), 0755)
require.NoError(t, err)
cmd := exec.Command(scriptFilePath)
output, err := cmd.CombinedOutput()
require.NoError(t, err, "Command %v failed. Output:\n%v", cmd.Args, string(output))
assert.Equal(t, projectPathInSymLink, strings.TrimSpace(string(output)))
scriptTemplate = `#!/bin/bash
cd %v
./godelw check
`
err = ioutil.WriteFile(scriptFilePath, []byte(fmt.Sprintf(scriptTemplate, projectPathInSymLink)), 0755)
require.NoError(t, err)
cmd = exec.Command(scriptFilePath)
output, err = cmd.CombinedOutput()
require.NoError(t, err, "Command %v failed. Output:\n%v", cmd.Args, string(output))
}
// * Symlink "test-go" -> $GOPATH
// * Set $GOPATH to be the symlink ("test-go")
// * Set current directory to test project inside the symlink
// * Verify that "./godelw check" works in sym-linked path
// * Restore $GOPATH to original value
func TestCheckInGoPathSymLinkGoPathSymLink(t *testing.T) {
testProjectDir := setUpGödelTestAndDownload(t, testRootDir, gödelTGZ, version)
src := `package foo_test
import "testing"
func TestFoo(t *testing.T) {}`
err := ioutil.WriteFile(path.Join(testProjectDir, "foo_test.go"), []byte(src), 0644)
require.NoError(t, err)
symLinkParentDir, cleanup, err := dirs.TempDir("", "")
require.NoError(t, err)
defer cleanup()
symLinkPath := path.Join(symLinkParentDir, "test-go")
originalGoPath := os.Getenv("GOPATH")
err = os.Symlink(originalGoPath, symLinkPath)
require.NoError(t, err)
err = os.Setenv("GOPATH", symLinkPath)
require.NoError(t, err)
defer func() {
if err := os.Setenv("GOPATH", originalGoPath); err != nil {
require.NoError(t, err, "failed to restore GOPATH environment variable in defer")
}
}()
testProjectRelPath, err := filepath.Rel(originalGoPath, testProjectDir)
require.NoError(t, err)
// use script to set cd because setting wd on exec.Command does not work for symlinks
projectPathInSymLink := path.Join(symLinkPath, testProjectRelPath)
scriptTemplate := `#!/bin/bash
cd %v
pwd
`
scriptFilePath := path.Join(symLinkParentDir, "script.sh")
err = ioutil.WriteFile(scriptFilePath, []byte(fmt.Sprintf(scriptTemplate, projectPathInSymLink)), 0755)
require.NoError(t, err)
cmd := exec.Command(scriptFilePath)
output, err := cmd.CombinedOutput()
require.NoError(t, err, "Command %v failed. Output:\n%v", cmd.Args, string(output))
assert.Equal(t, projectPathInSymLink, strings.TrimSpace(string(output)))
scriptTemplate = `#!/bin/bash
cd %v
./godelw check
`
err = ioutil.WriteFile(scriptFilePath, []byte(fmt.Sprintf(scriptTemplate, projectPathInSymLink)), 0755)
require.NoError(t, err)
cmd = exec.Command(scriptFilePath)
output, err = cmd.CombinedOutput()
require.NoError(t, err, "Command %v failed. Output:\n%v", cmd.Args, string(output))
}
// * Symlink "test-go" -> $GOPATH
// * Set $GOPATH to be the symlink ("test-go")
// * Set current directory to real project (not inside symlink)
// * Verify that "./godelw check" works in real path
// * Restore $GOPATH to original value
func TestCheckInGoPathNonSymLinkWhenGoPathIsSymLink(t *testing.T) {
testProjectDir := setUpGödelTestAndDownload(t, testRootDir, gödelTGZ, version)
src := `package foo_test
import "testing"
func TestFoo(t *testing.T) {}`
err := ioutil.WriteFile(path.Join(testProjectDir, "foo_test.go"), []byte(src), 0644)
require.NoError(t, err)
symLinkParentDir, cleanup, err := dirs.TempDir("", "")
require.NoError(t, err)
defer cleanup()
symLinkPath := path.Join(symLinkParentDir, "test-go")
originalGoPath := os.Getenv("GOPATH")
err = os.Symlink(originalGoPath, symLinkPath)
require.NoError(t, err)
err = os.Setenv("GOPATH", symLinkPath)
require.NoError(t, err)
defer func() {
if err := os.Setenv("GOPATH", originalGoPath); err != nil {
require.NoError(t, err, "failed to restore GOPATH environment variable in defer")
}
}()
cmd := exec.Command("./godelw", "check")
cmd.Dir = testProjectDir
output, err := cmd.CombinedOutput()
require.NoError(t, err, "Command %v failed. Output:\n%v", cmd.Args, string(output))
}
|
[
"\"GOPATH\"",
"\"GOPATH\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
python/ray/tune/automlboard/run.py
|
import logging
import os
import re
import django
import argparse
from django.core.management import execute_from_command_line
from common.exception import DatabaseError
root_path = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
def run_board(args):
"""
Run main entry for AutoMLBoard.
Args:
args: args parsed from command line
"""
init_config(args)
# backend service, should import after django settings initialized
from backend.collector import CollectorService
service = CollectorService(
args.logdir, args.reload_interval, standalone=False, log_level=args.log_level
)
service.run()
# frontend service
logger.info("Try to start automlboard on port %s\n" % args.port)
command = [
os.path.join(root_path, "manage.py"),
"runserver",
"0.0.0.0:%s" % args.port,
"--noreload",
]
execute_from_command_line(command)
def init_config(args):
"""
Initialize configs of the service.
Do the following things:
1. automl board settings
2. database settings
3. django settings
"""
os.environ["AUTOMLBOARD_LOGDIR"] = args.logdir
os.environ["AUTOMLBOARD_LOGLEVEL"] = args.log_level
os.environ["AUTOMLBOARD_RELOAD_INTERVAL"] = str(args.reload_interval)
if args.db:
try:
db_address_reg = re.compile(r"(.*)://(.*):(.*)@(.*):(.*)/(.*)")
match = re.match(db_address_reg, args.db)
os.environ["AUTOMLBOARD_DB_ENGINE"] = match.group(1)
os.environ["AUTOMLBOARD_DB_USER"] = match.group(2)
os.environ["AUTOMLBOARD_DB_PASSWORD"] = match.group(3)
os.environ["AUTOMLBOARD_DB_HOST"] = match.group(4)
os.environ["AUTOMLBOARD_DB_PORT"] = match.group(5)
os.environ["AUTOMLBOARD_DB_NAME"] = match.group(6)
logger.info("Using %s as the database backend." % match.group(1))
except BaseException as e:
raise DatabaseError(e)
else:
logger.info(
"Using sqlite3 as the database backend, "
"information will be stored in automlboard.db"
)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ray.tune.automlboard.settings")
django.setup()
command = [os.path.join(root_path, "manage.py"), "migrate", "--run-syncdb"]
execute_from_command_line(command)
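# Illustrative example (placeholder values): a --db URI such as
#   mysql://automl:secret@127.0.0.1:3306/automlboard
# is split by the regex in init_config into engine, user, password, host,
# port and database name, which populate the AUTOMLBOARD_DB_* variables.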
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--logdir",
type=str,
required=True,
help="Directory where AutoML Board will "
"look to find tuning logs it can display",
)
parser.add_argument(
"--port",
type=int,
default=8008,
help="What port to serve AutoMLBoard on, " "(default: %(default)s)",
)
parser.add_argument(
"--db",
type=str,
default=None,
help="Set SQL database URI in "
"schema://user:password@host:port/database, "
"(default: sqlite3)",
)
parser.add_argument(
"--reload_interval",
type=int,
default=5,
help="How often the backend should load more data, " "(default: %(default)s)",
)
parser.add_argument(
"--log_level",
type=str,
default="INFO",
help="Set the logging level, " "(default: %(default)s)",
)
cmd_args = parser.parse_args()
run_board(cmd_args)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"AUTOMLBOARD_DB_NAME",
"AUTOMLBOARD_DB_USER",
"AUTOMLBOARD_LOGLEVEL",
"AUTOMLBOARD_DB_PORT",
"AUTOMLBOARD_LOGDIR",
"AUTOMLBOARD_DB_ENGINE",
"AUTOMLBOARD_RELOAD_INTERVAL",
"AUTOMLBOARD_DB_PASSWORD",
"AUTOMLBOARD_DB_HOST"
] |
[]
|
["AUTOMLBOARD_DB_NAME", "AUTOMLBOARD_DB_USER", "AUTOMLBOARD_LOGLEVEL", "AUTOMLBOARD_DB_PORT", "AUTOMLBOARD_LOGDIR", "AUTOMLBOARD_DB_ENGINE", "AUTOMLBOARD_RELOAD_INTERVAL", "AUTOMLBOARD_DB_PASSWORD", "AUTOMLBOARD_DB_HOST"]
|
python
| 9 | 0 | |
main.go
|
/*
Copyright 2022.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
"sigs.k8s.io/controller-runtime/pkg/client"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
cdv1alpha1 "github.com/DevYoungHulk/smart-cd-operator/api/v1alpha1"
"github.com/DevYoungHulk/smart-cd-operator/controllers"
//+kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
var KClient client.Client
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(cdv1alpha1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "f3e4c8c0.org.smart",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
KClient = mgr.GetClient()
if err = (&controllers.CanaryReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Canary")
os.Exit(1)
}
if os.Getenv("ENABLE_WEBHOOKS") != "false" {
if err = (&cdv1alpha1.Canary{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "Canary")
os.Exit(1)
}
}
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
|
[
"\"ENABLE_WEBHOOKS\""
] |
[] |
[
"ENABLE_WEBHOOKS"
] |
[]
|
["ENABLE_WEBHOOKS"]
|
go
| 1 | 0 | |
api/datastore/sql/sql_test.go
|
package sql
import (
"context"
"net/url"
"os"
"testing"
"github.com/jmoiron/sqlx"
"github.com/lean-mu/mu/api/datastore/datastoretest"
"github.com/lean-mu/mu/api/datastore/internal/datastoreutil"
"github.com/lean-mu/mu/api/datastore/sql/migratex"
"github.com/lean-mu/mu/api/datastore/sql/migrations"
_ "github.com/lean-mu/mu/api/datastore/sql/mysql"
_ "github.com/lean-mu/mu/api/datastore/sql/postgres"
_ "github.com/lean-mu/mu/api/datastore/sql/sqlite"
"github.com/lean-mu/mu/api/models"
)
// since New with fresh dbs skips all migrations:
// * open a fresh db on latest version
// * run all down migrations
// * run all up migrations
// [ then run tests against that db ]
func newWithMigrations(ctx context.Context, url *url.URL) (*SQLStore, error) {
ds, err := newDS(ctx, url)
if err != nil {
return nil, err
}
err = ds.Tx(func(tx *sqlx.Tx) error {
return migratex.Down(ctx, tx, migrations.Migrations)
})
if err != nil {
return nil, err
}
// go through New, to ensure our Up logic works in there...
ds, err = newDS(ctx, url)
if err != nil {
return nil, err
}
return ds, nil
}
func TestDatastore(t *testing.T) {
ctx := context.Background()
defer os.RemoveAll("sqlite_test_dir")
u, err := url.Parse("sqlite3://sqlite_test_dir")
if err != nil {
t.Fatal(err)
}
f := func(t *testing.T) *SQLStore {
os.RemoveAll("sqlite_test_dir")
ds, err := newDS(ctx, u)
if err != nil {
t.Fatal(err)
}
// we don't want to test the validator, really
return ds
}
f2 := func(t *testing.T) models.Datastore {
ds := f(t)
return datastoreutil.NewValidator(ds)
}
t.Run(u.Scheme, func(t *testing.T) {
datastoretest.RunAllTests(t, f2, datastoretest.NewBasicResourceProvider())
})
// NOTE: sqlite3 does not like ALTER TABLE DROP COLUMN so do not run
// migration tests against it, only pg and mysql -- should prove UP migrations
// will likely work for sqlite3, but may need separate testing by devs :(
// if being run from test script (CI) poke around for pg and mysql containers
// to run tests against them too. this runs with a fresh db first run, then
// will down migrate all migrations, up migrate, and run tests again.
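// Illustrative sketch (placeholder values; the exact DSN format depends on
// the driver): point the tests at running databases via environment, e.g.
//   POSTGRES_URL=postgres://user:pass@localhost:5432/funcs?sslmode=disable \
//   MYSQL_URL=mysql://user:pass@tcp(localhost:3306)/funcs \
//   go test ./...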
both := func(u *url.URL) {
f := func(t *testing.T) *SQLStore {
ds, err := newDS(ctx, u)
if err != nil {
t.Fatal(err)
}
if err := ds.clear(); err != nil {
t.Fatal(err)
}
return ds
}
f2 := func(t *testing.T) models.Datastore {
ds := f(t)
return datastoreutil.NewValidator(ds)
}
// test fresh w/o migrations
t.Run(u.Scheme, func(t *testing.T) { datastoretest.RunAllTests(t, f2, datastoretest.NewBasicResourceProvider()) })
f = func(t *testing.T) *SQLStore {
t.Log("with migrations now!")
ds, err := newWithMigrations(ctx, u)
if err != nil {
t.Fatal(err)
}
if err := ds.clear(); err != nil {
t.Fatal(err)
}
return ds
}
f2 = func(t *testing.T) models.Datastore {
ds := f(t)
return datastoreutil.NewValidator(ds)
}
// test that migrations work & things work with them
t.Run(u.Scheme, func(t *testing.T) { datastoretest.RunAllTests(t, f2, datastoretest.NewBasicResourceProvider()) })
}
if pg := os.Getenv("POSTGRES_URL"); pg != "" {
u, err := url.Parse(pg)
if err != nil {
t.Fatal(err)
}
both(u)
}
if mysql := os.Getenv("MYSQL_URL"); mysql != "" {
u, err := url.Parse(mysql)
if err != nil {
t.Fatal(err)
}
both(u)
}
}
func TestClose(t *testing.T) {
ctx := context.Background()
defer os.RemoveAll("sqlite_test_dir")
u, err := url.Parse("sqlite3://sqlite_test_dir")
if err != nil {
t.Fatal(err)
}
os.RemoveAll("sqlite_test_dir")
ds, err := newDS(ctx, u)
if err != nil {
t.Fatal(err)
}
if err := ds.Close(); err != nil {
t.Fatalf("Failed to close datastore: %v", err)
}
}
|
[
"\"POSTGRES_URL\"",
"\"MYSQL_URL\""
] |
[] |
[
"MYSQL_URL",
"POSTGRES_URL"
] |
[]
|
["MYSQL_URL", "POSTGRES_URL"]
|
go
| 2 | 0 | |
tests/test_plotting.py
|
from __future__ import print_function
import os
import pytest
import numpy as np
from lifelines.estimation import NelsonAalenFitter, KaplanMeierFitter, AalenAdditiveFitter
from lifelines.generate_datasets import generate_random_lifetimes, generate_hazard_rates
from lifelines.plotting import plot_lifetimes
@pytest.mark.plottest
@pytest.mark.skipif("DISPLAY" not in os.environ, reason="requires display")
class TestPlotting():
def setup_method(self, method):
pytest.importorskip("matplotlib")
from matplotlib import pyplot as plt
self.plt = plt
def test_negative_times_still_plots(self, block):
n = 40
T = np.linspace(-2, 3, n)
C = np.random.randint(2, size=n)
kmf = KaplanMeierFitter()
kmf.fit(T, C)
ax = kmf.plot()
self.plt.title('test_negative_times_still_plots')
self.plt.show(block=block)
return
def test_kmf_plotting(self, block):
data1 = np.random.exponential(10, size=(100))
data2 = np.random.exponential(2, size=(200, 1))
data3 = np.random.exponential(4, size=(500, 1))
kmf = KaplanMeierFitter()
kmf.fit(data1, label='test label 1')
ax = kmf.plot()
kmf.fit(data2, label='test label 2')
kmf.plot(ax=ax)
kmf.fit(data3, label='test label 3')
kmf.plot(ax=ax)
self.plt.title("test_kmf_plotting")
self.plt.show(block=block)
return
def test_kmf_with_risk_counts(self, block):
data1 = np.random.exponential(10, size=(100))
kmf = KaplanMeierFitter()
kmf.fit(data1)
kmf.plot(at_risk_counts=True)
self.plt.title("test_kmf_with_risk_counts")
self.plt.show(block=block)
def test_naf_plotting_with_custom_colours(self, block):
data1 = np.random.exponential(5, size=(200, 1))
data2 = np.random.exponential(1, size=(500))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot(color="r")
naf.fit(data2)
naf.plot(ax=ax, c="k")
self.plt.title('test_naf_plotting_with_custom_colours')
self.plt.show(block=block)
return
def test_aalen_additive_plot(self, block):
# this is a visual test of the fitting the cumulative
# hazards.
n = 2500
d = 3
timeline = np.linspace(0, 70, 10000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
T = generate_random_lifetimes(hz, timeline)
C = np.random.binomial(1, 1., size=n)
X['T'] = T
X['E'] = C
# fit the aaf, no intercept as it is already built into X, X[2] is ones
aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
aaf.fit(X, 'T', 'E')
ax = aaf.plot(iloc=slice(0, aaf.cumulative_hazards_.shape[0] - 100))
ax.set_xlabel("time")
ax.set_title('test_aalen_additive_plot')
self.plt.show(block=block)
return
def test_aalen_additive_smoothed_plot(self, block):
# this is a visual test of the fitting the cumulative
# hazards.
n = 2500
d = 3
timeline = np.linspace(0, 150, 5000)
hz, coef, X = generate_hazard_rates(n, d, timeline)
T = generate_random_lifetimes(hz, timeline) + 0.1 * np.random.uniform(size=(n, 1))
C = np.random.binomial(1, 0.8, size=n)
X['T'] = T
X['E'] = C
# fit the aaf, no intercept as it is already built into X, X[2] is ones
aaf = AalenAdditiveFitter(coef_penalizer=0.1, fit_intercept=False)
aaf.fit(X, 'T', 'E')
ax = aaf.smoothed_hazards_(1).iloc[0:aaf.cumulative_hazards_.shape[0] - 500].plot()
ax.set_xlabel("time")
ax.set_title('test_aalen_additive_smoothed_plot')
self.plt.show(block=block)
return
def test_naf_plotting_slice(self, block):
data1 = np.random.exponential(5, size=(200, 1))
data2 = np.random.exponential(1, size=(200, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot(ix=slice(0, None))
naf.fit(data2)
naf.plot(ax=ax, ci_force_lines=True, iloc=slice(100, 180))
self.plt.title('test_naf_plotting_slice')
self.plt.show(block=block)
return
def test_plot_lifetimes_calendar(self, block):
self.plt.figure()
t = np.linspace(0, 20, 1000)
hz, coef, covrt = generate_hazard_rates(1, 5, t)
N = 20
current = 10
birthtimes = current * np.random.uniform(size=(N,))
T, C = generate_random_lifetimes(hz, t, size=N, censor=current - birthtimes)
plot_lifetimes(T, event_observed=C, birthtimes=birthtimes, block=block)
def test_plot_lifetimes_relative(self, block):
self.plt.figure()
t = np.linspace(0, 20, 1000)
hz, coef, covrt = generate_hazard_rates(1, 5, t)
N = 20
T, C = generate_random_lifetimes(hz, t, size=N, censor=True)
plot_lifetimes(T, event_observed=C, block=block)
def test_naf_plot_cumulative_hazard(self, block):
data1 = np.random.exponential(5, size=(200, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
ax = naf.plot()
naf.plot_cumulative_hazard(ax=ax, ci_force_lines=True)
self.plt.title("I should have plotted the same thing, but different styles + color!")
self.plt.show(block=block)
return
def test_naf_plot_cumulative_hazard_bandwidth_2(self, block):
data1 = np.random.exponential(5, size=(2000, 1))
naf = NelsonAalenFitter()
naf.fit(data1)
naf.plot_hazard(bandwidth=1., ix=slice(0, 7.))
self.plt.title('test_naf_plot_cumulative_hazard_bandwidth_2')
self.plt.show(block=block)
return
def test_naf_plot_cumulative_hazard_bandwith_1(self, block):
data1 = np.random.exponential(5, size=(2000, 1)) ** 2
naf = NelsonAalenFitter()
naf.fit(data1)
naf.plot_hazard(bandwidth=5., iloc=slice(0, 1700))
self.plt.title('test_naf_plot_cumulative_hazard_bandwith_1')
self.plt.show(block=block)
return
def test_show_censor_with_discrete_date(self, block):
T = np.random.binomial(20, 0.1, size=100)
C = np.random.binomial(1, 0.8, size=100)
kmf = KaplanMeierFitter()
kmf.fit(T, C).plot(show_censors=True)
self.plt.title('test_show_censor_with_discrete_date')
self.plt.show(block=block)
return
def test_show_censor_with_index_0(self, block):
T = np.random.binomial(20, 0.9, size=100) # lifelines should auto put a 0 in.
C = np.random.binomial(1, 0.8, size=100)
kmf = KaplanMeierFitter()
kmf.fit(T, C).plot(show_censors=True)
self.plt.title('test_show_censor_with_index_0')
self.plt.show(block=block)
return
def test_flat_style_and_marker(self, block):
data1 = np.random.exponential(10, size=200)
data2 = np.random.exponential(2, size=200)
C1 = np.random.binomial(1, 0.9, size=200)
C2 = np.random.binomial(1, 0.95, size=200)
kmf = KaplanMeierFitter()
kmf.fit(data1, C1, label='test label 1')
ax = kmf.plot(flat=True, censor_styles={'marker': '+', 'mew': 2, 'ms': 7})
kmf.fit(data2, C2, label='test label 2')
kmf.plot(ax=ax, censor_styles={'marker': 'o', 'ms': 7}, flat=True)
self.plt.title("testing kmf flat styling + marker")
self.plt.show(block=block)
return
def test_flat_style_no_censor(self, block):
data1 = np.random.exponential(10, size=200)
kmf = KaplanMeierFitter()
kmf.fit(data1, label='test label 1')
ax = kmf.plot(flat=True, censor_styles={'marker': '+', 'mew': 2, 'ms': 7})
self.plt.title('test_flat_style_no_censor')
self.plt.show(block=block)
return
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
pytorch_disco/config_files/hyperparams.py
|
import os
B = 2 # batch size
MB = 1 # batch size for metric learning
max_clusters = 2
commitment_cost = 0.25
is_refine_net = False
is_init_cluter_with_instance = False
top_grasp_only = False
H = 240 # height
W = 320 # width
# BY = 200*2 # bird height (y axis, [-40, 40])
# BX = 176*2 # bird width (x axis, [0, 70.4])
# BZ = 20 # bird depth (z axis, [-3.0, 1.0])
# MH = 200*2
# MW = 176*2
# MD = 20
Z = 128
Y = 64
X = 128
PH = int(128/4)
PW = int(384/4)
fix_crop = False
# ZY = 32
# ZX = 32
# ZZ = 16
N = 50 # number of boxes produced by the rcnn (not all are good)
K = 1 # number of boxes to actually use
S = 2 # seq length
T = 256 # height & width of birdview map
V = 100000 # num velodyne points
sensor_S = 10 # sensor length for sequence
#----------- loading -----------#
loadname = None
emb2D_init = ""
feat_init = ""
obj_init = ""
box_init = ""
ort_init = ""
inp_init = ""
traj_init = ""
occ_init = ""
view_init = ""
vis_init = ""
flow_init = ""
ego_init = ""
total_init = True
touch_feat_init = "" # path to initialize the touch featnet
touch_forward_init = "" # path to initialize the context net
reset_iter = False
#--------- training mode ----------#
do_compute_cluster_center = False
do_freeze_emb2D = False
do_freeze_feat = False
do_freeze_obj = False
do_freeze_box = False
do_freeze_ort = False
do_freeze_inp = False
do_freeze_traj = False
do_freeze_occ = False
do_freeze_view = False
do_freeze_vis = False
do_freeze_flow = False
do_freeze_ego = False
do_freeze_touch_feat = False
do_freeze_touch_forward = False
do_resume = False
do_profile = False
# by default, only backprop on "train" iters
backprop_on_train = True
backprop_on_val = False
backprop_on_test = False
# eval mode: save npys
do_eval_map = False
do_eval_recall = False # keep a buffer and eval recall within it
do_save_embs = False
do_save_ego = False
#----------- augs -----------#
# do_aug2D = False
# do_aug3D = False
do_aug_color = False
do_time_flip = False
do_horz_flip = False
do_synth_rt = False
do_synth_nomotion = False
do_piecewise_rt = False
do_sparsify_pointcloud = 0 # choose a number here, for # pts to use
#----------- net design -----------#
# run nothing
do_emb2D = False
do_emb3D = False
do_feat = False
do_obj = False
do_box = False
do_ort = False
do_inp = False
do_traj = False
do_occ = False
do_view = False
do_flow = False
do_ego = False
do_vis = False
do_touch_embML = False
do_touch_feat = False
do_touch_occ = False
do_touch_forward = False
do_moc = False
do_metric_learning = False
do_validation = False
do_generate_data = False
do_det = False
deeper_det = False
#----------- general hypers -----------#
lr = 0.0
#----------- emb hypers -----------#
emb_2D_smooth_coeff = 0.0
emb_3D_smooth_coeff = 0.0
emb_2D_ml_coeff = 0.0
emb_3D_ml_coeff = 0.0
emb_2D_l2_coeff = 0.0
emb_3D_l2_coeff = 0.0
emb_2D_mindist = 0.0
emb_3D_mindist = 0.0
emb_2D_num_samples = 0
emb_3D_num_samples = 0
# ..... Added for touch embedding .... #
emb_3D_touch_num_samples = 0
emb_3D_touch_mindist = 0.0
emb_3D_touch_ml_coeff = 0.0
emb_3D_touch_l2_coeff = 0.0
#----------- feat hypers -----------#
feat_coeff = 0.0
feat_rigid_coeff = 0.0
feat_do_vae = False
feat_do_sb = False
feat_do_resnet = False
feat_do_sparse_invar = False
feat_kl_coeff = 0.0
feat_dim = 8
feat_do_flip = False
feat_do_rt = False
#----------- obj hypers -----------#
obj_coeff = 0.0
obj_dim = 8
#----------- box hypers -----------#
box_sup_coeff = 0.0
box_cs_coeff = 0.0
box_dim = 8
#----------- ort hypers -----------#
ort_coeff = 0.0
ort_warp_coeff = 0.0
ort_dim = 8
#----------- inp hypers -----------#
inp_coeff = 0.0
inp_dim = 8
#----------- traj hypers -----------#
traj_coeff = 0.0
traj_dim = 8
#----------- occ hypers -----------#
occ_do_cheap = False
occ_coeff = 0.0
occ_smooth_coeff = 0.0
#----------- view hypers -----------#
view_depth = 64
view_pred_embs = False
view_pred_rgb = False
view_l1_coeff = 0.0
view_ce_coeff = 0.0
view_dl_coeff = 0.0
#----------- vis hypers-------------#
vis_softmax_coeff = 0.0
vis_hard_coeff = 0.0
vis_l1_coeff = 0.0
vis_debug = False
#----------- flow hypers -----------#
flow_warp_coeff = 0.0
flow_cycle_coeff = 0.0
flow_smooth_coeff = 0.0
flow_l1_coeff = 0.0
flow_synth_l1_coeff = 0.0
flow_do_synth_rt = False
flow_patch_size = 4
#----------- ego hypers -----------#
ego_use_gt = False
ego_use_precomputed = False
ego_rtd_coeff = 0.0
ego_rta_coeff = 0.0
ego_traj_coeff = 0.0
ego_warp_coeff = 0.0
# ---------- Place holder for forward prediction hyper if any ----------- #
contextH = 4
contextW = 4
contextD = 4
# ---- metric learning loss ---- #
metric_learning_loss_type = "cluster_id" # success_rate
# --------- moc hypers ------------- #
dict_len = 10000
num_neg_samples = 2000
do_bn = True # whether to use batch normalization
num_pos_samples = 1024 # helpful for doing voxel level moco_learning
# --------- det hypers ------------- #
det_anchor_size = 12.0
det_prob_coeff = 1.0
det_reg_coeff = 1.0
alpha_pos = 1.5
beta_neg = 1.0
det_anchor_size_x = 0
det_anchor_size_y = 0
det_anchor_size_z = 0
#----------- mod -----------#
mod = '""'
############ slower-to-change hyperparams below here ############
## logging
log_freq_train = 100
log_freq_val = 100
log_freq_test = 100
snap_freq = 5000
max_iters = 10000
shuffle_train = True
shuffle_val = True
shuffle_test = True
dataset_name = ""
seqname = ""
trainset = ""
valset = ""
testset = ""
dataset_list_dir = ""
dataset_location = ""
validation_path = ""
validate_after = 1
dataset_format = "py" #can be py or npz
# mode selection
do_zoom = False
do_carla_det = False
do_carla_mot = False
do_carla_flo = False
do_carla_sta = False
do_mujoco_offline = False
do_mujoco_offline_metric = False
do_touch_embed = False
############ rev up the experiment ############
train_mode = "train"
mode = os.environ["MODE"]
print('os.environ mode is %s' % mode)
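# Illustrative usage (entry-point name is a placeholder): the mode is chosen
# via the MODE environment variable, e.g. MODE=CARLA_STA python main.py;
# any value not handled below hits the assert at the end of this chain.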
if mode=="CARLA_DET":
exec(compile(open('config_files/exp_carla_det.py').read(), 'exp_carla_det.py', 'exec'))
elif mode=="CARLA_MOT":
exec(compile(open('config_files/exp_carla_mot.py').read(), 'exp_carla_mot.py', 'exec'))
elif mode=="CARLA_FLO":
exec(compile(open('config_files/exp_carla_flo.py').read(), 'exp_carla_flo.py', 'exec'))
elif mode=="CARLA_STA":
exec(compile(open('config_files/exp_carla_sta.py').read(), 'exp_carla_sta.py', 'exec'))
elif mode=="MUJOCO_OFFLINE":
exec(open('config_files/exp_mujoco_offline.py').read())
elif mode=="MUJOCO_OFFLINE_METRIC":
exec(open('config_files/exp_mujoco_offline_metric.py').read())
elif mode=="MUJOCO_OFFLINE_METRIC_2D":
exec(open('config_files/exp_mujoco_offline_metric_2d.py').read())
elif mode == "TOUCH_EMB":
exec(compile(open('config_files/exp_touch_emb.py').read(), 'exp_touch_emb.py', 'exec'))
elif mode=="CUSTOM":
exec(compile(open('exp_custom.py').read(), 'exp_custom.py', 'exec'))
else:
assert False, "unknown MODE: %s" % mode # what mode is this?
############ make some final adjustments ############
trainset_path = "%s/%s.txt" % (dataset_list_dir, trainset)
valset_path = "%s/%s.txt" % (dataset_list_dir, valset)
testset_path = "%s/%s.txt" % (dataset_list_dir, testset)
data_paths = {}
data_paths['train'] = trainset_path
data_paths['val'] = valset_path
data_paths['test'] = testset_path
set_nums = {}
set_nums['train'] = 0
set_nums['val'] = 1
set_nums['test'] = 2
set_names = ['train', 'val', 'test']
log_freqs = {}
log_freqs['train'] = log_freq_train
log_freqs['val'] = log_freq_val
log_freqs['test'] = log_freq_test
shuffles = {}
shuffles['train'] = shuffle_train
shuffles['val'] = shuffle_val
shuffles['test'] = shuffle_test
############ autogen a name; don't touch any hypers! ############
def strnum(x):
s = '%g' % x
if '.' in s:
s = s[s.index('.'):]
return s
name = "%02d_m%dx%dx%d" % (B, Z,Y,X)
if do_view or do_emb2D:
name += "_p%dx%d" % (PH,PW)
if lr > 0.0:
lrn = "%.1e" % lr
# e.g., 5.0e-04
lrn = lrn[0] + lrn[3:5] + lrn[-1]
name += "_%s" % lrn
if do_feat:
name += "_F"
name += "%d" % feat_dim
if feat_do_flip:
name += "l"
if feat_do_rt:
name += "r"
if feat_do_vae:
name += "v"
if feat_do_sb:
name += 'b'
if feat_do_resnet:
name += 'r'
if feat_do_sparse_invar:
name += 'i'
if do_freeze_feat:
name += "f"
else:
feat_losses = [feat_rigid_coeff,
feat_kl_coeff,
]
feat_prefixes = ["r",
"k",
]
for l_, l in enumerate(feat_losses):
if l > 0:
name += "_%s%s" % (feat_prefixes[l_],strnum(l))
if do_touch_feat:
name += "_TF"
name += "%d" % feat_dim
if do_ego:
name += "_G"
if ego_use_gt:
name += "gt"
elif ego_use_precomputed:
name += "pr"
else:
if do_freeze_ego:
name += "f"
else:
ego_losses = [ego_rtd_coeff,
ego_rta_coeff,
ego_traj_coeff,
ego_warp_coeff,
]
ego_prefixes = ["rtd",
"rta",
"t",
"w",
]
for l_, l in enumerate(ego_losses):
if l > 0:
name += "_%s%s" % (ego_prefixes[l_],strnum(l))
if do_obj:
name += "_J"
# name += "%d" % obj_dim
if do_freeze_obj:
name += "f"
else:
# no real hyps here
pass
if do_box:
name += "_B"
# name += "%d" % box_dim
if do_freeze_box:
name += "f"
else:
box_coeffs = [box_sup_coeff,
box_cs_coeff,
# box_smooth_coeff,
]
box_prefixes = ["su",
"cs",
# "s",
]
for l_, l in enumerate(box_coeffs):
if l > 0:
name += "_%s%s" % (box_prefixes[l_],strnum(l))
if do_ort:
name += "_O"
# name += "%d" % ort_dim
if do_freeze_ort:
name += "f"
else:
ort_coeffs = [ort_coeff,
ort_warp_coeff,
# ort_smooth_coeff,
]
ort_prefixes = ["c",
"w",
# "s",
]
for l_, l in enumerate(ort_coeffs):
if l > 0:
name += "_%s%s" % (ort_prefixes[l_],strnum(l))
if do_inp:
name += "_I"
# name += "%d" % inp_dim
if do_freeze_inp:
name += "f"
else:
inp_coeffs = [inp_coeff,
# inp_smooth_coeff,
]
inp_prefixes = ["c",
# "s",
]
for l_, l in enumerate(inp_coeffs):
if l > 0:
name += "_%s%s" % (inp_prefixes[l_],strnum(l))
if do_traj:
name += "_T"
name += "%d" % traj_dim
if do_freeze_traj:
name += "f"
else:
# no real hyps here
pass
if do_occ:
name += "_O"
if occ_do_cheap:
name += "c"
if do_freeze_occ:
name += "f"
else:
occ_coeffs = [occ_coeff,
occ_smooth_coeff,
]
occ_prefixes = ["c",
"s",
]
for l_, l in enumerate(occ_coeffs):
if l > 0:
name += "_%s%s" % (occ_prefixes[l_],strnum(l))
if do_touch_occ:
name += "_TO"
if occ_do_cheap:
name += "c"
if do_freeze_occ:
name += "f"
else:
occ_coeffs = [occ_coeff,
occ_smooth_coeff,
]
occ_prefixes = ["c",
"s",
]
for l_, l in enumerate(occ_coeffs):
if l > 0:
name += "_%s%s" % (occ_prefixes[l_],strnum(l))
if do_view:
name += "_V"
if view_pred_embs:
name += "e"
if view_pred_rgb:
name += "r"
if do_freeze_view:
name += "f"
# sometimes, even if view is frozen, we use the loss
# to train other nets
view_coeffs = [view_depth,
view_l1_coeff,
view_ce_coeff,
view_dl_coeff,
]
view_prefixes = ["d",
"c",
"e",
"s",
]
for l_, l in enumerate(view_coeffs):
if l > 0:
name += "_%s%s" % (view_prefixes[l_],strnum(l))
if do_vis:
name += "_V"
if vis_debug:
name += 'd'
if do_freeze_vis:
name += "f"
else:
vis_coeffs = [vis_softmax_coeff,
vis_hard_coeff,
vis_l1_coeff,
]
vis_prefixes = ["s",
"h",
"c",
]
for l_, l in enumerate(vis_coeffs):
if l > 0:
name += "_%s%s" % (vis_prefixes[l_],strnum(l))
if do_emb2D:
name += "_E2"
if do_freeze_emb2D:
name += "f"
emb_coeffs = [emb_2D_smooth_coeff,
emb_2D_ml_coeff,
emb_2D_l2_coeff,
emb_2D_num_samples,
emb_2D_mindist,
]
emb_prefixes = ["s",
"m",
"e",
"n",
"d",
]
for l_, l in enumerate(emb_coeffs):
if l > 0:
name += "_%s%s" % (emb_prefixes[l_],strnum(l))
if do_emb3D:
name += "_E3"
emb_coeffs = [emb_3D_smooth_coeff,
emb_3D_ml_coeff,
emb_3D_l2_coeff,
emb_3D_num_samples,
emb_3D_mindist,
]
emb_prefixes = ["s",
"m",
"e",
"n",
"d",
]
for l_, l in enumerate(emb_coeffs):
if l > 0:
name += "_%s%s" % (emb_prefixes[l_],strnum(l))
if do_touch_embML:
name += "_touchE3"
emb_coeffs = [emb_3D_smooth_coeff,
emb_3D_ml_coeff,
emb_3D_l2_coeff,
emb_3D_num_samples,
emb_3D_mindist,
]
emb_prefixes = ["s",
"m",
"e",
"n",
"d",
]
for l_, l in enumerate(emb_coeffs):
if l > 0:
name += "_%s%s" % (emb_prefixes[l_],strnum(l))
if do_touch_forward:
name += "_tforward"
# hyperparams if any go here
forward_vars = [contextH,
contextW,
contextD]
forward_prefixes = ['ch', 'cw', 'cd']
for l_, l in enumerate(forward_vars):
if l > 0:
name += "_%s%s" % (forward_prefixes[l_], strnum(l))
if do_moc:
name += "_mocml"
moc_vars = [num_neg_samples,
num_pos_samples,
dict_len,
do_bn,
emb_3D_mindist]
moc_prefixes = ['nns', 'nps', 'dl', 'do_bn', 'md']
for l_, l in enumerate(moc_vars):
if l > 0:
name += "_%s%s" % (moc_prefixes[l_], strnum(l))
if do_flow:
name += "_F"
if do_freeze_flow:
name += "f"
else:
flow_coeffs = [flow_warp_coeff,
flow_cycle_coeff,
flow_smooth_coeff,
flow_l1_coeff,
flow_synth_l1_coeff,
]
flow_prefixes = ["w",
"c",
"s",
"e",
"y",
]
for l_, l in enumerate(flow_coeffs):
if l > 0:
name += "_%s%s" % (flow_prefixes[l_],strnum(l))
##### end model description
# add some training data info
sets_to_run = {}
if trainset:
name = "%s_%s" % (name, trainset)
sets_to_run['train'] = True
else:
sets_to_run['train'] = False
if valset:
name = "%s_%s" % (name, valset)
sets_to_run['val'] = True
else:
sets_to_run['val'] = False
if testset:
name = "%s_%s" % (name, testset)
sets_to_run['test'] = True
else:
sets_to_run['test'] = False
sets_to_backprop = {}
sets_to_backprop['train'] = backprop_on_train
sets_to_backprop['val'] = backprop_on_val
sets_to_backprop['test'] = backprop_on_test
if (do_aug_color or
do_horz_flip or
do_time_flip or
do_synth_rt or
do_piecewise_rt or
do_synth_nomotion or
do_sparsify_pointcloud):
name += "_A"
if do_aug_color:
name += "c"
if do_horz_flip:
name += "h"
if do_time_flip:
name += "t"
if do_synth_rt:
assert(not do_piecewise_rt)
name += "s"
if do_piecewise_rt:
assert(not do_synth_rt)
name += "p"
if do_synth_nomotion:
name += "n"
if do_sparsify_pointcloud:
name += "v"
if (not shuffle_train) or (not shuffle_val) or (not shuffle_test):
name += "_ns"
if do_profile:
name += "_PR"
if mod:
name = "%s_%s" % (name, mod)
if do_resume:
total_init = name
if do_eval_recall:
name += '_ev_re1_evaluation'
if do_validation:
splits = validation_path.split('/')
val_path = splits[-1][:-4]
name += f'val_{val_path}'
print(name)
|
[] |
[] |
[
"MODE"
] |
[]
|
["MODE"]
|
python
| 1 | 0 | |
main.go
|
/**
@author: xinyulu
@date: 2021/1/8 14:19
@note:
**/
package main
import (
_ "cloudbatch/conf"
"cloudbatch/interact"
"fmt"
"io/ioutil"
"os"
"os/signal"
"strings"
"syscall"
//"github.com/c-bata/go-prompt"
)
func main() {
//cmd.Execute()
pdAddr := os.Getenv("PD_ADDR")
if pdAddr != "" {
os.Args = append(os.Args, "-u", pdAddr)
}
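// Illustrative sketch (binary name is a placeholder): setting PD_ADDR is
// equivalent to passing -u on the command line, e.g.
//   PD_ADDR=http://127.0.0.1:2379 ./cloudbatch ...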
sc := make(chan os.Signal, 1)
signal.Notify(sc,
syscall.SIGHUP,
syscall.SIGINT,
syscall.SIGTERM,
syscall.SIGQUIT)
go func() {
sig := <-sc
fmt.Printf("\nGot signal [%v] to exit.\n", sig)
switch sig {
case syscall.SIGTERM:
os.Exit(0)
default:
os.Exit(1)
}
}()
var input []string
stat, _ := os.Stdin.Stat()
if (stat.Mode() & os.ModeCharDevice) == 0 {
b, err := ioutil.ReadAll(os.Stdin)
if err != nil {
fmt.Println(err)
return
}
input = strings.Split(strings.TrimSpace(string(b[:])), " ")
}
interact.MainStart(append(os.Args[1:], input...))
}
|
[
"\"PD_ADDR\""
] |
[] |
[
"PD_ADDR"
] |
[]
|
["PD_ADDR"]
|
go
| 1 | 0 | |
src/proxies.py
|
import json
import random
import re
import time
from base64 import b64decode
from concurrent.futures import ThreadPoolExecutor, as_completed
import requests
import typing
from PyRoxy import ProxyType, Proxy
import logging
logging.basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s', datefmt="%H:%M:%S")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
TIMEOUT = 10
PROXY_TIMEOUT = 5
SCRAPE_THREADS = 20
PROXY_THREADS = 100
IP_REGEX = r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}'
PORT_REGEX = r'[0-9]+'
IP_PORT_REGEX = rf'({IP_REGEX}):({PORT_REGEX})'
IP_PORT_TABLE_REGEX = rf'({IP_REGEX})\s*</td>\s*<td>\s*({PORT_REGEX})'
USER_AGENTS = [
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1',
'Mozilla/5.0 (X11; CrOS x86_64 8172.45.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.64 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
]
def get_headers():
return {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-US,en;q=0.9',
'Cache-Control': 'max-age=0',
'User-Agent': random.choice(USER_AGENTS),
'Referer': 'https://www.google.com/',
'Pragma': 'no-cache',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'none',
'Sec-Fetch-User': '?1',
'Sec-Gpc': '1',
'Upgrade-Insecure-Requests': '1',
}
class Provider:
def __init__(self, url, proto):
self.url = url
self.proto = proto
def scrape(self):
return self.parse(self.fetch(self.url))
def fetch(self, url):
response = requests.get(url=url, timeout=TIMEOUT, headers=get_headers())
response.raise_for_status()
return response.text
def parse(self, data):
raise NotImplementedError
def __str__(self):
return f'{self.proto.name} | {self.url}'
class RegexProvider(Provider):
def __init__(self, url, proto, regex):
super().__init__(url, proto)
self.regex = regex
def parse(self, data):
for ip, port in re.findall(self.regex, data):
yield ip, port, self.proto
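# Illustrative sketch (URL taken from the provider list below): a RegexProvider
# yields (ip, port, proto) tuples, e.g.
#   provider = RegexProvider('https://free-proxy-list.net/', ProxyType.HTTP, IP_PORT_REGEX)
#   for ip, port, proto in provider.scrape():
#       ...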
class PubProxyProvider(RegexProvider):
def __init__(self, url, proto, regex=IP_PORT_REGEX):
super().__init__(url, proto, regex)
def scrape(self):
for _ in range(10):
yield from super().scrape()
time.sleep(1)
class GeonodeProvider(Provider):
def parse(self, data):
data = json.loads(data)
for row in data['data']:
yield row['ip'], row['port'], self.proto
class UaShieldProvider(Provider):
def __init__(self, url):
super().__init__(url, proto=ProxyType.HTTP)
def parse(self, data):
data = json.loads(data)
for obj in data:
if 'auth' in obj:
continue
ip, port = obj['ip'].split(':')
yield ip, port, ProxyType[obj['scheme'].upper()]
class HideMyNameProvider(RegexProvider):
def __init__(self, url, proto, regex=IP_PORT_TABLE_REGEX, pages=(1, 10)):
self.pages = pages
super().__init__(url, proto, regex)
def scrape(self):
for page in range(*self.pages):
url = self.url
if page != 1:
url = url + '&start=' + str(64 * (page - 1))
result = list(self.parse(self.fetch(url)))
if not result:
return
yield from result
class ProxyListProvider(RegexProvider):
def __init__(self, url, proto, regex=r"Proxy\('([\w=]+)'\)"):
super().__init__(url, proto, regex)
def scrape(self):
for page in range(1, 20):
url = self.url + '?p=' + str(page)
result = list(self.parse(self.fetch(url)))
if not result:
return
yield from result
time.sleep(1)
def parse(self, data):
for proxy in re.findall(self.regex, data):
ip, port = b64decode(proxy).decode().split(':')
yield ip, port, self.proto
class FarmProxyProvider(RegexProvider):
def __init__(self, api_key, proxy):
self.proxy = proxy
super().__init__(
url=f'https://panel.farmproxy.net/api/v1/proxies.protocol-ip-port.txt?api_key={api_key}',
proto=ProxyType.HTTP,
regex=rf'(socks4|socks5|http)://({IP_REGEX}):({PORT_REGEX})'
)
def parse(self, data):
for proto, ip, port in re.findall(self.regex, data):
yield ip, port, ProxyType[proto.upper()]
def fetch(self, url):
response = requests.get(url=url, timeout=TIMEOUT, headers=get_headers(), proxies={'https': self.proxy})
response.raise_for_status()
return response.text
# noinspection LongLine
PROVIDERS = [
# Manual
RegexProvider('https://raw.githubusercontent.com/porthole-ascend-cinnamon/proxy_scraper/main/manual/socks4.txt', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/porthole-ascend-cinnamon/proxy_scraper/main/manual/socks5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/porthole-ascend-cinnamon/proxy_scraper/main/manual/http.txt', ProxyType.HTTP, IP_PORT_REGEX),
# Multi-scheme
UaShieldProvider('https://raw.githubusercontent.com/opengs/uashieldtargets/v2/proxy.json'),
# FarmProxyProvider(
# os.getenv('FARM_PROXY_API_KEY'),
# os.getenv('STABLE_IP_PROXY')
# ),
# SOCKS4
RegexProvider('https://api.proxyscrape.com/v2/?request=displayproxies&protocol=socks4', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://api.proxyscrape.com/?request=displayproxies&proxytype=socks4', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/jetkai/proxy-list/main/online-proxies/txt/proxies-socks4.txt', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/roosterkid/openproxylist/main/SOCKS4_RAW.txt', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks4.txt', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/UserR3X/proxy-list/main/online/socks4.txt', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://www.my-proxy.com/free-socks-4-proxy.html', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://www.socks-proxy.net/', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://www.freeproxychecker.com/result/socks4_proxies.txt', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/ShiftyTR/Proxy-List/master/socks4.txt', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('http://proxydb.net/?protocol=socks4', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('https://socks-proxy.net/', ProxyType.SOCKS4, IP_PORT_REGEX),
PubProxyProvider('http://pubproxy.com/api/proxy?limit=5&format=txt&type=socks4', ProxyType.SOCKS4),
RegexProvider('https://www.proxy-list.download/SOCKS4', ProxyType.SOCKS4, IP_PORT_TABLE_REGEX),
GeonodeProvider('https://proxylist.geonode.com/api/proxy-list?limit=500&page=1&speed=fast&protocols=socks4', ProxyType.SOCKS4),
GeonodeProvider('https://proxylist.geonode.com/api/proxy-list?limit=500&page=1&speed=medium&protocols=socks4', ProxyType.SOCKS4),
HideMyNameProvider('https://hidemy.name/ru/proxy-list/?type=4', ProxyType.SOCKS4),
RegexProvider('http://www.proxylists.net/socks4.txt', ProxyType.SOCKS4, IP_PORT_REGEX),
RegexProvider('http://proxysearcher.sourceforge.net/Proxy%20List.php?type=socks', ProxyType.SOCKS4, IP_PORT_REGEX),
# SOCKS5
RegexProvider('https://api.proxyscrape.com/v2/?request=displayproxies&protocol=socks5', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://api.proxyscrape.com/?request=displayproxies&proxytype=socks5', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/hookzof/socks5_list/master/proxy.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/jetkai/proxy-list/main/online-proxies/txt/proxies-socks5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/manuGMG/proxy-365/main/SOCKS5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/mmpx12/proxy-list/master/socks5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/socks5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/UserR3X/proxy-list/main/online/socks5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://spys.me/socks.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://www.my-proxy.com/free-socks-5-proxy.html', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/ShiftyTR/Proxy-List/master/socks5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('http://proxydb.net/?protocol=socks5', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://www.proxy-list.download/api/v1/get?type=socks5', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://api.openproxylist.xyz/socks5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://openproxy.space/list/socks5', ProxyType.SOCKS5, f'"{IP_PORT_REGEX}"'),
PubProxyProvider('http://pubproxy.com/api/proxy?limit=5&format=txt&type=socks5', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('https://www.proxy-list.download/SOCKS5', ProxyType.SOCKS5, IP_PORT_TABLE_REGEX),
GeonodeProvider('https://proxylist.geonode.com/api/proxy-list?limit=500&page=1&speed=fast&protocols=socks5', ProxyType.SOCKS5),
GeonodeProvider('https://proxylist.geonode.com/api/proxy-list?limit=500&page=1&speed=medium&protocols=socks5', ProxyType.SOCKS5),
RegexProvider('https://www.freeproxychecker.com/result/socks5_proxies.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
RegexProvider('http://www.proxylists.net/socks5.txt', ProxyType.SOCKS5, IP_PORT_REGEX),
HideMyNameProvider('https://hidemy.name/ru/proxy-list/?type=5', ProxyType.SOCKS5),
# HTTP(S)
RegexProvider('https://api.proxyscrape.com/v2/?request=displayproxies&protocol=http', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://api.proxyscrape.com/?request=displayproxies&proxytype=http', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/almroot/proxylist/master/list.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/clarketm/proxy-list/master/proxy-list-raw.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/hendrikbgr/Free-Proxy-Repo/master/proxy_list.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/jetkai/proxy-list/main/online-proxies/txt/proxies-http%2Bhttps.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/mmpx12/proxy-list/master/http.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/mmpx12/proxy-list/master/https.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/roosterkid/openproxylist/main/HTTPS_RAW.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/sunny9577/proxy-scraper/master/proxies.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/TheSpeedX/PROXY-List/master/http.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/UserR3X/proxy-list/main/online/http%2Bs.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://www.proxy-list.download/api/v1/get?type=http', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://www.proxy-list.download/api/v1/get?type=https', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('http://spys.me/proxy.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://www.sslproxies.org/', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://www.my-proxy.com/free-anonymous-proxy.html', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://www.my-proxy.com/free-transparent-proxy.html', ProxyType.HTTP, IP_PORT_REGEX),
*(
RegexProvider(f'https://www.my-proxy.com/free-proxy-list-{i}.html', ProxyType.HTTP, IP_PORT_REGEX)
for i in range(1, 11)
),
RegexProvider('https://raw.githubusercontent.com/ShiftyTR/Proxy-List/master/http.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://raw.githubusercontent.com/ShiftyTR/Proxy-List/master/https.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('http://proxydb.net/?protocol=http&protocol=https', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://api.openproxylist.xyz/http.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('http://www.google-proxy.net/', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://free-proxy-list.net/', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://www.us-proxy.org/', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://free-proxy-list.net/uk-proxy.html', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://free-proxy-list.net/anonymous-proxy.html', ProxyType.HTTP, IP_PORT_REGEX),
PubProxyProvider('http://pubproxy.com/api/proxy?limit=5&format=txt&type=http', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('http://www.proxylists.net/http.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://openproxy.space/list/http', ProxyType.HTTP, f'"{IP_PORT_REGEX}"'),
RegexProvider('https://www.proxy-list.download/HTTPS', ProxyType.HTTP, IP_PORT_TABLE_REGEX),
RegexProvider('https://www.proxy-list.download/HTTP', ProxyType.HTTP, IP_PORT_TABLE_REGEX),
GeonodeProvider('https://proxylist.geonode.com/api/proxy-list?limit=500&page=1&speed=fast&protocols=http%2Chttps', ProxyType.HTTP),
GeonodeProvider('https://proxylist.geonode.com/api/proxy-list?limit=500&page=1&speed=medium&protocols=http%2Chttps', ProxyType.HTTP),
RegexProvider('http://www.httptunnel.ge/ProxyListForFree.aspx', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('http://api.foxtools.ru/v2/Proxy.txt', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('http://proxysearcher.sourceforge.net/Proxy%20List.php?type=http', ProxyType.HTTP, IP_PORT_REGEX),
RegexProvider('https://www.ipaddress.com/proxy-list/', ProxyType.HTTP, rf'({IP_REGEX})</a>:({PORT_REGEX})'),
ProxyListProvider('https://proxy-list.org/english/index.php', ProxyType.HTTP),
HideMyNameProvider('https://hidemy.name/ru/proxy-list/?type=hs', ProxyType.HTTP, pages=(1, 11)),
]
def scrape_all():
with ThreadPoolExecutor(SCRAPE_THREADS) as executor:
futures = {
executor.submit(provider.scrape): provider
for provider in PROVIDERS
}
for future in as_completed(futures):
provider = futures[future]
try:
result = set(future.result())
logger.info(f'Success: {provider} : {len(result)}')
yield from result
except Exception as exc:
logger.error(f'{provider} : {exc}')
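# Illustrative sketch (editor's addition, not part of the original script): the
# pattern above fans every provider's scrape() call out to a thread pool and
# consumes results as they finish, so one slow or failing provider never blocks
# the rest. A minimal standalone version of the same idea:
#
#     from concurrent.futures import ThreadPoolExecutor, as_completed
#
#     def fetch_all(tasks, workers=8):
#         with ThreadPoolExecutor(workers) as executor:
#             futures = {executor.submit(task): task for task in tasks}
#             for future in as_completed(futures):
#                 try:
#                     yield futures[future], future.result()
#                 except Exception as exc:
#                     print(f'{futures[future]} failed: {exc}')
#
# Here fetch_all, tasks, and workers are hypothetical names used only for this
# illustration.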
def check_proxies(proxies):
urls = [
'http://httpbin.org/get',
'http://azenv.net/',
'http://www.proxy-listen.de/azenv.php',
'http://www.meow.org.uk/cgi-bin/env.pl',
'https://users.ugent.be/~bfdwever/start/env.cgi',
'https://www2.htw-dresden.de/~beck/cgi-bin/env.cgi',
'http://mojeip.net.pl/asdfa/azenv.php',
]
future_to_proxy = {}
with ThreadPoolExecutor(PROXY_THREADS) as executor:
for url, proxies_chunk in zip(urls, (proxies[i::len(urls)] for i in range(len(urls)))):
logger.info(f'Checking {len(proxies_chunk)} proxies against {url}')
future_to_proxy.update({
executor.submit(proxy.check, url, PROXY_TIMEOUT): proxy
for proxy in proxies_chunk
})
for future in as_completed(future_to_proxy):
if future.result():
yield future_to_proxy[future]
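# Editor's note (illustration only): the strided slices above split `proxies`
# into len(urls) interleaved chunks so each check URL gets roughly an equal
# share, e.g. with 7 proxies and 3 URLs:
#
#     proxies = list(range(7))
#     chunks = [proxies[i::3] for i in range(3)]
#     # -> [[0, 3, 6], [1, 4], [2, 5]]
#
# This assumes `proxies` is an indexable sequence (refresh_proxies below passes
# a shuffled list).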
def refresh_proxies():
proxies = set(scrape_all())
logger.info(f'Proxies: {len(proxies)}')
proxies = [
Proxy(ip, int(port), proto)
for ip, port, proto in proxies
]
random.shuffle(proxies)
return proxies
def update_proxies_file(proxies: typing.List[Proxy], proxies_file_path="MHDDoS/files/proxies/proxylist.txt"):
with open(proxies_file_path, 'w') as out:
out.writelines((str(proxy) + '\n' for proxy in proxies))
if __name__ == '__main__':
expected_at_least = 10000
proxies = refresh_proxies()
if len(proxies) < expected_at_least:
logger.error('Found too few proxies')
exit(1)
update_proxies_file(proxies)
|
[] |
[] |
[
"FARM_PROXY_API_KEY",
"STABLE_IP_PROXY"
] |
[]
|
["FARM_PROXY_API_KEY", "STABLE_IP_PROXY"]
|
python
| 2 | 0 | |
release/serve_tests/workloads/autoscaling_single_deployment.py
|
#!/usr/bin/env python3
"""
Benchmark test for single deployment at 1k no-op replica scale with
autoscaling.
1) Start with a single head node.
2) Autoscale up to 1k no-op replicas over N nodes.
3) Launch wrk on each running node to simulate load-balanced requests.
4) Run a 10-minute wrk trial on each node, then aggregate results.
Report:
per_thread_latency_avg_ms
per_thread_latency_max_ms
per_thread_avg_tps
per_thread_max_tps
per_node_avg_tps
per_node_avg_transfer_per_sec_KB
cluster_total_thoughput
cluster_total_transfer_KB
cluster_max_P50_latency_ms
cluster_max_P75_latency_ms
cluster_max_P90_latency_ms
cluster_max_P99_latency_ms
"""
import click
import json
import math
import os
from ray import serve
from ray.serve.utils import logger
from serve_test_utils import (
aggregate_all_metrics,
run_wrk_on_all_nodes,
save_test_results,
is_smoke_test,
)
from serve_test_cluster_utils import (
setup_local_single_node_cluster,
setup_anyscale_cluster,
warm_up_one_cluster,
NUM_CPU_PER_NODE,
NUM_CONNECTIONS,
)
from typing import Optional
# Experiment configs
DEFAULT_SMOKE_TEST_MIN_NUM_REPLICA = 1
DEFAULT_SMOKE_TEST_MAX_NUM_REPLICA = 4
DEFAULT_FULL_TEST_MIN_NUM_REPLICA = 1
DEFAULT_FULL_TEST_MAX_NUM_REPLICA = 1000
# Deployment configs
DEFAULT_MAX_BATCH_SIZE = 16
# Experiment configs - wrk specific
DEFAULT_SMOKE_TEST_TRIAL_LENGTH = "15s"
DEFAULT_FULL_TEST_TRIAL_LENGTH = "10m"
def deploy_replicas(min_replicas, max_replicas, max_batch_size):
@serve.deployment(
name="echo",
_autoscaling_config={
"metrics_interval_s": 0.1,
"min_replicas": min_replicas,
"max_replicas": max_replicas,
"look_back_period_s": 0.2,
"downscale_delay_s": 0.2,
"upscale_delay_s": 0.2,
},
version="v1",
)
class Echo:
@serve.batch(max_batch_size=max_batch_size)
async def handle_batch(self, requests):
return ["hi" for _ in range(len(requests))]
async def __call__(self, request):
return await self.handle_batch(request)
Echo.deploy()
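# Illustrative usage (hypothetical values, editor's addition): calling
#   deploy_replicas(min_replicas=1, max_replicas=1000, max_batch_size=16)
# registers the "echo" deployment and lets Serve's autoscaler move the replica
# count between 1 and 1000, reporting metrics roughly every 0.1 s per the
# _autoscaling_config above, while each replica batches up to 16 requests.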
def save_results(final_result, default_name):
test_output_json = os.environ.get(
"TEST_OUTPUT_JSON", "/tmp/single_deployment_1k_noop_replica.json"
)
with open(test_output_json, "wt") as f:
json.dump(final_result, f)
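# Editor's note: save_results writes to the path given by the TEST_OUTPUT_JSON
# environment variable (defaulting to /tmp/single_deployment_1k_noop_replica.json).
# The main() flow below calls save_test_results instead, so this helper appears
# unused in the default flow.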
@click.command()
@click.option("--min-replicas", "-min", type=int)
@click.option("--max-replicas", "-max", type=int)
@click.option("--trial-length", "-tl", type=str)
@click.option("--max-batch-size", type=int, default=DEFAULT_MAX_BATCH_SIZE)
def main(
min_replicas: Optional[int],
max_replicas: Optional[int],
trial_length: Optional[str],
max_batch_size: Optional[int],
):
    # Use default cluster parameter values based on the smoke_test config;
    # if the user provided values explicitly, use those instead.
    # IS_SMOKE_TEST is set by the arguments of the releaser's e2e.py.
if is_smoke_test():
min_replicas = min_replicas or DEFAULT_SMOKE_TEST_MIN_NUM_REPLICA
max_replicas = max_replicas or DEFAULT_SMOKE_TEST_MAX_NUM_REPLICA
trial_length = trial_length or DEFAULT_SMOKE_TEST_TRIAL_LENGTH
logger.info(
f"Running local / smoke test with min {min_replicas} and max "
f"{max_replicas} replicas ..\n"
)
        # Choose the cluster setup based on user config. The local test uses
        # Cluster() to mock a multi-node setup, which requires the number of
        # nodes to be specified, whereas the ray client does not.
num_nodes = int(math.ceil(max_replicas / NUM_CPU_PER_NODE))
logger.info(f"Setting up local ray cluster with {num_nodes} nodes ..\n")
serve_client = setup_local_single_node_cluster(num_nodes)[0]
else:
min_replicas = min_replicas or DEFAULT_FULL_TEST_MIN_NUM_REPLICA
max_replicas = max_replicas or DEFAULT_FULL_TEST_MAX_NUM_REPLICA
trial_length = trial_length or DEFAULT_FULL_TEST_TRIAL_LENGTH
logger.info(
f"Running full test with min {min_replicas} and max "
f"{max_replicas} replicas ..\n"
)
logger.info("Setting up anyscale ray cluster .. \n")
serve_client = setup_anyscale_cluster()
http_host = str(serve_client._http_config.host)
http_port = str(serve_client._http_config.port)
logger.info(f"Ray serve http_host: {http_host}, http_port: {http_port}")
logger.info(
f"Deploying with min {min_replicas} and max {max_replicas} "
f"target replicas ....\n"
)
deploy_replicas(min_replicas, max_replicas, max_batch_size)
logger.info("Warming up cluster ....\n")
warm_up_one_cluster.remote(10, http_host, http_port, "echo")
logger.info(f"Starting wrk trial on all nodes for {trial_length} ....\n")
# For detailed discussion, see https://github.com/wg/wrk/issues/205
# TODO:(jiaodong) What's the best number to use here ?
all_endpoints = list(serve.list_deployments().keys())
all_metrics, all_wrk_stdout = run_wrk_on_all_nodes(
trial_length, NUM_CONNECTIONS, http_host, http_port, all_endpoints=all_endpoints
)
aggregated_metrics = aggregate_all_metrics(all_metrics)
logger.info("Wrk stdout on each node: ")
for wrk_stdout in all_wrk_stdout:
logger.info(wrk_stdout)
logger.info("Final aggregated metrics: ")
for key, val in aggregated_metrics.items():
logger.info(f"{key}: {val}")
save_test_results(
aggregated_metrics,
default_output_file="/tmp/autoscaling_single_deployment.json",
)
if __name__ == "__main__":
main()
import pytest
import sys
sys.exit(pytest.main(["-v", "-s", __file__]))
|
[] |
[] |
[
"TEST_OUTPUT_JSON"
] |
[]
|
["TEST_OUTPUT_JSON"]
|
python
| 1 | 0 | |
src/emailservice/email_server.py
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
import time
from concurrent import futures
import grpc
import traceback
from jinja2 import Environment, FileSystemLoader, select_autoescape, TemplateError
from google.api_core.exceptions import GoogleAPICallError
from grpc_health.v1 import health_pb2, health_pb2_grpc
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.grpc import GrpcInstrumentorServer
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
import demo_pb2
import demo_pb2_grpc
from logger import getJSONLogger
logger = getJSONLogger("emailservice-server")
grpc_server_instrumentor = GrpcInstrumentorServer()
grpc_server_instrumentor.instrument()
otlp_exporter = OTLPSpanExporter()
# Loads confirmation email template from file
env = Environment(
loader=FileSystemLoader("templates"), autoescape=select_autoescape(["html", "xml"])
)
template = env.get_template("confirmation.html")
class BaseEmailService(demo_pb2_grpc.EmailServiceServicer):
def Check(self, request, context):
return health_pb2.HealthCheckResponse(
status=health_pb2.HealthCheckResponse.SERVING)
    def Watch(self, request, context):
        return health_pb2.HealthCheckResponse(
            status=health_pb2.HealthCheckResponse.UNIMPLEMENTED
        )
class EmailService(BaseEmailService):
def __init__(self):
raise Exception("cloud mail client not implemented")
super().__init__()
@staticmethod
def send_email(client, email_address, content):
response = client.send_message(
sender=client.sender_path(project_id, region, sender_id),
envelope_from_authority="",
header_from_authority="",
envelope_from_address=from_address,
simple_message={
"from": {
"address_spec": from_address,
},
"to": [{"address_spec": email_address}],
"subject": "Your Confirmation Email",
"html_body": content,
},
)
logger.info("Message sent: {}".format(response.rfc822_message_id))
def SendOrderConfirmation(self, request, context):
email = request.email
order = request.order
try:
confirmation = template.render(order=order)
except TemplateError as err:
context.set_details(
"An error occurred when preparing the confirmation mail."
)
logger.error(err.message)
context.set_code(grpc.StatusCode.INTERNAL)
return demo_pb2.Empty()
try:
EmailService.send_email(self.client, email, confirmation)
except GoogleAPICallError as err:
context.set_details("An error occurred when sending the email.")
print(err.message)
context.set_code(grpc.StatusCode.INTERNAL)
return demo_pb2.Empty()
return demo_pb2.Empty()
class DummyEmailService(BaseEmailService):
def SendOrderConfirmation(self, request, context):
logger.info(
"A request to send order confirmation email to {} has been received.".format(
request.email
)
)
return demo_pb2.Empty()
class HealthCheck:
def Check(self, request, context):
return health_pb2.HealthCheckResponse(
status=health_pb2.HealthCheckResponse.SERVING
)
def start(dummy_mode):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
service = None
if dummy_mode:
service = DummyEmailService()
else:
raise Exception("non-dummy mode not implemented yet")
demo_pb2_grpc.add_EmailServiceServicer_to_server(service, server)
health_pb2_grpc.add_HealthServicer_to_server(service, server)
port = os.environ.get("PORT", "8080")
logger.info("listening on port: " + port)
server.add_insecure_port("[::]:" + port)
server.start()
try:
while True:
time.sleep(3600)
except KeyboardInterrupt:
server.stop(0)
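# Editor's note (usage sketch, not part of the original service): the gRPC
# server binds to the port taken from the PORT environment variable,
# defaulting to 8080, so e.g.
#   PORT=9090 python email_server.py
# would serve the dummy email service on [::]:9090.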
if __name__ == "__main__":
logger.info("starting the email service in dummy mode.")
resource = Resource(attributes={"service.name": "emailservice"})
trace.set_tracer_provider(TracerProvider(resource=resource))
trace.get_tracer_provider().add_span_processor(BatchSpanProcessor(otlp_exporter))
start(dummy_mode=True)
|
[] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
python
| 1 | 0 | |
orm/tables.go
|
package orm
import (
"os"
// TODO: Use log15 instead of fmt?
"fmt"
"context"
"database/sql"
"github.com/pkg/errors"
"github.com/rbastic/dyndao/schema"
)
// CreateTables executes a CreateTable operation for every table specified in
// the schema.
func (o *ORM) CreateTables(ctx context.Context) error {
for tName := range o.s.Tables {
err := o.CreateTable(ctx, o.s, tName)
if err != nil {
return err
}
}
return nil
}
// DropTables executes a DropTable operation for every table specified in the
// schema.
func (o *ORM) DropTables(ctx context.Context) error {
for tName := range o.s.Tables {
err := o.DropTable(ctx, tName)
if err != nil {
return err
}
}
return nil
}
// CreateTable will execute a CreateTable operation for the specified table in
// a given schema.
func (o *ORM) CreateTable(ctx context.Context, sch *schema.Schema, tableName string) error {
sqlStr, err := o.sqlGen.CreateTable(o.sqlGen, sch, tableName)
if err != nil {
return err
}
debug := os.Getenv("DB_TRACE")
if debug != "" {
		// Currently, DB_TRACE is treated as a simple on/off switch.
fmt.Println("CreateTable:", sqlStr)
}
_, err = prepareAndExecSQL(ctx, o.RawConn, sqlStr)
if err != nil {
return errors.Wrap(err, "CreateTable")
}
return nil
}
// DropTable will execute a DropTable operation for the specified table in
// a given schema.
func (o *ORM) DropTable(ctx context.Context, tableName string) error {
sqlStr := o.sqlGen.DropTable(tableName)
_, err := prepareAndExecSQL(ctx, o.RawConn, sqlStr)
if err != nil {
return errors.Wrap(err, "DropTable")
}
return nil
}
func prepareAndExecSQL(ctx context.Context, db *sql.DB, sqlStr string) (sql.Result, error) {
stmt, err := db.PrepareContext(ctx, sqlStr)
if err != nil {
return nil, errors.Wrap(err, "prepareAndExecSQL/PrepareContext ("+sqlStr+")")
}
defer func() {
stmtErr := stmt.Close()
if stmtErr != nil {
fmt.Println(stmtErr) // TODO: logging implementation
}
}()
r, err := stmt.ExecContext(ctx)
if err != nil {
return nil, errors.Wrap(err, "prepareAndExecSQL/ExecContext ("+sqlStr+")")
}
return r, nil
}
|
[
"\"DB_TRACE\""
] |
[] |
[
"DB_TRACE"
] |
[]
|
["DB_TRACE"]
|
go
| 1 | 0 | |
tests/trainer/test_supporters.py
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections.abc import Sequence
from unittest import mock
import pytest
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data.dataset import Dataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data.sampler import Sampler, SequentialSampler
from pytorch_lightning import Trainer
from pytorch_lightning.trainer.supporters import (
_nested_calc_num_data,
CombinedDataset,
CombinedLoader,
CombinedLoaderIterator,
CycleIterator,
TensorRunningAccum,
)
from pytorch_lightning.utilities.apply_func import apply_to_collection
from pytorch_lightning.utilities.exceptions import MisconfigurationException
def test_tensor_running_accum_reset():
"""Test that reset would set all attributes to the initialization state."""
window_length = 10
accum = TensorRunningAccum(window_length=window_length)
assert accum.last() is None
assert accum.mean() is None
accum.append(torch.tensor(1.5))
assert accum.last() == torch.tensor(1.5)
assert accum.mean() == torch.tensor(1.5)
accum.reset()
assert accum.window_length == window_length
assert accum.memory is None
assert accum.current_idx == 0
assert accum.last_idx is None
assert not accum.rotated
def test_cycle_iterator():
"""Test the cycling function of `CycleIterator`"""
iterator = CycleIterator(range(100), 1000)
assert len(iterator) == 1000
for idx, item in enumerate(iterator):
assert item < 100
assert idx == len(iterator) - 1
def test_none_length_cycle_iterator():
"""Test the infinite cycling function of `CycleIterator`"""
iterator = CycleIterator(range(100))
assert iterator.__len__() == float("inf")
# test infinite loop
for idx, item in enumerate(iterator):
if idx == 1000:
break
assert item == 0
@pytest.mark.parametrize(
["dataset_1", "dataset_2"],
[
([list(range(10)), list(range(20))]),
([range(10), range(20)]),
([torch.randn(10, 3, 2), torch.randn(20, 5, 6)]),
([TensorDataset(torch.randn(10, 3, 2)), TensorDataset(torch.randn(20, 5, 6))]),
],
)
def test_combined_dataset(dataset_1, dataset_2):
"""Verify the length of the CombinedDataset."""
datasets = [dataset_1, dataset_2]
combined_dataset = CombinedDataset(datasets)
assert combined_dataset.max_len == 20
assert combined_dataset.min_len == len(combined_dataset) == 10
def test_combined_dataset_length_mode_error():
dset = CombinedDataset([range(10)])
with pytest.raises(MisconfigurationException, match="Invalid Mode"):
dset._calc_num_data([range(10)], "test")
def test_combined_loader_iterator_dict_min_size():
"""Test `CombinedLoaderIterator` given mapping loaders."""
loaders = {
"a": torch.utils.data.DataLoader(range(10), batch_size=4),
"b": torch.utils.data.DataLoader(range(20), batch_size=5),
}
combined_iter = CombinedLoaderIterator(loaders)
for idx, item in enumerate(combined_iter):
assert isinstance(item, dict)
assert len(item) == 2
assert "a" in item and "b" in item
assert idx == min(len(loaders["a"]), len(loaders["b"])) - 1
def test_combined_loader_init_mode_error():
"""Test the ValueError when constructing `CombinedLoader`"""
with pytest.raises(MisconfigurationException, match="Invalid Mode"):
CombinedLoader([range(10)], "testtt")
def test_combined_loader_loader_type_error():
"""Test the ValueError when wrapping the loaders."""
with pytest.raises(TypeError, match="Expected data to be int, Sequence or Mapping, but got NoneType"):
CombinedLoader(None, "max_size_cycle")
def test_combined_loader_calc_length_mode_error():
"""Test the ValueError when calculating the number of batches."""
with pytest.raises(TypeError, match="Expected data to be int, Sequence or Mapping, but got NoneType"):
CombinedLoader._calc_num_batches(None)
def test_combined_loader_dict_min_size():
"""Test `CombinedLoader` of mode 'min_size' given mapping loaders."""
loaders = {
"a": torch.utils.data.DataLoader(range(10), batch_size=4),
"b": torch.utils.data.DataLoader(range(20), batch_size=5),
}
combined_loader = CombinedLoader(loaders, "min_size")
assert len(combined_loader) == min(len(v) for v in loaders.values())
for idx, item in enumerate(combined_loader):
assert isinstance(item, dict)
assert len(item) == 2
assert "a" in item and "b" in item
assert idx == len(combined_loader) - 1
def test_combined_loader_dict_max_size_cycle():
"""Test `CombinedLoader` of mode 'max_size_cycle' given mapping loaders."""
loaders = {
"a": torch.utils.data.DataLoader(range(10), batch_size=4),
"b": torch.utils.data.DataLoader(range(20), batch_size=5),
}
combined_loader = CombinedLoader(loaders, "max_size_cycle")
assert len(combined_loader) == max(len(v) for v in loaders.values())
for idx, item in enumerate(combined_loader):
assert isinstance(item, dict)
assert len(item) == 2
assert "a" in item and "b" in item
assert idx == len(combined_loader) - 1
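# Editor's note (illustration only): with the loaders above, "min_size" stops
# once the shorter loader is exhausted, while "max_size_cycle" keeps cycling
# the shorter one until the longer finishes. For batch sizes 4 and 5 over
# ranges of 10 and 20 elements, len(a) == 3 and len(b) == 4, so:
#
#     CombinedLoader(loaders, "min_size")        # yields 3 batches
#     CombinedLoader(loaders, "max_size_cycle")  # yields 4 batches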
def test_combined_loader_sequence_min_size():
"""Test `CombinedLoader` of mode 'min_size' given sequence loaders."""
loaders = [
torch.utils.data.DataLoader(range(10), batch_size=4),
torch.utils.data.DataLoader(range(20), batch_size=5),
]
combined_loader = CombinedLoader(loaders, "min_size")
assert len(combined_loader) == min(len(v) for v in loaders)
for idx, item in enumerate(combined_loader):
assert isinstance(item, Sequence)
assert len(item) == 2
assert idx == len(combined_loader) - 1
class TestIterableDataset(IterableDataset):
def __init__(self, size: int = 10):
self.size = size
def __iter__(self):
self.sampler = SequentialSampler(range(self.size))
self.sampler_iter = iter(self.sampler)
return self
def __next__(self):
return next(self.sampler_iter)
@pytest.mark.parametrize("mode", ["min_size", "max_size_cycle"])
@pytest.mark.parametrize("use_multiple_dataloaders", [False, True])
def test_combined_loader_sequence_iterable_dataset(mode, use_multiple_dataloaders):
"""Test `CombinedLoader` of mode 'min_size' given sequence loaders."""
if use_multiple_dataloaders:
loaders = [
torch.utils.data.DataLoader(TestIterableDataset(10), batch_size=2),
torch.utils.data.DataLoader(TestIterableDataset(20), batch_size=2),
]
else:
loaders = [
torch.utils.data.DataLoader(TestIterableDataset(10), batch_size=2),
]
combined_loader = CombinedLoader(loaders, mode)
has_break = False
for idx, item in enumerate(combined_loader):
assert isinstance(item, Sequence)
assert len(item) == 2 if use_multiple_dataloaders else 1
if not use_multiple_dataloaders and idx == 4:
has_break = True
break
if mode == "max_size_cycle":
assert combined_loader.loaders[0].state.done == (not has_break)
expected = (10 if mode == "max_size_cycle" else 5) if use_multiple_dataloaders else 5
assert (expected - 1) == idx, (mode, use_multiple_dataloaders)
@pytest.mark.parametrize("lengths", [[4, 6], [5, 5], [6, 4]])
def test_combined_loader_sequence_with_map_and_iterable(lengths):
class MyIterableDataset(IterableDataset):
def __init__(self, size: int = 10):
self.size = size
def __iter__(self):
self.sampler = SequentialSampler(range(self.size))
self.iter_sampler = iter(self.sampler)
return self
def __next__(self):
return next(self.iter_sampler)
class MyMapDataset(Dataset):
def __init__(self, size: int = 10):
self.size = size
def __getitem__(self, index):
return index
def __len__(self):
return self.size
x, y = lengths
loaders = [DataLoader(MyIterableDataset(x)), DataLoader(MyMapDataset(y))]
dataloader = CombinedLoader(loaders, mode="max_size_cycle")
counter = 0
for _ in dataloader:
counter += 1
assert counter == max(x, y)
def test_combined_loader_sequence_max_size_cycle():
"""Test `CombinedLoader` of mode 'max_size_cycle' given sequence loaders."""
loaders = [
torch.utils.data.DataLoader(range(10), batch_size=4),
torch.utils.data.DataLoader(range(20), batch_size=5),
]
combined_loader = CombinedLoader(loaders, "max_size_cycle")
assert len(combined_loader) == max(len(v) for v in loaders)
for idx, item in enumerate(combined_loader):
assert isinstance(item, Sequence)
assert len(item) == 2
assert idx == len(combined_loader) - 1
@pytest.mark.parametrize(
["input_data", "compute_func", "expected_length"],
[
([*range(10), list(range(1, 20))], min, 0),
([*range(10), list(range(1, 20))], max, 19),
([*range(10), {str(i): i for i in range(1, 20)}], min, 0),
([*range(10), {str(i): i for i in range(1, 20)}], max, 19),
({**{str(i): i for i in range(10)}, "nested": {str(i): i for i in range(1, 20)}}, min, 0),
({**{str(i): i for i in range(10)}, "nested": {str(i): i for i in range(1, 20)}}, max, 19),
({**{str(i): i for i in range(10)}, "nested": list(range(20))}, min, 0),
({**{str(i): i for i in range(10)}, "nested": list(range(20))}, max, 19),
],
)
def test_nested_calc_num_data(input_data, compute_func, expected_length):
calculated_length = _nested_calc_num_data(input_data, compute_func)
assert calculated_length == expected_length
@mock.patch.dict(os.environ, {"CUDA_VISIBLE_DEVICES": "0,1", "PL_TRAINER_GPUS": "2"})
@mock.patch("torch.cuda.device_count", return_value=2)
@mock.patch("torch.cuda.is_available", return_value=True)
def test_combined_data_loader_validation_test(cuda_available_mock, device_count_mock, tmpdir):
"""This test makes sure distributed sampler has been properly injected in dataloaders when using
CombinedLoader."""
class CustomDataset(Dataset):
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
dataloader = CombinedLoader(
{
"a": DataLoader(CustomDataset(range(10))),
"b": {"c": DataLoader(CustomDataset(range(10))), "d": DataLoader(CustomDataset(range(10)))},
"e": [DataLoader(CustomDataset(range(10))), DataLoader(CustomDataset(range(10)))],
}
)
trainer = Trainer(replace_sampler_ddp=True, accelerator="ddp", gpus=2)
dataloader = trainer.auto_add_sampler(dataloader, shuffle=True)
_count = 0
def _assert_distributed_sampler(v):
nonlocal _count
_count += 1
assert isinstance(v, DistributedSampler)
apply_to_collection(dataloader.sampler, Sampler, _assert_distributed_sampler)
assert _count == 5
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
libgo/go/syscall/exec_linux_test.go
|
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package syscall_test
import (
"flag"
"fmt"
"internal/testenv"
"io"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"testing"
"unsafe"
)
func isDocker() bool {
_, err := os.Stat("/.dockerenv")
return err == nil
}
func isLXC() bool {
return os.Getenv("container") == "lxc"
}
func skipInContainer(t *testing.T) {
// TODO: the callers of this func are using this func to skip
// tests when running as some sort of "fake root" that's uid 0
// but lacks certain Linux capabilities. Most of the Go builds
// run in privileged containers, though, where root is much
// closer (if not identical) to the real root. We should test
// for what we need exactly (which capabilities are active?),
// instead of just assuming "docker == bad". Then we'd get more test
// coverage on a bunch of builders too.
if isDocker() {
t.Skip("skip this test in Docker container")
}
if isLXC() {
t.Skip("skip this test in LXC container")
}
}
func skipNoUserNamespaces(t *testing.T) {
if _, err := os.Stat("/proc/self/ns/user"); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support user namespaces")
}
if os.IsPermission(err) {
t.Skip("unable to test user namespaces due to permissions")
}
t.Fatalf("Failed to stat /proc/self/ns/user: %v", err)
}
}
func skipUnprivilegedUserClone(t *testing.T) {
// Skip the test if the sysctl that prevents unprivileged user
// from creating user namespaces is enabled.
data, errRead := ioutil.ReadFile("/proc/sys/kernel/unprivileged_userns_clone")
if errRead != nil || len(data) < 1 || data[0] == '0' {
t.Skip("kernel prohibits user namespace in unprivileged process")
}
}
// Check if we are in a chroot by checking if the inode of / is
// different from 2 (there is no better test available to non-root on
// linux).
func isChrooted(t *testing.T) bool {
root, err := os.Stat("/")
if err != nil {
t.Fatalf("cannot stat /: %v", err)
}
return root.Sys().(*syscall.Stat_t).Ino != 2
}
func checkUserNS(t *testing.T) {
skipInContainer(t)
skipNoUserNamespaces(t)
if isChrooted(t) {
// create_user_ns in the kernel (see
// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c)
// forbids the creation of user namespaces when chrooted.
t.Skip("cannot create user namespaces when chrooted")
}
// On some systems, there is a sysctl setting.
if os.Getuid() != 0 {
skipUnprivilegedUserClone(t)
}
// On Centos 7 make sure they set the kernel parameter user_namespace=1
// See issue 16283 and 20796.
if _, err := os.Stat("/sys/module/user_namespace/parameters/enable"); err == nil {
		buf, _ := ioutil.ReadFile("/sys/module/user_namespace/parameters/enable")
if !strings.HasPrefix(string(buf), "Y") {
t.Skip("kernel doesn't support user namespaces")
}
}
// On Centos 7.5+, user namespaces are disabled if user.max_user_namespaces = 0
if _, err := os.Stat("/proc/sys/user/max_user_namespaces"); err == nil {
buf, errRead := ioutil.ReadFile("/proc/sys/user/max_user_namespaces")
if errRead == nil && buf[0] == '0' {
t.Skip("kernel doesn't support user namespaces")
}
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
}
func whoamiCmd(t *testing.T, uid, gid int, setgroups bool) *exec.Cmd {
checkUserNS(t)
cmd := exec.Command("whoami")
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWUSER,
UidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: uid, Size: 1},
},
GidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: gid, Size: 1},
},
GidMappingsEnableSetgroups: setgroups,
}
return cmd
}
func testNEWUSERRemap(t *testing.T, uid, gid int, setgroups bool) {
cmd := whoamiCmd(t, uid, gid, setgroups)
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
sout := strings.TrimSpace(string(out))
want := "root"
if sout != want {
t.Fatalf("whoami = %q; want %q", out, want)
}
}
func TestCloneNEWUSERAndRemapRootDisableSetgroups(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("skipping root only test")
}
testNEWUSERRemap(t, 0, 0, false)
}
func TestCloneNEWUSERAndRemapRootEnableSetgroups(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("skipping root only test")
}
testNEWUSERRemap(t, 0, 0, true)
}
func TestCloneNEWUSERAndRemapNoRootDisableSetgroups(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("skipping unprivileged user only test")
}
testNEWUSERRemap(t, os.Getuid(), os.Getgid(), false)
}
func TestCloneNEWUSERAndRemapNoRootSetgroupsEnableSetgroups(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("skipping unprivileged user only test")
}
cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), true)
err := cmd.Run()
if err == nil {
t.Skip("probably old kernel without security fix")
}
if !os.IsPermission(err) {
t.Fatalf("Unprivileged gid_map rewriting with GidMappingsEnableSetgroups must fail with permission error; got %v", err)
}
}
func TestEmptyCredGroupsDisableSetgroups(t *testing.T) {
cmd := whoamiCmd(t, os.Getuid(), os.Getgid(), false)
cmd.SysProcAttr.Credential = &syscall.Credential{}
if err := cmd.Run(); err != nil {
t.Fatal(err)
}
}
func TestUnshare(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
// When running under the Go continuous build, skip tests for
// now when under Kubernetes. (where things are root but not quite)
// Both of these are our own environment variables.
// See Issue 12815.
if os.Getenv("GO_BUILDER_NAME") != "" && os.Getenv("IN_KUBERNETES") == "1" {
t.Skip("skipping test on Kubernetes-based builders; see Issue 12815")
}
path := "/proc/net/dev"
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support proc filesystem")
}
if os.IsPermission(err) {
t.Skip("unable to test proc filesystem due to permissions")
}
t.Fatal(err)
}
if _, err := os.Stat("/proc/self/ns/net"); err != nil {
if os.IsNotExist(err) {
t.Skip("kernel doesn't support net namespace")
}
t.Fatal(err)
}
orig, err := ioutil.ReadFile(path)
if err != nil {
t.Fatal(err)
}
origLines := strings.Split(strings.TrimSpace(string(orig)), "\n")
cmd := exec.Command("cat", path)
cmd.SysProcAttr = &syscall.SysProcAttr{
Unshareflags: syscall.CLONE_NEWNET,
}
out, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), "operation not permitted") {
// Issue 17206: despite all the checks above,
// this still reportedly fails for some users.
// (older kernels?). Just skip.
t.Skip("skipping due to permission error")
}
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
// Check there is only the local network interface
sout := strings.TrimSpace(string(out))
if !strings.Contains(sout, "lo:") {
t.Fatalf("Expected lo network interface to exist, got %s", sout)
}
lines := strings.Split(sout, "\n")
if len(lines) >= len(origLines) {
t.Fatalf("Got %d lines of output, want <%d", len(lines), len(origLines))
}
}
func TestGroupCleanup(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("we need root for credential")
}
cmd := exec.Command("id")
cmd.SysProcAttr = &syscall.SysProcAttr{
Credential: &syscall.Credential{
Uid: 0,
Gid: 0,
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
strOut := strings.TrimSpace(string(out))
expected := "uid=0(root) gid=0(root)"
// Just check prefix because some distros reportedly output a
// context parameter; see https://golang.org/issue/16224.
// Alpine does not output groups; see https://golang.org/issue/19938.
if !strings.HasPrefix(strOut, expected) {
t.Errorf("id command output: %q, expected prefix: %q", strOut, expected)
}
}
func TestGroupCleanupUserNamespace(t *testing.T) {
if os.Getuid() != 0 {
t.Skip("we need root for credential")
}
checkUserNS(t)
cmd := exec.Command("id")
uid, gid := os.Getuid(), os.Getgid()
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWUSER,
Credential: &syscall.Credential{
Uid: uint32(uid),
Gid: uint32(gid),
},
UidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: uid, Size: 1},
},
GidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: gid, Size: 1},
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
strOut := strings.TrimSpace(string(out))
// Strings we've seen in the wild.
expected := []string{
"uid=0(root) gid=0(root) groups=0(root)",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody)",
"uid=0(root) gid=0(root) groups=0(root),65534(nogroup)",
"uid=0(root) gid=0(root) groups=0(root),65534",
"uid=0(root) gid=0(root) groups=0(root),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody),65534(nobody)", // Alpine; see https://golang.org/issue/19938
"uid=0(root) gid=0(root) groups=0(root) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023", // CentOS with SELinux context, see https://golang.org/issue/34547
}
for _, e := range expected {
if strOut == e {
return
}
}
t.Errorf("id command output: %q, expected one of %q", strOut, expected)
}
// TestUnshareMountNameSpaceHelper isn't a real test. It's used as a helper
// process for TestUnshareMountNameSpace.
func TestUnshareMountNameSpaceHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
if err := syscall.Mount("none", flag.Args()[0], "proc", 0, ""); err != nil {
fmt.Fprintf(os.Stderr, "unshare: mount %v failed: %v", os.Args, err)
os.Exit(2)
}
}
// Test for Issue 38471: unshare fails because systemd has forced / to be shared
func TestUnshareMountNameSpace(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
d, err := ioutil.TempDir("", "unshare")
if err != nil {
t.Fatalf("tempdir: %v", err)
}
cmd := exec.Command(os.Args[0], "-test.run=TestUnshareMountNameSpaceHelper", d)
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
cmd.SysProcAttr = &syscall.SysProcAttr{Unshareflags: syscall.CLONE_NEWNS}
o, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), ": permission denied") {
t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err)
}
t.Fatalf("unshare failed: %s, %v", o, err)
}
// How do we tell if the namespace was really unshared? It turns out
// to be simple: just try to remove the directory. If it's still mounted
// on the rm will fail with EBUSY. Then we have some cleanup to do:
// we must unmount it, then try to remove it again.
if err := os.Remove(d); err != nil {
t.Errorf("rmdir failed on %v: %v", d, err)
if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil {
t.Errorf("Can't unmount %v: %v", d, err)
}
if err := os.Remove(d); err != nil {
t.Errorf("rmdir after unmount failed on %v: %v", d, err)
}
}
}
// Test for Issue 20103: unshare fails when chroot is used
func TestUnshareMountNameSpaceChroot(t *testing.T) {
skipInContainer(t)
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
d, err := ioutil.TempDir("", "unshare")
if err != nil {
t.Fatalf("tempdir: %v", err)
}
// Since we are doing a chroot, we need the binary there,
// and it must be statically linked.
x := filepath.Join(d, "syscall.test")
cmd := exec.Command(testenv.GoToolPath(t), "test", "-c", "-o", x, "syscall")
cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
if o, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("Build of syscall in chroot failed, output %v, err %v", o, err)
}
cmd = exec.Command("/syscall.test", "-test.run=TestUnshareMountNameSpaceHelper", "/")
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
cmd.SysProcAttr = &syscall.SysProcAttr{Chroot: d, Unshareflags: syscall.CLONE_NEWNS}
o, err := cmd.CombinedOutput()
if err != nil {
if strings.Contains(err.Error(), ": permission denied") {
t.Skipf("Skipping test (golang.org/issue/19698); unshare failed due to permissions: %s, %v", o, err)
}
t.Fatalf("unshare failed: %s, %v", o, err)
}
// How do we tell if the namespace was really unshared? It turns out
// to be simple: just try to remove the executable. If it's still mounted
// on, the rm will fail. Then we have some cleanup to do:
// we must force unmount it, then try to remove it again.
if err := os.Remove(x); err != nil {
t.Errorf("rm failed on %v: %v", x, err)
if err := syscall.Unmount(d, syscall.MNT_FORCE); err != nil {
t.Fatalf("Can't unmount %v: %v", d, err)
}
if err := os.Remove(x); err != nil {
t.Fatalf("rm failed on %v: %v", x, err)
}
}
if err := os.Remove(d); err != nil {
t.Errorf("rmdir failed on %v: %v", d, err)
}
}
func TestUnshareUidGidMappingHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
if err := syscall.Chroot(os.TempDir()); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
}
// Test for Issue 29789: unshare fails when uid/gid mapping is specified
func TestUnshareUidGidMapping(t *testing.T) {
if os.Getuid() == 0 {
t.Skip("test exercises unprivileged user namespace, fails with privileges")
}
checkUserNS(t)
cmd := exec.Command(os.Args[0], "-test.run=TestUnshareUidGidMappingHelper")
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
cmd.SysProcAttr = &syscall.SysProcAttr{
Unshareflags: syscall.CLONE_NEWNS | syscall.CLONE_NEWUSER,
GidMappingsEnableSetgroups: false,
UidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: syscall.Getuid(),
Size: 1,
},
},
GidMappings: []syscall.SysProcIDMap{
{
ContainerID: 0,
HostID: syscall.Getgid(),
Size: 1,
},
},
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("Cmd failed with err %v, output: %s", err, out)
}
}
type capHeader struct {
version uint32
pid int32
}
type capData struct {
effective uint32
permitted uint32
inheritable uint32
}
const CAP_SYS_TIME = 25
const CAP_SYSLOG = 34
type caps struct {
hdr capHeader
data [2]capData
}
func getCaps() (caps, error) {
var c caps
// Get capability version
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(nil)), 0); errno != 0 {
return c, fmt.Errorf("SYS_CAPGET: %v", errno)
}
// Get current capabilities
if _, _, errno := syscall.Syscall(syscall.SYS_CAPGET, uintptr(unsafe.Pointer(&c.hdr)), uintptr(unsafe.Pointer(&c.data[0])), 0); errno != 0 {
return c, fmt.Errorf("SYS_CAPGET: %v", errno)
}
return c, nil
}
func mustSupportAmbientCaps(t *testing.T) {
var uname syscall.Utsname
if err := syscall.Uname(&uname); err != nil {
t.Fatalf("Uname: %v", err)
}
var buf [65]byte
for i, b := range uname.Release {
buf[i] = byte(b)
}
ver := string(buf[:])
if i := strings.Index(ver, "\x00"); i != -1 {
ver = ver[:i]
}
if strings.HasPrefix(ver, "2.") ||
strings.HasPrefix(ver, "3.") ||
strings.HasPrefix(ver, "4.1.") ||
strings.HasPrefix(ver, "4.2.") {
t.Skipf("kernel version %q predates required 4.3; skipping test", ver)
}
}
// TestAmbientCapsHelper isn't a real test. It's used as a helper process for
// TestAmbientCaps.
func TestAmbientCapsHelper(*testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
defer os.Exit(0)
caps, err := getCaps()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(2)
}
if caps.data[0].effective&(1<<uint(CAP_SYS_TIME)) == 0 {
fmt.Fprintln(os.Stderr, "CAP_SYS_TIME unexpectedly not in the effective capability mask")
os.Exit(2)
}
if caps.data[1].effective&(1<<uint(CAP_SYSLOG&31)) == 0 {
fmt.Fprintln(os.Stderr, "CAP_SYSLOG unexpectedly not in the effective capability mask")
os.Exit(2)
}
}
func TestAmbientCaps(t *testing.T) {
// Make sure we are running as root so we have permissions to use unshare
// and create a network namespace.
if os.Getuid() != 0 {
t.Skip("kernel prohibits unshare in unprivileged process, unless using user namespace")
}
testAmbientCaps(t, false)
}
func TestAmbientCapsUserns(t *testing.T) {
checkUserNS(t)
testAmbientCaps(t, true)
}
func testAmbientCaps(t *testing.T, userns bool) {
skipInContainer(t)
mustSupportAmbientCaps(t)
skipUnprivilegedUserClone(t)
// skip on android, due to lack of lookup support
if runtime.GOOS == "android" {
t.Skip("skipping test on android; see Issue 27327")
}
u, err := user.Lookup("nobody")
if err != nil {
t.Fatal(err)
}
uid, err := strconv.ParseInt(u.Uid, 0, 32)
if err != nil {
t.Fatal(err)
}
gid, err := strconv.ParseInt(u.Gid, 0, 32)
if err != nil {
t.Fatal(err)
}
// Copy the test binary to a temporary location which is readable by nobody.
f, err := ioutil.TempFile("", "gotest")
if err != nil {
t.Fatal(err)
}
defer os.Remove(f.Name())
defer f.Close()
e, err := os.Open(os.Args[0])
if err != nil {
t.Fatal(err)
}
defer e.Close()
if _, err := io.Copy(f, e); err != nil {
t.Fatal(err)
}
if err := f.Chmod(0755); err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
cmd := exec.Command(f.Name(), "-test.run=TestAmbientCapsHelper")
cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.SysProcAttr = &syscall.SysProcAttr{
Credential: &syscall.Credential{
Uid: uint32(uid),
Gid: uint32(gid),
},
AmbientCaps: []uintptr{CAP_SYS_TIME, CAP_SYSLOG},
}
if userns {
cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWUSER
const nobody = 65534
uid := os.Getuid()
gid := os.Getgid()
cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{{
ContainerID: int(nobody),
HostID: int(uid),
Size: int(1),
}}
cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{{
ContainerID: int(nobody),
HostID: int(gid),
Size: int(1),
}}
// Set credentials to run as user and group nobody.
cmd.SysProcAttr.Credential = &syscall.Credential{
Uid: nobody,
Gid: nobody,
}
}
if err := cmd.Run(); err != nil {
t.Fatal(err.Error())
}
}
|
[
"\"container\"",
"\"GO_BUILDER_NAME\"",
"\"IN_KUBERNETES\"",
"\"GO_BUILDER_NAME\"",
"\"IN_KUBERNETES\"",
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_WANT_HELPER_PROCESS\"",
"\"GO_WANT_HELPER_PROCESS\""
] |
[] |
[
"GO_BUILDER_NAME",
"GO_WANT_HELPER_PROCESS",
"IN_KUBERNETES",
"container"
] |
[]
|
["GO_BUILDER_NAME", "GO_WANT_HELPER_PROCESS", "IN_KUBERNETES", "container"]
|
go
| 4 | 0 | |
code/taskA/models.py
|
from django.db import models
class TaskA_table(models.Model):
asset_contract_address = models.CharField(max_length=100)
token_id = models.IntegerField()
pred_price = models.FloatField()
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
internal/models/database_test.go
|
// Copyright 2020 Teserakt AG
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package models
import (
"context"
"database/sql"
"io/ioutil"
"log"
"os"
"reflect"
"testing"
"github.com/jinzhu/gorm"
e4crypto "github.com/teserakt-io/e4go/crypto"
slibcfg "github.com/teserakt-io/serverlib/config"
"github.com/teserakt-io/c2/internal/config"
)
// setupFunc defines a database setup function,
// returning a Database instance and a tearDown function
type setupFunc func(t *testing.T) (Database, func())
func TestDBSQLite(t *testing.T) {
setup := func(t *testing.T) (Database, func()) {
f, err := ioutil.TempFile(os.TempDir(), "c2TestDb")
if err != nil {
t.Fatalf("Cannot create temporary file: %v", err)
}
dbCfg := config.DBCfg{
Type: DBDialectSQLite,
File: f.Name(),
Passphrase: "testpass",
SecureConnection: slibcfg.DBSecureConnectionEnabled,
Logging: false,
}
logger := log.New(os.Stdout, "", 0)
db, err := NewDB(dbCfg, logger)
if err != nil {
t.Fatalf("Cannot create db: %v", err)
}
if err := db.Migrate(); err != nil {
t.Fatalf("Expected no error when migrating database, got %v", err)
}
tearDown := func() {
db.Close()
f.Close()
os.Remove(f.Name())
}
return db, tearDown
}
testDatabase(t, setup)
}
func TestDBPostgres(t *testing.T) {
if os.Getenv("C2TEST_POSTGRES") == "" {
t.Skip("C2TEST_POSTGRES environment variable isn't set, skipping postgres tests")
}
setup := func(t *testing.T) (Database, func()) {
dbCfg := config.DBCfg{
Type: slibcfg.DBTypePostgres,
Passphrase: "testpass",
SecureConnection: slibcfg.DBSecureConnectionInsecure,
Host: "127.0.0.1",
Database: "e4",
Username: "e4_c2_test",
Password: "teserakte4",
Schema: "e4_c2_test_unit",
Logging: false,
}
logger := log.New(os.Stdout, "", 0)
db, err := NewDB(dbCfg, logger)
if err != nil {
t.Fatalf("Error connecting to postgres server: %v", err)
}
db.Connection().Exec("CREATE SCHEMA e4_c2_test_unit;")
if err := db.Migrate(); err != nil {
t.Fatalf("Expected no error when migrating database, got %v", err)
}
tearDown := func() {
db.Connection().Exec("DROP SCHEMA e4_c2_test_unit CASCADE;")
db.Close()
}
return db, tearDown
}
testDatabase(t, setup)
}
func testDatabase(t *testing.T, setup setupFunc) {
t.Run("Insert and Get properly insert or update and retrieve", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
expectedClient := Client{
ID: 1,
Name: "expectedName",
E4ID: e4crypto.HashIDAlias("expectedName"),
Key: []byte("someKey"),
}
err := db.InsertClient(expectedClient.Name, expectedClient.E4ID, expectedClient.Key)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
client, err := db.GetClientByID(expectedClient.E4ID)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if reflect.DeepEqual(client, expectedClient) == false {
t.Errorf("Expected client to be %#v, got %#v", expectedClient, client)
}
expectedClient.Key = []byte("newKey")
err = db.InsertClient(expectedClient.Name, expectedClient.E4ID, expectedClient.Key)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
client, err = db.GetClientByID(expectedClient.E4ID)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if reflect.DeepEqual(client, expectedClient) == false {
t.Errorf("Expected client to be %#v, got %#v", expectedClient, client)
}
})
t.Run("GetClient with unknown id return record not found error", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
_, err := db.GetClientByID([]byte("unknown"))
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("Insert and Get properly insert or update and retrieve", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
expectedTopicKey := TopicKey{
ID: 1,
Key: []byte("some-key"),
Topic: "someTopic",
}
err := db.InsertTopicKey(expectedTopicKey.Topic, expectedTopicKey.Key)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
topicKey, err := db.GetTopicKey(expectedTopicKey.Topic)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if reflect.DeepEqual(topicKey, expectedTopicKey) == false {
t.Errorf("Expected topicKey to be %#v, got %#v", expectedTopicKey, topicKey)
}
expectedTopicKey.Key = []byte("newKey")
err = db.InsertTopicKey(expectedTopicKey.Topic, expectedTopicKey.Key)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
topicKey, err = db.GetTopicKey(expectedTopicKey.Topic)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if reflect.DeepEqual(topicKey, expectedTopicKey) == false {
t.Errorf("Expected topicKey to be %#v, got %#v", expectedTopicKey, topicKey)
}
})
t.Run("GetTopicKey with unknown topic return record not found error", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
_, err := db.GetTopicKey("unknown")
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("Delete properly delete Client", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
expectedClient := Client{
ID: 1,
Name: "someName",
E4ID: e4crypto.HashIDAlias("someName"),
Key: []byte("someKey"),
}
err := db.InsertClient(expectedClient.Name, expectedClient.E4ID, expectedClient.Key)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
err = db.DeleteClientByID(expectedClient.E4ID)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
_, err = db.GetClientByID(expectedClient.E4ID)
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("Delete unknown Client return record not found", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
err := db.DeleteClientByID([]byte("unknown"))
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("Delete properly delete TopicKey", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
expectedTopicKey := TopicKey{
ID: 1,
Key: []byte("some-key"),
Topic: "someTopic",
}
err := db.InsertTopicKey(expectedTopicKey.Topic, expectedTopicKey.Key)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
err = db.DeleteTopicKey(expectedTopicKey.Topic)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
_, err = db.GetTopicKey(expectedTopicKey.Topic)
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("Delete unknown topicKey returns record not found error", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
err := db.DeleteTopicKey("unknown")
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("CountClients properly count Clients", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
clients := []string{
"a",
"b",
"c",
"d",
"e",
}
for i, name := range clients {
c, err := db.CountClients()
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if c != i {
t.Errorf("Expected count to be %d, got %d", i, c)
}
err = db.InsertClient(name, e4crypto.HashIDAlias(name), []byte("key"))
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
}
for i, name := range clients {
c, err := db.CountClients()
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if c != len(clients)-i {
t.Errorf("Expected count to be %d, got %d", len(clients)-i, c)
}
err = db.DeleteClientByID(e4crypto.HashIDAlias(name))
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
}
})
t.Run("CountTopicKeys properly count topicKeys", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
topics := []string{
"a",
"b",
"c",
"d",
"e",
}
for i, topic := range topics {
c, err := db.CountTopicKeys()
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if c != i {
t.Fatalf("Expected count to be %d, got %d", i, c)
}
err = db.InsertTopicKey(topic, []byte("key"))
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
}
for i, topic := range topics {
c, err := db.CountTopicKeys()
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if c != len(topics)-i {
t.Fatalf("Expected count to be %d, got %d", len(topics)-i, c)
}
err = db.DeleteTopicKey(topic)
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
}
})
t.Run("GetAllClients returns all Clients", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
clients, err := db.GetAllClients()
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if len(clients) != 0 {
t.Errorf("Expected %d Clients, got %d", 0, len(clients))
}
expectedClients := []Client{
Client{ID: 1, Name: "Client1", E4ID: e4crypto.HashIDAlias("Client1"), Key: []byte("key1")},
Client{ID: 2, Name: "Client2", E4ID: e4crypto.HashIDAlias("Client2"), Key: []byte("key2")},
Client{ID: 3, Name: "Client3", E4ID: e4crypto.HashIDAlias("Client3"), Key: []byte("key3")},
}
for _, client := range expectedClients {
err = db.InsertClient(client.Name, client.E4ID, client.Key)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
}
clients, err = db.GetAllClients()
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if reflect.DeepEqual(clients, expectedClients) == false {
t.Errorf("Expected clients to be %#v, got %#v", expectedClients, clients)
}
})
t.Run("GetAllTopics returns all topics", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
topics, err := db.GetAllTopics()
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if len(topics) != 0 {
t.Errorf("Expected %d topics, got %d", 0, len(topics))
}
expectedTopics := []TopicKey{
TopicKey{ID: 1, Topic: "a", Key: []byte("key1")},
TopicKey{ID: 2, Topic: "b", Key: []byte("key2")},
TopicKey{ID: 3, Topic: "c", Key: []byte("key3")},
}
for _, topicKey := range expectedTopics {
err = db.InsertTopicKey(topicKey.Topic, topicKey.Key)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
}
topics, err = db.GetAllTopics()
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if reflect.DeepEqual(topics, expectedTopics) == false {
t.Errorf("Expected clients to be %#v, got %#v", expectedTopics, topics)
}
})
t.Run("LinkClientTopic properly link an Client to a TopicKey", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
client := Client{ID: 1, Name: "i-1", E4ID: e4crypto.HashIDAlias("i-1"), Key: []byte("key")}
if err := db.InsertClient(client.Name, client.E4ID, client.Key); err != nil {
t.Fatalf("Failed to insert Client: %v", err)
}
topic := TopicKey{ID: 1, Topic: "t-1", Key: []byte("key")}
if err := db.InsertTopicKey(topic.Topic, topic.Key); err != nil {
t.Fatalf("Failed to insert TopicKey: %v", err)
}
if err := db.LinkClientTopic(client, topic); err != nil {
t.Errorf("Expected no error, got %v", err)
}
count, err := db.CountClientsForTopic(topic.Topic)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if count != 1 {
t.Errorf("Expected count to be 1, got %d", count)
}
count, err = db.CountTopicsForClientByID(client.E4ID)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if count != 1 {
t.Errorf("Expected count to be 1, got %d", count)
}
topics, err := db.GetTopicsForClientByID(client.E4ID, 0, 10)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if !reflect.DeepEqual(topics, []TopicKey{topic}) {
t.Errorf("Expected topics to be %#v, got %#v", []TopicKey{topic}, topics)
}
clients, err := db.GetClientsForTopic(topic.Topic, 0, 10)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if !reflect.DeepEqual(clients, []Client{client}) {
t.Errorf("Expected clients to be %#v, got %#v", []Client{client}, clients)
}
if err := db.UnlinkClientTopic(client, topic); err != nil {
t.Errorf("Expected no error, got %v", err)
}
topics, err = db.GetTopicsForClientByID(client.E4ID, 0, 10)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if len(topics) != 0 {
t.Errorf("Expected no topics, got %#v", topics)
}
clients, err = db.GetClientsForTopic(topic.Topic, 0, 10)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if len(clients) != 0 {
t.Errorf("Expected no clients, got %#v", clients)
}
count, err = db.CountClientsForTopic(topic.Topic)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if count != 0 {
t.Errorf("Expected count to be 0, got %d", count)
}
count, err = db.CountTopicsForClientByID(client.E4ID)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
if count != 0 {
t.Errorf("Expected count to be 0, got %d", count)
}
})
t.Run("Link with unknown records return errors", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
client := Client{Name: "a", E4ID: e4crypto.HashIDAlias("a"), Key: []byte("b")}
topicKey := TopicKey{Topic: "c", Key: []byte("d")}
if err := db.LinkClientTopic(client, topicKey); err != ErrClientNoPrimaryKey {
t.Errorf("Expected error to be %v, got %v", ErrClientNoPrimaryKey, err)
}
if err := db.InsertClient(client.Name, client.E4ID, client.Key); err != nil {
t.Errorf("Expected no error, got %v", err)
}
client.ID = 1
if err := db.LinkClientTopic(client, topicKey); err != ErrTopicKeyNoPrimaryKey {
t.Errorf("Expected error to be %v, got %v", ErrTopicKeyNoPrimaryKey, err)
}
})
t.Run("Unlink with unknown records return errors", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
client := Client{Name: "a", E4ID: e4crypto.HashIDAlias("a"), Key: []byte("b")}
topicKey := TopicKey{Topic: "c", Key: []byte("d")}
if err := db.UnlinkClientTopic(client, topicKey); err != ErrClientNoPrimaryKey {
t.Errorf("Expected error to be %v, got %v", ErrClientNoPrimaryKey, err)
}
if err := db.InsertClient(client.Name, client.E4ID, client.Key); err != nil {
t.Errorf("Expected no error, got %v", err)
}
client.ID = 1
if err := db.UnlinkClientTopic(client, topicKey); err != ErrTopicKeyNoPrimaryKey {
t.Errorf("Expected error to be %v, got %v", ErrTopicKeyNoPrimaryKey, err)
}
})
t.Run("GetIdsforTopic with unknown topic returns a RecordNotFound error", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
_, err := db.GetClientsForTopic("unknown", 0, 1)
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("GetTopicsForClientByXxx with unknown topic returns a RecordNotFound error", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
_, err := db.GetTopicsForClientByID([]byte("unknown"), 0, 1)
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
_, err = db.GetTopicsForClientByID([]byte("unknown"), 0, 1)
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("CountClientsForTopic returns a record not found when topic doesn't exists", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
_, err := db.CountClientsForTopic("unknown")
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("CountTopicsForID returns a record not found when topic doesn't exists", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
_, err := db.CountTopicsForClientByID([]byte("unknown"))
if err != gorm.ErrRecordNotFound {
t.Errorf("Expected error to be %v, got %v", gorm.ErrRecordNotFound, err)
}
})
t.Run("Migrate on already migrated DB succeeds", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
err := db.Migrate()
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
})
t.Run("Transactions properly commits", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
txDb, err := db.BeginTx(context.Background(), &sql.TxOptions{})
if err != nil {
t.Fatalf("got error '%v' when beginning tx", err)
}
clientName := "client1"
clientID := e4crypto.HashIDAlias(clientName)
if err := txDb.InsertClient(clientName, clientID, []byte("client1key")); err != nil {
t.Fatalf("got error '%v' when inserting client", err)
}
if _, err := db.GetClientByID(clientID); !IsErrRecordNotFound(err) {
t.Fatalf("Uncommitted transaction: got error '%v' fetching client, want '%v' ", err, gorm.ErrRecordNotFound)
}
if err := txDb.CommitTx(); err != nil {
t.Fatalf("got error '%v' when committing tx", err)
}
if _, err := db.GetClientByID(clientID); err != nil {
t.Fatalf("Committed transaction: got error '%v' fetching client", err)
}
})
t.Run("Transactions properly rollback", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
txDb, err := db.BeginTx(context.Background(), &sql.TxOptions{})
if err != nil {
t.Fatalf("got error '%v' when beginning tx", err)
}
clientName := "client1"
clientID := e4crypto.HashIDAlias(clientName)
if err := txDb.InsertClient(clientName, clientID, []byte("client1key")); err != nil {
t.Fatalf("got error '%v' when inserting client", err)
}
if _, err := db.GetClientByID(clientID); !IsErrRecordNotFound(err) {
t.Fatalf("Uncommitted transaction: got error '%v' fetching client, want '%v' ", err, gorm.ErrRecordNotFound)
}
if err := txDb.Rollback(); err != nil {
t.Fatalf("got error '%v' when committing tx", err)
}
if _, err := db.GetClientByID(clientID); !IsErrRecordNotFound(err) {
t.Fatalf("Rollback transaction: got error '%v' fetching client, want '%v' ", err, gorm.ErrRecordNotFound)
}
})
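// Hedged sketch (comment only, not executed by the tests): the commit-or-rollback
// pattern exercised by the two subtests above, using only methods already defined
// on this database type (BeginTx, InsertClient, CommitTx, Rollback). The client
// name and key are illustrative placeholders and error handling is abbreviated.
//
//   txDb, err := db.BeginTx(context.Background(), &sql.TxOptions{})
//   if err != nil { /* handle */ }
//   if err := txDb.InsertClient("client1", e4crypto.HashIDAlias("client1"), []byte("client1key")); err != nil {
//       _ = txDb.Rollback() // discard the partial write
//   } else {
//       _ = txDb.CommitTx() // make the insert visible outside the transaction
//   }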
t.Run("LinkClient properly links a client to another", func(t *testing.T) {
db, tearDown := setup(t)
defer tearDown()
sourceClientName := "client1"
sourceClientID := e4crypto.HashIDAlias(sourceClientName)
targetClientName := "client2"
targetClientID := e4crypto.HashIDAlias(targetClientName)
db.InsertClient(sourceClientName, sourceClientID, []byte("client1key"))
db.InsertClient(targetClientName, targetClientID, []byte("client2key"))
sourceClient, err := db.GetClientByID(sourceClientID)
if err != nil {
t.Fatalf("failed to get sourceClient: %v", err)
}
targetClient, err := db.GetClientByID(targetClientID)
if err != nil {
t.Fatalf("failed to get sourceClient: %v", err)
}
if err := db.LinkClient(sourceClient, targetClient); err != nil {
t.Fatalf("failed to link clients: %v", err)
}
linked1Count, err := db.CountLinkedClients(targetClientID)
if err != nil {
t.Fatalf("failed to count linked clients for sourceClient: %v", err)
}
if linked1Count != 1 {
t.Fatalf("got %d linked clients, want %d", linked1Count, 1)
}
linked2Count, err := db.CountLinkedClients(sourceClientID)
if err != nil {
t.Fatalf("failed to count linked clients for targetClient: %v", err)
}
if linked2Count != 0 {
t.Fatalf("got %d linked clients, want %d", linked2Count, 0)
}
clients, err := db.GetLinkedClientsForClientByID(targetClientID, 0, 10)
if err != nil {
t.Fatalf("failed to get clients for client: %v", err)
}
want := []Client{sourceClient}
if !reflect.DeepEqual(clients, want) {
t.Fatalf("Invalid linked clients, got %#v, want %#v", clients, want)
}
clients2, err := db.GetLinkedClientsForClientByID(sourceClientID, 0, 10)
if err != nil {
t.Fatalf("failed to get clients for client: %v", err)
}
if g, w := len(clients2), 0; g != w {
t.Fatalf("Invalid linked clients count for targetClient, got %d, want %d", g, w)
}
if err := db.UnlinkClient(sourceClient, targetClient); err != nil {
t.Fatalf("failed to unlink clients: %v", err)
}
clients, err = db.GetLinkedClientsForClientByID(sourceClientID, 0, 10)
if err != nil {
t.Fatalf("failed to get clients for client: %v", err)
}
if len(clients) != 0 {
t.Fatalf("expected no linked clients, got %d", len(clients))
}
// Unlinking clients that are not linked is a no-op
if err := db.UnlinkClient(targetClient, sourceClient); err != nil {
t.Fatalf("failed to unlink clients: %v", err)
}
})
}
|
[
"\"C2TEST_POSTGRES\""
] |
[] |
[
"C2TEST_POSTGRES"
] |
[]
|
["C2TEST_POSTGRES"]
|
go
| 1 | 0 | |
clients/google-api-services-firebasedatabase/v1beta/1.31.0/com/google/api/services/firebasedatabase/v1beta/FirebaseRealtimeDatabase.java
|
/*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.firebasedatabase.v1beta;
/**
* Service definition for FirebaseRealtimeDatabase (v1beta).
*
* <p>
* The Firebase Realtime Database Management API enables programmatic provisioning and management of Realtime Database instances.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://firebase.google.com/docs/reference/rest/database/database-management/rest/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link FirebaseRealtimeDatabaseRequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class FirebaseRealtimeDatabase extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.31.0 of the Firebase Realtime Database Management API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://firebasedatabase.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://firebasedatabase.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public FirebaseRealtimeDatabase(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
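// Hedged construction sketch (illustrative only; not generated code): a minimal client
// built with the trusted transport and GSON factory mentioned in the constructor Javadoc
// above. The application name is an arbitrary example value.
//
//   FirebaseRealtimeDatabase firebasedatabase = new FirebaseRealtimeDatabase.Builder(
//       com.google.api.client.googleapis.javanet.GoogleNetHttpTransport.newTrustedTransport(),
//       new com.google.api.client.json.gson.GsonFactory(),
//       /* httpRequestInitializer= */ null)
//       .setApplicationName("my-app/1.0")
//       .build();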
/**
* @param builder builder
*/
FirebaseRealtimeDatabase(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Projects collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code FirebaseRealtimeDatabase firebasedatabase = new FirebaseRealtimeDatabase(...);}
* {@code FirebaseRealtimeDatabase.Projects.List request = firebasedatabase.projects().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Projects projects() {
return new Projects();
}
/**
* The "projects" collection of methods.
*/
public class Projects {
/**
* An accessor for creating requests from the Locations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code FirebaseRealtimeDatabase firebasedatabase = new FirebaseRealtimeDatabase(...);}
* {@code FirebaseRealtimeDatabase.Locations.List request = firebasedatabase.locations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Locations locations() {
return new Locations();
}
/**
* The "locations" collection of methods.
*/
public class Locations {
/**
* An accessor for creating requests from the Instances collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code FirebaseRealtimeDatabase firebasedatabase = new FirebaseRealtimeDatabase(...);}
* {@code FirebaseRealtimeDatabase.Instances.List request = firebasedatabase.instances().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Instances instances() {
return new Instances();
}
/**
* The "instances" collection of methods.
*/
public class Instances {
/**
* Requests that a new DatabaseInstance be created. The state of a successfully created
* DatabaseInstance is ACTIVE. Only available for projects on the Blaze plan. Projects can be
* upgraded using the Cloud Billing API
* https://cloud.google.com/billing/reference/rest/v1/projects/updateBillingInfo. Note that it might
* take a few minutes for billing enablement state to propagate to Firebase systems.
*
* Create a request for the method "instances.create".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param parent The parent project for which to create a database instance, in the form: `projects/{project-
* number}/locations/{location-id}`.
* @param content the {@link com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance}
* @return the request
*/
public Create create(java.lang.String parent, com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance content) throws java.io.IOException {
Create result = new Create(parent, content);
initialize(result);
return result;
}
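// Hedged usage sketch (illustrative only): creating a database instance under a parent
// project. The project number, location and database id are placeholder values, and
// DatabaseInstance is the generated model class referenced by this method.
//
//   com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance created =
//       firebasedatabase.projects().locations().instances()
//           .create("projects/123456/locations/us-central1",
//               new com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance())
//           .setDatabaseId("my-database-id")
//           .execute();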
public class Create extends FirebaseRealtimeDatabaseRequest<com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance> {
private static final String REST_PATH = "v1beta/{+parent}/instances";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Requests that a new DatabaseInstance be created. The state of a successfully created
* DatabaseInstance is ACTIVE. Only available for projects on the Blaze plan. Projects can be
* upgraded using the Cloud Billing API
* https://cloud.google.com/billing/reference/rest/v1/projects/updateBillingInfo. Note that it
* might take a few minutes for billing enablement state to propagate to Firebase systems.
*
* Create a request for the method "instances.create".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Create#execute()} method to invoke the remote operation.
* <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent The parent project for which to create a database instance, in the form: `projects/{project-
* number}/locations/{location-id}`.
* @param content the {@link com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance}
* @since 1.13
*/
protected Create(java.lang.String parent, com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance content) {
super(FirebaseRealtimeDatabase.this, "POST", REST_PATH, content, com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/**
* The parent project for which to create a database instance, in the form: `projects
* /{project-number}/locations/{location-id}`.
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** The parent project for which to create a database instance, in the form: `projects/{project-
number}/locations/{location-id}`.
*/
public java.lang.String getParent() {
return parent;
}
/**
* The parent project for which to create a database instance, in the form: `projects
* /{project-number}/locations/{location-id}`.
*/
public Create setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/** The globally unique identifier of the database instance. */
@com.google.api.client.util.Key
private java.lang.String databaseId;
/** The globally unique identifier of the database instance.
*/
public java.lang.String getDatabaseId() {
return databaseId;
}
/** The globally unique identifier of the database instance. */
public Create setDatabaseId(java.lang.String databaseId) {
this.databaseId = databaseId;
return this;
}
/** When set to true, the request will be validated but not submitted. */
@com.google.api.client.util.Key
private java.lang.Boolean validateOnly;
/** When set to true, the request will be validated but not submitted.
*/
public java.lang.Boolean getValidateOnly() {
return validateOnly;
}
/** When set to true, the request will be validated but not submitted. */
public Create setValidateOnly(java.lang.Boolean validateOnly) {
this.validateOnly = validateOnly;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Marks a DatabaseInstance to be deleted. The DatabaseInstance will be purged within 30 days. The
* default database cannot be deleted. IDs for deleted database instances may never be recovered or
* re-used. The Database may only be deleted if it is already in a DISABLED state.
*
* Create a request for the method "instances.delete".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param name The fully qualified resource name of the database instance, in the form: `projects/{project-
* number}/locations/{location-id}/instances/{database-id}`
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
public class Delete extends FirebaseRealtimeDatabaseRequest<com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance> {
private static final String REST_PATH = "v1beta/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
/**
* Marks a DatabaseInstance to be deleted. The DatabaseInstance will be purged within 30 days. The
* default database cannot be deleted. IDs for deleted database instances may never be recovered
* or re-used. The Database may only be deleted if it is already in a DISABLED state.
*
* Create a request for the method "instances.delete".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Delete#execute()} method to invoke the remote operation.
* <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The fully qualified resource name of the database instance, in the form: `projects/{project-
* number}/locations/{location-id}/instances/{database-id}`
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(FirebaseRealtimeDatabase.this, "DELETE", REST_PATH, null, com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/**
* The fully qualified resource name of the database instance, in the form: `projects
* /{project-number}/locations/{location-id}/instances/{database-id}`
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The fully qualified resource name of the database instance, in the form: `projects/{project-
number}/locations/{location-id}/instances/{database-id}`
*/
public java.lang.String getName() {
return name;
}
/**
* The fully qualified resource name of the database instance, in the form: `projects
* /{project-number}/locations/{location-id}/instances/{database-id}`
*/
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Disables a DatabaseInstance. The database can be re-enabled later using ReenableDatabaseInstance.
* When a database is disabled, all reads and writes are denied, including view access in the
* Firebase console.
*
* Create a request for the method "instances.disable".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Disable#execute()} method to invoke the remote operation.
*
* @param name The fully qualified resource name of the database instance, in the form: `projects/{project-
* number}/locations/{location-id}/instances/{database-id}`
* @param content the {@link com.google.api.services.firebasedatabase.v1beta.model.DisableDatabaseInstanceRequest}
* @return the request
*/
public Disable disable(java.lang.String name, com.google.api.services.firebasedatabase.v1beta.model.DisableDatabaseInstanceRequest content) throws java.io.IOException {
Disable result = new Disable(name, content);
initialize(result);
return result;
}
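// Hedged usage sketch (illustrative only): disabling an instance and re-enabling it later
// via instances.reenable, as described in the Javadoc above. The resource name is a
// placeholder; the request bodies are the generated model classes referenced here.
//
//   String name = "projects/123456/locations/us-central1/instances/my-database-id";
//   firebasedatabase.projects().locations().instances()
//       .disable(name, new com.google.api.services.firebasedatabase.v1beta.model.DisableDatabaseInstanceRequest())
//       .execute();
//   firebasedatabase.projects().locations().instances()
//       .reenable(name, new com.google.api.services.firebasedatabase.v1beta.model.ReenableDatabaseInstanceRequest())
//       .execute();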
public class Disable extends FirebaseRealtimeDatabaseRequest<com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance> {
private static final String REST_PATH = "v1beta/{+name}:disable";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
/**
* Disables a DatabaseInstance. The database can be re-enabled later using
* ReenableDatabaseInstance. When a database is disabled, all reads and writes are denied,
* including view access in the Firebase console.
*
* Create a request for the method "instances.disable".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Disable#execute()} method to invoke the remote operation.
* <p> {@link
* Disable#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The fully qualified resource name of the database instance, in the form: `projects/{project-
* number}/locations/{location-id}/instances/{database-id}`
* @param content the {@link com.google.api.services.firebasedatabase.v1beta.model.DisableDatabaseInstanceRequest}
* @since 1.13
*/
protected Disable(java.lang.String name, com.google.api.services.firebasedatabase.v1beta.model.DisableDatabaseInstanceRequest content) {
super(FirebaseRealtimeDatabase.this, "POST", REST_PATH, content, com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
}
}
@Override
public Disable set$Xgafv(java.lang.String $Xgafv) {
return (Disable) super.set$Xgafv($Xgafv);
}
@Override
public Disable setAccessToken(java.lang.String accessToken) {
return (Disable) super.setAccessToken(accessToken);
}
@Override
public Disable setAlt(java.lang.String alt) {
return (Disable) super.setAlt(alt);
}
@Override
public Disable setCallback(java.lang.String callback) {
return (Disable) super.setCallback(callback);
}
@Override
public Disable setFields(java.lang.String fields) {
return (Disable) super.setFields(fields);
}
@Override
public Disable setKey(java.lang.String key) {
return (Disable) super.setKey(key);
}
@Override
public Disable setOauthToken(java.lang.String oauthToken) {
return (Disable) super.setOauthToken(oauthToken);
}
@Override
public Disable setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Disable) super.setPrettyPrint(prettyPrint);
}
@Override
public Disable setQuotaUser(java.lang.String quotaUser) {
return (Disable) super.setQuotaUser(quotaUser);
}
@Override
public Disable setUploadType(java.lang.String uploadType) {
return (Disable) super.setUploadType(uploadType);
}
@Override
public Disable setUploadProtocol(java.lang.String uploadProtocol) {
return (Disable) super.setUploadProtocol(uploadProtocol);
}
/**
* The fully qualified resource name of the database instance, in the form: `projects
* /{project-number}/locations/{location-id}/instances/{database-id}`
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The fully qualified resource name of the database instance, in the form: `projects/{project-
number}/locations/{location-id}/instances/{database-id}`
*/
public java.lang.String getName() {
return name;
}
/**
* The fully qualified resource name of the database instance, in the form: `projects
* /{project-number}/locations/{location-id}/instances/{database-id}`
*/
public Disable setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Disable set(String parameterName, Object value) {
return (Disable) super.set(parameterName, value);
}
}
/**
* Gets the DatabaseInstance identified by the specified resource name.
*
* Create a request for the method "instances.get".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The fully qualified resource name of the database instance, in the form: `projects/{project-
* number}/locations/{location-id}/instances/{database-id}`. `database-id` is a globally
* unique identifier across all parent collections. For convenience, this method allows you
* to supply `-` as a wildcard character in place of specific collections under `projects`
* and `locations`. The resulting wildcarding form of the method is:
* `projects/-/locations/-/instances/{database-id}`.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
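// Hedged usage sketch (illustrative only): fetching an instance by its fully qualified
// name, using the `-` wildcard for project and location as allowed by the parameter
// documentation above. The database id is a placeholder.
//
//   com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance instance =
//       firebasedatabase.projects().locations().instances()
//           .get("projects/-/locations/-/instances/my-database-id")
//           .execute();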
public class Get extends FirebaseRealtimeDatabaseRequest<com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance> {
private static final String REST_PATH = "v1beta/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
/**
* Gets the DatabaseInstance identified by the specified resource name.
*
* Create a request for the method "instances.get".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Get#execute()} method to invoke the remote operation. <p>
* {@link Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The fully qualified resource name of the database instance, in the form: `projects/{project-
* number}/locations/{location-id}/instances/{database-id}`. `database-id` is a globally
* unique identifier across all parent collections. For convenience, this method allows you
* to supply `-` as a wildcard character in place of specific collections under `projects`
* and `locations`. The resulting wildcarding form of the method is:
* `projects/-/locations/-/instances/{database-id}`.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(FirebaseRealtimeDatabase.this, "GET", REST_PATH, null, com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/**
* The fully qualified resource name of the database instance, in the form: `projects
* /{project-number}/locations/{location-id}/instances/{database-id}`. `database-id` is a
* globally unique identifier across all parent collections. For convenience, this method
* allows you to supply `-` as a wildcard character in place of specific collections under
* `projects` and `locations`. The resulting wildcarding form of the method is:
* `projects/-/locations/-/instances/{database-id}`.
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The fully qualified resource name of the database instance, in the form: `projects/{project-
number}/locations/{location-id}/instances/{database-id}`. `database-id` is a globally unique
identifier across all parent collections. For convenience, this method allows you to supply `-` as
a wildcard character in place of specific collections under `projects` and `locations`. The
resulting wildcarding form of the method is: `projects/-/locations/-/instances/{database-id}`.
*/
public java.lang.String getName() {
return name;
}
/**
* The fully qualified resource name of the database instance, in the form: `projects
* /{project-number}/locations/{location-id}/instances/{database-id}`. `database-id` is a
* globally unique identifier across all parent collections. For convenience, this method
* allows you to supply `-` as a wildcard character in place of specific collections under
* `projects` and `locations`. The resulting wildcarding form of the method is:
* `projects/-/locations/-/instances/{database-id}`.
*/
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists each DatabaseInstance associated with the specified parent project. The list items are
* returned in no particular order, but will be a consistent view of the database instances when
* additional requests are made with a `pageToken`. The resulting list contains instances in any
* STATE. The list results may be stale by a few seconds. Use GetDatabaseInstance for consistent
* reads.
*
* Create a request for the method "instances.list".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent The parent project for which to list database instances, in the form: `projects/{project-
* number}/locations/{location-id}` To list across all locations, use a parent in the form:
* `projects/{project-number}/locations/-`
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
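// Hedged usage sketch (illustrative only): listing instances across all locations of a
// project with a small page size. The accessors on ListDatabaseInstancesResponse (for
// example a getNextPageToken() getter) are assumptions about the generated model and are
// not defined in this file.
//
//   com.google.api.services.firebasedatabase.v1beta.model.ListDatabaseInstancesResponse response =
//       firebasedatabase.projects().locations().instances()
//           .list("projects/123456/locations/-")
//           .setPageSize(10)
//           .execute();
//   // Pass the returned next-page token (assumed getNextPageToken()) to setPageToken(...)
//   // on a follow-up List request to fetch subsequent pages.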
public class List extends FirebaseRealtimeDatabaseRequest<com.google.api.services.firebasedatabase.v1beta.model.ListDatabaseInstancesResponse> {
private static final String REST_PATH = "v1beta/{+parent}/instances";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Lists each DatabaseInstance associated with the specified parent project. The list items are
* returned in no particular order, but will be a consistent view of the database instances when
* additional requests are made with a `pageToken`. The resulting list contains instances in any
* STATE. The list results may be stale by a few seconds. Use GetDatabaseInstance for consistent
* reads.
*
* Create a request for the method "instances.list".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link List#execute()} method to invoke the remote operation. <p>
* {@link List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent The parent project for which to list database instances, in the form: `projects/{project-
* number}/locations/{location-id}` To list across all locations, use a parent in the form:
* `projects/{project-number}/locations/-`
* @since 1.13
*/
protected List(java.lang.String parent) {
super(FirebaseRealtimeDatabase.this, "GET", REST_PATH, null, com.google.api.services.firebasedatabase.v1beta.model.ListDatabaseInstancesResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/**
* The parent project for which to list database instances, in the form: `projects
* /{project-number}/locations/{location-id}` To list across all locations, use a parent
* in the form: `projects/{project-number}/locations/-`
*/
@com.google.api.client.util.Key
private java.lang.String parent;
/** The parent project for which to list database instances, in the form: `projects/{project-
number}/locations/{location-id}` To list across all locations, use a parent in the form: `projects
/{project-number}/locations/-`
*/
public java.lang.String getParent() {
return parent;
}
/**
* The parent project for which to list database instances, in the form: `projects
* /{project-number}/locations/{location-id}` To list across all locations, use a parent
* in the form: `projects/{project-number}/locations/-`
*/
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/**
* The maximum number of database instances to return in the response. The server may
* return fewer than this at its discretion. If no value is specified (or too large a
* value is specified), then the server will impose its own limit.
*/
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of database instances to return in the response. The server may return fewer
than this at its discretion. If no value is specified (or too large a value is specified), then the
server will impose its own limit.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/**
* The maximum number of database instances to return in the response. The server may
* return fewer than this at its discretion. If no value is specified (or too large a
* value is specified), then the server will impose its own limit.
*/
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* Token returned from a previous call to `ListDatabaseInstances` indicating where in the
* set of database instances to resume listing.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** Token returned from a previous call to `ListDatabaseInstances` indicating where in the set of
database instances to resume listing.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* Token returned from a previous call to `ListDatabaseInstances` indicating where in the
* set of database instances to resume listing.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Enables a DatabaseInstance. The database must have been disabled previously using
* DisableDatabaseInstance. The state of a successfully reenabled DatabaseInstance is ACTIVE.
*
* Create a request for the method "instances.reenable".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Reenable#execute()} method to invoke the remote operation.
*
* @param name The fully qualified resource name of the database instance, in the form: `projects/{project-
* number}/locations/{location-id}/instances/{database-id}`
* @param content the {@link com.google.api.services.firebasedatabase.v1beta.model.ReenableDatabaseInstanceRequest}
* @return the request
*/
public Reenable reenable(java.lang.String name, com.google.api.services.firebasedatabase.v1beta.model.ReenableDatabaseInstanceRequest content) throws java.io.IOException {
Reenable result = new Reenable(name, content);
initialize(result);
return result;
}
public class Reenable extends FirebaseRealtimeDatabaseRequest<com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance> {
private static final String REST_PATH = "v1beta/{+name}:reenable";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
/**
* Enables a DatabaseInstance. The database must have been disabled previously using
* DisableDatabaseInstance. The state of a successfully reenabled DatabaseInstance is ACTIVE.
*
* Create a request for the method "instances.reenable".
*
* This request holds the parameters needed by the firebasedatabase server. After setting any
* optional parameters, call the {@link Reenable#execute()} method to invoke the remote operation.
* <p> {@link
* Reenable#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)}
* must be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The fully qualified resource name of the database instance, in the form: `projects/{project-
* number}/locations/{location-id}/instances/{database-id}`
* @param content the {@link com.google.api.services.firebasedatabase.v1beta.model.ReenableDatabaseInstanceRequest}
* @since 1.13
*/
protected Reenable(java.lang.String name, com.google.api.services.firebasedatabase.v1beta.model.ReenableDatabaseInstanceRequest content) {
super(FirebaseRealtimeDatabase.this, "POST", REST_PATH, content, com.google.api.services.firebasedatabase.v1beta.model.DatabaseInstance.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
}
}
@Override
public Reenable set$Xgafv(java.lang.String $Xgafv) {
return (Reenable) super.set$Xgafv($Xgafv);
}
@Override
public Reenable setAccessToken(java.lang.String accessToken) {
return (Reenable) super.setAccessToken(accessToken);
}
@Override
public Reenable setAlt(java.lang.String alt) {
return (Reenable) super.setAlt(alt);
}
@Override
public Reenable setCallback(java.lang.String callback) {
return (Reenable) super.setCallback(callback);
}
@Override
public Reenable setFields(java.lang.String fields) {
return (Reenable) super.setFields(fields);
}
@Override
public Reenable setKey(java.lang.String key) {
return (Reenable) super.setKey(key);
}
@Override
public Reenable setOauthToken(java.lang.String oauthToken) {
return (Reenable) super.setOauthToken(oauthToken);
}
@Override
public Reenable setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Reenable) super.setPrettyPrint(prettyPrint);
}
@Override
public Reenable setQuotaUser(java.lang.String quotaUser) {
return (Reenable) super.setQuotaUser(quotaUser);
}
@Override
public Reenable setUploadType(java.lang.String uploadType) {
return (Reenable) super.setUploadType(uploadType);
}
@Override
public Reenable setUploadProtocol(java.lang.String uploadProtocol) {
return (Reenable) super.setUploadProtocol(uploadProtocol);
}
/**
* The fully qualified resource name of the database instance, in the form: `projects
* /{project-number}/locations/{location-id}/instances/{database-id}`
*/
@com.google.api.client.util.Key
private java.lang.String name;
/** The fully qualified resource name of the database instance, in the form: `projects/{project-
number}/locations/{location-id}/instances/{database-id}`
*/
public java.lang.String getName() {
return name;
}
/**
* The fully qualified resource name of the database instance, in the form: `projects
* /{project-number}/locations/{location-id}/instances/{database-id}`
*/
public Reenable setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/instances/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Reenable set(String parameterName, Object value) {
return (Reenable) super.set(parameterName, value);
}
}
}
}
}
/**
* Builder for {@link FirebaseRealtimeDatabase}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
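// Hedged example (illustrative only): with GOOGLE_API_USE_MTLS_ENDPOINT=always set in the
// process environment, chooseEndpoint(...) above resolves to DEFAULT_MTLS_ROOT_URL, so a
// builder created without an explicit setRootUrl(...) call targets the mTLS host.
//
//   FirebaseRealtimeDatabase client =
//       new FirebaseRealtimeDatabase.Builder(transport, jsonFactory, null)
//           .setApplicationName("my-app/1.0")
//           .build();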
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link FirebaseRealtimeDatabase}. */
@Override
public FirebaseRealtimeDatabase build() {
return new FirebaseRealtimeDatabase(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link FirebaseRealtimeDatabaseRequestInitializer}.
*
* @since 1.12
*/
public Builder setFirebaseRealtimeDatabaseRequestInitializer(
FirebaseRealtimeDatabaseRequestInitializer firebaserealtimedatabaseRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(firebaserealtimedatabaseRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
}
|
[
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
] |
[] |
[
"GOOGLE_API_USE_MTLS_ENDPOINT"
] |
[]
|
["GOOGLE_API_USE_MTLS_ENDPOINT"]
|
java
| 1 | 0 | |
misc/docker/dock.go
|
/*
Copyright 2015 The Camlistore Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Command dock builds Camlistore's various Docker images.
// It can also generate a tarball of the Camlistore server and tools.
package main // import "camlistore.org/misc/docker"
import (
"archive/tar"
"archive/zip"
"compress/gzip"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"camlistore.org/pkg/osutil"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/cloud"
"google.golang.org/cloud/storage"
)
var (
flagRev = flag.String("rev", "", "Camlistore revision to build (tag or commit hash). For development purposes, you can instead specify the path to a local Camlistore source tree from which to build, with the form \"WIP:/path/to/dir\".")
flagVersion = flag.String("tarball_version", "", "For --build_release mode, the version number (e.g. 0.9) used for the release tarball name. It also defines the destination directory where the release tarball is uploaded.")
buildOS = flag.String("os", runtime.GOOS, "Operating system to build for. Requires --build_release.")
doImage = flag.Bool("build_image", true, "build the Camlistore server as a docker image. Conflicts with --build_release.")
doUpload = flag.Bool("upload", false, "With build_image, upload a snapshot of the server in docker as a tarball to https://storage.googleapis.com/camlistore-release/docker/. With build_release, upload the generated tarball at https://storage.googleapis.com/camlistore-release/dl/VERSION/.")
doBinaries = flag.Bool("build_release", false, "build the Camlistore server and tools as standalone binaries to a tarball in misc/docker/release. Requires --build_image=false.")
doZipSource = flag.Bool("zip_source", false, "pack the Camlistore source for a release in a zip file in misc/docker/release. Requires --build_image=false.")
flagSanity = flag.Bool("sanity", true, "When doing --zip_source, check the source used is buildable with \"go run make.go\".")
)
// buildDockerImage builds a docker image from the Dockerfile located in
// imageDir, which is a path relative to dockDir. The image will be named after
// imageName. dockDir must have been set beforehand.
func buildDockerImage(imageDir, imageName string) {
if dockDir == "" {
panic("dockDir should be set before calling buildDockerImage")
}
cmd := exec.Command("docker", "build", "-t", imageName, ".")
cmd.Dir = filepath.Join(dockDir, imageDir)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("Error building docker image %v: %v", imageName, err)
}
}
var (
dockDir string
releaseTarball string // file path to the tarball generated with -build_release or -zip_source
)
const (
goDockerImage = "camlistore/go"
djpegDockerImage = "camlistore/djpeg"
zoneinfoDockerImage = "camlistore/zoneinfo"
serverImage = "camlistore/server"
goCmd = "/usr/local/go/bin/go"
// Paths to where the Camlistore build programs are mounted on the camlistore/go image.
genCamliProgram = "/usr/local/bin/build-camlistore-server.go"
genBinariesProgram = "/usr/local/bin/build-binaries.go"
zipSourceProgram = "/usr/local/bin/zip-source.go"
)
func isWIP() bool {
return strings.HasPrefix(*flagRev, "WIP")
}
// localCamliSource returns the path to the local Camlistore source tree
// that should be specified in *flagRev if *flagRev starts with "WIP:",
// empty string otherwise.
func localCamliSource() string {
if !isWIP() {
return ""
}
return strings.TrimPrefix(*flagRev, "WIP:")
}
func rev() string {
if isWIP() {
return "WORKINPROGRESS"
}
return *flagRev
}
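// genCamlistore runs the camlistore/go image to build the Camlistore server,
// mounting ctxDir/camlistore.org as the output directory and, for WIP builds,
// the local source tree as a read-only input.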
func genCamlistore(ctxDir string) {
check(os.Mkdir(filepath.Join(ctxDir, "/camlistore.org"), 0755))
args := []string{
"run",
"--rm",
"--volume=" + ctxDir + "/camlistore.org:/OUT",
"--volume=" + path.Join(dockDir, "server/build-camlistore-server.go") + ":" + genCamliProgram + ":ro",
}
if isWIP() {
args = append(args, "--volume="+localCamliSource()+":/IN:ro",
goDockerImage, goCmd, "run", genCamliProgram, "--rev=WIP:/IN")
} else {
args = append(args, goDockerImage, goCmd, "run", genCamliProgram, "--rev="+rev())
}
cmd := exec.Command("docker", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("Error building camlistored in go container: %v", err)
}
}
func genBinaries(ctxDir string) {
check(os.Mkdir(filepath.Join(ctxDir, "/camlistore.org"), 0755))
image := goDockerImage
args := []string{
"run",
"--rm",
"--volume=" + ctxDir + "/camlistore.org:/OUT",
"--volume=" + path.Join(dockDir, "release/build-binaries.go") + ":" + genBinariesProgram + ":ro",
}
if isWIP() {
args = append(args, "--volume="+localCamliSource()+":/IN:ro",
image, goCmd, "run", genBinariesProgram, "--rev=WIP:/IN", "--os="+*buildOS)
} else {
args = append(args, image, goCmd, "run", genBinariesProgram, "--rev="+rev(), "--os="+*buildOS)
}
if *flagVersion != "" {
args = append(args, "--version="+*flagVersion)
}
cmd := exec.Command("docker", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("Error building binaries in go container: %v", err)
}
fmt.Printf("Camlistore binaries successfully generated in %v\n", filepath.Join(ctxDir, "camlistore.org", "bin"))
}
func zipSource(ctxDir string) {
image := goDockerImage
args := []string{
"run",
"--rm",
"--volume=" + ctxDir + ":/OUT",
"--volume=" + path.Join(dockDir, "release/zip-source.go") + ":" + zipSourceProgram + ":ro",
}
if isWIP() {
args = append(args, "--volume="+localCamliSource()+":/IN:ro",
image, goCmd, "run", zipSourceProgram, "--rev=WIP:/IN")
} else {
args = append(args, image, goCmd, "run", zipSourceProgram, "--rev="+rev())
}
if *flagVersion != "" {
args = append(args, "--version="+*flagVersion)
}
if !*flagSanity {
args = append(args, "--sanity=false")
}
cmd := exec.Command("docker", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("Error zipping Camlistore source in go container: %v", err)
}
setReleaseTarballName()
// can't use os.Rename because it would likely fail with an "invalid cross-device link" error
cmd = exec.Command("mv", filepath.Join(ctxDir, "camlistore-src.zip"), releaseTarball)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("Error moving source zip from %v to %v: %v", filepath.Join(ctxDir, "camlistore-src.zip"), releaseTarball, err)
}
fmt.Printf("Camlistore source successfully zipped in %v\n", releaseTarball)
}
func copyFinalDockerfile(ctxDir string) {
// Copy Dockerfile into the temp dir.
serverDockerFile, err := ioutil.ReadFile(filepath.Join(dockDir, "server", "Dockerfile"))
check(err)
check(ioutil.WriteFile(filepath.Join(ctxDir, "Dockerfile"), serverDockerFile, 0644))
}
func genDjpeg(ctxDir string) {
cmd := exec.Command("docker", "run",
"--rm",
"--volume="+ctxDir+":/OUT",
djpegDockerImage, "/bin/bash", "-c", "mkdir -p /OUT && cp /src/libjpeg-turbo-1.4.1/djpeg /OUT/djpeg")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("Error building djpeg in go container: %v", err)
}
}
func genZoneinfo(ctxDir string) {
cmd := exec.Command("docker", "run",
"--rm",
"--volume="+ctxDir+":/OUT",
zoneinfoDockerImage, "/bin/bash", "-c", "mkdir -p /OUT && cp -a /usr/share/zoneinfo /OUT/zoneinfo")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("Error generating zoneinfo in go container: %v", err)
}
}
func buildServer(ctxDir string) {
copyFinalDockerfile(ctxDir)
cmd := exec.Command("docker", "build", "-t", serverImage, ".")
cmd.Dir = ctxDir
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatalf("Error building %v: %v", serverImage, err)
}
}
func publicACL(proj string) []storage.ACLRule {
return []storage.ACLRule{
// If you don't give the owners access, the web UI seems to
// have a bug and doesn't see that the object is public, so it
// won't render the "Shared Publicly" link. So we grant owner
// access, even though it's otherwise unnecessary:
{Entity: storage.ACLEntity("project-owners-" + proj), Role: storage.RoleOwner},
{Entity: storage.AllUsers, Role: storage.RoleReader},
}
}
// uploadReleaseTarball uploads the generated tarball of binaries to
// camlistore-release/VERSION/camlistoreVERSION-REV-CONTENTS.EXT. It then makes a copy in
// the same bucket and path, named camlistoreVERSION-CONTENTS.EXT.
func uploadReleaseTarball() {
proj := "camlistore-website"
bucket := "camlistore-release"
tarball := *flagVersion + "/" + filepath.Base(releaseTarball)
versionedTarball := strings.Replace(tarball, "camlistore"+*flagVersion, "camlistore"+*flagVersion+"-"+rev(), 1)
log.Printf("Uploading %s/%s ...", bucket, versionedTarball)
ts, err := tokenSource(bucket)
if err != nil {
log.Fatal(err)
}
ctx := context.Background()
stoClient, err := storage.NewClient(ctx, cloud.WithTokenSource(ts), cloud.WithBaseHTTP(oauth2.NewClient(ctx, ts)))
if err != nil {
log.Fatal(err)
}
w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
w.ACL = publicACL(proj)
w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
contentType := "application/x-gtar"
if *buildOS == "windows" {
contentType = "application/zip"
}
w.ContentType = contentType
src, err := os.Open(releaseTarball)
if err != nil {
log.Fatal(err)
}
defer src.Close()
if _, err := io.Copy(w, src); err != nil {
log.Fatalf("io.Copy: %v", err)
}
if err := w.Close(); err != nil {
log.Fatalf("closing GCS storage writer: %v", err)
}
log.Printf("Uploaded tarball to %s", versionedTarball)
if !isWIP() {
log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
dest := stoClient.Bucket(bucket).Object(tarball)
if _, err := stoClient.Bucket(bucket).Object(versionedTarball).CopyTo(
ctx,
dest,
&storage.ObjectAttrs{
ACL: publicACL(proj),
ContentType: contentType,
}); err != nil {
log.Fatalf("Error uploading %v: %v", tarball, err)
}
log.Printf("Uploaded tarball to %s", tarball)
}
}
// uploadDockerImage makes a tar.gz snapshot of the camlistored docker image
// and uploads it to camlistore-release/docker/camlistored-REV.tar.gz. It then
// makes a copy in the same bucket and path, named camlistored.tar.gz.
func uploadDockerImage() {
proj := "camlistore-website"
bucket := "camlistore-release"
versionedTarball := "docker/camlistored-" + rev() + ".tar.gz"
tarball := "docker/camlistored.tar.gz"
log.Printf("Uploading %s/%s ...", bucket, versionedTarball)
ts, err := tokenSource(bucket)
if err != nil {
log.Fatal(err)
}
ctx := context.Background()
stoClient, err := storage.NewClient(ctx, cloud.WithTokenSource(ts), cloud.WithBaseHTTP(oauth2.NewClient(ctx, ts)))
if err != nil {
log.Fatal(err)
}
w := stoClient.Bucket(bucket).Object(versionedTarball).NewWriter(ctx)
w.ACL = publicACL(proj)
w.CacheControl = "no-cache" // TODO: remove for non-tip releases? set expirations?
w.ContentType = "application/x-gtar"
dockerSave := exec.Command("docker", "save", serverImage)
dockerSave.Stderr = os.Stderr
tar, err := dockerSave.StdoutPipe()
if err != nil {
log.Fatal(err)
}
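// Gzip the "docker save" tar stream on the fly in a goroutine and expose the
// compressed stream as targz, which is then copied into the GCS object writer.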
targz, pw := io.Pipe()
go func() {
zw := gzip.NewWriter(pw)
n, err := io.Copy(zw, tar)
if err != nil {
log.Fatalf("Error copying to gzip writer: after %d bytes, %v", n, err)
}
if err := zw.Close(); err != nil {
log.Fatalf("gzip.Close: %v", err)
}
pw.CloseWithError(err)
}()
if err := dockerSave.Start(); err != nil {
log.Fatalf("Error starting docker save %v: %v", serverImage, err)
}
if _, err := io.Copy(w, targz); err != nil {
log.Fatalf("io.Copy: %v", err)
}
if err := w.Close(); err != nil {
log.Fatalf("closing GCS storage writer: %v", err)
}
if err := dockerSave.Wait(); err != nil {
log.Fatalf("Error waiting for docker save %v: %v", serverImage, err)
}
log.Printf("Uploaded tarball to %s", versionedTarball)
if !isWIP() {
log.Printf("Copying tarball to %s/%s ...", bucket, tarball)
dest := stoClient.Bucket(bucket).Object(tarball)
if _, err := stoClient.Bucket(bucket).Object(versionedTarball).CopyTo(
ctx,
dest,
&storage.ObjectAttrs{
ACL: publicACL(proj),
CacheControl: "no-cache",
ContentType: "application/x-gtar",
}); err != nil {
log.Fatalf("Error uploading %v: %v", tarball, err)
}
log.Printf("Uploaded tarball to %s", tarball)
}
}
func exeName(s string) string {
if *buildOS == "windows" {
return s + ".exe"
}
return s
}
// setReleaseTarballName sets releaseTarball.
func setReleaseTarballName() {
var filename, extension, contents string
if *doZipSource {
contents = "src"
} else {
contents = *buildOS
}
if *buildOS == "windows" || contents == "src" {
extension = ".zip"
} else {
extension = ".tar.gz"
}
if *flagVersion != "" {
filename = "camlistore" + *flagVersion + "-" + contents + extension
} else {
filename = "camlistore-" + contents + extension
}
releaseTarball = path.Join(dockDir, "release", filename)
}
func packBinaries(ctxDir string) {
binaries := map[string]bool{
exeName("camlistored"): false,
exeName("camget"): false,
exeName("camput"): false,
exeName("camtool"): false,
exeName("publisher"): false,
}
switch *buildOS {
case "linux", "darwin":
binaries["cammount"] = false
}
toPack := func(bin string) bool {
for k := range binaries {
if bin == k {
binaries[k] = true
return true
}
}
return false
}
defer func() {
for name, found := range binaries {
if !found {
log.Fatalf("%v was not packed in tarball", name)
}
}
fmt.Printf("Camlistore binaries successfully packed in %v\n", releaseTarball)
}()
binDir := path.Join(ctxDir, "camlistore.org", "bin")
check(os.Chdir(binDir))
dir, err := os.Open(binDir)
check(err)
defer dir.Close()
setReleaseTarballName()
if *buildOS == "windows" {
fw, err := os.Create(releaseTarball)
check(err)
defer func() {
check(fw.Close())
}()
w := zip.NewWriter(fw)
defer func() {
check(w.Close())
}()
names, err := dir.Readdirnames(-1)
check(err)
for _, name := range names {
if !toPack(name) {
continue
}
b, err := ioutil.ReadFile(path.Join(binDir, name))
check(err)
f, err := w.Create(name)
check(err)
_, err = f.Write(b)
check(err)
}
return
}
fw, err := os.Create(releaseTarball)
check(err)
defer func() {
check(fw.Close())
}()
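// Write the tar archive in a goroutine and stream it through a pipe into the
// gzip writer below, so the tarball is never held fully in memory.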
pr, pw := io.Pipe()
go func() {
tw := tar.NewWriter(pw)
fis, err := dir.Readdir(-1)
check(err)
for _, file := range fis {
if !toPack(file.Name()) {
continue
}
hdr, err := tar.FileInfoHeader(file, "")
check(err)
check(tw.WriteHeader(hdr))
fr, err := os.Open(file.Name())
check(err)
n, err := io.Copy(tw, fr)
check(err)
fr.Close()
if n != file.Size() {
log.Fatalf("failed to tar all of %v; got %v, wanted %v", file.Name(), n, file.Size())
}
}
check(tw.Close())
check(pw.CloseWithError(io.EOF))
}()
zw := gzip.NewWriter(fw)
n, err := io.Copy(zw, pr)
if err != nil {
log.Fatalf("Error copying to gzip writer: after %d bytes, %v", n, err)
}
if err := zw.Close(); err != nil {
log.Fatalf("gzip.Close: %v", err)
}
}
func usage() {
fmt.Fprintf(os.Stderr, "Usage:\n")
fmt.Fprintf(os.Stderr, "%s [-rev camlistore_revision | -rev WIP:/path/to/camli/source]\n", os.Args[0])
flag.PrintDefaults()
os.Exit(1)
}
// TODO(mpl): I copied numSet from genconfig.go. Move it to some *util package? go4.org?
func numSet(vv ...interface{}) (num int) {
for _, vi := range vv {
switch v := vi.(type) {
case string:
if v != "" {
num++
}
case bool:
if v {
num++
}
default:
panic("unknown type")
}
}
return
}
func checkFlags() {
if flag.NArg() != 0 {
usage()
}
if *flagRev == "" {
fmt.Fprintf(os.Stderr, "Usage error: --rev is required.\n")
usage()
}
numModes := numSet(*doBinaries, *doImage, *doZipSource)
if numModes != 1 {
fmt.Fprintf(os.Stderr, "Usage error: --build_release, --build_image, and --zip_source are mutually exclusive.\n")
usage()
}
if (*doBinaries || *doZipSource) && *doUpload && *flagVersion == "" {
fmt.Fprintf(os.Stderr, "Usage error: --tarball_version required for uploading the release tarball.\n")
usage()
}
if *doImage && *flagVersion != "" {
fmt.Fprintf(os.Stderr, "Usage error: --tarball_version not applicable in --build_image mode.\n")
usage()
}
if isWIP() {
if _, err := os.Stat(localCamliSource()); err != nil {
fmt.Fprintf(os.Stderr, "Usage error: could not stat path %q provided with --rev: %v", localCamliSource(), err)
usage()
}
}
}
func main() {
flag.Usage = usage
flag.Parse()
checkFlags()
camDir, err := osutil.GoPackagePath("camlistore.org")
if err != nil {
log.Fatalf("Error looking up camlistore.org dir: %v", err)
}
dockDir = filepath.Join(camDir, "misc", "docker")
buildDockerImage("go", goDockerImage)
// ctxDir is where we run "docker build" to produce the final
// "FROM scratch" Docker image.
ctxDir, err := ioutil.TempDir("", "camli-build")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(ctxDir)
switch {
case *doImage:
buildDockerImage("djpeg-static", djpegDockerImage)
buildDockerImage("zoneinfo", zoneinfoDockerImage)
genCamlistore(ctxDir)
genDjpeg(ctxDir)
genZoneinfo(ctxDir)
buildServer(ctxDir)
case *doBinaries:
genBinaries(ctxDir)
packBinaries(ctxDir)
case *doZipSource:
zipSource(ctxDir)
}
if !*doUpload {
return
}
if *doImage {
uploadDockerImage()
} else {
uploadReleaseTarball()
}
}
func check(err error) {
if err != nil {
log.Fatal(err)
}
}
func homedir() string {
if runtime.GOOS == "windows" {
return os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
}
return os.Getenv("HOME")
}
// ProjectTokenSource returns an OAuth2 TokenSource for the given Google Project ID.
func ProjectTokenSource(proj string, scopes ...string) (oauth2.TokenSource, error) {
// TODO(bradfitz): try different strategies too, like
// three-legged flow if the service account doesn't exist, and
// then cache the token file on disk somewhere. Or maybe that should be an
// option, for environments without stdin/stdout available to the user.
// We'll figure it out as needed.
fileName := filepath.Join(homedir(), "keys", proj+".key.json")
jsonConf, err := ioutil.ReadFile(fileName)
if err != nil {
if os.IsNotExist(err) {
return nil, fmt.Errorf("Missing JSON key configuration. Download the Service Account JSON key from https://console.developers.google.com/project/%s/apiui/credential and place it at %s", proj, fileName)
}
return nil, err
}
conf, err := google.JWTConfigFromJSON(jsonConf, scopes...)
if err != nil {
return nil, fmt.Errorf("reading JSON config from %s: %v", fileName, err)
}
return conf.TokenSource(oauth2.NoContext), nil
}
var bucketProject = map[string]string{
"camlistore-release": "camlistore-website",
}
func tokenSource(bucket string) (oauth2.TokenSource, error) {
proj, ok := bucketProject[bucket]
if !ok {
return nil, fmt.Errorf("unknown project for bucket %q", bucket)
}
return ProjectTokenSource(proj, storage.ScopeReadWrite)
}
|
[
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"HOME\""
] |
[] |
[
"HOME",
"HOMEPATH",
"HOMEDRIVE"
] |
[]
|
["HOME", "HOMEPATH", "HOMEDRIVE"]
|
go
| 3 | 0 | |
samples/snippets/create_training_pipeline_image_classification_sample_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from uuid import uuid4
import pytest
import create_training_pipeline_image_classification_sample
import helpers
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
DATASET_ID = "1084241610289446912" # Permanent 50 Flowers Dataset
DISPLAY_NAME = f"temp_create_training_pipeline_image_classification_test_{uuid4()}"
@pytest.fixture(scope="function", autouse=True)
def teardown(teardown_training_pipeline):
yield
@pytest.mark.skip(reason="https://github.com/googleapis/java-aiplatform/issues/420")
def test_ucaip_generated_create_training_pipeline_image_classification_sample(
capsys, shared_state
):
create_training_pipeline_image_classification_sample.create_training_pipeline_image_classification_sample(
project=PROJECT_ID,
display_name=DISPLAY_NAME,
dataset_id=DATASET_ID,
model_display_name=f"Temp Model for {DISPLAY_NAME}",
)
out, _ = capsys.readouterr()
assert "response:" in out
# Save resource name of the newly created training pipeline
shared_state["training_pipeline_name"] = helpers.get_name(out)
|
[] |
[] |
[
"BUILD_SPECIFIC_GCLOUD_PROJECT"
] |
[]
|
["BUILD_SPECIFIC_GCLOUD_PROJECT"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"path"
"strings"
"time"
)
var baseUrl = "https://api.github.com/repos/"
var cacheLocation = "/tmp/gh-issues-to-rss-cache"
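// getModesFromList maps the short mode codes (io: issues opened, ic: issues closed,
// po: PRs opened, pc: PRs closed) onto an RssModes value.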
func getModesFromList(m []string) RssModes {
modes := RssModes{false, false, false, false}
for _, entry := range m {
switch entry {
case "io":
modes.IssueOpen = true
case "ic":
modes.IssuesClosed = true
case "po":
modes.PROpen = true
case "pc":
modes.PRClosed = true
}
}
return modes
}
func setupResponse(w *http.ResponseWriter, req *http.Request) {
(*w).Header().Set("Access-Control-Allow-Origin", "*")
(*w).Header().Set("Access-Control-Allow-Methods", "GET, OPTIONS")
(*w).Header().Set("Access-Control-Allow-Headers", "*")
}
func handler(w http.ResponseWriter, r *http.Request) {
setupResponse(&w, r)
if (*r).Method == "OPTIONS" {
return
}
if r.Method != "GET" {
http.Error(w, "Method is not supported", http.StatusNotFound)
return
}
url := r.URL.Path
if url == "/" {
data, err := ioutil.ReadFile("index.html")
if err != nil {
http.Error(w, "Unable to fetch index.html", http.StatusNotFound)
return
}
http.ServeContent(w, r, "index.html", time.Now(), bytes.NewReader(data))
return
}
if url == "/_ping" {
io.WriteString(w, "PONG")
return
}
params := r.URL.Query()
m, ok := params["m"]
modes := RssModes{true, true, true, true}
if ok {
modes = getModesFromList(m)
}
labels := params["l"]
splits := strings.Split(url, "/")
if len(splits) != 3 { // url starts with /
http.Error(w, "Invalid request: call `<url>/org/repo`", http.StatusBadRequest)
return
}
repo := splits[1] + "/" + splits[2]
rss, err := getIssueFeed(repo, modes, labels)
if err != nil {
http.Error(w, "Unable to fetch atom feed", http.StatusNotFound)
return
}
fmt.Println(time.Now().Format("2006-01-02 15:04:05"), "[OK]", repo)
io.WriteString(w, rss)
}
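// getCliArgs parses the command line and returns the target repo, the RSS
// modes, the label filters, whether to run as a server, and whether the
// arguments were valid.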
func getCliArgs() (string, RssModes, []string, bool, bool) {
var modes string
var labels string
var server bool
flag.StringVar(&modes, "m", "", "Comma separated list of modes [io,ic,po,pc]")
flag.StringVar(&labels, "l", "", "Comma separated list of labels")
flag.BoolVar(&server, "server", false, "run as an HTTP server instead of printing a single feed")
flag.Parse() // after declaring flags we need to call it
if !server && len(flag.Args()) != 1 {
return "", RssModes{}, nil, false, false
}
modeItems := RssModes{true, true, true, true}
if modes != "" {
modeItems = getModesFromList(strings.Split(modes, ","))
}
var labelItems []string
if labels != "" {
labelItems = strings.Split(labels, ",")
}
var repo = ""
if !server {
repo = flag.Args()[0]
}
return repo, modeItems, labelItems, server, true
}
func main() {
flag.Usage = func() {
fmt.Println(path.Base(os.Args[0]), "[FLAGS] [repo] [--server]")
flag.PrintDefaults()
}
var repo, modes, labels, server, valid = getCliArgs()
if !valid {
flag.Usage()
os.Exit(1)
}
if !server {
atom, err := getIssueFeed(repo, modes, labels)
if err != nil {
log.Fatal("Unable to create feed for repo", repo, ":", err)
}
fmt.Println(atom)
} else {
http.HandleFunc("/", handler)
// Serve using http.DefaultServeMux.
port := os.Getenv("PORT")
if port == "" {
port = ":8080"
} else {
port = ":" + port
}
fmt.Println("Starting server on", port)
err := http.ListenAndServe(port, nil)
if err != nil {
log.Fatal(err)
}
}
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
src/atlinter/vendor/MaskFlowNet/predict_new_data.py
|
# SPDX-License-Identifier: MIT
# Source: https://github.com/microsoft/MaskFlownet/tree/5cba12772e2201f0d1c1e27161d224e585334571
import os
import sys
# ======== PLEASE MODIFY ========
# where is the repo
repoRoot = r'.'
# to CUDA\vX.Y\bin
#os.environ['PATH'] = r'path\to\your\NVIDIA GPU Computing Toolkit\CUDA\v9.0\bin' + ';' + os.environ['PATH']
import argparse
import yaml
import numpy as np
import mxnet as mx
import cv2
import flow_vis
from moviepy.editor import ImageSequenceClip
from moviepy.audio.AudioClip import AudioArrayClip
from .network import config
from .network import get_pipeline
from . import path
from . import logger
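# find_checkpoint resolves a "prefix" or "prefix:steps" checkpoint string to a
# concrete checkpoint file and step count, using the run logs located via the
# repo's path helpers.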
def find_checkpoint(checkpoint_str):
# find checkpoint
steps = 0
if checkpoint_str is not None:
if ':' in checkpoint_str:
prefix, steps = checkpoint_str.split(':')
else:
prefix = checkpoint_str
steps = None
log_file, run_id = path.find_log(prefix)
if steps is None:
checkpoint, steps = path.find_checkpoints(run_id)[-1]
else:
checkpoints = path.find_checkpoints(run_id)
try:
checkpoint, steps = next(filter(lambda t : t[1] == steps, checkpoints))
except StopIteration:
print('The steps not found in checkpoints', steps, checkpoints)
sys.stdout.flush()
raise StopIteration
steps = int(steps)
if args.clear_steps:
steps = 0
else:
_, exp_info = path.read_log(log_file)
exp_info = exp_info[-1]
for k in args.__dict__:
if k in exp_info and k in ('tag',):
setattr(args, k, eval(exp_info[k]))
print('{}={}, '.format(k, exp_info[k]), end='')
print()
sys.stdout.flush()
return checkpoint, steps
def load_model(config_str):
# load configuration
with open(os.path.join(repoRoot, 'network', 'config', config_str)) as f:
model_config = config.Reader(yaml.load(f))  # new name avoids shadowing the imported config module
return model_config
def instantiate_model(gpu_device, config):
ctx = [mx.cpu()] if gpu_device == '' else [mx.gpu(gpu_id) for gpu_id in map(int, gpu_device.split(','))]
# initiate
pipe = get_pipeline(args.network, ctx=ctx, config=config)
return pipe
def load_checkpoint(pipe, config, checkpoint):
# load parameters from given checkpoint
print('Load Checkpoint {}'.format(checkpoint))
sys.stdout.flush()
network_class = getattr(config.network, 'class').get()
print('load the weight for the network')
pipe.load(checkpoint)
if network_class == 'MaskFlownet':
print('fix the weight for the head network')
pipe.fix_head()
sys.stdout.flush()
return pipe
def predict_image_pair_flow(img1, img2, pipe, resize=None):
for result in pipe.predict([img1], [img2], batch_size = 1, resize=resize):
flow, occ_mask, warped = result
return flow, occ_mask, warped
def create_video_clip_from_frames(frame_list, fps):
""" Function takes a list of video frames and puts them together in a sequence"""
visual_clip = ImageSequenceClip(frame_list, fps=fps) #put frames together using moviepy
return visual_clip #return the ImageSequenceClip
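# predict_video_flow reads the video frame by frame, pairs each frame with its
# successor, and runs the flow pipeline over the pairs in batches. Note that it
# relies on the module-level `pipe` created in the __main__ block below.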
def predict_video_flow(video_filename, batch_size, resize=None):
cap = cv2.VideoCapture(video_filename)
fps = cap.get(cv2.CAP_PROP_FPS)
prev_frames = []
new_frames = []
has_frames, frame = cap.read()
prev_frames.append(frame)
while True:
has_frames, frame = cap.read()
if not has_frames:
cap.release()
break
new_frames.append(frame)
prev_frames.append(frame)
del prev_frames[-1] #delete the last frame of the video from prev_frames
flow_video = [flow for flow, occ_mask, warped in pipe.predict(prev_frames, new_frames, batch_size=batch_size, resize=resize)]
return flow_video, fps
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('flow_filepath', type=str, help='destination filepath of the flow image/video')
parser.add_argument('config', type=str, nargs='?', default=None)
parser.add_argument('--image_1', type=str, help='filepath of the first image')
parser.add_argument('--image_2', type=str, help='filepath of the second image')
parser.add_argument('--video_filepath', type=str, help='filepath of the input video')
parser.add_argument('-g', '--gpu_device', type=str, default='', help='Specify gpu device(s)')
parser.add_argument('-c', '--checkpoint', type=str, default=None,
help='model checkpoint to load; by default, the latest one.'
'You can use checkpoint:steps to load to a specific steps')
parser.add_argument('--clear_steps', action='store_true')
parser.add_argument('-n', '--network', type=str, default='MaskFlownet', help='The choice of network')
parser.add_argument('--batch', type=int, default=8, help='minibatch size of samples per device')
parser.add_argument('--resize', type=str, default='', help='shape to resize image frames before inference')
parser.add_argument('--threads', type=str, default=8, help='Number of threads to use when writing flow video to file')
args = parser.parse_args()
# Get desired image resize from the string argument
infer_resize = [int(s) for s in args.resize.split(',')] if args.resize else None
checkpoint, steps = find_checkpoint(args.checkpoint)
config = load_model(args.config)
pipe = instantiate_model(args.gpu_device, config)
pipe = load_checkpoint(pipe, config, checkpoint)
if args.image_1 is not None:
image_1 = cv2.imread(args.image_1)
image_2 = cv2.imread(args.image_2)
flow, occ_mask, warped = predict_image_pair_flow(image_1, image_2, pipe)
cv2.imwrite(args.flow_filepath, flow_vis.flow_to_color(flow, convert_to_bgr=False))
else:
flow_video, fps = predict_video_flow(args.video_filepath, batch_size=args.batch)
flow_video_visualisations = [flow_vis.flow_to_color(flow, convert_to_bgr=False) for flow in flow_video]
flow_video_clip = create_video_clip_from_frames(flow_video_visualisations, fps)
flow_video_clip.write_videofile(args.flow_filepath, threads=args.threads, logger=None) #export the video
sys.exit(0)
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
tests/unit/jobs/test_stream_processor.py
|
import json
import os
from types import SimpleNamespace
import pytest
import boto3
from botocore.exceptions import ClientError
from mock import patch, Mock, ANY, MagicMock, call
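# The stream_processor module reads these environment variables at import time,
# so the import itself is wrapped in patch.dict to supply test values.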
with patch.dict(
os.environ,
{"JobTable": "test", "DeletionQueueTable": "test", "StateMachineArn": "sm-arn"},
):
from backend.lambdas.jobs.stream_processor import (
cleanup_manifests,
clear_deletion_queue,
handler,
is_operation,
is_record_type,
process_job,
)
pytestmark = [pytest.mark.unit, pytest.mark.jobs]
def test_it_processes_matches_events():
assert is_operation({"eventName": "INSERT"}, "INSERT")
assert is_operation({"eventName": "MODIFY"}, "MODIFY")
def test_it_recognises_jobs():
assert is_record_type(
{
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "job123"},
"Type": {"S": "Job"},
}
}
},
"Job",
True,
)
assert not is_record_type(
{
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "123456"},
"Type": {"S": "JobEvent"},
}
}
},
"Job",
True,
)
assert not is_record_type({"dynamodb": {}}, "Job", True)
def test_it_recognises_job_events():
assert is_record_type(
{
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "123456"},
"Type": {"S": "JobEvent"},
}
}
},
"JobEvent",
True,
)
assert not is_record_type(
{
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "123456"},
"Type": {"S": "Job"},
}
}
},
"JobEvent",
True,
)
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.process_job")
@patch("backend.lambdas.jobs.stream_processor.deserialize_item")
def test_it_handles_job_records(mock_deserializer, mock_process, mock_is_record):
mock_deserializer.return_value = {
"Id": "job123",
"Sk": "job123",
"Type": "Job",
}
mock_is_record.side_effect = [True, False, False]
handler(
{
"Records": [
{
"eventName": "INSERT",
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "job123"},
"Type": {"S": "Job"},
}
},
}
]
},
SimpleNamespace(),
)
assert 1 == mock_process.call_count
assert 1 == mock_deserializer.call_count
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.cleanup_manifests")
@patch("backend.lambdas.jobs.stream_processor.deserialize_item")
def test_it_handles_job_deletions(mock_deserializer, mock_cleanup, mock_is_record):
mock_deserializer.return_value = {
"Id": "job123",
"Sk": "job123",
"Type": "Job",
}
mock_is_record.side_effect = [False, True, False]
handler(
{
"Records": [
{
"eventName": "REMOVE",
"dynamodb": {
"OldImage": {
"Id": {"S": "job123"},
"Sk": {"S": "job123"},
"Type": {"S": "Job"},
}
},
}
]
},
SimpleNamespace(),
)
assert 1 == mock_cleanup.call_count
assert 1 == mock_deserializer.call_count
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.update_status")
@patch("backend.lambdas.jobs.stream_processor.update_stats")
@patch("backend.lambdas.jobs.stream_processor.deserialize_item")
def test_it_handles_job_event_records(
mock_deserializer, mock_stats, mock_status, mock_is_record
):
mock_deserializer.return_value = {
"Id": "job123",
"Sk": "123456",
"Type": "JobEvent",
}
mock_is_record.side_effect = [False, False, True]
mock_status.return_value = {"JobStatus": "RUNNING"}
mock_stats.return_value = {}
handler(
{
"Records": [
{
"eventName": "INSERT",
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "123456"},
"Type": {"S": "JobEvent"},
}
},
}
]
},
SimpleNamespace(),
)
mock_is_record.side_effect = [False, False, True]
assert 1 == mock_status.call_count
assert 1 == mock_stats.call_count
assert 1 == mock_deserializer.call_count
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.update_status")
@patch("backend.lambdas.jobs.stream_processor.update_stats")
@patch("backend.lambdas.jobs.stream_processor.deserialize_item")
def test_it_does_not_update_status_if_stats_fails(
mock_deserializer, mock_stats, mock_status, mock_is_record
):
mock_deserializer.return_value = {
"Id": "job123",
"Sk": "123456",
"Type": "JobEvent",
}
mock_stats.side_effect = ValueError
mock_is_record.side_effect = [False, False, True]
with pytest.raises(ValueError):
handler(
{
"Records": [
{
"eventName": "INSERT",
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "123456"},
"Type": {"S": "JobEvent"},
}
},
}
]
},
SimpleNamespace(),
)
mock_status.assert_not_called()
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.client")
def test_it_starts_state_machine(mock_client):
process_job(
{
"Id": "job123",
"Sk": "job123",
"Type": "Job",
"AthenaConcurrencyLimit": 15,
"AthenaQueryMaxRetries": 2,
"DeletionTasksMaxNumber": 50,
"QueryExecutionWaitSeconds": 5,
"QueryQueueWaitSeconds": 5,
"ForgetQueueWaitSeconds": 30,
}
)
mock_client.start_execution.assert_called_with(
stateMachineArn="sm-arn",
name="job123",
input=json.dumps(
{
"AthenaConcurrencyLimit": 15,
"AthenaQueryMaxRetries": 2,
"DeletionTasksMaxNumber": 50,
"ForgetQueueWaitSeconds": 30,
"Id": "job123",
"QueryExecutionWaitSeconds": 5,
"QueryQueueWaitSeconds": 5,
}
),
)
@patch("backend.lambdas.jobs.stream_processor.glue")
def test_it_removes_manifest_partitions(glue_mock):
job = {
"Id": "job-id",
"Manifests": [
"s3://bucket/manifests/job-id/dm-1/manifest.json",
"s3://bucket/manifests/job-id/dm-2/manifest.json",
],
}
cleanup_manifests(job)
glue_mock.batch_delete_partition.assert_called_with(
DatabaseName="s3f2_manifests_database",
TableName="s3f2_manifests_table",
PartitionsToDelete=[
{"Values": ["job-id", "dm-1"]},
{"Values": ["job-id", "dm-2"]},
],
)
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.client")
def test_it_handles_already_existing_executions(mock_client, mock_is_record):
e = boto3.client("stepfunctions").exceptions.ExecutionAlreadyExists
mock_client.exceptions.ExecutionAlreadyExists = e
mock_client.start_execution.side_effect = e({}, "ExecutionAlreadyExists")
mock_is_record.side_effect = [True, False, False]
process_job(
{
"Id": "job123",
"Sk": "job123",
"Type": "Job",
"CreatedAt": 123.0,
"AthenaConcurrencyLimit": 15,
"AthenaQueryMaxRetries": 2,
"DeletionTasksMaxNumber": 3,
"ForgetQueueWaitSeconds": 30,
"QueryExecutionWaitSeconds": 5,
"QueryQueueWaitSeconds": 30,
}
)
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.client")
@patch("backend.lambdas.jobs.stream_processor.emit_event")
def test_it_handles_execution_failure(mock_emit, mock_client):
mock_client.start_execution.side_effect = ClientError({}, "start_execution")
mock_client.exceptions.ExecutionAlreadyExists = boto3.client(
"stepfunctions"
).exceptions.ExecutionAlreadyExists
process_job(
{
"Id": "job123",
"Sk": "job123",
"Type": "Job",
"CreatedAt": 123.0,
"AthenaConcurrencyLimit": 15,
"AthenaQueryMaxRetries": 2,
"DeletionTasksMaxNumber": 3,
"ForgetQueueWaitSeconds": 30,
"QueryExecutionWaitSeconds": 5,
"QueryQueueWaitSeconds": 30,
}
)
mock_emit.assert_called_with(
"job123",
"Exception",
{
"Error": "ExecutionFailure",
"Cause": "Unable to start StepFunction execution: An error occurred (Unknown) when calling the start_execution operation: Unknown",
},
"StreamProcessor",
)
@patch("backend.lambdas.jobs.stream_processor.process_job", Mock(return_value=None))
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.update_stats", Mock())
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.update_status")
@patch("backend.lambdas.jobs.stream_processor.clear_deletion_queue")
@patch("backend.lambdas.jobs.stream_processor.emit_event")
@patch("backend.lambdas.jobs.stream_processor.deserialize_item")
def test_it_cleans_up_on_forget_complete(
mock_deserializer, mock_emit, mock_clear, mock_status, mock_is_record
):
mock_is_record.side_effect = [False, False, True]
mock_deserializer.return_value = {
"Id": "job123",
"Sk": "event123",
"Type": "JobEvent",
"EventName": "ForgetPhaseSucceeded",
}
mock_status.return_value = {
"Id": "job123",
"Sk": "event123",
"Type": "Job",
"JobStatus": "FORGET_COMPLETED_CLEANUP_IN_PROGRESS",
}
handler(
{
"Records": [
{
"eventName": "INSERT",
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "job123"},
"Type": {"S": "JobEvent"},
"EventName": {"S": "ForgetPhaseComplete"},
}
},
}
]
},
SimpleNamespace(),
)
mock_clear.assert_called()
mock_emit.assert_called_with(ANY, "CleanupSucceeded", ANY, ANY)
@patch("backend.lambdas.jobs.stream_processor.process_job", Mock(return_value=None))
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.update_stats", Mock())
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.update_status")
@patch("backend.lambdas.jobs.stream_processor.clear_deletion_queue")
@patch("backend.lambdas.jobs.stream_processor.emit_event")
@patch("backend.lambdas.jobs.stream_processor.deserialize_item")
def test_it_emits_skipped_event_for_failures(
mock_deserializer, mock_emit, mock_clear, mock_status, mock_is_record
):
mock_deserializer.return_value = {
"Id": "job123",
"Sk": "event123",
"Type": "JobEvent",
"EventName": "AnEvent",
}
locked_statuses = [
"FIND_FAILED",
"FORGET_FAILED",
"FAILED",
]
mock_status.side_effect = [
{"Id": "job123", "Sk": "event123", "Type": "JobEvent", "JobStatus": status,}
for status in locked_statuses
]
mock_is_record.side_effect = list(
sum([(False, False, True) for _ in locked_statuses], ())
)
for _ in locked_statuses:
handler(
{
"Records": [
{
"eventName": "INSERT",
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "event123"},
"Type": {"S": "JobEvent"},
"EventName": {"S": "ForgetPhaseEnded"},
}
},
}
]
},
SimpleNamespace(),
)
mock_clear.assert_not_called()
mock_emit.assert_called_with(ANY, "CleanupSkipped", ANY, ANY)
@patch("backend.lambdas.jobs.stream_processor.process_job", Mock(return_value=None))
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.update_stats", Mock())
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.update_status")
@patch("backend.lambdas.jobs.stream_processor.clear_deletion_queue")
@patch("backend.lambdas.jobs.stream_processor.emit_event")
@patch("backend.lambdas.jobs.stream_processor.deserialize_item")
def test_it_does_not_emit_skipped_event_for_non_failures(
mock_deserializer, mock_emit, mock_clear, mock_status, mock_is_record
):
mock_deserializer.return_value = {
"Id": "job123",
"Sk": "event123",
"Type": "JobEvent",
"EventName": "AnEvent",
}
statuses = [
"RUNNING",
"QUEUED",
"COMPLETED",
"COMPLETED_CLEANUP_FAILED",
]
mock_is_record.side_effect = list(sum([(False, False, True) for _ in statuses], ()))
mock_status.side_effect = [
{"Id": "job123", "Sk": "event123", "Type": "JobEvent", "JobStatus": status,}
for status in statuses
]
for _ in statuses:
handler(
{
"Records": [
{
"eventName": "INSERT",
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "event123"},
"Type": {"S": "JobEvent"},
"EventName": {"S": "ForgetPhaseEnded"},
}
},
}
]
},
SimpleNamespace(),
)
for emit_call in mock_emit.call_args_list:
assert emit_call[0][1] != "CleanupSkipped"
@patch("backend.lambdas.jobs.stream_processor.process_job", Mock(return_value=None))
@patch("backend.lambdas.jobs.stream_processor.is_operation", Mock(return_value=True))
@patch("backend.lambdas.jobs.stream_processor.update_stats", Mock())
@patch("backend.lambdas.jobs.stream_processor.is_record_type")
@patch("backend.lambdas.jobs.stream_processor.update_status")
@patch("backend.lambdas.jobs.stream_processor.clear_deletion_queue")
@patch("backend.lambdas.jobs.stream_processor.emit_event")
@patch("backend.lambdas.jobs.stream_processor.deserialize_item")
def test_it_emits_event_for_cleanup_error(
mock_deserializer, mock_emit, mock_clear, mock_status, mock_is_record
):
mock_is_record.side_effect = [False, False, True]
mock_deserializer.return_value = {
"Id": "job123",
"Sk": "event123",
"Type": "JobEvent",
"EventName": "ForgetPhaseSucceeded",
}
mock_clear.side_effect = ClientError({}, "delete_item")
mock_status.return_value = {
"Id": "job123",
"Sk": "event123",
"Type": "JobEvent",
"JobStatus": "FORGET_COMPLETED_CLEANUP_IN_PROGRESS",
}
handler(
{
"Records": [
{
"eventName": "INSERT",
"dynamodb": {
"NewImage": {
"Id": {"S": "job123"},
"Sk": {"S": "job123"},
"Type": {"S": "Job"},
"EventName": {"S": "ForgetPhaseComplete"},
}
},
}
]
},
SimpleNamespace(),
)
mock_clear.assert_called()
mock_emit.assert_called_with(ANY, "CleanupFailed", ANY, ANY)
@patch("backend.lambdas.jobs.stream_processor.q_table.batch_writer")
@patch("backend.lambdas.jobs.stream_processor.fetch_job_manifest", MagicMock())
@patch("backend.lambdas.jobs.stream_processor.json_lines_iterator")
def test_it_clears_queue(mock_json, mock_writer):
mock_json.side_effect = [
[{"DeletionQueueItemId": "id-1"}, {"DeletionQueueItemId": "id-2"}],
[
{"DeletionQueueItemId": "id-3"},
{"DeletionQueueItemId": "id-4"},
{"DeletionQueueItemId": "id-5"},
],
]
mock_writer.return_value.__enter__.return_value = mock_writer
clear_deletion_queue(
{
"Id": "job123",
"Sk": "job123",
"Type": "Job",
"JobStatus": "FORGET_COMPLETED_CLEANUP_IN_PROGRESS",
"DeletionQueueSize": 1,
"Manifests": [
"s3://temp-bucket/manifests/job123/dm_01/manifest.json",
"s3://temp-bucket/manifests/job123/dm_02/manifest.json",
],
}
)
mock_writer.delete_item.assert_has_calls(
[
call(Key={"DeletionQueueItemId": "id-1"}),
call(Key={"DeletionQueueItemId": "id-2"}),
call(Key={"DeletionQueueItemId": "id-3"}),
call(Key={"DeletionQueueItemId": "id-4"}),
call(Key={"DeletionQueueItemId": "id-5"}),
],
any_order=True,
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
app/controllers/metric_routes.py
|
"""
This is where all the metric routes and controllers are defined.
"""
from flask import Blueprint, current_app
from prometheus_client.exposition import make_wsgi_app
metric_blueprint = Blueprint('metric_blueprint', __name__)
@metric_blueprint.route('/prometheus')
def metrics():
return make_wsgi_app(current_app.prom_init.registry)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
main.go
|
// A simple HTTP server for world-best-score in SUBLEERUNKER. It remembers
// a score for about a week.
package main
import (
"context"
"crypto/md5"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
"log"
"math/rand"
"net/http"
"os"
"regexp"
"strconv"
"strings"
"time"
)
import (
"cloud.google.com/go/datastore"
"google.golang.org/api/iterator"
)
const TTL time.Duration = 7 * 24 * time.Hour // 7 days
type Champion struct {
Score int
Name string
Replay string
Duration time.Duration
RecordedAt time.Time
ExpiresIn time.Duration
Token string
}
func (c *Champion) ExpiresAt() time.Time {
return c.RecordedAt.Add(c.ExpiresIn)
}
func (c *Champion) IsExpired(t time.Time) bool {
return t.After(c.ExpiresAt())
}
var NoChampion = &Champion{0, "", "", 0, time.Time{}, 0, ""}
type NotHigherScore struct {
Score int
PrevScore int
}
func (n *NotHigherScore) Error() string {
return fmt.Sprintf(
"score %d is not higher than prev score %d",
n.Score, n.PrevScore,
)
}
type NotAuthorized struct {
}
func (n *NotAuthorized) Error() string {
return "not authorized"
}
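// IssueToken derives an opaque hex token from the given seed (the record's
// Unix timestamp). The token is stored with the champion and later compared
// against the basic-auth password to authorize renames.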
func IssueToken(seed int64) string {
data := make([]byte, 8)
binary.PutVarint(data, seed)
hash := md5.Sum(data)
return hex.EncodeToString(hash[:])
}
func WriteResult(w http.ResponseWriter, result interface{}) {
output, err := json.Marshal(result)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(output)
}
func ConnectDatastore(c context.Context) *datastore.Client {
client, err := datastore.NewClient(c, "subleerunker-166907")
if err != nil {
log.Fatalf("Failed to create Cloud Datastore client: %v", err)
}
return client
}
// LoadChampion loads the current best record from the Google Cloud Datastore.
// It returns the champion, its datastore key, and an error; NoChampion and a
// nil key are returned when no unexpired record exists.
func LoadChampion(c context.Context, ds *datastore.Client, t time.Time, ttl time.Duration) (*Champion, *datastore.Key, error) {
root := datastore.NameKey("champion", "_", nil)
query := datastore.NewQuery("champion").Ancestor(root).
Filter("RecordedAt >", t.Add(-ttl)).
Order("-RecordedAt").Limit(10)
for i := ds.Run(c, query); ; {
var champion Champion
key, err := i.Next(&champion)
if err == iterator.Done {
break
}
if err != nil {
return NoChampion, nil, err
} else if champion.IsExpired(t) {
continue
} else {
return &champion, key, nil
}
}
return NoChampion, nil, nil
}
// A handler for "GET /champion".
func GetChampion(w http.ResponseWriter, r *http.Request) {
c := r.Context()
ds := ConnectDatastore(c)
defer ds.Close()
champion, _, err := LoadChampion(c, ds, time.Now(), TTL)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
_, token, _ := r.BasicAuth()
WriteResult(w, struct {
Score int `json:"score"`
Name string `json:"name"`
Replay string `json:"replay"`
ExpiresAt time.Time `json:"expiresAt"`
Authorized bool `json:"authorized"`
}{
champion.Score,
champion.Name,
champion.Replay,
champion.ExpiresAt(),
token != "" && token == champion.Token,
})
}
func WriteAuthorizedChampion(w http.ResponseWriter, champion *Champion) {
WriteResult(w, struct {
Score int `json:"score"`
Name string `json:"name"`
Replay string `json:"replay"`
ExpiresAt time.Time `json:"expiresAt"`
Token string `json:"token"`
}{
champion.Score,
champion.Name,
champion.Replay,
champion.ExpiresAt(),
champion.Token,
})
}
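// NormalizeName upper-cases the name, strips everything but A-Z, and truncates
// the result to at most three letters.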
func NormalizeName(name string) string {
name = strings.ToUpper(name)
p := regexp.MustCompile("[A-Z]+")
name = strings.Join(p.FindAllString(name, -1), "")
if len(name) > 3 {
name = name[:3]
}
return name
}
func SuggestName(r *rand.Rand) string {
letters := "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
letter := letters[r.Int()%len(letters)]
return strings.Repeat(string(letter), 3)
}
// A handler for "PUT /champion" to beat the previous record.
func BeatChampion(w http.ResponseWriter, r *http.Request) {
c := r.Context()
score, err := strconv.Atoi(r.FormValue("score"))
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
duration, err := strconv.ParseFloat(r.FormValue("duration"), 64)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
name := r.FormValue("name")
name = NormalizeName(name)
if name == "" {
rand_ := rand.New(rand.NewSource(time.Now().UnixNano()))
name = SuggestName(rand_)
}
replay := r.FormValue("replay")
log.Printf(
"Trying to beat champion: %d by '%s' in %.3f sec",
score, name, duration,
)
t := time.Now()
token := IssueToken(t.Unix())
champion := &Champion{
Score: score,
Name: name,
Replay: replay,
Duration: time.Duration(duration * float64(time.Second)),
RecordedAt: t,
ExpiresIn: TTL,
Token: token,
}
var prevScore int
var prevName string
ds := ConnectDatastore(c)
defer ds.Close()
_, err = ds.RunInTransaction(c, func(tx *datastore.Transaction) error {
prevChampion, _, err := LoadChampion(c, ds, t, TTL)
if err != nil {
return err
}
prevScore = prevChampion.Score
prevName = prevChampion.Name
if score <= prevScore {
return &NotHigherScore{
Score: score,
PrevScore: prevScore,
}
}
root := datastore.NameKey("champion", "_", nil)
key := datastore.IncompleteKey("champion", root)
_, err = tx.Put(key, champion)
return err
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Printf(
"Champion has been beaten: %d by '%s' -> %d by '%s' in %.3f sec",
prevScore, prevName, score, name, duration,
)
WriteAuthorizedChampion(w, champion)
}
// A handler for "PUT /champion" to rename the current record.
func RenameChampion(w http.ResponseWriter, r *http.Request) {
c := r.Context()
name := r.FormValue("name")
name = NormalizeName(name)
log.Printf("Trying to rename champion: '%s'", name)
_, token, _ := r.BasicAuth()
t := time.Now()
var _champion Champion
var prevName string
ds := ConnectDatastore(c)
defer ds.Close()
_, err := ds.RunInTransaction(c, func(tx *datastore.Transaction) error {
champion, key, err := LoadChampion(c, ds, t, TTL)
if err != nil {
return err
}
prevName = champion.Name
if champion.Token != token {
return &NotAuthorized{}
}
champion.Name = name
_, err = tx.Put(key, champion)
_champion = *champion
return err
})
switch err.(type) {
case nil:
break
case *NotAuthorized:
http.Error(w, err.Error(), http.StatusUnauthorized)
return
default:
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
log.Printf("Champion has been renamed: '%s' -> '%s'", prevName, name)
WriteAuthorizedChampion(w, &_champion)
}
// A combined handler for all methods of "/champion".
func HandleChampion(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "https://sublee.github.io")
switch strings.ToUpper(r.Method) {
case "OPTIONS":
w.Header().Set("Access-Control-Allow-Methods", "GET, PUT, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Authorization")
w.Header().Set("Access-Control-Max-Age", "86400")
case "GET":
GetChampion(w, r)
case "PUT":
if r.FormValue("score") != "" {
BeatChampion(w, r)
} else {
RenameChampion(w, r)
}
default:
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
}
}
func init() {
http.HandleFunc("/champion", HandleChampion)
}
func main() {
port := os.Getenv("PORT")
if port == "" {
port = "8080"
}
http.ListenAndServe(fmt.Sprintf(":%s", port), nil)
}
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
go
| 1 | 0 | |
news_fetch/python/tests.py
|
import six
import unittest
import io
import os
from bs4 import BeautifulSoup
from official import ZhihuDailyOfficial, Story
DATE = 'Date'
STORY = Story(1, 'Story Title', 'Thumbnail URL')
OFFICIAL = ZhihuDailyOfficial(DATE)
def read_file(file_name):
prefix = os.environ['TEST_SRCDIR']
test_files = 'ZhihuDailyPurify/news_fetch/test_files'
file_path = '{}/{}/{}'.format(prefix, test_files, file_name)
with io.open(file_path, encoding='utf-8') as f:
return f.read()
def setup_document(file_name):
return BeautifulSoup(read_file(file_name), 'html.parser')
def setup_pair(file_name):
return STORY, setup_document(file_name)
class TestStories(unittest.TestCase):
def test_error_response(self):
content = read_file('json/error_stories.json')
self.assertEqual(Story.from_json(content), [])
def test_no_stories(self):
content = read_file('json/no_stories.json')
self.assertEqual(Story.from_json(content), [])
def test_empty_stories(self):
content = read_file('json/empty_stories.json')
self.assertEqual(Story.from_json(content), [])
def test_no_thumbnail_url(self):
content = read_file('json/empty_images.json')
stories = Story.from_json(content)
self.assertEqual(stories[0].thumbnail_url, '')
def test_multiple_thumbnail_urls(self):
content = read_file('json/multiple_images.json')
stories = Story.from_json(content)
self.assertEqual(stories[0].thumbnail_url, 'should be selected')
def test_normal_scenario(self):
content = read_file('json/normal.json')
stories = Story.from_json(content)
self.assertEqual(len(stories), 2)
first_story = stories[0]
self.assertEqual(first_story.story_id, 1)
self.assertEqual(first_story.title, 'first title')
self.assertEqual(first_story.thumbnail_url, 'image url 1')
second_story = stories[1]
self.assertEqual(second_story.story_id, 2)
self.assertEqual(second_story.title, 'second title')
self.assertEqual(second_story.thumbnail_url, 'image url 2')
class TestToNews(unittest.TestCase):
def test_no_questions(self):
pair = setup_pair('html/no_questions.html')
news = OFFICIAL.to_news(pair)
self.assertEqual(news, None)
def test_no_question_title(self):
pair = setup_pair('html/no_title.html')
news = OFFICIAL.to_news(pair)
self.assertEqual(news.questions[0].title, STORY.title)
def test_empty_question_title(self):
pair = setup_pair('html/empty_question_title.html')
news = OFFICIAL.to_news(pair)
self.assertEqual(news.questions[0].title, STORY.title)
def test_no_question_url(self):
pair = setup_pair('html/no_question_url.html')
news = OFFICIAL.to_news(pair)
self.assertEqual(news, None)
def test_invalid_question_url(self):
pair = setup_pair('html/invalid_question_url.html')
news = OFFICIAL.to_news(pair)
self.assertEqual(news, None)
def test_normal_scenario(self):
pair = setup_pair('html/normal.html')
news = OFFICIAL.to_news(pair)
self.assertEqual(len(news.questions), 2)
first_question = news.questions[0]
self.assertEqual(first_question.title, 'First')
self.assertTrue(first_question.url.endswith('1234567'))
second_question = news.questions[1]
self.assertEqual(second_question.title, 'Second')
self.assertTrue(second_question.url.endswith('2345678'))
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"TEST_SRCDIR"
] |
[]
|
["TEST_SRCDIR"]
|
python
| 1 | 0 | |
pkg/buildermgr/envwatcher.go
|
/*
Copyright 2021 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package buildermgr
import (
"fmt"
"os"
"strconv"
"time"
"github.com/pkg/errors"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
fv1 "github.com/fnlize/fnlize/pkg/apis/core/v1"
"github.com/fnlize/fnlize/pkg/crd"
"github.com/fnlize/fnlize/pkg/executor/util"
fetcherConfig "github.com/fnlize/fnlize/pkg/fetcher/config"
"github.com/fnlize/fnlize/pkg/utils"
)
type requestType int
const (
GET_BUILDER requestType = iota
CLEANUP_BUILDERS
LABEL_ENV_NAME = "envName"
LABEL_ENV_NAMESPACE = "envNamespace"
LABEL_ENV_RESOURCEVERSION = "envResourceVersion"
LABEL_DEPLOYMENT_OWNER = "owner"
BUILDER_MGR = "buildermgr"
)
var (
deletePropagation = metav1.DeletePropagationBackground
delOpt = metav1.DeleteOptions{PropagationPolicy: &deletePropagation}
)
type (
builderInfo struct {
envMetadata *metav1.ObjectMeta
deployment *appsv1.Deployment
service *apiv1.Service
}
envwRequest struct {
requestType
env *fv1.Environment
envList []fv1.Environment
respChan chan envwResponse
}
envwResponse struct {
builderInfo *builderInfo
err error
}
environmentWatcher struct {
logger *zap.Logger
cache map[string]*builderInfo
requestChan chan envwRequest
builderNamespace string
fissionClient *crd.FissionClient
kubernetesClient *kubernetes.Clientset
fetcherConfig *fetcherConfig.Config
builderImagePullPolicy apiv1.PullPolicy
useIstio bool
}
)
func makeEnvironmentWatcher(
logger *zap.Logger,
fissionClient *crd.FissionClient,
kubernetesClient *kubernetes.Clientset,
fetcherConfig *fetcherConfig.Config,
builderNamespace string) *environmentWatcher {
useIstio := false
enableIstio := os.Getenv("ENABLE_ISTIO")
if len(enableIstio) > 0 {
istio, err := strconv.ParseBool(enableIstio)
if err != nil {
logger.Error("Failed to parse ENABLE_ISTIO, defaults to false")
}
useIstio = istio
}
builderImagePullPolicy := utils.GetImagePullPolicy(os.Getenv("BUILDER_IMAGE_PULL_POLICY"))
envWatcher := &environmentWatcher{
logger: logger.Named("environment_watcher"),
cache: make(map[string]*builderInfo),
requestChan: make(chan envwRequest),
builderNamespace: builderNamespace,
fissionClient: fissionClient,
kubernetesClient: kubernetesClient,
builderImagePullPolicy: builderImagePullPolicy,
useIstio: useIstio,
fetcherConfig: fetcherConfig,
}
go envWatcher.service()
return envWatcher
}
func (envw *environmentWatcher) getCacheKey(envName string, envNamespace string, envResourceVersion string) string {
return fmt.Sprintf("%v-%v-%v", envName, envNamespace, envResourceVersion)
}
func (envw *environmentWatcher) getLabelForDeploymentOwner() map[string]string {
return map[string]string{
LABEL_DEPLOYMENT_OWNER: BUILDER_MGR,
}
}
func (envw *environmentWatcher) getLabels(envName string, envNamespace string, envResourceVersion string) map[string]string {
return map[string]string{
LABEL_ENV_NAME: envName,
LABEL_ENV_NAMESPACE: envNamespace,
LABEL_ENV_RESOURCEVERSION: envResourceVersion,
LABEL_DEPLOYMENT_OWNER: BUILDER_MGR,
}
}
func (envw *environmentWatcher) watchEnvironments() {
rv := ""
for {
wi, err := envw.fissionClient.CoreV1().Environments(metav1.NamespaceAll).Watch(metav1.ListOptions{
ResourceVersion: rv,
})
if err != nil {
if utils.IsNetworkError(err) {
envw.logger.Error("encountered network error, retrying later", zap.Error(err))
time.Sleep(5 * time.Second)
continue
}
envw.logger.Fatal("error watching environment list", zap.Error(err))
}
for {
ev, more := <-wi.ResultChan()
if !more {
// restart watch from last rv
break
}
if ev.Type == watch.Error {
// restart watch from the start
rv = ""
time.Sleep(time.Second)
break
}
env := ev.Object.(*fv1.Environment)
rv = env.ObjectMeta.ResourceVersion
envw.sync()
}
}
}
func (envw *environmentWatcher) sync() {
maxRetries := 10
for i := 0; i < maxRetries; i++ {
envList, err := envw.fissionClient.CoreV1().Environments(metav1.NamespaceAll).List(metav1.ListOptions{})
if err != nil {
if utils.IsNetworkError(err) {
envw.logger.Error("error syncing environment CRD resources due to network error, retrying later", zap.Error(err))
time.Sleep(50 * time.Duration(2*i) * time.Millisecond)
continue
}
envw.logger.Fatal("error syncing environment CRD resources", zap.Error(err))
}
// Create environment builders for all environments
for i := range envList.Items {
env := envList.Items[i]
if env.Spec.Version == 1 || // builder is not supported with v1 interface
len(env.Spec.Builder.Image) == 0 { // ignore env without builder image
continue
}
_, err := envw.getEnvBuilder(&env)
if err != nil {
envw.logger.Error("error creating builder", zap.Error(err), zap.String("builder_target", env.ObjectMeta.Name))
}
}
// Remove environment builders no longer needed
envw.cleanupEnvBuilders(envList.Items)
break
}
}
func (envw *environmentWatcher) service() {
for {
req := <-envw.requestChan
switch req.requestType {
case GET_BUILDER:
			// For backward compatibility, builders for environments created in the
			// default namespace are placed in the fission-builder namespace.
ns := envw.builderNamespace
if req.env.ObjectMeta.Namespace != metav1.NamespaceDefault {
ns = req.env.ObjectMeta.Namespace
}
key := envw.getCacheKey(req.env.ObjectMeta.Name, ns, req.env.ObjectMeta.ResourceVersion)
			builderInfo, ok := envw.cache[key]
			if !ok {
				// Assign to the outer builderInfo; using := here would shadow it and the
				// response below would carry a nil builderInfo on a cache miss.
				newInfo, err := envw.createBuilder(req.env, ns)
				if err != nil {
					req.respChan <- envwResponse{err: err}
					continue
				}
				envw.cache[key] = newInfo
				builderInfo = newInfo
			}
req.respChan <- envwResponse{builderInfo: builderInfo}
case CLEANUP_BUILDERS:
latestEnvList := make(map[string]*fv1.Environment)
for i := range req.envList {
env := req.envList[i]
				// For backward compatibility, builders for environments created in the
				// default namespace are placed in the fission-builder namespace.
ns := envw.builderNamespace
if env.ObjectMeta.Namespace != metav1.NamespaceDefault {
ns = env.ObjectMeta.Namespace
}
key := envw.getCacheKey(env.ObjectMeta.Name, ns, env.ObjectMeta.ResourceVersion)
latestEnvList[key] = &env
}
			// If an environment is deleted while the builder manager is down, the
			// builder that belongs to it becomes an orphan, since there is no record
			// of it in either the cache or the CRD. We therefore iterate over the
			// services & deployments to remove both normal and orphan builders.
svcList, err := envw.getBuilderServiceList(envw.getLabelForDeploymentOwner(), metav1.NamespaceAll)
if err != nil {
envw.logger.Error("error getting the builder service list", zap.Error(err))
}
for _, svc := range svcList {
envName := svc.ObjectMeta.Labels[LABEL_ENV_NAME]
envNamespace := svc.ObjectMeta.Labels[LABEL_ENV_NAMESPACE]
envResourceVersion := svc.ObjectMeta.Labels[LABEL_ENV_RESOURCEVERSION]
key := envw.getCacheKey(envName, envNamespace, envResourceVersion)
if _, ok := latestEnvList[key]; !ok {
err := envw.deleteBuilderServiceByName(svc.ObjectMeta.Name, svc.ObjectMeta.Namespace)
if err != nil {
envw.logger.Error("error removing builder service", zap.Error(err),
zap.String("service_name", svc.ObjectMeta.Name),
zap.String("service_namespace", svc.ObjectMeta.Namespace))
}
}
delete(envw.cache, key)
}
deployList, err := envw.getBuilderDeploymentList(envw.getLabelForDeploymentOwner(), metav1.NamespaceAll)
if err != nil {
envw.logger.Error("error getting the builder deployment list", zap.Error(err))
}
for _, deploy := range deployList {
envName := deploy.ObjectMeta.Labels[LABEL_ENV_NAME]
envNamespace := deploy.ObjectMeta.Labels[LABEL_ENV_NAMESPACE]
envResourceVersion := deploy.ObjectMeta.Labels[LABEL_ENV_RESOURCEVERSION]
key := envw.getCacheKey(envName, envNamespace, envResourceVersion)
if _, ok := latestEnvList[key]; !ok {
err := envw.deleteBuilderDeploymentByName(deploy.ObjectMeta.Name, deploy.ObjectMeta.Namespace)
if err != nil {
envw.logger.Error("error removing builder deployment", zap.Error(err),
zap.String("deployment_name", deploy.ObjectMeta.Name),
zap.String("deployment_namespace", deploy.ObjectMeta.Namespace))
}
}
delete(envw.cache, key)
}
}
}
}
func (envw *environmentWatcher) getEnvBuilder(env *fv1.Environment) (*builderInfo, error) {
respChan := make(chan envwResponse)
envw.requestChan <- envwRequest{
requestType: GET_BUILDER,
env: env,
respChan: respChan,
}
resp := <-respChan
return resp.builderInfo, resp.err
}
func (envw *environmentWatcher) cleanupEnvBuilders(envs []fv1.Environment) {
envw.requestChan <- envwRequest{
requestType: CLEANUP_BUILDERS,
envList: envs,
}
}
func (envw *environmentWatcher) createBuilder(env *fv1.Environment, ns string) (*builderInfo, error) {
var svc *apiv1.Service
var deploy *appsv1.Deployment
sel := envw.getLabels(env.ObjectMeta.Name, ns, env.ObjectMeta.ResourceVersion)
svcList, err := envw.getBuilderServiceList(sel, ns)
if err != nil {
return nil, err
}
// there should be only one service in svcList
if len(svcList) == 0 {
svc, err = envw.createBuilderService(env, ns)
if err != nil {
return nil, errors.Wrap(err, "error creating builder service")
}
} else if len(svcList) == 1 {
svc = &svcList[0]
} else {
return nil, fmt.Errorf("found more than one builder service for environment %q", env.ObjectMeta.Name)
}
deployList, err := envw.getBuilderDeploymentList(sel, ns)
if err != nil {
return nil, err
}
// there should be only one deploy in deployList
if len(deployList) == 0 {
// create builder SA in this ns, if not already created
_, err := utils.SetupSA(envw.kubernetesClient, fv1.FissionBuilderSA, ns)
if err != nil {
return nil, errors.Wrapf(err, "error creating %q in ns: %s", fv1.FissionBuilderSA, ns)
}
deploy, err = envw.createBuilderDeployment(env, ns)
if err != nil {
return nil, errors.Wrap(err, "error creating builder deployment")
}
} else if len(deployList) == 1 {
deploy = &deployList[0]
} else {
return nil, fmt.Errorf("found more than one builder deployment for environment %q", env.ObjectMeta.Name)
}
return &builderInfo{
envMetadata: &env.ObjectMeta,
service: svc,
deployment: deploy,
}, nil
}
func (envw *environmentWatcher) deleteBuilderServiceByName(name, namespace string) error {
err := envw.kubernetesClient.CoreV1().
Services(namespace).
Delete(name, &delOpt)
if err != nil {
return errors.Wrapf(err, "error deleting builder service %s.%s", name, namespace)
}
return nil
}
func (envw *environmentWatcher) deleteBuilderDeploymentByName(name, namespace string) error {
err := envw.kubernetesClient.AppsV1().
Deployments(namespace).
Delete(name, &delOpt)
if err != nil {
return errors.Wrapf(err, "error deleting builder deployment %s.%s", name, namespace)
}
return nil
}
func (envw *environmentWatcher) getBuilderServiceList(sel map[string]string, ns string) ([]apiv1.Service, error) {
svcList, err := envw.kubernetesClient.CoreV1().Services(ns).List(
metav1.ListOptions{
LabelSelector: labels.Set(sel).AsSelector().String(),
})
if err != nil {
return nil, errors.Wrap(err, "error getting builder service list")
}
return svcList.Items, nil
}
func (envw *environmentWatcher) createBuilderService(env *fv1.Environment, ns string) (*apiv1.Service, error) {
name := fmt.Sprintf("%v-%v", env.ObjectMeta.Name, env.ObjectMeta.ResourceVersion)
sel := envw.getLabels(env.ObjectMeta.Name, ns, env.ObjectMeta.ResourceVersion)
service := apiv1.Service{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
Labels: sel,
},
Spec: apiv1.ServiceSpec{
Selector: sel,
Type: apiv1.ServiceTypeClusterIP,
Ports: []apiv1.ServicePort{
{
Name: "fetcher-port",
Protocol: apiv1.ProtocolTCP,
Port: 8000,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8000,
},
},
{
Name: "builder-port",
Protocol: apiv1.ProtocolTCP,
Port: 8001,
TargetPort: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8001,
},
},
},
},
}
envw.logger.Info("creating builder service", zap.String("service_name", name))
_, err := envw.kubernetesClient.CoreV1().Services(ns).Create(&service)
if err != nil {
return nil, err
}
return &service, nil
}
func (envw *environmentWatcher) getBuilderDeploymentList(sel map[string]string, ns string) ([]appsv1.Deployment, error) {
deployList, err := envw.kubernetesClient.AppsV1().Deployments(ns).List(
metav1.ListOptions{
LabelSelector: labels.Set(sel).AsSelector().String(),
})
if err != nil {
return nil, errors.Wrap(err, "error getting builder deployment list")
}
return deployList.Items, nil
}
func (envw *environmentWatcher) createBuilderDeployment(env *fv1.Environment, ns string) (*appsv1.Deployment, error) {
name := fmt.Sprintf("%v-%v", env.ObjectMeta.Name, env.ObjectMeta.ResourceVersion)
sel := envw.getLabels(env.ObjectMeta.Name, ns, env.ObjectMeta.ResourceVersion)
var replicas int32 = 1
podAnnotations := env.ObjectMeta.Annotations
if podAnnotations == nil {
podAnnotations = make(map[string]string)
}
if envw.useIstio && env.Spec.AllowAccessToExternalNetwork {
podAnnotations["sidecar.istio.io/inject"] = "false"
}
container, err := util.MergeContainer(&apiv1.Container{
Name: "builder",
Image: env.Spec.Builder.Image,
ImagePullPolicy: envw.builderImagePullPolicy,
TerminationMessagePath: "/dev/termination-log",
Command: []string{"/builder", envw.fetcherConfig.SharedMountPath()},
ReadinessProbe: &apiv1.Probe{
InitialDelaySeconds: 5,
PeriodSeconds: 2,
Handler: apiv1.Handler{
HTTPGet: &apiv1.HTTPGetAction{
Path: "/healthz",
Port: intstr.IntOrString{
Type: intstr.Int,
IntVal: 8001,
},
},
},
},
}, env.Spec.Builder.Container)
if err != nil {
return nil, err
}
pod := apiv1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: sel,
Annotations: podAnnotations,
},
Spec: apiv1.PodSpec{
Containers: []apiv1.Container{*container},
ServiceAccountName: "fission-builder",
},
}
pod.Spec = *(util.ApplyImagePullSecret(env.Spec.ImagePullSecret, pod.Spec))
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns,
Name: name,
Labels: sel,
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: sel,
},
Template: pod,
},
}
err = envw.fetcherConfig.AddFetcherToPodSpec(&deployment.Spec.Template.Spec, "builder")
if err != nil {
return nil, err
}
if env.Spec.Builder.PodSpec != nil {
newPodSpec, err := util.MergePodSpec(&deployment.Spec.Template.Spec, env.Spec.Builder.PodSpec)
if err != nil {
return nil, err
}
deployment.Spec.Template.Spec = *newPodSpec
}
_, err = envw.kubernetesClient.AppsV1().Deployments(ns).Create(deployment)
if err != nil {
return nil, err
}
envw.logger.Info("creating builder deployment", zap.String("deployment", name))
return deployment, nil
}
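The watcher above serializes all cache access through the single goroutine running service(), so no mutex is needed: callers send a request that carries its own reply channel. A minimal, self-contained sketch of that request/response-channel pattern (generic names, not part of this package) might look like this:
package main

import "fmt"

// request carries the input plus a channel for the reply.
type request struct {
	key      string
	respChan chan response
}

type response struct {
	value string
	err   error
}

// serve owns the cache; because only this goroutine touches it, no locking is required.
func serve(requests <-chan request) {
	cache := map[string]string{}
	for req := range requests {
		v, ok := cache[req.key]
		if !ok {
			v = "built-" + req.key // stand-in for the expensive createBuilder step
			cache[req.key] = v
		}
		req.respChan <- response{value: v}
	}
}

func main() {
	requests := make(chan request)
	go serve(requests)

	respChan := make(chan response)
	requests <- request{key: "python-env", respChan: respChan}
	resp := <-respChan
	fmt.Println(resp.value, resp.err)
}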
|
[
"\"ENABLE_ISTIO\"",
"\"BUILDER_IMAGE_PULL_POLICY\""
] |
[] |
[
"BUILDER_IMAGE_PULL_POLICY",
"ENABLE_ISTIO"
] |
[]
|
["BUILDER_IMAGE_PULL_POLICY", "ENABLE_ISTIO"]
|
go
| 2 | 0 | |
newapi/ooniapi/views.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import traceback
from flask import current_app, render_template
from flask import make_response
from flask.json import jsonify
from ooniapi.auth import auth_blueprint
from ooniapi.citizenlab import cz_blueprint
from ooniapi.private import api_private_blueprint
from ooniapi.measurements import api_msm_blueprint
from ooniapi.pages import pages_blueprint
from ooniapi.probe_services import probe_services_blueprint
from ooniapi.prio import prio_bp
HERE = os.path.abspath(os.path.dirname(__file__))
#def render_problem_exception(exception):
# response = exception.to_problem()
# return FlaskApi.get_response(response)
# def render_generic_exception(exception):
# if not isinstance(exception, werkzeug.exceptions.HTTPException):
# exc_name = "{}.{}".format(type(exception).__module__, type(exception).__name__)
# exc_desc = str(exception)
# if hasattr(exception, "__traceback__"):
# current_app.logger.error(
# "".join(traceback.format_tb(exception.__traceback__))
# )
# current_app.logger.error(
# "Unhandled error occurred, {}: {}".format(exc_name, exc_desc)
# )
# exception = werkzeug.exceptions.InternalServerError(
# description="An unhandled application error occurred: {}".format(exc_name)
# )
#
# response = problem(
# title=exception.name, detail=exception.description, status=exception.code
# )
# return FlaskApi.get_response(response)
def render_generic_exception(exception):
"""Log a traceback and return code 500 with a simple JSON
The CORS header is set as usual. Without this, an error could lead to browsers
caching a response without the correct CORS header.
"""
# TODO: render_template 500.html instead?
current_app.logger.error(f"Exception: {exception}")
current_app.logger.error(
"".join(traceback.format_tb(exception.__traceback__))
)
try:
return make_response(jsonify(error=str(exception)), 500)
    except Exception:
return make_response("unhandled error", 500)
def page_not_found(e):
return render_template("404.html"), 404
def bad_request(e):
return render_template("400.html", exception=e), 400
def register(app):
#app.register_blueprint(api_docs_blueprint, url_prefix="/api")
# Measurements API:
app.register_blueprint(api_msm_blueprint, url_prefix="/api")
#app.register_blueprint(connexion_api.blueprint)
app.register_blueprint(auth_blueprint, url_prefix="")
app.register_blueprint(cz_blueprint, url_prefix="")
# Private API
app.register_blueprint(api_private_blueprint, url_prefix="/api/_")
# The index is here:
app.register_blueprint(pages_blueprint, url_prefix="")
# Probe services
app.register_blueprint(probe_services_blueprint, url_prefix="")
app.register_blueprint(prio_bp, url_prefix="")
if "PYTEST_CURRENT_TEST" not in os.environ:
app.register_error_handler(Exception, render_generic_exception)
app.errorhandler(404)(page_not_found)
app.errorhandler(400)(bad_request)
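register() above wires the generic exception handler onto the app outside of pytest runs. A minimal, self-contained sketch of the same handler-registration pattern on a bare Flask app (no ooniapi blueprints; the /boom endpoint is made up, and Flask is assumed to be installed):
from flask import Flask, jsonify, make_response

app = Flask(__name__)


def render_generic_exception(exception):
    # Return JSON instead of Flask's default HTML 500 page.
    return make_response(jsonify(error=str(exception)), 500)


@app.route("/boom")
def boom():
    raise RuntimeError("something went wrong")


app.register_error_handler(Exception, render_generic_exception)

if __name__ == "__main__":
    app.run()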
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
django-rgd-fmv/tests/test_fmv.py
|
import os
import pytest
from rgd.models.mixins import Status
from rgd_fmv.models import FMVMeta
NO_KWIVER = os.environ.get('NO_KWIVER', False)
@pytest.mark.django_db(transaction=True)
def test_populate_fmv_entry_from_klv_file(fmv_klv_file):
    # Since we provide the KLV file and the factory provides a dummy MP4 file,
# the ETL routine will skip over those parts and just generate the FMVMeta
fmv_entry = FMVMeta.objects.filter(fmv_file=fmv_klv_file).first()
assert fmv_entry.ground_frames is not None
@pytest.mark.skipif(NO_KWIVER, reason='User set NO_KWIVER')
@pytest.mark.django_db(transaction=True)
def test_full_fmv_etl(fmv_video_file):
assert fmv_video_file.status == Status.SUCCEEDED, fmv_video_file.failure_reason
fmv_entry = FMVMeta.objects.get(fmv_file=fmv_video_file)
assert fmv_entry.ground_frames is not None
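Note that os.environ.get returns a string whenever the variable is set, so any non-empty value (even "0") makes the NO_KWIVER skip condition truthy. A quick standalone illustration of that behavior:
import os

os.environ["NO_KWIVER"] = "0"
flag = os.environ.get("NO_KWIVER", False)
print(repr(flag), bool(flag))  # '0' True -> the kwiver test would be skipped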
|
[] |
[] |
[
"NO_KWIVER"
] |
[]
|
["NO_KWIVER"]
|
python
| 1 | 0 | |
pkg/core/agent/agent.go
|
package agent
import (
"errors"
"fmt"
"github.com/cloud-barista/cb-dragonfly/pkg/config"
"io/ioutil"
"net/http"
"os"
"strings"
"time"
"github.com/bramvdbogaerde/go-scp"
sshrun "github.com/cloud-barista/cb-spider/cloud-control-manager/vm-ssh"
"github.com/google/uuid"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
"github.com/cloud-barista/cb-dragonfly/pkg/util"
)
const (
UBUNTU = "UBUNTU"
CENTOS = "CENTOS"
)
func InstallTelegraf(nsId string, mcisId string, vmId string, publicIp string, userName string, sshKey string) (int, error) {
sshInfo := sshrun.SSHInfo{
ServerPort: publicIp + ":22",
UserName: userName,
PrivateKey: []byte(sshKey),
}
	// Create the {user account}/cb-dragonfly folder
createFolderCmd := fmt.Sprintf("mkdir $HOME/cb-dragonfly")
if _, err := sshrun.SSHRun(sshInfo, createFolderCmd); err != nil {
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to make directory cb-dragonfly, error=%s", err))
}
	// Check the Linux OS environment
osType, err := sshrun.SSHRun(sshInfo, "hostnamectl | grep 'Operating System' | awk '{print $3}' | tr 'a-z' 'A-Z'")
if err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to check linux OS environments, error=%s", err))
}
rootPath := os.Getenv("CBMON_ROOT")
var sourceFile, targetFile, installCmd string
if strings.Contains(osType, CENTOS) {
sourceFile = rootPath + "/file/pkg/centos/x64/telegraf-1.12.0~f09f2b5-0.x86_64.rpm"
targetFile = fmt.Sprintf("$HOME/cb-dragonfly/cb-agent.rpm")
installCmd = fmt.Sprintf("sudo rpm -ivh $HOME/cb-dragonfly/cb-agent.rpm")
} else if strings.Contains(osType, UBUNTU) {
sourceFile = rootPath + "/file/pkg/ubuntu/x64/telegraf_1.12.0~f09f2b5-0_amd64.deb"
targetFile = fmt.Sprintf("$HOME/cb-dragonfly/cb-agent.deb")
installCmd = fmt.Sprintf("sudo dpkg -i $HOME/cb-dragonfly/cb-agent.deb")
}
	// Download the agent installation package
if err := sshCopyWithTimeout(sshInfo, sourceFile, targetFile); err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to download agent package, error=%s", err))
}
	// Run the package installation
if _, err := sshrun.SSHRun(sshInfo, installCmd); err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to install agent package, error=%s", err))
}
sshrun.SSHRun(sshInfo, "sudo rm /etc/telegraf/telegraf.conf")
	// Copy the telegraf_conf file
telegrafConfSourceFile, err := createTelegrafConfigFile(nsId, mcisId, vmId)
telegrafConfTargetFile := "$HOME/cb-dragonfly/telegraf.conf"
if err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to create telegraf.conf, error=%s", err))
}
if err := sshrun.SSHCopy(sshInfo, telegrafConfSourceFile, telegrafConfTargetFile); err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to copy telegraf.conf, error=%s", err))
}
if _, err := sshrun.SSHRun(sshInfo, "sudo mv $HOME/cb-dragonfly/telegraf.conf /etc/telegraf/"); err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to move telegraf.conf, error=%s", err))
}
	// Enable and start the common service
if _, err := sshrun.SSHRun(sshInfo, "sudo systemctl enable telegraf && sudo systemctl restart telegraf"); err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to enable and start telegraf service, error=%s", err))
}
	// Delete the per-UUID telegraf conf file
err = os.Remove(telegrafConfSourceFile)
if err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to remove temporary telegraf.conf file, error=%s", err))
}
	// Remove the whole folder of files used for the agent installation
removeRpmCmd := fmt.Sprintf("sudo rm -rf $HOME/cb-dragonfly")
if _, err := sshrun.SSHRun(sshInfo, removeRpmCmd); err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to remove cb-dragonfly directory, error=%s", err))
}
	// Verify the installation completed successfully
checkCmd := "telegraf --version"
if result, err := util.RunCommand(publicIp, userName, sshKey, checkCmd); err != nil {
cleanTelegrafInstall(sshInfo, osType)
return http.StatusInternalServerError, errors.New(fmt.Sprintf("failed to run telegraf command, error=%s", err))
} else {
if strings.Contains(*result, "command not found") {
cleanTelegrafInstall(sshInfo, osType)
			return http.StatusInternalServerError, errors.New("failed to run telegraf command: command not found")
}
}
return http.StatusOK, nil
}
func cleanTelegrafInstall(sshInfo sshrun.SSHInfo, osType string) {
// Uninstall Telegraf
var uninstallCmd string
if strings.Contains(osType, CENTOS) {
uninstallCmd = fmt.Sprintf("sudo rpm -e telegraf")
} else if strings.Contains(osType, UBUNTU) {
uninstallCmd = fmt.Sprintf("sudo dpkg -r telegraf")
}
sshrun.SSHRun(sshInfo, uninstallCmd)
// Delete Install Files
removeRpmCmd := fmt.Sprintf("sudo rm -rf $HOME/cb-dragonfly")
sshrun.SSHRun(sshInfo, removeRpmCmd)
removeDirCmd := fmt.Sprintf("sudo rm -rf /etc/telegraf/cb-dragonfly")
sshrun.SSHRun(sshInfo, removeDirCmd)
}
func createTelegrafConfigFile(nsId string, mcisId string, vmId string) (string, error) {
collectorServer := fmt.Sprintf("udp://%s:%d", config.GetInstance().CollectManager.CollectorIP, config.GetInstance().CollectManager.CollectorPort)
influxDBServer := fmt.Sprintf("http://%s:8086", config.GetInstance().CollectManager.CollectorIP)
rootPath := os.Getenv("CBMON_ROOT")
filePath := rootPath + "/file/conf/telegraf.conf"
read, err := ioutil.ReadFile(filePath)
if err != nil {
		// Log the error information
logrus.Error("failed to read telegraf.conf file.")
return "", err
}
	// Set the variable values inside the file (hostId, collectorServer)
strConf := string(read)
strConf = strings.ReplaceAll(strConf, "{{ns_id}}", nsId)
strConf = strings.ReplaceAll(strConf, "{{mcis_id}}", mcisId)
strConf = strings.ReplaceAll(strConf, "{{vm_id}}", vmId)
strConf = strings.ReplaceAll(strConf, "{{collector_server}}", collectorServer)
strConf = strings.ReplaceAll(strConf, "{{influxdb_server}}", influxDBServer)
	// Create the telegraf.conf file
telegrafFilePath := rootPath + "/file/conf/"
createFileName := "telegraf-" + uuid.New().String() + ".conf"
telegrafConfFile := telegrafFilePath + createFileName
	err = ioutil.WriteFile(telegrafConfFile, []byte(strConf), os.FileMode(0777))
if err != nil {
logrus.Error("failed to create telegraf.conf file.")
return "", err
}
return telegrafConfFile, err
}
func sshCopyWithTimeout(sshInfo sshrun.SSHInfo, sourceFile string, targetFile string) error {
signer, err := ssh.ParsePrivateKey(sshInfo.PrivateKey)
if err != nil {
return err
}
clientConfig := ssh.ClientConfig{
User: sshInfo.UserName,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
client := scp.NewClientWithTimeout(sshInfo.ServerPort, &clientConfig, 600*time.Second)
	err = client.Connect()
	if err != nil {
		return err
	}
	defer client.Close()
	file, err := os.Open(sourceFile)
	if err != nil {
		return err
	}
	defer file.Close()
return client.CopyFile(file, targetFile, "0755")
}
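createTelegrafConfigFile above fills a template by plain string replacement of {{...}} placeholders. A minimal, standalone sketch of just that substitution step (the sample template and values are made up for illustration, not taken from the real telegraf.conf):
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A tiny stand-in for the telegraf.conf template that would be read from disk.
	template := "[global_tags]\n  nsId = \"{{ns_id}}\"\n  vmId = \"{{vm_id}}\"\n[[outputs.influxdb]]\n  urls = [\"{{influxdb_server}}\"]\n"

	conf := template
	conf = strings.ReplaceAll(conf, "{{ns_id}}", "ns01")
	conf = strings.ReplaceAll(conf, "{{vm_id}}", "vm01")
	conf = strings.ReplaceAll(conf, "{{influxdb_server}}", "http://127.0.0.1:8086")

	fmt.Print(conf)
}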
|
[
"\"CBMON_ROOT\"",
"\"CBMON_ROOT\""
] |
[] |
[
"CBMON_ROOT"
] |
[]
|
["CBMON_ROOT"]
|
go
| 1 | 0 | |
dataplicity/tags.py
|
from __future__ import unicode_literals
import os
import subprocess
import re
import logging
log = logging.getLogger("agent")
class TagError(Exception):
"""Custom exception raised when get_tag_list has an exception"""
def get_tag_list():
"""Run the dataplicity.tags script, get output as a list of tags"""
home_dir = os.environ.get("HOME", "/home/dataplicity/")
tag_executable = os.path.join(home_dir, "dataplicity_tags")
# Early out if the script isn't there.
if not os.path.exists(tag_executable):
log.debug("tag executable %s does not exist", tag_executable)
return []
log.debug("reading tags from %s", tag_executable)
try:
output = subprocess.check_output(tag_executable)
except OSError as error:
log.debug("failed to run %s; %s", tag_executable, error)
return []
except Exception as error:
log.error("error running %s; %s", tag_executable, error)
raise TagError("error running %s" % tag_executable)
str_output = output.decode("utf-8", errors="ignore")
# regex split on comma, spaces, newline and tabs
tag_list = re.split(r"[,\s\n\t]", str_output)
tags = [tag.strip()[:25] for tag in tag_list if tag]
return tags
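The parsing step above splits on commas and any whitespace, drops empty entries, and truncates each tag to 25 characters. A standalone illustration of just that step, on a made-up output string:
import re

raw_output = "alpha, beta\tgamma\nA-very-long-tag-name-that-keeps-going"
tag_list = re.split(r"[,\s\n\t]", raw_output)
tags = [tag.strip()[:25] for tag in tag_list if tag]
print(tags)  # ['alpha', 'beta', 'gamma', 'A-very-long-tag-name-that']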
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
config.py
|
from pathlib import Path
import os
import socket
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
# HOST_NAME = socket.getfqdn(socket.gethostname())
# HOST_IP = socket.gethostbyname(HOST_NAME)
# s3 setting
AWS_REGION_NAME = 'us-east-1'
S3_BUCKET_NAME = 'default'
S3_PREFIX = 'dfs'
# DIR
DATA_ROOT = None
MODEL_ROOT = None
# Debug setting
DEBUG = True
# train monitor
ML_URI = None
ML_ARTIFACTS = None
# proj config
PROJ_TITLE = os.getenv(
'PROJ_TITLE',
'MEL_DATATHRON'
)
PROJ_DESC = os.getenv(
'PROJ_DESC',
'Melbourne Datathon: http://www.datasciencemelbourne.com/datathon/'
)
PROJ_VER = os.getenv(
'PROJ_VER',
'0.0.1'
)
# DIR config
STATIC_DIR = os.getenv(
'PROJ_VER',
f'{basedir}/data/interim/sugar_files_FLI/'
)
BASE_URL = os.getenv(
'BASE_URL',
'/api/v1'
)
STATIC_URL = f'{BASE_URL}/static'
# MODEL SETTING
ML_PARMS = dict(
bs=512,
)
@staticmethod
def init_app(app):
pass
class OnPremiseWorker(Config):
    DATA_ROOT = Path('/data/ecg/data')
    MODEL_ROOT = Path('/data/ecg/models')
ML_URI = 'mysql://healsci:HealsciAWS1@pathology.cc2wi7jayqzb.rds.cn-north-1.amazonaws.com.cn:3306/anmlflow'
DEBUG = False
ML_ARTIFACTS = 's3://hs-ai/an-mlflow/ecg'
class DebugWorker(Config):
DATA_ROOT = Path(f'{basedir}/data')
MODEL_ROOT = Path(f'{basedir}/models')
ML_URI = 'mysql://root:[email protected]:3306/mlflow-db-v1'
ML_ARTIFACTS = 's3://hs-ai/an-mlflow/ecg'
config_cls = {
'default': DebugWorker,
'prem': OnPremiseWorker,
'debug': DebugWorker,
}
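config_cls above maps short names to config classes. A minimal sketch of picking one at startup from an environment variable (the variable name APP_CONFIG is hypothetical and not defined in this file; the module is assumed to be importable as config):
import os

from config import config_cls

cfg = config_cls[os.getenv("APP_CONFIG", "default")]
print(cfg.__name__, cfg.DATA_ROOT)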
|
[] |
[] |
[
"BASE_URL",
"PROJ_DESC",
"PROJ_VER",
"PROJ_TITLE"
] |
[]
|
["BASE_URL", "PROJ_DESC", "PROJ_VER", "PROJ_TITLE"]
|
python
| 4 | 0 | |
tests/framework/installer/ceph_settings.go
|
/*
Copyright 2021 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package installer
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"path"
"strings"
"time"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
"github.com/rook/rook/tests/framework/utils"
)
// TestCephSettings struct for handling panic and test suite tear down
type TestCephSettings struct {
DataDirHostPath string
ClusterName string
Namespace string
OperatorNamespace string
StorageClassName string
UseHelm bool
UsePVC bool
Mons int
UseCrashPruner bool
MultipleMgrs bool
SkipOSDCreation bool
UseCSI bool
EnableDiscovery bool
EnableAdmissionController bool
IsExternal bool
SkipClusterCleanup bool
SkipCleanupPolicy bool
DirectMountToolbox bool
EnableVolumeReplication bool
RookVersion string
CephVersion cephv1.CephVersionSpec
}
func (s *TestCephSettings) ApplyEnvVars() {
// skip the cleanup by default
s.SkipClusterCleanup = true
if os.Getenv("SKIP_TEST_CLEANUP") == "false" {
s.SkipClusterCleanup = false
}
s.SkipCleanupPolicy = true
if os.Getenv("SKIP_CLEANUP_POLICY") == "false" {
s.SkipCleanupPolicy = false
}
}
func (s *TestCephSettings) readManifest(filename string) string {
rootDir, err := utils.FindRookRoot()
if err != nil {
panic(err)
}
manifest := path.Join(rootDir, "cluster/examples/kubernetes/ceph", filename)
logger.Infof("Reading manifest: %s", manifest)
contents, err := ioutil.ReadFile(manifest)
if err != nil {
panic(errors.Wrapf(err, "failed to read manifest at %s", manifest))
}
return replaceNamespaces(manifest, string(contents), s.OperatorNamespace, s.Namespace)
}
func (s *TestCephSettings) readManifestFromGithub(filename string) string {
return s.readManifestFromGithubWithClusterNamespace(filename, s.Namespace)
}
func (s *TestCephSettings) readManifestFromGithubWithClusterNamespace(filename, clusterNamespace string) string {
url := fmt.Sprintf("https://raw.githubusercontent.com/rook/rook/%s/cluster/examples/kubernetes/ceph/%s", s.RookVersion, filename)
logger.Infof("Retrieving manifest: %s", url)
var response *http.Response
var err error
for i := 1; i <= 3; i++ {
// #nosec G107 This is only test code and is expected to read from a url
response, err = http.Get(url)
if err != nil {
if i == 3 {
panic(errors.Wrapf(err, "failed to read manifest from %s", url))
}
logger.Warningf("failed to read manifest from %s. retrying in 1sec. %v", url, err)
time.Sleep(time.Second)
continue
}
break
}
defer response.Body.Close()
content, err := ioutil.ReadAll(response.Body)
if err != nil {
panic(errors.Wrapf(err, "failed to read content from %s", url))
}
return replaceNamespaces(url, string(content), s.OperatorNamespace, clusterNamespace)
}
func (s *TestCephSettings) replaceOperatorSettings(manifest string) string {
manifest = strings.ReplaceAll(manifest, `# CSI_LOG_LEVEL: "0"`, `CSI_LOG_LEVEL: "5"`)
manifest = strings.ReplaceAll(manifest, `ROOK_ENABLE_DISCOVERY_DAEMON: "false"`, fmt.Sprintf(`ROOK_ENABLE_DISCOVERY_DAEMON: "%t"`, s.EnableDiscovery))
manifest = strings.ReplaceAll(manifest, `ROOK_ENABLE_FLEX_DRIVER: "false"`, fmt.Sprintf(`ROOK_ENABLE_FLEX_DRIVER: "%t"`, !s.UseCSI))
manifest = strings.ReplaceAll(manifest, `ROOK_CSI_ENABLE_CEPHFS: "true"`, fmt.Sprintf(`ROOK_CSI_ENABLE_CEPHFS: "%t"`, s.UseCSI))
manifest = strings.ReplaceAll(manifest, `ROOK_CSI_ENABLE_RBD: "true"`, fmt.Sprintf(`ROOK_CSI_ENABLE_RBD: "%t"`, s.UseCSI))
manifest = strings.ReplaceAll(manifest, `CSI_ENABLE_VOLUME_REPLICATION: "false"`, fmt.Sprintf(`CSI_ENABLE_VOLUME_REPLICATION: "%t"`, s.EnableVolumeReplication))
return manifest
}
func replaceNamespaces(name, manifest, operatorNamespace, clusterNamespace string) string {
// RBAC and related namespaces
manifest = strings.ReplaceAll(manifest, "rook-ceph # namespace:operator", operatorNamespace)
manifest = strings.ReplaceAll(manifest, "rook-ceph # namespace:cluster", clusterNamespace)
manifest = strings.ReplaceAll(manifest, "rook-ceph-external # namespace:cluster", clusterNamespace)
// Double space only needed for v1.5 upgrade test
manifest = strings.ReplaceAll(manifest, "rook-ceph # namespace:operator", operatorNamespace)
// SCC namespaces for operator and Ceph daemons
manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-ceph-system # serviceaccount:namespace:operator", operatorNamespace+":rook-ceph-system")
manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-ceph-mgr # serviceaccount:namespace:cluster", clusterNamespace+":rook-ceph-mgr")
manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-ceph-osd # serviceaccount:namespace:cluster", clusterNamespace+":rook-ceph-osd")
// SCC namespaces for CSI driver
manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-rbd-plugin-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-rbd-plugin-sa")
manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-rbd-provisioner-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-rbd-provisioner-sa")
manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-cephfs-plugin-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-cephfs-plugin-sa")
manifest = strings.ReplaceAll(manifest, "rook-ceph:rook-csi-cephfs-provisioner-sa # serviceaccount:namespace:operator", operatorNamespace+":rook-csi-cephfs-provisioner-sa")
// CSI Drivers
manifest = strings.ReplaceAll(manifest, "rook-ceph.cephfs.csi.ceph.com # driver:namespace:operator", operatorNamespace+".cephfs.csi.ceph.com")
manifest = strings.ReplaceAll(manifest, "rook-ceph.rbd.csi.ceph.com # driver:namespace:operator", operatorNamespace+".rbd.csi.ceph.com")
// Bucket storage class
manifest = strings.ReplaceAll(manifest, "rook-ceph.ceph.rook.io/bucket # driver:namespace:cluster", clusterNamespace+".ceph.rook.io/bucket")
if strings.Contains(manifest, "namespace:operator") || strings.Contains(manifest, "namespace:cluster") || strings.Contains(manifest, "driver:namespace:") || strings.Contains(manifest, "serviceaccount:namespace:") {
logger.Infof("BAD MANIFEST:\n%s", manifest)
panic(fmt.Sprintf("manifest %s still contains a namespace identifier", name))
}
return manifest
}
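ApplyEnvVars above keeps cleanup enabled by default and only skips it when the variables are set to the exact string "false". A standalone sketch of the same opt-out pattern (the helper name is made up; only the env var names are reused from this file):
package main

import (
	"fmt"
	"os"
)

// skipUnlessExplicitlyDisabled defaults to true and only returns false
// when the variable is set to the literal string "false".
func skipUnlessExplicitlyDisabled(envVar string) bool {
	return os.Getenv(envVar) != "false"
}

func main() {
	os.Setenv("SKIP_TEST_CLEANUP", "false")
	fmt.Println(skipUnlessExplicitlyDisabled("SKIP_TEST_CLEANUP"))   // false
	fmt.Println(skipUnlessExplicitlyDisabled("SKIP_CLEANUP_POLICY")) // true (unset)
}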
|
[
"\"SKIP_TEST_CLEANUP\"",
"\"SKIP_CLEANUP_POLICY\""
] |
[] |
[
"SKIP_TEST_CLEANUP",
"SKIP_CLEANUP_POLICY"
] |
[]
|
["SKIP_TEST_CLEANUP", "SKIP_CLEANUP_POLICY"]
|
go
| 2 | 0 | |
ebcli/core/fileoperations.py
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import codecs
import glob
import json
import os
import shutil
import stat
import sys
import zipfile
import yaml
from cement.utils.misc import minimal_logger
from ebcli.objects.buildconfiguration import BuildConfiguration
from six import StringIO
from yaml import load, safe_dump
from yaml.parser import ParserError
from yaml.scanner import ScannerError
try:
import configparser
except ImportError:
import ConfigParser as configparser
from ebcli.core import io
from ebcli.resources.strings import prompts, strings
from ebcli.objects.exceptions import NotInitializedError, InvalidSyntaxError, \
NotFoundError, ValidationError
from ebcli.core.ebglobals import Constants
LOG = minimal_logger(__name__)
def get_aws_home():
sep = os.path.sep
p = '~' + sep + '.aws' + sep
return os.path.expanduser(p)
def get_ssh_folder():
sep = os.path.sep
p = '~' + sep + '.ssh' + sep
p = os.path.expanduser(p)
if not os.path.exists(p):
os.makedirs(p)
return p
beanstalk_directory = '.elasticbeanstalk' + os.path.sep
# TODO: Need to support yaml and yml
buildspec_name = "buildspec.yml"
buildspec_config_header = 'eb_codebuild_settings'
global_config_file = beanstalk_directory + 'config.global.yml'
local_config_file = beanstalk_directory + 'config.yml'
aws_config_folder = get_aws_home()
aws_config_location = aws_config_folder + 'config'
aws_credentials_location = aws_config_folder + 'credentials'
aws_access_key = 'aws_access_key_id'
aws_secret_key = 'aws_secret_access_key'
region_key = 'region'
default_section = 'default'
ebcli_section = 'profile eb-cli'
app_version_folder = beanstalk_directory + 'app_versions'
logs_folder = beanstalk_directory + 'logs' + os.path.sep
env_yaml = 'env.yaml'
_marker = object()
def _get_option(config, section, key, default):
try:
return config.get(section, key)
except (configparser.NoSectionError, configparser.NoOptionError):
return default
def is_git_directory_present():
return os.path.isdir('.git')
def clean_up():
# remove dir
cwd = os.getcwd()
try:
_traverse_to_project_root()
if os.path.isdir(beanstalk_directory):
shutil.rmtree(beanstalk_directory)
finally:
os.chdir(cwd)
def _set_not_none(config, section, option, value):
if value:
config.set(section, option, value)
def get_war_file_location():
cwd = os.getcwd()
try:
_traverse_to_project_root()
lst = glob.glob('build/libs/*.war')
try:
return os.path.join(os.getcwd(), lst[0])
except IndexError:
raise NotFoundError('Can not find .war artifact in build' +
os.path.sep + 'libs' + os.path.sep)
finally:
os.chdir(cwd)
def old_eb_config_present():
return os.path.isfile(beanstalk_directory + 'config')
def config_file_present():
return os.path.isfile(local_config_file)
def project_file_path(filename):
return os.path.join(get_project_root(), filename)
def project_file_exists(filename):
return file_exists(project_file_path(filename))
def get_values_from_old_eb():
old_config_file = beanstalk_directory + 'config'
config = configparser.ConfigParser()
config.read(old_config_file)
app_name = _get_option(config, 'global', 'ApplicationName', None)
cred_file = _get_option(config, 'global', 'AwsCredentialFile', None)
default_env = _get_option(config, 'global', 'EnvironmentName', None)
solution_stack_name = _get_option(config, 'global', 'SolutionStack', None)
region = _get_option(config, 'global', 'Region', None)
access_id, secret_key = read_old_credentials(cred_file)
return {'app_name': app_name,
'access_id': access_id,
'secret_key': secret_key,
'default_env': default_env,
'platform': solution_stack_name,
'region': region,
}
def read_old_credentials(file_location):
if file_location is None:
return None, None
config_str = '[default]\n' + open(file_location, 'r').read()
config_fp = StringIO(config_str)
config = configparser.ConfigParser()
config.readfp(config_fp)
access_id = _get_option(config, 'default', 'AWSAccessKeyId', None)
secret_key = _get_option(config, 'default', 'AWSSecretKey', None)
return access_id, secret_key
def save_to_aws_config(access_key, secret_key):
config = configparser.ConfigParser()
if not os.path.isdir(aws_config_folder):
os.makedirs(aws_config_folder)
config.read(aws_config_location)
if ebcli_section not in config.sections():
config.add_section(ebcli_section)
_set_not_none(config, ebcli_section, aws_access_key, access_key)
_set_not_none(config, ebcli_section, aws_secret_key, secret_key)
with open(aws_config_location, 'w') as f:
config.write(f)
set_user_only_permissions(aws_config_location)
def set_user_only_permissions(location):
"""
    Sets permissions so that only the owning user can read and write
    (chmod 600; the executable bit is added for directories).
    Can be a folder or a file.
    :param location: Full path to either a folder or a file
"""
if os.path.isdir(location):
for root, dirs, files in os.walk(location):
            for d in dirs:
                _set_user_only_permissions_file(os.path.join(root, d), ex=True)
            for f in files:
                _set_user_only_permissions_file(os.path.join(root, f))
else:
_set_user_only_permissions_file(location)
def _set_user_only_permissions_file(location, ex=False):
"""
:param ex: Boolean: add executable permission
"""
permission = stat.S_IRUSR | stat.S_IWUSR
if ex:
permission |= stat.S_IXUSR
os.chmod(location, permission)
def set_all_unrestricted_permissions(location):
"""
Set permissions so that user, group, and others all have read,
write and execute permissions (chmod 777).
    :param location: Full path to either a folder or a file
"""
os.chmod(location, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
def get_current_directory_name():
dirname, filename = os.path.split(os.getcwd())
from ebcli.lib.utils import decode_bytes
filename = decode_bytes(filename)
return filename
def get_platform_version(default=_marker):
try:
return get_global_value('platform_version')
except NotInitializedError:
return None
def get_instance_profile(default=None):
try:
return get_global_value('instance_profile', default)
except NotInitializedError:
return default
def get_application_name(default=_marker):
return get_global_value('application_name')
def get_platform_name(default=_marker):
return get_global_value('platform_name')
def get_workspace_type(default=_marker):
try:
return get_global_value('workspace_type', default)
except NotInitializedError:
if default == _marker:
raise NotInitializedError
return default
def get_global_value(key, default=_marker):
result = get_config_setting('global', key)
if result is not None:
return result
# get_config_setting should throw error if directory is not set up
LOG.debug('Directory found, but no config or app name exists')
if default is _marker:
raise NotInitializedError
return default
def touch_config_folder(dir_path=None):
if not os.path.isdir(os.path.join(dir_path, beanstalk_directory)
if dir_path
else beanstalk_directory):
os.makedirs(os.path.join(dir_path, beanstalk_directory)
if dir_path
else beanstalk_directory)
def create_config_file(
app_name,
region,
solution_stack,
workspace_type=Constants.WorkSpaceTypes.APPLICATION,
platform_name=None,
platform_version=None,
instance_profile=None,
dir_path=None,
repository=None,
branch=None):
"""
We want to make sure we do not override the file if it already exists,
but we do want to fill in all missing pieces
:param app_name: name of the application
:return: VOID: no return value
"""
LOG.debug('Creating config file at ' + os.getcwd())
if not os.path.isdir(os.path.join(dir_path, beanstalk_directory)
if dir_path
else beanstalk_directory):
os.makedirs(os.path.join(dir_path, beanstalk_directory)
if dir_path
else beanstalk_directory)
# add to global without writing over any settings if they exist
write_config_setting('global', 'application_name', app_name, dir_path=dir_path)
write_config_setting('global', 'default_region', region, dir_path=dir_path)
write_config_setting('global', 'default_platform', solution_stack, dir_path=dir_path)
write_config_setting('global', 'workspace_type', workspace_type, dir_path=dir_path)
write_config_setting('global', 'platform_name', platform_name, dir_path=dir_path)
write_config_setting('global', 'platform_version', platform_version, dir_path=dir_path)
write_config_setting('global', 'instance_profile', instance_profile, dir_path=dir_path)
from ebcli.operations import gitops
gitops.set_repo_default_for_current_environment(repository)
gitops.set_branch_default_for_current_environment(branch)
def _traverse_to_project_root():
cwd = os.getcwd()
if not os.path.isdir(beanstalk_directory):
LOG.debug('beanstalk directory not found in ' + cwd +
' -Going up a level')
os.chdir(os.path.pardir) # Go up one directory
if cwd == os.getcwd(): # We can't move any further
LOG.debug('Still at the same directory ' + cwd)
raise NotInitializedError('EB is not yet initialized')
_traverse_to_project_root()
else:
LOG.debug('Project root found at: ' + cwd)
def get_project_root():
cwd = os.getcwd()
try:
_traverse_to_project_root()
return os.getcwd()
finally:
os.chdir(cwd)
def get_zip_location(file_name):
cwd = os.getcwd()
try:
_traverse_to_project_root()
if not os.path.isdir(app_version_folder):
# create it
os.makedirs(app_version_folder)
return os.path.abspath(app_version_folder) + os.path.sep + file_name
finally:
os.chdir(cwd)
def get_logs_location(folder_name):
cwd = os.getcwd()
try:
_traverse_to_project_root()
if not os.path.isdir(logs_folder):
# create it
os.makedirs(logs_folder)
return os.path.abspath(os.path.join(logs_folder, folder_name))
finally:
os.chdir(cwd)
def program_is_installed(program):
return False if os_which(program) is None else True
def os_which(program):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, program)
if sys.platform.startswith('win'):
# Add .exe for windows
p += '.exe'
if os.path.exists(p) and os.access(p, os.X_OK):
return p
def delete_file(location):
if os.path.exists(location):
os.remove(location)
def delete_directory(location):
if os.path.isdir(location):
shutil.rmtree(location)
def delete_app_versions():
cwd = os.getcwd()
try:
_traverse_to_project_root()
delete_directory(app_version_folder)
finally:
os.chdir(cwd)
def zip_up_folder(directory, location, ignore_list=None):
cwd = os.getcwd()
try:
os.chdir(directory)
io.log_info('Zipping up folder at location: ' + str(os.getcwd()))
zipf = zipfile.ZipFile(location, 'w', zipfile.ZIP_DEFLATED)
_zipdir('./', zipf, ignore_list=ignore_list)
zipf.close()
LOG.debug('File size: ' + str(os.path.getsize(location)))
finally:
os.chdir(cwd)
def zip_up_project(location, ignore_list=None):
cwd = os.getcwd()
try:
_traverse_to_project_root()
zip_up_folder('./', location, ignore_list=ignore_list)
finally:
os.chdir(cwd)
def _zipdir(path, zipf, ignore_list=None):
if ignore_list is None:
ignore_list = ['.gitignore']
ignore_list = ['./' + i for i in ignore_list]
zipped_roots = []
for root, dirs, files in os.walk(path):
if '.elasticbeanstalk' in root:
io.log_info(' -skipping: {}'.format(root))
continue
for d in dirs:
cur_dir = os.path.join(root, d)
if os.path.islink(cur_dir):
zipInfo = zipfile.ZipInfo()
zipInfo.filename = os.path.join(root, d)
# 2716663808L is the "magic code" for symlinks
# Python 3 merged "int" and "long" into int, so we must check the version
# to determine what type to use
if sys.version_info > (3,):
zipInfo.external_attr = 2716663808
else:
zipInfo.external_attr = long(2716663808)
zipf.writestr(zipInfo, os.readlink(cur_dir))
for f in files:
cur_file = os.path.join(root, f)
if cur_file.endswith('~') or cur_file in ignore_list:
# Ignore editor backup files (like file.txt~)
# Ignore anything in the .ebignore file
io.log_info(' -skipping: {}'.format(cur_file))
else:
if root not in zipped_roots:
# Windows requires us to index the folders.
io.log_info(' +adding: {}/'.format(root))
zipf.write(root)
zipped_roots.append(root)
io.log_info(' +adding: {}'.format(cur_file))
if os.path.islink(cur_file):
zipInfo = zipfile.ZipInfo()
zipInfo.filename = os.path.join(root, f)
# 2716663808L is the "magic code" for symlinks
# Python 3 merged "int" and "long" into int, so we must check the
# version to determine what type to use
if sys.version_info > (3,):
zipInfo.external_attr = 2716663808
else:
zipInfo.external_attr = long(2716663808)
zipf.writestr(zipInfo, os.readlink(cur_file))
else:
zipf.write(cur_file)
def unzip_folder(file_location, directory):
if not os.path.isdir(directory):
os.makedirs(directory)
zip = zipfile.ZipFile(file_location, 'r')
for cur_file in zip.namelist():
if not cur_file.endswith('/'):
root, name = os.path.split(cur_file)
path = os.path.normpath(os.path.join(directory, root))
if not os.path.isdir(path):
os.makedirs(path)
open(os.path.join(path, name), 'wb').write(zip.read(cur_file))
def save_to_file(data, location, filename):
if not os.path.isdir(location):
os.makedirs(location)
file_location = os.path.join(location, filename)
data_file = open(file_location, 'wb')
data_file.write(data)
data_file.close()
return file_location
def delete_app_file(app_name):
cwd = os.getcwd()
file_name = beanstalk_directory + app_name
try:
_traverse_to_project_root()
for file_ext in ['.app.yml']:
path = file_name + file_ext
delete_file(path)
finally:
os.chdir(cwd)
def delete_env_file(env_name):
cwd = os.getcwd()
file_name = beanstalk_directory + env_name
try:
_traverse_to_project_root()
for file_ext in ['.ebe.yml', '.env.yml']:
path = file_name + file_ext
delete_file(path)
finally:
os.chdir(cwd)
def get_editor():
editor = get_config_setting('global', 'editor')
if not editor:
editor = os.getenv('EDITOR')
if not editor:
platform = sys.platform
windows = platform.startswith('win')
if windows:
editor = None
else:
editor = 'nano'
return editor
def save_app_file(app):
cwd = os.getcwd()
env_name = app['ApplicationName']
# ..yml extension helps editors enable syntax highlighting
file_name = env_name + '.app.yml'
file_name = beanstalk_directory + file_name
try:
_traverse_to_project_root()
file_name = os.path.abspath(file_name)
with codecs.open(file_name, 'w', encoding='utf8') as f:
f.write(safe_dump(app, default_flow_style=False,
line_break=os.linesep))
finally:
os.chdir(cwd)
return file_name
def save_env_file(env):
cwd = os.getcwd()
env_name = env['EnvironmentName']
# ..yml extension helps editors enable syntax highlighting
file_name = env_name + '.env.yml'
file_name = beanstalk_directory + file_name
try:
_traverse_to_project_root()
file_name = os.path.abspath(file_name)
with codecs.open(file_name, 'w', encoding='utf8') as f:
f.write(safe_dump(env, default_flow_style=False,
line_break=os.linesep))
finally:
os.chdir(cwd)
return file_name
def get_environment_from_file(env_name):
cwd = os.getcwd()
file_name = beanstalk_directory + env_name
try:
_traverse_to_project_root()
file_ext = '.env.yml'
path = file_name + file_ext
if os.path.exists(path):
with codecs.open(path, 'r', encoding='utf8') as f:
env = load(f)
except (ScannerError, ParserError):
raise InvalidSyntaxError('The environment file contains '
'invalid syntax.')
finally:
os.chdir(cwd)
return env
def get_application_from_file(app_name):
cwd = os.getcwd()
file_name = beanstalk_directory + app_name
try:
_traverse_to_project_root()
file_ext = '.app.yml'
path = file_name + file_ext
if os.path.exists(path):
with codecs.open(path, 'r', encoding='utf8') as f:
app = load(f)
except (ScannerError, ParserError):
raise InvalidSyntaxError('The application file contains '
'invalid syntax.')
finally:
os.chdir(cwd)
return app
def update_platform_version(version):
if version:
write_config_setting('global', 'platform_version', version)
def update_platform_name(platform_name):
if platform_name:
write_config_setting('global', 'platform_name', platform_name)
def write_keyname(keyname):
write_config_setting('global', 'default_ec2_keyname', keyname)
def get_keyname():
return get_config_setting('global', 'default_ec2_keyname', None)
def write_config_setting(section, key_name, value, dir_path=None, file=local_config_file):
cwd = os.getcwd() # save working directory
if dir_path:
os.chdir(dir_path)
try:
_traverse_to_project_root()
config = _get_yaml_dict(file)
if not config:
config = {}
# Value will be a dict when we are passing in branch config settings
if type(value) is dict:
for key in value.keys():
config.setdefault(section, {}).setdefault(key_name, {})[key] = value[key]
else:
if config.get(section) is None:
config[section] = {}
config.setdefault(section, {})[key_name] = value
with codecs.open(file, 'w', encoding='utf8') as f:
f.write(safe_dump(config, default_flow_style=False,
line_break=os.linesep))
finally:
os.chdir(cwd) # go back to working directory
def get_config_setting(section, key_name, default=_marker):
# get setting from global if it exists
cwd = os.getcwd() # save working directory
try:
_traverse_to_project_root()
config_global = _get_yaml_dict(global_config_file)
config_local = _get_yaml_dict(local_config_file)
# Grab value, local gets priority
try:
value = config_global[section][key_name]
except KeyError:
value = None
try:
if config_local:
value = config_local[section][key_name]
except KeyError:
pass # Revert to global value
if value is None and default != _marker:
return default
except NotInitializedError:
if default == _marker:
raise
else:
return default
finally:
os.chdir(cwd) # move back to working directory
return value
def get_json_dict(fullpath):
"""
Read json file at fullpath and deserialize as dict.
:param fullpath: str: path to the json file
:return: dict
"""
return json.loads(read_from_text_file(fullpath))
def write_json_dict(json_data, fullpath):
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
data = json.dumps(json_data, sort_keys=True, indent=4,
default=date_handler)
write_to_text_file(data, fullpath)
def _get_yaml_dict(filename):
try:
with codecs.open(filename, 'r', encoding='utf8') as f:
return load(f)
except IOError:
return {}
def file_exists(full_path):
return os.path.isfile(full_path)
def eb_file_exists(location):
cwd = os.getcwd()
try:
_traverse_to_project_root()
path = beanstalk_directory + location
return os.path.isfile(path)
finally:
os.chdir(cwd)
def build_spec_exists():
cwd = os.getcwd()
try:
_traverse_to_project_root()
return os.path.isfile(buildspec_name)
finally:
os.chdir(cwd)
def get_build_configuration():
# Values expected in the eb config section in BuildSpec
service_role_key = 'CodeBuildServiceRole'
image_key = 'Image'
compute_key = 'ComputeType'
timeout_key = 'Timeout'
# get setting from global if it exists
cwd = os.getcwd() # save working directory
try:
_traverse_to_project_root()
build_spec = _get_yaml_dict(buildspec_name)
# Assert that special beanstalk section exists
if build_spec is None or buildspec_config_header not in build_spec.keys():
LOG.debug("Buildspec Keys: {0}".format(build_spec.keys()))
io.log_warning(strings['codebuild.noheader'].replace('{header}', buildspec_config_header))
return None
build_configuration = BuildConfiguration()
beanstalk_build_configs = build_spec[buildspec_config_header]
if beanstalk_build_configs is None:
LOG.debug("No values for EB header in buildspec file")
return build_configuration
LOG.debug("EB Config Keys: {0}".format(beanstalk_build_configs.keys()))
if service_role_key in beanstalk_build_configs.keys():
build_configuration.service_role = beanstalk_build_configs[service_role_key]
if image_key in beanstalk_build_configs.keys():
build_configuration.image = beanstalk_build_configs[image_key]
if compute_key in beanstalk_build_configs.keys():
build_configuration.compute_type = beanstalk_build_configs[compute_key]
if timeout_key in beanstalk_build_configs.keys():
build_configuration.timeout = beanstalk_build_configs[timeout_key]
finally:
os.chdir(cwd) # move back to working directory
return build_configuration
def directory_empty(location):
return not os.listdir(location)
def get_ebignore_list():
EB_IGNORE_FILE_NAME = '.ebignore'
location = get_project_file_full_location(EB_IGNORE_FILE_NAME)
if not os.path.isfile(location):
return None
'''
This library will parse the ignore file, compare it to the current files
and give us a list of files to ignore
'''
# Patch iter_tree to not throw recursion error on non recursive links
from pathspec import pathspec, util
def iter_tree(root):
"""
Walks the specified root path for all files.
*root* (``str``) is the root directory to search for files.
Raises ``RecursionError`` if recursion is detected.
Returns an ``Iterable`` yielding each file path (``str``) relative to
*root*.
.. _`recursion`: http://docs.python.org/2/library/os.html#os.walk
"""
# Keep track of files encountered. Map real path to relative path.
memo = {}
root = os.path.abspath(root)
for parent, _dirs, files in os.walk(root, followlinks=True):
# Get parent path relative to root path.
parent = os.path.relpath(parent, root)
# Check for recursion.
real = os.path.realpath(parent)
if real in memo:
abspath = os.path.abspath(parent)
if real != abspath and real in abspath:
# if real is a parent of current parent
raise util.RecursionError(real_path=real, first_path=memo[real], second_path=parent)
else:
# not recursion, just a sideways link
continue
memo[real] = parent
# Yield files.
for path in files:
if parent != '.':
path = os.path.join(parent, path)
yield path
util.iter_tree = iter_tree
with open(location, 'r') as f:
spec = pathspec.PathSpec.from_lines('gitignore', f)
ignore_list = [f for f in spec.match_tree(get_project_root())]
ignore_list.append(EB_IGNORE_FILE_NAME)
return ignore_list
def make_eb_dir(location):
cwd = os.getcwd()
try:
_traverse_to_project_root()
path = beanstalk_directory + location
if not os.path.isdir(path):
os.makedirs(path)
finally:
os.chdir(cwd)
def write_to_eb_data_file(location, data):
cwd = os.getcwd()
try:
_traverse_to_project_root()
path = beanstalk_directory + location
write_to_data_file(path, data)
finally:
os.chdir(cwd)
def read_from_eb_data_file(location):
cwd = os.getcwd()
try:
_traverse_to_project_root()
path = beanstalk_directory + location
read_from_data_file(path)
finally:
os.chdir(cwd)
def write_to_data_file(location, data):
with codecs.open(location, 'wb', encoding=None) as f:
f.write(data)
def read_from_data_file(location):
with codecs.open(location, 'rb', encoding=None) as f:
return f.read()
def read_from_text_file(location):
with codecs.open(location, 'rt', encoding=None) as f:
return f.read()
def write_to_text_file(data, location):
with codecs.open(location, 'wt', encoding=None) as f:
f.write(data)
def append_to_text_file(location, data):
with codecs.open(location, 'at', encoding=None) as f:
f.write(data)
def readlines_from_text_file(location):
with codecs.open(location, 'rt', encoding=None) as f:
return f.readlines()
def get_project_file_full_location(location):
cwd = os.getcwd()
try:
_traverse_to_project_root()
full_path = os.path.abspath(location)
return full_path
finally:
os.chdir(cwd)
def get_eb_file_full_location(location):
return get_project_file_full_location(beanstalk_directory + location)
def get_home():
return os.path.expanduser('~')
def get_filename_without_extension(file_location):
filename = os.path.basename(file_location)
extension = 'fake'
while extension != '':
# Split multiple extensions
filename, extension = os.path.splitext(filename)
return filename
def env_yaml_exists():
return os.path.isfile(os.path.join(os.getcwd(), env_yaml))
def get_env_name_from_env_yaml():
with open(os.path.join(os.getcwd(), env_yaml), 'r') as f:
data = yaml.load(f)
try:
env_name = data['EnvironmentName']
return env_name
except KeyError:
return None
def get_platform_from_env_yaml():
with open(os.path.join(os.getcwd(), env_yaml), 'r') as f:
data = yaml.load(f)
try:
env_name = data['SolutionStack']
return env_name
except KeyError:
return None
def open_file_for_editing(file_location):
# Added this line for windows whitespace escaping
file_location = '"{0}"'.format(file_location)
editor = get_editor()
if editor:
try:
os.system(editor + ' ' + file_location)
except OSError:
io.log_error(prompts['fileopen.error1'].replace('{editor}',
editor))
else:
try:
os.system(file_location)
except OSError:
io.log_error(prompts['fileopen.error2'])
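write_config_setting and get_config_setting above persist nested section/key values as YAML. A standalone sketch of the same round trip on a throwaway file (independent of the .elasticbeanstalk directory handling; safe_load is used here so the snippet runs on current PyYAML):
import codecs
import os
import tempfile

from yaml import safe_dump, safe_load

path = os.path.join(tempfile.mkdtemp(), "config.yml")

config = {"global": {"application_name": "my-app", "default_region": "us-west-2"}}
with codecs.open(path, "w", encoding="utf8") as f:
    f.write(safe_dump(config, default_flow_style=False, line_break=os.linesep))

with codecs.open(path, "r", encoding="utf8") as f:
    loaded = safe_load(f)
print(loaded["global"]["application_name"])  # my-app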
|
[] |
[] |
[
"EDITOR",
"PATH"
] |
[]
|
["EDITOR", "PATH"]
|
python
| 2 | 0 | |
python/pyspark/pandas/utils.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Commonly used utils in pandas-on-Spark.
"""
import functools
from collections import OrderedDict
from contextlib import contextmanager
import os
from typing import (
Any,
Callable,
Dict,
Iterator,
List,
Optional,
Tuple,
Union,
TYPE_CHECKING,
cast,
no_type_check,
overload,
)
import warnings
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.types import DoubleType
import pandas as pd
from pandas.api.types import is_list_like
# For running doctests and reference resolution in PyCharm.
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas.typedef.typehints import as_spark_type
if TYPE_CHECKING:
# This is required in old Python 3.5 to prevent circular reference.
from pyspark.pandas.base import IndexOpsMixin # noqa: F401 (SPARK-34943)
from pyspark.pandas.frame import DataFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.internal import InternalFrame # noqa: F401 (SPARK-34943)
from pyspark.pandas.series import Series # noqa: F401 (SPARK-34943)
ERROR_MESSAGE_CANNOT_COMBINE = (
"Cannot combine the series or dataframe because it comes from a different dataframe. "
"In order to allow this operation, enable 'compute.ops_on_diff_frames' option."
)
SPARK_CONF_ARROW_ENABLED = "spark.sql.execution.arrow.pyspark.enabled"
def same_anchor(
this: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
that: Union["DataFrame", "IndexOpsMixin", "InternalFrame"],
) -> bool:
"""
Check if the anchors of the given DataFrame or Series are the same or not.
"""
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import InternalFrame
if isinstance(this, InternalFrame):
this_internal = this
else:
assert isinstance(this, (DataFrame, IndexOpsMixin)), type(this)
this_internal = this._internal
if isinstance(that, InternalFrame):
that_internal = that
else:
assert isinstance(that, (DataFrame, IndexOpsMixin)), type(that)
that_internal = that._internal
return (
this_internal.spark_frame is that_internal.spark_frame
and this_internal.index_level == that_internal.index_level
and all(
spark_column_equals(this_scol, that_scol)
for this_scol, that_scol in zip(
this_internal.index_spark_columns, that_internal.index_spark_columns
)
)
)
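# Hedged usage sketch (illustrative, not part of the original file): for a DataFrame
# `psdf`, same_anchor(psdf, psdf['a']) is True because the Series keeps psdf's underlying
# spark_frame, whereas two independently created DataFrames compare False.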
def combine_frames(
this: "DataFrame",
*args: Union["DataFrame", "Series"],
how: str = "full",
preserve_order_column: bool = False
) -> "DataFrame":
"""
This method combines `this` DataFrame with a different `that` DataFrame or
Series from a different DataFrame.
It returns a DataFrame whose column labels are prefixed with `this_` and `that_` to
distinguish the column names coming from each DataFrame.
It internally performs a join operation, which can be expensive in general.
So, if the `compute.ops_on_diff_frames` option is False,
this method throws an exception.
"""
from pyspark.pandas.config import get_option
from pyspark.pandas.frame import DataFrame
from pyspark.pandas.internal import (
InternalField,
InternalFrame,
HIDDEN_COLUMNS,
NATURAL_ORDER_COLUMN_NAME,
SPARK_INDEX_NAME_FORMAT,
)
from pyspark.pandas.series import Series
if all(isinstance(arg, Series) for arg in args):
assert all(
same_anchor(arg, args[0]) for arg in args
), "Currently only one different DataFrame (from given Series) is supported"
assert not same_anchor(this, args[0]), "We don't need to combine. All series is in this."
that = args[0]._psdf[list(args)]
elif len(args) == 1 and isinstance(args[0], DataFrame):
assert isinstance(args[0], DataFrame)
assert not same_anchor(
this, args[0]
), "We don't need to combine. `this` and `that` are same."
that = args[0]
else:
raise AssertionError("args should be single DataFrame or " "single/multiple Series")
if get_option("compute.ops_on_diff_frames"):
def resolve(internal: InternalFrame, side: str) -> InternalFrame:
rename = lambda col: "__{}_{}".format(side, col)
internal = internal.resolved_copy
sdf = internal.spark_frame
sdf = internal.spark_frame.select(
*[
scol_for(sdf, col).alias(rename(col))
for col in sdf.columns
if col not in HIDDEN_COLUMNS
],
*HIDDEN_COLUMNS
)
return internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.index_spark_column_names
],
index_fields=[
field.copy(name=rename(field.name)) for field in internal.index_fields
],
data_spark_columns=[
scol_for(sdf, rename(col)) for col in internal.data_spark_column_names
],
data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],
)
this_internal = resolve(this._internal, "this")
that_internal = resolve(that._internal, "that")
this_index_map = list(
zip(
this_internal.index_spark_column_names,
this_internal.index_names,
this_internal.index_fields,
)
)
that_index_map = list(
zip(
that_internal.index_spark_column_names,
that_internal.index_names,
that_internal.index_fields,
)
)
assert len(this_index_map) == len(that_index_map)
join_scols = []
merged_index_scols = []
# Note that the order of each element in index_map is guaranteed according to the index
# level.
this_and_that_index_map = list(zip(this_index_map, that_index_map))
this_sdf = this_internal.spark_frame.alias("this")
that_sdf = that_internal.spark_frame.alias("that")
# If the same named index is found, that's used.
index_column_names = []
index_use_extension_dtypes = []
for (
i,
((this_column, this_name, this_field), (that_column, that_name, that_field)),
) in enumerate(this_and_that_index_map):
if this_name == that_name:
# We should merge the Spark columns into one
# to mimic pandas' behavior.
this_scol = scol_for(this_sdf, this_column)
that_scol = scol_for(that_sdf, that_column)
join_scol = this_scol == that_scol
join_scols.append(join_scol)
column_name = SPARK_INDEX_NAME_FORMAT(i)
index_column_names.append(column_name)
index_use_extension_dtypes.append(
any(field.is_extension_dtype for field in [this_field, that_field])
)
merged_index_scols.append(
F.when(this_scol.isNotNull(), this_scol).otherwise(that_scol).alias(column_name)
)
else:
raise ValueError("Index names must be exactly matched currently.")
assert len(join_scols) > 0, "cannot join with no overlapping index names"
joined_df = this_sdf.join(that_sdf, on=join_scols, how=how)
if preserve_order_column:
order_column = [scol_for(this_sdf, NATURAL_ORDER_COLUMN_NAME)]
else:
order_column = []
joined_df = joined_df.select(
*merged_index_scols,
*(
scol_for(this_sdf, this_internal.spark_column_name_for(label))
for label in this_internal.column_labels
),
*(
scol_for(that_sdf, that_internal.spark_column_name_for(label))
for label in that_internal.column_labels
),
*order_column
)
index_spark_columns = [scol_for(joined_df, col) for col in index_column_names]
index_columns = set(index_column_names)
new_data_columns = [
col
for col in joined_df.columns
if col not in index_columns and col != NATURAL_ORDER_COLUMN_NAME
]
schema = joined_df.select(*index_spark_columns, *new_data_columns).schema
index_fields = [
InternalField.from_struct_field(struct_field, use_extension_dtypes=use_extension_dtypes)
for struct_field, use_extension_dtypes in zip(
schema.fields[: len(index_spark_columns)], index_use_extension_dtypes
)
]
data_fields = [
InternalField.from_struct_field(
struct_field, use_extension_dtypes=field.is_extension_dtype
)
for struct_field, field in zip(
schema.fields[len(index_spark_columns) :],
this_internal.data_fields + that_internal.data_fields,
)
]
level = max(this_internal.column_labels_level, that_internal.column_labels_level)
def fill_label(label: Optional[Tuple]) -> List:
if label is None:
return ([""] * (level - 1)) + [None]
else:
return ([""] * (level - len(label))) + list(label)
column_labels = [
tuple(["this"] + fill_label(label)) for label in this_internal.column_labels
] + [tuple(["that"] + fill_label(label)) for label in that_internal.column_labels]
column_label_names = (
cast(List[Optional[Tuple]], [None]) * (1 + level - this_internal.column_labels_level)
) + this_internal.column_label_names
return DataFrame(
InternalFrame(
spark_frame=joined_df,
index_spark_columns=index_spark_columns,
index_names=this_internal.index_names,
index_fields=index_fields,
column_labels=column_labels,
data_spark_columns=[scol_for(joined_df, col) for col in new_data_columns],
data_fields=data_fields,
column_label_names=column_label_names,
)
)
else:
raise ValueError(ERROR_MESSAGE_CANNOT_COMBINE)
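# Hedged sketch of the combined layout (illustrative column names): given psdf1 with
# column 'a' and psdf2 with column 'b' sharing the same index names,
# combine_frames(psdf1, psdf2) yields a DataFrame whose column labels become
# ('this', 'a') and ('that', 'b'), with the index columns coalesced via the
# F.when(...).otherwise(...) expression built above.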
def align_diff_frames(
resolve_func: Callable[["DataFrame", List[Tuple], List[Tuple]], Tuple["Series", Tuple]],
this: "DataFrame",
that: "DataFrame",
fillna: bool = True,
how: str = "full",
preserve_order_column: bool = False,
) -> "DataFrame":
"""
This method aligns two different DataFrames with a given `func`. Columns are resolved and
handled within the given `func`.
To use this, `compute.ops_on_diff_frames` should be True, for now.
:param resolve_func: Takes aligned (joined) DataFrame, the column of the current DataFrame, and
the column of another DataFrame. It returns an iterable that produces Series.
>>> from pyspark.pandas.config import set_option, reset_option
>>>
>>> set_option("compute.ops_on_diff_frames", True)
>>>
>>> psdf1 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>> psdf2 = ps.DataFrame({'a': [9, 8, 7, 6, 5, 4, 3, 2, 1]})
>>>
>>> def func(psdf, this_column_labels, that_column_labels):
... psdf # conceptually this is A + B.
...
... # Within this function, Series from A or B can be performed against `psdf`.
... this_label = this_column_labels[0] # this is ('a',) from psdf1.
... that_label = that_column_labels[0] # this is ('a',) from psdf2.
... new_series = (psdf[this_label] - psdf[that_label]).rename(str(this_label))
...
... # This new series will be placed in new DataFrame.
... yield (new_series, this_label)
>>>
>>>
>>> align_diff_frames(func, psdf1, psdf2).sort_index()
a
0 0
1 0
2 0
3 0
4 0
5 0
6 0
7 0
8 0
>>> reset_option("compute.ops_on_diff_frames")
:param this: a DataFrame to align
:param that: another DataFrame to align
:param fillna: If True, it fills missing values in non-common columns in both `this` and `that`.
Otherwise, it returns them as they are.
:param how: join way. In addition, it affects how `resolve_func` resolves the column conflict.
- full: `resolve_func` should resolve only common columns from 'this' and 'that' DataFrames.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` and
'that_columns' in this function are B, C and B, C.
- left: `resolve_func` should resolve columns including `that`'s columns as well.
For instance, if 'this' has columns A, B, C and that has B, C, D, `this_columns` is
B, C but `that_columns` are B, C, D.
- inner: Same as 'full' mode; however, internally performs inner join instead.
:return: Aligned DataFrame
"""
from pyspark.pandas.frame import DataFrame
assert how == "full" or how == "left" or how == "inner"
this_column_labels = this._internal.column_labels
that_column_labels = that._internal.column_labels
common_column_labels = set(this_column_labels).intersection(that_column_labels)
# 1. Perform the join given two dataframes.
combined = combine_frames(this, that, how=how, preserve_order_column=preserve_order_column)
# 2. Apply the given function to transform the columns in a batch and keep the new columns.
combined_column_labels = combined._internal.column_labels
that_columns_to_apply = []
this_columns_to_apply = []
additional_that_columns = []
columns_to_keep = []
column_labels_to_keep = []
for combined_label in combined_column_labels:
for common_label in common_column_labels:
if combined_label == tuple(["this", *common_label]):
this_columns_to_apply.append(combined_label)
break
elif combined_label == tuple(["that", *common_label]):
that_columns_to_apply.append(combined_label)
break
else:
if how == "left" and combined_label in [
tuple(["that", *label]) for label in that_column_labels
]:
# In this case, we will drop `that_columns` in `columns_to_keep` but passes
# it later to `func`. `func` should resolve it.
# Note that adding this into a separate list (`additional_that_columns`)
# is intentional so that `this_columns` and `that_columns` can be paired.
additional_that_columns.append(combined_label)
elif fillna:
columns_to_keep.append(F.lit(None).cast(DoubleType()).alias(str(combined_label)))
column_labels_to_keep.append(combined_label)
else:
columns_to_keep.append(combined._psser_for(combined_label))
column_labels_to_keep.append(combined_label)
that_columns_to_apply += additional_that_columns
# Should extract columns to apply and do it in a batch in case
# it adds new columns for example.
if len(this_columns_to_apply) > 0 or len(that_columns_to_apply) > 0:
psser_set, column_labels_set = zip(
*resolve_func(combined, this_columns_to_apply, that_columns_to_apply)
)
columns_applied = list(psser_set)
column_labels_applied = list(column_labels_set)
else:
columns_applied = []
column_labels_applied = []
applied = DataFrame(
combined._internal.with_new_columns(
columns_applied + columns_to_keep,
column_labels=column_labels_applied + column_labels_to_keep,
)
) # type: DataFrame
# 3. Restore the names back and deduplicate columns.
this_labels = OrderedDict()
# Add columns in an order of its original frame.
for this_label in this_column_labels:
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels and this_label == new_label[1:]:
this_labels[new_label[1:]] = new_label
# After that, we will add the rest columns.
other_labels = OrderedDict()
for new_label in applied._internal.column_labels:
if new_label[1:] not in this_labels:
other_labels[new_label[1:]] = new_label
psdf = applied[list(this_labels.values()) + list(other_labels.values())]
psdf.columns = psdf.columns.droplevel()
return psdf
def is_testing() -> bool:
"""Indicates whether Spark is currently running tests."""
return "SPARK_TESTING" in os.environ
def default_session(conf: Optional[Dict[str, Any]] = None) -> spark.SparkSession:
if conf is None:
conf = dict()
builder = spark.SparkSession.builder.appName("pandas-on-Spark")
for key, value in conf.items():
builder = builder.config(key, value)
# Currently, pandas-on-Spark is dependent on such join due to 'compute.ops_on_diff_frames'
# configuration. This is needed with Spark 3.0+.
builder.config("spark.sql.analyzer.failAmbiguousSelfJoin", False)
if is_testing():
builder.config("spark.executor.allowSparkContext", False)
return builder.getOrCreate()
@contextmanager
def sql_conf(
pairs: Dict[str, Any], *, spark: Optional[spark.SparkSession] = None
) -> Iterator[None]:
"""
A convenient context manager to set `value` to the Spark SQL configuration `key` and
then restores it back when it exits.
"""
assert isinstance(pairs, dict), "pairs should be a dictionary."
if spark is None:
spark = default_session()
keys = pairs.keys()
new_values = pairs.values()
old_values = [spark.conf.get(key, None) for key in keys]
for key, new_value in zip(keys, new_values):
spark.conf.set(key, new_value)
try:
yield
finally:
for key, old_value in zip(keys, old_values):
if old_value is None:
spark.conf.unset(key)
else:
spark.conf.set(key, old_value)
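# Minimal usage sketch (assumed, not from the original source):
#
# with sql_conf({SPARK_CONF_ARROW_ENABLED: False}):
# ... # code in this block sees Arrow disabled; the previous value is restored on exit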
def validate_arguments_and_invoke_function(
pobj: Union[pd.DataFrame, pd.Series],
pandas_on_spark_func: Callable,
pandas_func: Callable,
input_args: Dict,
) -> Any:
"""
Invokes a pandas function.
This is created because different versions of pandas support different parameters, and as a
result when we code against the latest version, our users might get a confusing
"got an unexpected keyword argument" error if they are using an older version of pandas.
This function validates all the arguments, removes the ones that are not supported if they
are simply the default value (i.e. most likely the user didn't explicitly specify it). It
throws a TypeError if the user explicitly specifies an argument that is not supported by the
pandas version available.
For example usage, look at DataFrame.to_html().
:param pobj: the pandas DataFrame or Series to operate on
:param pandas_on_spark_func: pandas-on-Spark function, used to get default parameter values
:param pandas_func: pandas function, used to check whether pandas supports all the arguments
:param input_args: arguments to pass to the pandas function, often created by using locals().
Make sure locals() call is at the top of the function so it captures only
input parameters, rather than local variables.
:return: whatever pandas_func returns
"""
import inspect
# Makes a copy since whatever passed in is likely created by locals(), and we can't delete
# 'self' key from that.
args = input_args.copy()
del args["self"]
if "kwargs" in args:
# explode kwargs
kwargs = args["kwargs"]
del args["kwargs"]
args = {**args, **kwargs}
pandas_on_spark_params = inspect.signature(pandas_on_spark_func).parameters
pandas_params = inspect.signature(pandas_func).parameters
for param in pandas_on_spark_params.values():
if param.name not in pandas_params:
if args[param.name] == param.default:
del args[param.name]
else:
raise TypeError(
(
"The pandas version [%s] available does not support parameter '%s' "
+ "for function '%s'."
)
% (pd.__version__, param.name, pandas_func.__name__)
)
args["self"] = pobj
return pandas_func(**args)
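# Hedged usage sketch (hypothetical caller, mirroring the DataFrame.to_html pattern the
# docstring points to; the names below are illustrative only):
#
# def to_html(self, buf=None, columns=None, max_rows=None):
# args = locals() # first statement, so only the input parameters are captured
# pdf = self._to_internal_pandas()
# return validate_arguments_and_invoke_function(pdf, self.to_html, pd.DataFrame.to_html, args)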
@no_type_check
def lazy_property(fn: Callable[[Any], Any]) -> property:
"""
Decorator that makes a property lazy-evaluated.
Copied from https://stevenloria.com/lazy-properties/
"""
attr_name = "_lazy_" + fn.__name__
@property
@functools.wraps(fn)
def wrapped_lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
def deleter(self):
if hasattr(self, attr_name):
delattr(self, attr_name)
return wrapped_lazy_property.deleter(deleter)
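# Hedged usage sketch (illustrative class and helper, not from the original file):
#
# class Report:
# @lazy_property
# def payload(self):
# return build_payload() # hypothetical helper; evaluated once, cached as self._lazy_payload
#
# `del report.payload` clears the cache through the deleter returned above.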
def scol_for(sdf: spark.DataFrame, column_name: str) -> spark.Column:
"""Return Spark Column for the given column name."""
return sdf["`{}`".format(column_name)]
def column_labels_level(column_labels: List[Tuple]) -> int:
"""Return the level of the column index."""
if len(column_labels) == 0:
return 1
else:
levels = set(1 if label is None else len(label) for label in column_labels)
assert len(levels) == 1, levels
return list(levels)[0]
def name_like_string(name: Optional[Union[Any, Tuple]]) -> str:
"""
Return the name-like strings from str or tuple of str
Examples
--------
>>> name = 'abc'
>>> name_like_string(name)
'abc'
>>> name = ('abc',)
>>> name_like_string(name)
'abc'
>>> name = ('a', 'b', 'c')
>>> name_like_string(name)
'(a, b, c)'
"""
if name is None:
name = ("__none__",)
elif is_list_like(name):
name = tuple([str(n) for n in name])
else:
name = (str(name),)
return ("(%s)" % ", ".join(name)) if len(name) > 1 else name[0]
def is_name_like_tuple(value: Any, allow_none: bool = True, check_type: bool = False) -> bool:
"""
Check whether the given tuple can be used as a name.
Examples
--------
>>> is_name_like_tuple(('abc',))
True
>>> is_name_like_tuple((1,))
True
>>> is_name_like_tuple(('abc', 1, None))
True
>>> is_name_like_tuple(('abc', 1, None), check_type=True)
True
>>> is_name_like_tuple((1.0j,))
True
>>> is_name_like_tuple(tuple())
False
>>> is_name_like_tuple((list('abc'),))
False
>>> is_name_like_tuple(('abc', 1, None), allow_none=False)
False
>>> is_name_like_tuple((1.0j,), check_type=True)
False
"""
if value is None:
return allow_none
elif not isinstance(value, tuple):
return False
elif len(value) == 0:
return False
elif not allow_none and any(v is None for v in value):
return False
elif any(is_list_like(v) or isinstance(v, slice) for v in value):
return False
elif check_type:
return all(
v is None or as_spark_type(type(v), raise_error=False) is not None for v in value
)
else:
return True
def is_name_like_value(
value: Any, allow_none: bool = True, allow_tuple: bool = True, check_type: bool = False
) -> bool:
"""
Check whether the given value is name-like.
Examples
--------
>>> is_name_like_value('abc')
True
>>> is_name_like_value(1)
True
>>> is_name_like_value(None)
True
>>> is_name_like_value(('abc',))
True
>>> is_name_like_value(1.0j)
True
>>> is_name_like_value(list('abc'))
False
>>> is_name_like_value(None, allow_none=False)
False
>>> is_name_like_value(('abc',), allow_tuple=False)
False
>>> is_name_like_value(1.0j, check_type=True)
False
"""
if value is None:
return allow_none
elif isinstance(value, tuple):
return allow_tuple and is_name_like_tuple(
value, allow_none=allow_none, check_type=check_type
)
elif is_list_like(value) or isinstance(value, slice):
return False
elif check_type:
return as_spark_type(type(value), raise_error=False) is not None
else:
return True
def validate_axis(axis: Optional[Union[int, str]] = 0, none_axis: int = 0) -> int:
"""Check the given axis is valid."""
# convert to numeric axis
axis = cast(
Dict[Optional[Union[int, str]], int], {None: none_axis, "index": 0, "columns": 1}
).get(axis, axis)
if axis in (none_axis, 0, 1):
return cast(int, axis)
else:
raise ValueError("No axis named {0}".format(axis))
def validate_bool_kwarg(value: Any, arg_name: str) -> Optional[bool]:
"""Ensures that argument passed in arg_name is of type bool."""
if not (isinstance(value, bool) or value is None):
raise TypeError(
'For argument "{}" expected type bool, received '
"type {}.".format(arg_name, type(value).__name__)
)
return value
def validate_how(how: str) -> str:
"""Check the given how for join is valid."""
if how == "full":
warnings.warn(
"Warning: While pandas-on-Spark will accept 'full', you should use 'outer' "
+ "instead to be compatible with the pandas merge API",
UserWarning,
)
if how == "outer":
# 'outer' in pandas equals 'full' in Spark
how = "full"
if how not in ("inner", "left", "right", "full"):
raise ValueError(
"The 'how' parameter has to be amongst the following values: ",
"['inner', 'left', 'right', 'outer']",
)
return how
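# Derived examples (from the checks above): validate_how('outer') maps to Spark's 'full'
# join and returns 'full'; validate_how('full') is accepted but emits the compatibility
# warning; an unknown value such as 'cross' raises ValueError.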
@overload
def verify_temp_column_name(df: spark.DataFrame, column_name_or_label: str) -> str:
...
@overload
def verify_temp_column_name(
df: "DataFrame", column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
...
def verify_temp_column_name(
df: Union["DataFrame", spark.DataFrame], column_name_or_label: Union[Any, Tuple]
) -> Union[Any, Tuple]:
"""
Verify that the given column name does not exist in the given pandas-on-Spark or
Spark DataFrame.
The temporary column names should start and end with `__`. In addition, `column_name_or_label`
expects a single string, or column labels when `df` is a pandas-on-Spark DataFrame.
>>> psdf = ps.DataFrame({("x", "a"): ['a', 'b', 'c']})
>>> psdf["__dummy__"] = 0
>>> psdf[("", "__dummy__")] = 1
>>> psdf # doctest: +NORMALIZE_WHITESPACE
x __dummy__
a __dummy__
0 a 0 1
1 b 0 1
2 c 0 1
>>> verify_temp_column_name(psdf, '__tmp__')
('__tmp__', '')
>>> verify_temp_column_name(psdf, ('', '__tmp__'))
('', '__tmp__')
>>> verify_temp_column_name(psdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `(__dummy__, )` ...
>>> verify_temp_column_name(psdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: ... `(, __dummy__)` ...
>>> verify_temp_column_name(psdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('dummy', '')
>>> verify_temp_column_name(psdf, ('', 'dummy'))
Traceback (most recent call last):
...
AssertionError: ... should be empty or start and end with `__`: ('', 'dummy')
>>> internal = psdf._internal.resolved_copy
>>> sdf = internal.spark_frame
>>> sdf.select(internal.data_spark_columns).show() # doctest: +NORMALIZE_WHITESPACE
+------+---------+-------------+
|(x, a)|__dummy__|(, __dummy__)|
+------+---------+-------------+
| a| 0| 1|
| b| 0| 1|
| c| 0| 1|
+------+---------+-------------+
>>> verify_temp_column_name(sdf, '__tmp__')
'__tmp__'
>>> verify_temp_column_name(sdf, '__dummy__')
Traceback (most recent call last):
...
AssertionError: ... `__dummy__` ... '(x, a)', '__dummy__', '(, __dummy__)', ...
>>> verify_temp_column_name(sdf, ('', '__dummy__'))
Traceback (most recent call last):
...
AssertionError: <class 'tuple'>
>>> verify_temp_column_name(sdf, 'dummy')
Traceback (most recent call last):
...
AssertionError: ... should start and end with `__`: dummy
"""
from pyspark.pandas.frame import DataFrame
if isinstance(df, DataFrame):
if isinstance(column_name_or_label, str):
column_name = column_name_or_label
level = df._internal.column_labels_level
column_name_or_label = tuple([column_name_or_label] + ([""] * (level - 1)))
else:
column_name = name_like_string(column_name_or_label)
assert any(len(label) > 0 for label in column_name_or_label) and all(
label == "" or (label.startswith("__") and label.endswith("__"))
for label in column_name_or_label
), "The temporary column name should be empty or start and end with `__`: {}".format(
column_name_or_label
)
assert all(
column_name_or_label != label for label in df._internal.column_labels
), "The given column name `{}` already exists in the pandas-on-Spark DataFrame: {}".format(
name_like_string(column_name_or_label), df.columns
)
df = df._internal.resolved_copy.spark_frame
else:
assert isinstance(column_name_or_label, str), type(column_name_or_label)
assert column_name_or_label.startswith("__") and column_name_or_label.endswith(
"__"
), "The temporary column name should start and end with `__`: {}".format(
column_name_or_label
)
column_name = column_name_or_label
assert isinstance(df, spark.DataFrame), type(df)
assert (
column_name not in df.columns
), "The given column name `{}` already exists in the Spark DataFrame: {}".format(
column_name, df.columns
)
return column_name_or_label
def spark_column_equals(left: spark.Column, right: spark.Column) -> bool:
"""
Check both `left` and `right` have the same expressions.
>>> spark_column_equals(F.lit(0), F.lit(0))
True
>>> spark_column_equals(F.lit(0) + 1, F.lit(0) + 1)
True
>>> spark_column_equals(F.lit(0) + 1, F.lit(0) + 2)
False
>>> sdf1 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf1["x"] + 1)
True
>>> sdf2 = ps.DataFrame({"x": ['a', 'b', 'c']}).to_spark()
>>> spark_column_equals(sdf1["x"] + 1, sdf2["x"] + 1)
False
"""
return left._jc.equals(right._jc) # type: ignore
def compare_null_first(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNull() & right.isNotNull()
)
def compare_null_last(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return (left.isNotNull() & right.isNotNull() & comp(left, right)) | (
left.isNotNull() & right.isNull()
)
def compare_disallow_null(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return left.isNotNull() & right.isNotNull() & comp(left, right)
def compare_allow_null(
left: spark.Column,
right: spark.Column,
comp: Callable[[spark.Column, spark.Column], spark.Column],
) -> spark.Column:
return left.isNull() | right.isNull() | comp(left, right)
def _test() -> None:
import os
import doctest
import sys
from pyspark.sql import SparkSession
import pyspark.pandas.utils
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.utils.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]").appName("pyspark.pandas.utils tests").getOrCreate()
)
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.utils,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
[] |
[] |
[
"SPARK_HOME"
] |
[]
|
["SPARK_HOME"]
|
python
| 1 | 0 | |
filebeat/scripts/mage/generate/fileset.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package generate
import (
"fmt"
"os"
devtools "github.com/elastic/beats/v7/dev-tools/mage"
genfileset "github.com/elastic/beats/v7/filebeat/generator/fileset"
)
// Fileset creates a new fileset for an existing Filebeat module.
// Use MODULE=module to specify the name of the existing module
// Use FILESET=fileset to specify the name of the new fileset
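// Illustrative invocation (hedged: the exact mage target name depends on how this
// package is wired into the Beats magefile):
// MODULE=nginx FILESET=access mage generate:fileset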
func Fileset() error {
targetModule := os.Getenv("MODULE")
targetFileset := os.Getenv("FILESET")
if targetModule == "" || targetFileset == "" {
return fmt.Errorf("you must specify the module and fileset: MODULE=module FILESET=fileset createFileset")
}
ossDir := devtools.OSSBeatDir()
xPackDir := devtools.XPackBeatDir()
switch devtools.CWD() {
case ossDir:
return genfileset.Generate(targetModule, targetFileset, ossDir, ossDir)
case xPackDir:
return genfileset.Generate(targetModule, targetFileset, xPackDir, ossDir)
default:
return fmt.Errorf("you must be in a filebeat directory")
}
}
|
[
"\"MODULE\"",
"\"FILESET\""
] |
[] |
[
"MODULE",
"FILESET"
] |
[]
|
["MODULE", "FILESET"]
|
go
| 2 | 0 | |
uncertainty/plots/plot_prec_cov_twoparams.py
|
import os, sys
import argparse
import types
import numpy as np
import pickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import tensorflow as tf
import data
import model
from util import *
from learning import LearnerCls, LearnerDACls, LearnerClsRT, LearnerConfPred
from learning import TempScalingCls as CalibratorCls
##TODO: clean-up tf options
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
#gpus = tf.config.experimental.list_physical_devices('GPU')
#tf.config.experimental.set_memory_growth(gpus[0], True)
def plot_prec_cov(T, prec, cov, fn, fontsize=15):
plt.figure(1)
plt.clf()
plt.plot(cov, prec, 'rs-')
plt.xlabel('coverage (%)', fontsize=fontsize)
plt.ylabel('precision (%)', fontsize=fontsize)
plt.grid('on')
plt.savefig(fn+'.png', bbox_inches='tight')
def main(args):
data_fn = 'plots/prec_cov_list_twoparams.pk'
fig_fn = 'plots/prec_cov_twoparams'
if os.path.exists(data_fn):
pc_data = pickle.load(open(data_fn, 'rb'))
plot_prec_cov(pc_data['T_list'], pc_data['prec_list'], pc_data['cov_list'], fig_fn)
return
## init a snapshot path
os.makedirs(args.train.save_root, exist_ok=True)
## init logger
sys.stdout = Logger(os.path.join(args.train.save_root, 'out'))
## print args
print_args(args)
## init gpus
if not args.cpu:
print("##GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
print()
## init datasets
print("## init datasets")
ds_src = data.MultiSourceDataset(
args.data.src,
args.aug_params,
batch_size=args.data.batch_size,
val_shuffle=True,
val_aug=True,
domain_id=1,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0])
assert(len(args.aug_params) == 1) ##TODO
ds_tar = getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params[0],
val_shuffle=True,
val_aug=True,
domain_id=0,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1])
ds_dom = data.DomainDataset(
data.MultiSourceDataset(
args.data.src,
args.aug_params,
batch_size=args.data.batch_size,
val_shuffle=True,
val_aug=True,
test_aug=True, #diff
domain_id=1,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[0]),
getattr(data, args.data.tar)(
root=os.path.join('data', args.data.tar.lower()),
batch_size=args.data.batch_size,
aug_list=args.aug_params[0],
val_shuffle=True,
val_aug=True,
test_aug=True, #diff
domain_id=0,
color=False if args.data.img_size[2]==1 else True,
size=args.data.img_size[0],
sample_ratio=args.data.sample_ratio[1]))
print()
####
## reliable teacher learning
####
mdl_st_base = getattr(model, args.model.base)(num_class=args.model.n_labels, input_shape=args.model.img_size)
#mdl_st_base = model.TempCls(mdl_st_base)
mdl_st = model.Student(args.model, mdl_st_base, ds_src, ds_tar, ideal=args.ideal)
mdl_tc_base = getattr(model, args.model.base)(num_class=args.model.n_labels, input_shape=args.model.img_size)
#mdl_tc_base = model.TempCls(mdl_tc_base)
mdl_tc = model.Teacher(args.model, mdl_tc_base, ds_src, ds_tar, ideal=args.ideal)
## rename
model_t = mdl_tc
model_s = mdl_st
model_c = model_s.model_base
params = args.train
params_base = args.train_base
params_advtr = args.train_advtr
params_iw = args.train_iw
params_iw_cal = args.cal_iw
params_conf = args.est_conf
i_epoch = 1
## init a model
if params.init == 'sourceonly':
##TODO: assume classification
print("## init the student model with sourceonly training")
model.set_trainable(model_c, True)
## init a learner
learner = LearnerCls(params_base, model_c, model_name_postfix='_sourceonlyinit')
## train the model
learner.train(ds_src.train, ds_src.val, ds_src.test)
## test the model
learner.test(ds_src.test, ld_name='src', verbose=True)
print()
elif params.init == 'advtr':
##TODO: assume classification
print("## init a base model with adversarial training")
model.set_trainable(model_c, True)
## init a adv model
mdl_adv = getattr(model, params_advtr.model_advtr)(n_in=model_c.dim_feat)
## init a learner
learner = LearnerDACls(params_advtr, model.DAN(model_c, mdl_adv), model_name_postfix='_advtrinit')
## train the model
learner.train([ds_src.train, ds_dom.train], None, ds_tar.test)
## test the model
learner.test(ds_tar.test, ld_name='tar', verbose=True)
print()
else:
raise NotImplementedError
## init iw
if model_t.train.model_conf.model_iw is not None:
print("## learn IW")
model_sd = model_t.train.model_conf.model_iw.model_sd.model
model_sd.train()
## init a learner
learner_sd = LearnerCls(params_iw, model_sd, model_name_postfix='_iw_epoch_%d'%(i_epoch))
## train the model
learner_sd.train(ds_dom.train, ds_dom.val, ds_dom.test)
## test the model
learner_sd.test(ds_dom.test, ld_name='domain', verbose=True)
print()
## init a calibraton model
model_sd_cal = model_t.train.model_conf.model_iw.model_sd
model_sd_cal.train()
## init a calibrator
calibrator_iw = CalibratorCls(params_iw_cal, model_sd_cal, model_name_postfix='_iw_cal_epoch_%d'%(i_epoch))
## calibrate the model
calibrator_iw.train(ds_dom.val, ds_dom.val, ds_dom.test)
## test the model
calibrator_iw.test(ds_dom.test, ld_name='domain', verbose=True)
print()
## 2. learn confidence predictor
model_base = model_t.train.model_base
#model_conf = model_t.train.model_conf
model_iw = model_t.train.model_conf.model_iw
#model_iw_cond = model.CondIW(model_iw, model_conf, ds_src.train, ds_tar.train)
model_conf = model.TwoParamsConfPred(model_base, model_iw)
## init a learner
learner = LearnerConfPred(params_conf, model_conf, model_base, None, model_name_postfix='_confpred_epoch_%d'%(i_epoch))
# ## train the model
# learner.train(ds_src.val, ds_src.val, ds_tar.test)
# ## test the model
# learner.test(ds_tar.test, ld_name='tar', verbose=True)
# learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)
# print()
else:
model_base = model_t.train.model_base
model_conf = model_t.train.model_conf
## init a learner
learner = LearnerConfPred(params_conf, model_conf, model_base, None, model_name_postfix='_confpred_epoch_%d'%(i_epoch))
## train the model
model_conf.T = tf.Variable(1.0 - params_conf.eps) ##TODO
print("T = %f"%(model_conf.T.numpy()))
## test the model
learner.test(ds_tar.test, ld_name='tar', verbose=True)
learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)
## compute precision and coverage
T_list, prec_list, cov_list = [], [], []
rng = [0.99, 0.98, 0.97, 0.96, 0.95, 0.90, 0.8, 0.7, 0.6, 0.5]
for T_bnd in rng:
for T_iw in rng:
model_conf.T_bnd = T_bnd
model_conf.T_iw = T_iw
prec, n_conf, n = learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)
T_list.append((T_bnd, T_iw))
prec_list.append(prec.numpy())
cov_list.append(float(n_conf)/float(n))
print(T_list)
print(prec_list)
print(cov_list)
print()
T_list = np.array(T_list)
prec_list = np.array(prec_list)
cov_list = np.array(cov_list)
pickle.dump({'T_list': T_list, 'prec_list': prec_list, 'cov_list': cov_list}, open(data_fn, 'wb'))
def parse_args():
## init a parser
parser = argparse.ArgumentParser(description='digit dataset training')
## meta args
parser.add_argument('--exp_name', required=True, type=str, help='experiment name')
parser.add_argument('--snapshot_root', default='snapshots', type=str, help='snapshot root name')
parser.add_argument('--cpu', action='store_true', help='use CPU')
parser.add_argument('--ideal', action='store_true', help='enable cheatkey')
## dataset args
parser.add_argument('--data.batch_size', default=100, type=int, help='batch size')
parser.add_argument('--data.n_labels', default=10, type=int, help='the number of labels')
parser.add_argument('--data.src', type=str, nargs='*', default=['MNIST'], help='list of sources')
parser.add_argument('--data.tar', type=str, default='USPS', help='target')
parser.add_argument('--data.aug', type=str, nargs='*', default=[''], help='list of data augmentation')
parser.add_argument('--data.img_size', type=int, nargs=3, default=(32, 32, 3), help='image size')
parser.add_argument('--data.sample_ratio', type=float, nargs=2, default=[1.0, 1.0])
## model args
parser.add_argument('--model.base', default='ResNet18', type=str, help='model name')
parser.add_argument('--model.iw', default='BigFNN', type=str, help='model name')
parser.add_argument('--model.conf', default='ConfPred', type=str, help='model name')
## RT train args
parser.add_argument('--train.find_best', action='store_true', help='find the best model')
parser.add_argument('--train.load_final', action='store_true', help='load the final model')
parser.add_argument('--train.n_epochs', type=int, default=5, help='the number of training iterations')
parser.add_argument('--train.init', type=str, default='advtr', help='model initialization approach')
parser.add_argument('--train.val_period', default=1, type=int, help='validation period in epochs')
## base model train args
parser.add_argument('--train_base.find_best', action='store_true', help='find the best model')
parser.add_argument('--train_base.load_final', action='store_true', help='load the final model')
parser.add_argument('--train_base.optim', default='SGD', type=str, help='optimizer')
parser.add_argument('--train_base.lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--train_base.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
parser.add_argument('--train_base.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
parser.add_argument('--train_base.weight_decay', type=float, default=0.0, help='L2 weight decay')
parser.add_argument('--train_base.momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--train_base.n_epochs', default=100, type=int, help='the number of epochs')
parser.add_argument('--train_base.val_period', default=1, type=int, help='validation period in epochs')
## iw train args
parser.add_argument('--train_iw.find_best', action='store_true', help='find the best model')
parser.add_argument('--train_iw.load_final', action='store_true', help='load the final model')
parser.add_argument('--train_iw.optim', default='SGD', type=str, help='optimizer')
parser.add_argument('--train_iw.lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--train_iw.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
parser.add_argument('--train_iw.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
parser.add_argument('--train_iw.weight_decay', type=float, default=0.0, help='L2 weight decay')
parser.add_argument('--train_iw.momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--train_iw.n_epochs', default=100, type=int, help='the number of epochs')
parser.add_argument('--train_iw.val_period', default=1, type=int, help='validation period in epochs')
## cal args
parser.add_argument('--cal_iw.find_best', action='store_true', help='find the best model')
parser.add_argument('--cal_iw.load_final', action='store_true', help='load the final model')
parser.add_argument('--cal_iw.optim', default='SGD', type=str, help='optimizer')
parser.add_argument('--cal_iw.lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--cal_iw.lr_step_size', default=50, type=float, help='stepsize for step learning rate scheduler')
parser.add_argument('--cal_iw.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
parser.add_argument('--cal_iw.weight_decay', type=float, default=0.0, help='L2 weight decay')
parser.add_argument('--cal_iw.momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--cal_iw.n_epochs', default=500, type=int, help='the number of epochs')
parser.add_argument('--cal_iw.val_period', default=1, type=int, help='validation period in epochs')
## train args
parser.add_argument('--train_advtr.find_best', action='store_true', help='find the best model')
parser.add_argument('--train_advtr.load_final', action='store_true', help='load the final model')
parser.add_argument('--train_advtr.optim', default='SGD', type=str, help='optimizer')
parser.add_argument('--train_advtr.lr', default=0.01, type=float, help='learning rate')
parser.add_argument('--train_advtr.lr_step_size', default=20, type=float, help='stepsize for step learning rate scheduler')
parser.add_argument('--train_advtr.lr_step_decay_rate', default=0.5, type=float, help='decay rate for step learning rate scheduler')
parser.add_argument('--train_advtr.weight_decay', type=float, default=0.0, help='L2 weight decay')
parser.add_argument('--train_advtr.momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--train_advtr.n_epochs', default=100, type=int, help='the number of epochs')
parser.add_argument('--train_advtr.val_period', default=1, type=int, help='validation period in epochs')
parser.add_argument('--train_advtr.advtr_type', type=str, default='DANN', help='domain-adversarial training type')
parser.add_argument('--train_advtr.model_advtr', type=str, default='BigAdvFNN', help='adversarial network name')
parser.add_argument('--train_advtr.reg_param_adv', type=float, default=1.0, help='adversarial loss regularization parameter')
#parser.add_argument('--train_advtr.schedule_reg_param_adv', action='store_true', help='schedule the adversarial loss regularization parameter')
parser.add_argument('--train_advtr.no_adv_reg_schedule', action='store_true', help='do not schedule the adversarial loss regularization parameter')
## conf args
parser.add_argument('--est_conf.find_best', action='store_true', help='find the best model')
parser.add_argument('--est_conf.load_final', action='store_true', help='load the final model')
#parser.add_argument('--est_conf.model', type=str, default='c+w', help='model name')
parser.add_argument('--est_conf.eps', type=float, default=0.01, help='epsilon')
parser.add_argument('--est_conf.T_max', type=float, default=1.0, help='T max range')
parser.add_argument('--est_conf.T_min', type=float, default=1e-6, help='T min range')
parser.add_argument('--est_conf.T_step', type=float, default=0.01, help='T step size')
args = parser.parse_args()
args = to_tree_namespace(args)
## duplicate
args.train.save_root = os.path.join(args.snapshot_root, args.exp_name)
args.train_base.save_root = args.train.save_root
args.train_iw.save_root = args.train.save_root
args.cal_iw.save_root = args.train.save_root
args.train_advtr.save_root = args.train.save_root
args.est_conf.save_root = args.train.save_root
args.model.n_labels = args.data.n_labels
args.model.img_size = args.data.img_size
args.train_advtr.schedule_reg_param_adv = not args.train_advtr.no_adv_reg_schedule
## init aug parameters
args.aug_params = []
for a in args.data.aug:
if a == 'jitter':
args.aug_params.append([('jitter', {'brightness': 0.4, 'contrast': 0.4, 'saturation': 0.4})])
elif a == 'shake':
args.aug_params.append([('randaug', {'size': 32, 'mode': 'SHAKE'})])
elif a == 'svhnspec':
args.aug_params.append([
('intensity_flip', {}),
('intensity_scaling', {'min': -1.5, 'max': 1.5}),
('intensity_offset', {'min': -0.5, 'max': 0.5}),
('affine', {'std': 0.1}),
('translation', {'x_max': 2.0, 'y_max': 2.0}),
('gaussian', {'std': 0.1}),
])
else:
##TODO: simplify
args.aug_params.append(None)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
exporters/otlp/otlphttp/envconfig.go
|
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlphttp
import (
"crypto/tls"
"fmt"
"io/ioutil"
"net/url"
"os"
"strconv"
"strings"
"time"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/otlp/internal/otlpconfig"
)
func applyEnvConfigs(cfg *config) {
e := envOptionsReader{
getEnv: os.Getenv,
readFile: ioutil.ReadFile,
}
opts := e.getOptionsFromEnv()
for _, opt := range opts {
opt.Apply(cfg)
}
}
type envOptionsReader struct {
getEnv func(string) string
readFile func(filename string) ([]byte, error)
}
func (e *envOptionsReader) applyEnvConfigs(cfg *config) {
opts := e.getOptionsFromEnv()
for _, opt := range opts {
opt.Apply(cfg)
}
}
func (e *envOptionsReader) getOptionsFromEnv() []Option {
var opts []Option
// Endpoint
if v, ok := e.getEnvValue("ENDPOINT"); ok {
opts = append(opts, WithEndpoint(v))
}
if v, ok := e.getEnvValue("TRACES_ENDPOINT"); ok {
opts = append(opts, WithTracesEndpoint(v))
}
if v, ok := e.getEnvValue("METRICS_ENDPOINT"); ok {
opts = append(opts, WithMetricsEndpoint(v))
}
// Certificate File
if path, ok := e.getEnvValue("CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err))
}
}
if path, ok := e.getEnvValue("TRACES_CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithTracesTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp traces exporter certificate '%s': %w", path, err))
}
}
if path, ok := e.getEnvValue("METRICS_CERTIFICATE"); ok {
if tls, err := e.readTLSConfig(path); err == nil {
opts = append(opts, WithMetricsTLSClientConfig(tls))
} else {
otel.Handle(fmt.Errorf("failed to configure otlp metrics exporter certificate '%s': %w", path, err))
}
}
// Headers
if h, ok := e.getEnvValue("HEADERS"); ok {
opts = append(opts, WithHeaders(stringToHeader(h)))
}
if h, ok := e.getEnvValue("TRACES_HEADERS"); ok {
opts = append(opts, WithTracesHeaders(stringToHeader(h)))
}
if h, ok := e.getEnvValue("METRICS_HEADERS"); ok {
opts = append(opts, WithMetricsHeaders(stringToHeader(h)))
}
// Compression
if c, ok := e.getEnvValue("COMPRESSION"); ok {
opts = append(opts, WithCompression(stringToCompression(c)))
}
if c, ok := e.getEnvValue("TRACES_COMPRESSION"); ok {
opts = append(opts, WithTracesCompression(stringToCompression(c)))
}
if c, ok := e.getEnvValue("METRICS_COMPRESSION"); ok {
opts = append(opts, WithMetricsCompression(stringToCompression(c)))
}
// Timeout
if t, ok := e.getEnvValue("TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond))
}
}
if t, ok := e.getEnvValue("TRACES_TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithTracesTimeout(time.Duration(d)*time.Millisecond))
}
}
if t, ok := e.getEnvValue("METRICS_TIMEOUT"); ok {
if d, err := strconv.Atoi(t); err == nil {
opts = append(opts, WithMetricsTimeout(time.Duration(d)*time.Millisecond))
}
}
return opts
}
// getEnvValue gets an OTLP environment variable value of the specified key using the getEnv function.
// This function already prepends the OTLP prefix (OTEL_EXPORTER_OTLP_) to every key lookup.
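// For example, getEnvValue("TRACES_TIMEOUT") reads OTEL_EXPORTER_OTLP_TRACES_TIMEOUT and
// reports ok=false when the variable is unset or contains only whitespace.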
func (e *envOptionsReader) getEnvValue(key string) (string, bool) {
v := strings.TrimSpace(e.getEnv(fmt.Sprintf("OTEL_EXPORTER_OTLP_%s", key)))
return v, v != ""
}
func (e *envOptionsReader) readTLSConfig(path string) (*tls.Config, error) {
b, err := e.readFile(path)
if err != nil {
return nil, err
}
return otlpconfig.CreateTLSConfig(b)
}
func stringToCompression(value string) Compression {
switch value {
case "gzip":
return GzipCompression
}
return NoCompression
}
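// stringToHeader parses a comma-separated list of key=value pairs, URL-unescaping and
// trimming both sides; malformed pairs are skipped. With illustrative input
// "api-key=abc%20def,other=1" it returns map[string]string{"api-key": "abc def", "other": "1"}.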
func stringToHeader(value string) map[string]string {
headersPairs := strings.Split(value, ",")
headers := make(map[string]string)
for _, header := range headersPairs {
nameValue := strings.SplitN(header, "=", 2)
if len(nameValue) < 2 {
continue
}
name, err := url.QueryUnescape(nameValue[0])
if err != nil {
continue
}
trimmedName := strings.TrimSpace(name)
value, err := url.QueryUnescape(nameValue[1])
if err != nil {
continue
}
trimmedValue := strings.TrimSpace(value)
headers[trimmedName] = trimmedValue
}
return headers
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
DiscordOauth2/wsgi.py
|
"""
WSGI config for DiscordOauth2 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DiscordOauth2.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
valid.py
|
import os
import time
import torch
import argparse
import scipy.io
import warnings
from torch.autograd import Variable
from torchvision import datasets, transforms
import dataset
from darknet import Darknet
from utils import *
from MeshPly import MeshPly
from raptor_specific_utils import *
import pdb
def valid(datacfg, modelcfg, weightfile):
def truths_length(truths, max_num_gt=50):
for i in range(max_num_gt):
if truths[i][1] == 0:
return i
# Parse configuration files
data_options = read_data_cfg(datacfg)
valid_images = data_options['valid']
if 'mesh' in data_options:
meshname = data_options['mesh']
else:
meshname = None
assert('box_length' in data_options)
box_length = float(data_options['box_length'])
box_width = float(data_options['box_width'])
box_height = float(data_options['box_height'])
backupdir = data_options['backup']
name = data_options['name']
gpus = data_options['gpus']
fx = float(data_options['fx'])
fy = float(data_options['fy'])
u0 = float(data_options['u0'])
v0 = float(data_options['v0'])
im_width = int(data_options['width'])
im_height = int(data_options['height'])
if not os.path.exists(backupdir):
makedirs(backupdir)
# Parameters
seed = int(time.time())
os.environ['CUDA_VISIBLE_DEVICES'] = gpus
torch.cuda.manual_seed(seed)
save = False
testtime = True
num_classes = 1
testing_samples = 0.0
if save:
makedirs(backupdir + '/test')
makedirs(backupdir + '/test/gt')
makedirs(backupdir + '/test/pr')
# To save
testing_error_trans = 0.0
testing_error_angle = 0.0
testing_error_pixel = 0.0
errs_2d = []
errs_3d = []
errs_trans = []
errs_angle = []
errs_corner2D = []
preds_trans = []
preds_rot = []
preds_corners2D = []
gts_trans = []
gts_rot = []
gts_corners2D = []
# Read object model information, get 3D bounding box corners
if meshname is None:
# vertices must be 4 x N for compute_projections to work later
vertices = np.array([[ box_length/2, box_width/2, box_height/2, 1.],
[ box_length/2, box_width/2,-box_height/2, 1.],
[ box_length/2,-box_width/2,-box_height/2, 1.],
[ box_length/2,-box_width/2, box_height/2, 1.],
[-box_length/2,-box_width/2, box_height/2, 1.],
[-box_length/2,-box_width/2,-box_height/2, 1.],
[-box_length/2, box_width/2,-box_height/2, 1.],
[-box_length/2, box_width/2, box_height/2, 1.]]).T
diam = float(data_options['diam'])
else:
mesh = MeshPly(meshname)
vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()
try:
diam = float(data_options['diam'])
except:
diam = calc_pts_diameter(np.array(mesh.vertices))
corners3D = get_3D_corners(vertices)
intrinsic_calibration = get_camera_intrinsic(u0, v0, fx, fy)
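# Note (assumption about the helper): get_camera_intrinsic(u0, v0, fx, fy) is taken to
# return the standard pinhole intrinsic matrix K = [[fx, 0, u0], [0, fy, v0], [0, 0, 1]],
# which is what pnp() and compute_projection() below consume.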
# Get validation file names
with open(valid_images) as fp:
tmp_files = fp.readlines()
valid_files = [item.rstrip() for item in tmp_files]
# Specify the model, load pretrained weights, pass to GPU and set the module in evaluation mode
model = Darknet(modelcfg)
model.print_network()
model.load_weights(weightfile)
model.cuda()
model.eval()
test_width = model.test_width
test_height = model.test_height
num_keypoints = model.num_keypoints
num_labels = num_keypoints * 2 + 3 # +2 for width, height, +1 for class label
# Get the parser for the test dataset
fx = float(data_options['fx'])
fy = float(data_options['fy'])
u0 = float(data_options['u0'])
v0 = float(data_options['v0'])
im_width = int(data_options['width'])
im_height = int(data_options['height'])
K = get_camera_intrinsic(u0, v0, fx, fy)
dist_coefs = None
tf_cam_ego = None
cam_params = (K, dist_coefs, im_width, im_height, tf_cam_ego)
valid_dataset = dataset.listDataset(valid_images,
shape=(test_width, test_height),
shuffle=False,
transform=transforms.Compose([transforms.ToTensor(),]),
cam_params=cam_params,
corners3D=corners3D)
# Specify the number of workers for multiprocess data loading, and get the dataloader for the test dataset
kwargs = {'num_workers': 4, 'pin_memory': True}
test_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=False, **kwargs)
logging(" Testing {}...".format(name))
logging(" Number of test samples: %d" % len(test_loader.dataset))
# Iterate through test batches (Batch size for test data is 1)
count = 0
for batch_idx, (data, target) in enumerate(test_loader):
t1 = time.time()
# Pass data to GPU
pdb.set_trace()
data = data.cuda()
target = target.cuda()
# Wrap tensors in Variable class, set volatile=True for inference mode and to use minimal memory during inference
data = Variable(data, volatile=True)
t2 = time.time()
# Forward pass
output = model(data).data
t3 = time.time()
# Using confidence threshold, eliminate low-confidence predictions
all_boxes = get_region_boxes(output, num_classes, num_keypoints)
t4 = time.time()
# Evaluation
# Iterate through all batch elements
for box_pr, target in zip([all_boxes], [target[0]]):
# For each image, get all the targets (for multiple object pose estimation, there might be more than 1 target per image)
truths = target.view(-1, num_labels)
# Get how many objects are present in the scene
num_gts = truths_length(truths)
# Iterate through each ground-truth object
for k in range(num_gts):
box_gt = list()
for j in range(1, 2*num_keypoints+1):
box_gt.append(truths[k][j])
box_gt.extend([1.0, 1.0])
box_gt.append(truths[k][0])
# Denormalize the corner predictions
corners2D_gt = np.array(np.reshape(box_gt[:18], [-1, 2]), dtype='float32')
corners2D_pr = np.array(np.reshape(box_pr[:18], [-1, 2]), dtype='float32')
corners2D_gt[:, 0] = corners2D_gt[:, 0] * im_width
corners2D_gt[:, 1] = corners2D_gt[:, 1] * im_height
corners2D_pr[:, 0] = corners2D_pr[:, 0] * im_width
corners2D_pr[:, 1] = corners2D_pr[:, 1] * im_height
preds_corners2D.append(corners2D_pr)
gts_corners2D.append(corners2D_gt)
# Compute corner prediction error
corner_norm = np.linalg.norm(corners2D_gt - corners2D_pr, axis=1)
corner_dist = np.mean(corner_norm)
errs_corner2D.append(corner_dist)
# [OPTIONAL] generate images with bb drawn on them
draw_2d_proj_of_3D_bounding_box(data, corners2D_pr, corners2D_gt, None, batch_idx, k, im_save_dir = "./backup/{}/valid_output_images/".format(name))
# Compute [R|t] by pnp
R_gt, t_gt = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_gt, np.array(intrinsic_calibration, dtype='float32'))
R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_pr, np.array(intrinsic_calibration, dtype='float32'))
# Compute translation error
trans_dist = np.sqrt(np.sum(np.square(t_gt - t_pr)))
errs_trans.append(trans_dist)
# Compute angle error
angle_dist = calcAngularDistance(R_gt, R_pr)
errs_angle.append(angle_dist)
# Compute pixel error
Rt_gt = np.concatenate((R_gt, t_gt), axis=1)
Rt_pr = np.concatenate((R_pr, t_pr), axis=1)
proj_2d_gt = compute_projection(vertices, Rt_gt, intrinsic_calibration)
proj_2d_pred = compute_projection(vertices, Rt_pr, intrinsic_calibration)
norm = np.linalg.norm(proj_2d_gt - proj_2d_pred, axis=0)
pixel_dist = np.mean(norm)
errs_2d.append(pixel_dist)
# Compute 3D distances
transform_3d_gt = compute_transformation(vertices, Rt_gt)
transform_3d_pred = compute_transformation(vertices, Rt_pr)
norm3d = np.linalg.norm(transform_3d_gt - transform_3d_pred, axis=0)
vertex_dist = np.mean(norm3d)
errs_3d.append(vertex_dist)
# pdb.set_trace()
# Sum errors
testing_error_trans += trans_dist
testing_error_angle += angle_dist
testing_error_pixel += pixel_dist
testing_samples += 1
count = count + 1
if save:
preds_trans.append(t_pr)
gts_trans.append(t_gt)
preds_rot.append(R_pr)
gts_rot.append(R_gt)
np.savetxt(backupdir + '/test/gt/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_gt, dtype='float32'))
np.savetxt(backupdir + '/test/gt/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_gt, dtype='float32'))
np.savetxt(backupdir + '/test/pr/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_pr, dtype='float32'))
np.savetxt(backupdir + '/test/pr/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_pr, dtype='float32'))
np.savetxt(backupdir + '/test/gt/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_gt, dtype='float32'))
np.savetxt(backupdir + '/test/pr/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_pr, dtype='float32'))
t5 = time.time()
# Compute 2D projection error, 6D pose error, 5cm5degree error
px_threshold = 5 # 5 pixel threshold for 2D reprojection error is standard in recent sota 6D object pose estimation works
eps = 1e-5
acc = len(np.where(np.array(errs_2d) <= px_threshold)[0]) * 100. / (len(errs_2d)+eps)
acc5cm5deg = len(np.where((np.array(errs_trans) <= 0.05) & (np.array(errs_angle) <= 5))[0]) * 100. / (len(errs_trans)+eps)
acc3d10 = len(np.where(np.array(errs_3d) <= diam * 0.1)[0]) * 100. / (len(errs_3d)+eps)
corner_acc = len(np.where(np.array(errs_corner2D) <= px_threshold)[0]) * 100. / (len(errs_corner2D)+eps)
mean_err_2d = np.mean(errs_2d)
mean_corner_err_2d = np.mean(errs_corner2D)
nts = float(testing_samples)
if testtime:
print('-----------------------------------')
print(' tensor to cuda : %f' % (t2 - t1))
print(' forward pass : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' prediction time : %f' % (t4 - t1))
print(' eval : %f' % (t5 - t4))
print('-----------------------------------')
# Print test statistics
logging('Results of {}'.format(name))
logging(' Acc using {} px 2D Projection = {:.2f}%'.format(px_threshold, acc))
logging(' Acc using 10% threshold - {} vx 3D Transformation = {:.2f}%'.format(diam * 0.1, acc3d10))
logging(' Acc using 5 cm 5 degree metric = {:.2f}%'.format(acc5cm5deg))
logging(" Mean 2D pixel error is %f, Mean vertex error is %f, mean corner error is %f" % (mean_err_2d, np.mean(errs_3d), mean_corner_err_2d))
logging(' Translation error: %f m, angle error: %f degree, pixel error: % f pix' % (testing_error_trans/nts, testing_error_angle/nts, testing_error_pixel/nts) )
if save:
predfile = backupdir + '/predictions_linemod_' + name + '.mat'
scipy.io.savemat(predfile, {'R_gts': gts_rot, 't_gts':gts_trans, 'corner_gts': gts_corners2D, 'R_prs': preds_rot, 't_prs':preds_trans, 'corner_prs': preds_corners2D})
if __name__ == '__main__':
# Parse configuration files
parser = argparse.ArgumentParser(description='SingleShotPose')
parser.add_argument('--datacfg', type=str, default='cfg/ape.data') # data config
parser.add_argument('--modelcfg', type=str, default='cfg/yolo-pose.cfg') # network config
parser.add_argument('--weightfile', type=str, default='backup/ape/model_backup.weights') # imagenet initialized weights
args = parser.parse_args()
datacfg = args.datacfg
modelcfg = args.modelcfg
weightfile = args.weightfile
valid(datacfg, modelcfg, weightfile)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
sigopt/interface.py
|
import os
import warnings
from .compat import json as simplejson
from .config import config
from .endpoint import ApiEndpoint
from .objects import (
ApiObject,
BestAssignments,
Checkpoint,
Client,
Experiment,
Importances,
MetricImportances,
Observation,
Organization,
Pagination,
Project,
QueuedSuggestion,
Session,
StoppingCriteria,
Suggestion,
Token,
TrainingRun,
)
from .requestor import Requestor, DEFAULT_API_URL
from .resource import ApiResource
from .version import VERSION
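# Internal connection object: builds the tree of ApiResource endpoints and
# delegates all HTTP traffic to the shared Requestor.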
class ConnectionImpl(object):
def __init__(self, requestor, api_url=None, user_agent=None, verify_ssl_certs=None):
self.requestor = requestor
self.api_url = api_url or DEFAULT_API_URL
suggestions = ApiResource(
self,
'suggestions',
endpoints=[
ApiEndpoint(None, Suggestion, 'POST', 'create'),
ApiEndpoint(None, object_or_paginated_objects(Suggestion), 'GET', 'fetch'),
ApiEndpoint(None, Suggestion, 'PUT', 'update'),
ApiEndpoint(None, None, 'DELETE', 'delete'),
],
)
queued_suggestions = ApiResource(
self,
'queued_suggestions',
endpoints=[
ApiEndpoint(None, QueuedSuggestion, 'POST', 'create'),
ApiEndpoint(None, object_or_paginated_objects(QueuedSuggestion), 'GET', 'fetch'),
ApiEndpoint(None, None, 'DELETE', 'delete'),
]
)
observations = ApiResource(
self,
'observations',
endpoints=[
ApiEndpoint('batch', paginated_objects(Observation), 'POST', 'create_batch'),
ApiEndpoint(None, Observation, 'POST', 'create'),
ApiEndpoint(None, object_or_paginated_objects(Observation), 'GET', 'fetch'),
ApiEndpoint(None, Observation, 'PUT', 'update'),
ApiEndpoint(None, None, 'DELETE', 'delete'),
],
)
best_assignments = ApiResource(
self,
'best_assignments',
endpoints=[
ApiEndpoint(None, object_or_paginated_objects(BestAssignments), 'GET', 'fetch'),
],
)
best_training_runs = ApiResource(
self,
'best_training_runs',
endpoints=[
ApiEndpoint(None, paginated_objects(TrainingRun), 'GET', 'fetch'),
],
)
importances = ApiResource(
self,
'importances',
endpoints=[
ApiEndpoint(None, Importances, 'GET', 'fetch'),
],
)
metric_importances = ApiResource(
self,
'metric_importances',
endpoints=[
ApiEndpoint(None, paginated_objects(MetricImportances), 'GET', 'fetch'),
],
)
stopping_criteria = ApiResource(
self,
'stopping_criteria',
endpoints=[
ApiEndpoint(None, StoppingCriteria, 'GET', 'fetch'),
],
)
checkpoints = ApiResource(
self,
'checkpoints',
endpoints=[
ApiEndpoint(None, Checkpoint, 'POST', 'create'),
ApiEndpoint(None, object_or_paginated_objects(Checkpoint), 'GET', 'fetch')
]
)
experiment_training_runs = ApiResource(
self,
'training_runs',
endpoints=[
ApiEndpoint(None, TrainingRun, 'POST', 'create'),
ApiEndpoint(None, object_or_paginated_objects(TrainingRun), 'GET', 'fetch'),
ApiEndpoint(None, TrainingRun, 'PUT', 'update'),
ApiEndpoint(None, None, 'DELETE', 'delete'),
],
resources=[checkpoints],
)
experiment_tokens = ApiResource(
self,
'tokens',
endpoints=[
ApiEndpoint(None, Token, 'POST', 'create'),
],
)
self.tokens = ApiResource(
self,
'tokens',
endpoints=[
ApiEndpoint(None, Token, 'GET', 'fetch'),
],
)
self.experiments = ApiResource(
self,
'experiments',
endpoints=[
ApiEndpoint(None, Experiment, 'POST', 'create'),
ApiEndpoint(None, object_or_paginated_objects(Experiment), 'GET', 'fetch'),
ApiEndpoint(None, Experiment, 'PUT', 'update'),
ApiEndpoint(None, None, 'DELETE', 'delete'),
],
resources=[
best_assignments,
best_training_runs,
experiment_tokens,
experiment_training_runs,
importances,
metric_importances,
observations,
queued_suggestions,
stopping_criteria,
suggestions,
],
)
client_experiments = ApiResource(
self,
'experiments',
endpoints=[
ApiEndpoint(None, Experiment, 'POST', 'create'),
ApiEndpoint(None, paginated_objects(Experiment), 'GET', 'fetch'),
],
)
client_project_experiments = ApiResource(
self,
'experiments',
endpoints=[
ApiEndpoint(None, paginated_objects(Experiment), 'GET', 'fetch'),
],
)
client_project_training_runs = ApiResource(
self,
'training_runs',
endpoints=[
ApiEndpoint(None, paginated_objects(TrainingRun), 'GET', 'fetch'),
ApiEndpoint(None, TrainingRun, 'POST', 'create'),
],
resources=[checkpoints],
)
client_projects = ApiResource(
self,
'projects',
endpoints=[
ApiEndpoint(None, Project, 'POST', 'create'),
ApiEndpoint(None, object_or_paginated_objects(Project), 'GET', 'fetch'),
ApiEndpoint(None, Project, 'PUT', 'update'),
],
resources=[
client_project_experiments,
client_project_training_runs,
],
)
self.training_runs = ApiResource(
self,
'training_runs',
endpoints=[
ApiEndpoint(None, object_or_paginated_objects(TrainingRun), 'GET', 'fetch'),
ApiEndpoint(None, TrainingRun, 'PUT', 'update'),
ApiEndpoint(None, None, 'DELETE', 'delete'),
],
resources=[checkpoints]
)
self.clients = ApiResource(
self,
'clients',
endpoints=[
ApiEndpoint(None, Client, 'GET', 'fetch'),
],
resources=[
client_experiments,
client_projects,
],
)
self.organizations = ApiResource(
self,
'organizations',
endpoints=[
ApiEndpoint(None, object_or_paginated_objects(Organization), 'GET', 'fetch'),
],
)
self.user_agent = user_agent
if verify_ssl_certs is not None:
self.set_verify_ssl_certs(verify_ssl_certs)
self.pki_sessions = ApiResource(
self,
'pki_sessions',
endpoints=[
ApiEndpoint(None, Session, 'POST', 'create'),
],
)
def _request(self, method, url, params, headers=None):
if method.upper() in ('GET', 'DELETE'):
json, params = None, self._request_params(params)
else:
json, params = ApiObject.as_json(params), None
return self.requestor.request(
method,
url,
json=json,
params=params,
headers=headers,
user_agent=self.user_agent,
)
def _get(self, url, params=None):
return self._request('GET', url, params)
def _post(self, url, params=None):
return self._request('POST', url, params)
def _put(self, url, params=None):
return self._request('PUT', url, params)
def _delete(self, url, params=None):
return self._request('DELETE', url, params)
def _request_params(self, params):
req_params = params or {}
def serialize(value):
if isinstance(value, (dict, list)):
return simplejson.dumps(value)
return str(value)
return dict((
(key, serialize(ApiObject.as_json(value)))
for key, value
in req_params.items()
if value is not None
))
def set_api_url(self, api_url):
self.api_url = api_url
def set_verify_ssl_certs(self, verify_ssl_certs):
self.requestor.verify_ssl_certs = verify_ssl_certs
def set_proxies(self, proxies):
self.requestor.proxies = proxies
def set_timeout(self, timeout):
self.requestor.timeout = timeout
def set_client_ssl_certs(self, client_ssl_certs):
self.requestor.client_ssl_certs = client_ssl_certs
def set_client_token(self, client_token):
self.requestor.set_client_token(client_token)
class Connection(object):
"""
Client-facing interface for creating Connections.
Shouldn't be changed without a major version change.
"""
def __init__(self, client_token=None, user_agent=None, session=None, _show_deprecation_warning=True):
if _show_deprecation_warning:
warnings.warn(
"You're currently using the old SigOpt Experience."
" Try out the new and improved SigOpt experience by getting started with the docs today."
" You have until July 2022 to migrate over without experiencing breaking changes.",
UserWarning,
)
client_token = client_token or os.environ.get('SIGOPT_API_TOKEN', config.api_token)
api_url = os.environ.get('SIGOPT_API_URL') or DEFAULT_API_URL
# no-verify overrides a passed in path
no_verify_ssl_certs = os.environ.get('SIGOPT_API_NO_VERIFY_SSL_CERTS')
if no_verify_ssl_certs:
verify_ssl_certs = False
else:
verify_ssl_certs = os.environ.get('SIGOPT_API_VERIFY_SSL_CERTS')
if not client_token:
raise ValueError('Must provide client_token or set environment variable SIGOPT_API_TOKEN')
default_headers = {
'Content-Type': 'application/json',
'X-SigOpt-Python-Version': VERSION,
}
requestor = Requestor(
client_token,
'',
default_headers,
session=session,
)
self.impl = ConnectionImpl(requestor, api_url=api_url, user_agent=user_agent, verify_ssl_certs=verify_ssl_certs)
def set_api_url(self, api_url):
self.impl.set_api_url(api_url)
def set_verify_ssl_certs(self, verify_ssl_certs):
self.impl.set_verify_ssl_certs(verify_ssl_certs)
def set_proxies(self, proxies):
self.impl.set_proxies(proxies)
def set_timeout(self, timeout):
self.impl.set_timeout(timeout)
def set_client_ssl_certs(self, client_ssl_certs):
self.impl.set_client_ssl_certs(client_ssl_certs)
def set_client_token(self, client_token):
self.impl.set_client_token(client_token)
@property
def clients(self):
return self.impl.clients
@property
def experiments(self):
return self.impl.experiments
@property
def organizations(self):
return self.impl.organizations
@property
def tokens(self):
return self.impl.tokens
@property
def training_runs(self):
return self.impl.training_runs
  @property
  def pki_sessions(self):
    return self.impl.pki_sessions
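# Wraps a raw response body in a Pagination of api_object instances.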
def paginated_objects(api_object):
def decorator(body, *args, **kwargs):
return Pagination(api_object, body, *args, **kwargs)
return decorator
# Allows response to be a single object of class some_class or a paginated
# response of objects that come from class some_class
def object_or_paginated_objects(api_object):
def decorator(body, *args, **kwargs):
if body.get('object') == 'pagination':
return Pagination(api_object, body, *args, **kwargs)
return api_object(body, *args, **kwargs)
return decorator
_global_connection = None
def get_connection():
global _global_connection
if _global_connection is None:
_global_connection = Connection(_show_deprecation_warning=False)
return _global_connection
|
[] |
[] |
[
"SIGOPT_API_TOKEN",
"SIGOPT_API_URL",
"SIGOPT_API_VERIFY_SSL_CERTS",
"SIGOPT_API_NO_VERIFY_SSL_CERTS"
] |
[]
|
["SIGOPT_API_TOKEN", "SIGOPT_API_URL", "SIGOPT_API_VERIFY_SSL_CERTS", "SIGOPT_API_NO_VERIFY_SSL_CERTS"]
|
python
| 4 | 0 | |
cspace/main/appletserver.py
|
import os, sys, threading
from string import Template
from ncrypt.rand import bytes as rand_bytes
from ncrypt.rsa import RSAKey, RSAError
from nitro.selectreactor import SelectReactor
from nitro.tcp import tcpListen, TCPStream
from nitro.ssl import sslAbort
from nitro.linestream import TCPLineStream
from cspace.util.spawn import spawnProcess
from cspace.util.hexcode import hexEncode, hexDecode, HexDecodeError
from cspace.util.wordcode import wordEncode, wordDecode, WordDecodeError
from cspace.util.settings import getAppDir
from cspace.util.queue import ThreadQueue
from cspace.main.common import localSettings, appSettings, \
isValidUserName, isValidServiceName
from cspace.main.sslbridge import SSLBridge
def _substituteMetaVars( s ) :
if sys.platform == 'win32' :
_metaDict = dict( python='python.exe', pythonw='pythonw.exe' )
else :
_metaDict = dict( python='python', pythonw='python' )
_metaDict['approot'] = getAppDir()
return Template( s ).safe_substitute( _metaDict )
def _readCommand( settings, entryPath ) :
data = settings.getData( entryPath ).strip()
lines = [line.strip() for line in data.split('\n')]
return lines
class ServiceConfig( object ) :
def _listServices( self, settings ) :
services = []
for entry in settings.listEntries('Services') :
serviceName = entry.split('/')[-1]
assert isValidServiceName(serviceName)
serviceCommand = _readCommand( settings, entry )
services.append( (serviceName,serviceCommand) )
return services
def listSystemServices( self ) :
return self._listServices( appSettings() )
def listUserServices( self ) :
return self._listServices( localSettings() )
def listActiveServices( self ) :
sysServices = self.listSystemServices()
userServices = self.listUserServices()
serviceDict = {}
out = []
for x in userServices+sysServices :
if x[0] in serviceDict : continue
serviceDict[x[0]] = x
out.append( x )
return out
class ActionConfig( object ) :
def _listActions( self, settings ) :
actions = []
for entry in settings.listEntries('ContactActions') :
actionDir = entry.split('/')[-1]
assert isValidServiceName(actionDir)
actionName = settings.getData( entry+'/Action' ).strip()
actionCommand = _readCommand( settings, entry+'/Command' )
actionOrder = settings.getInt(entry+'/SortOrder',10000)
actions.append( (actionDir,actionName,actionCommand,actionOrder) )
return actions
def listSystemActions( self ) :
return self._listActions( appSettings() )
def listUserActions( self ) :
return self._listActions( localSettings() )
def listActiveActions( self ) :
sysActions = self.listSystemActions()
userActions = self.listUserActions()
actionDict = {}
out = []
for x in userActions+sysActions :
if x[0] in actionDict : continue
actionDict[x[0]] = x
out.append( x )
return out
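# Runs a SelectReactor on a dedicated thread and bridges applet sockets to peer
# SSL connections; work is posted to it through the thread-safe message queue.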
class BridgeThread( threading.Thread ) :
def __init__( self ) :
threading.Thread.__init__( self )
self.reactor = SelectReactor()
self.threadQueue = ThreadQueue( self._onMessage, self.reactor )
self.bridges = {}
self.start()
def _onMessage( self, msg ) :
cmd,args = msg[0],msg[1:]
if cmd == 'bridge' :
sock,sslConn = args
bridge = SSLBridge( sock, sslConn, self.reactor )
self.bridges[bridge] = 1
bridge.setCloseCallback( lambda : self._onBridgeClosed(bridge) )
elif cmd == 'clear' :
for b in self.bridges.keys() :
b.shutdown()
self.bridges.clear()
elif cmd == 'stop' :
for b in self.bridges.keys() :
b.shutdown()
self.bridges.clear()
self.reactor.stop()
def _onBridgeClosed( self, bridge ) :
del self.bridges[bridge]
def run( self ) :
self.reactor.run()
class AppletConnection( object ) :
DEFAULT = 0
CONNECTING = 1
WAITING_BRIDGE = 2
LISTENER = 3
CLOSED = 4
def __init__( self, sock, reactor, appletServer ) :
self.reactor = reactor
self.stream = TCPLineStream( sock, reactor )
self.appletServer = appletServer
self.appletServer.appletConnections[self] = 1
self.session = appletServer.session
self.incoming = appletServer.incoming
self.state = self.DEFAULT
self._writeData = self.stream.writeData
rt = {}
self.requestTable = rt
rt['echo'] = self._doEcho
rt['getcontacts'] = self._doGetContacts
rt['getpubkey'] = self._doGetPubKey
rt['getcontactpubkeys'] = self._doGetContactPubKeys
rt['connect'] = self._doConnect
rt['connectpubkey'] = self._doConnectPubKey
rt['accept'] = self._doAccept
rt['getincomingpubkey'] = self._doGetIncomingPubKey
rt['registerlistener'] = self._doRegisterListener
rt['sendlistener'] = self._doSendListener
self.stream.setInputCallback( self._onInput )
self.stream.setCloseCallback( self._onClose )
self.stream.setErrorCallback( self._onError )
self.stream.enableRead( True )
def _setClosed( self ) :
del self.appletServer.appletConnections[self]
self.state = self.CLOSED
def shutdown( self, deferred=False ) :
if self.state == self.CONNECTING :
self.connectOp.cancel()
elif self.state == self.LISTENER :
self.appletServer.unregisterListener( self.listenerName )
elif self.state == self.WAITING_BRIDGE :
sslAbort( self.peerSSLConn )
self.stream.close( deferred )
self._setClosed()
def _onClose( self ) :
self.shutdown()
def _onError( self, err, errMsg ) :
self.shutdown()
def _writeLine( self, line ) :
self._writeData( line + '\r\n' )
def _writeWords( self, words ) :
words = [wordEncode(w) for w in words]
self._writeData( ' '.join(words) + '\r\n' )
def _writeError( self, msg ) :
self._writeLine( 'ERROR %s' % msg )
def _writeResult( self, words ) :
self._writeWords( ['OK'] + words )
def dispatchMessage( self, msg ) :
assert self.state == self.LISTENER
self._writeWords( ['MSG'] + msg )
def _doEcho( self, words ) :
self._writeResult( words )
def _doGetContacts( self, words ) :
if len(words) != 0 :
self._writeError( 'Malformed request' )
return
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
names = self.session.getProfile().getContactNames()
        self._writeResult( names )
def _doGetPubKey( self, words ) :
if len(words) > 1 :
self._writeError( 'Malformed request' )
return
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
if len(words) == 0 :
keyData = self.session.getProfile().rsaKey.toDER_PublicKey()
self._writeResult( [hexEncode(keyData)] )
return
contact = self.session.getProfile().getContactByName( words[0] )
if contact is None :
self._writeError( 'Unknown contact' )
return
self._writeResult( [hexEncode(contact.publicKeyData)] )
def _doGetContactPubKeys( self, words ) :
if len(words) != 0 :
self._writeError( 'Malformed request' )
return
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
out = []
profile = self.session.getProfile()
for name in profile.getContactNames() :
c = profile.getContactByName( name )
out.extend( [c.name,hexEncode(c.publicKeyData)] )
self._writeResult( out )
def _connectInternal( self, publicKey, service ) :
def onWriteComplete() :
self.stream.shutdown()
sock = self.stream.getSock()
self.appletServer.bridgeThread.threadQueue.postMessage(
('bridge',sock,self.peerSSLConn) )
self._setClosed()
def onConnect( err, sslConn ) :
if err < 0 :
self._writeError( 'Connect failed' )
self.state = self.DEFAULT
return
self._writeResult( ['Connected'] )
self.peerSSLConn = sslConn
self.state = self.WAITING_BRIDGE
self.stream.enableRead( False )
self.stream.setWriteCompleteCallback( onWriteComplete )
self.connectOp = self.session.connectTo( publicKey, service,
onConnect )
self.state = self.CONNECTING
def _doConnect( self, words ) :
if len(words) != 2 :
self._writeError( 'Malformed request' )
return
contactName, service = words
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
contact = self.session.getProfile().getContactByName( contactName )
if not contact :
self._writeError( 'Unknown contact' )
return
self._connectInternal( contact.publicKey, service )
def _doConnectPubKey( self, words ) :
if len(words) != 2 :
self._writeError( 'Malformed request' )
return
hexPubKey, service = words
if not self.session.isOnline() :
self._writeError( 'Not online' )
return
try :
pubKeyData = hexDecode( hexPubKey )
pubKey = RSAKey()
pubKey.fromDER_PublicKey( pubKeyData )
except (HexDecodeError,RSAError) :
self._writeError( 'Malformed publickey' )
return
self._connectInternal( pubKey, service )
def _doAccept( self, words ) :
if len(words) != 1 :
self._writeError( 'Malformed request' )
return
connectionId = words[0]
sslConn = self.incoming.acceptIncoming( connectionId )
if not sslConn :
self._writeError( 'Invalid connection' )
return
self._writeResult( ['Connected'] )
self.peerSSLConn = sslConn
self.state = self.WAITING_BRIDGE
self.stream.enableRead( False )
def onWriteComplete() :
self.stream.shutdown()
sock = self.stream.getSock()
self.appletServer.bridgeThread.threadQueue.postMessage(
('bridge',sock,self.peerSSLConn) )
self._setClosed()
self.stream.setWriteCompleteCallback( onWriteComplete )
def _doGetIncomingPubKey( self, words ) :
if len(words) != 1 :
self._writeError( 'Malformed request' )
return
connectionId = words[0]
peerKey = self.incoming.getPeerKey( connectionId )
if not peerKey :
self._writeError( 'Invalid connection' )
return
self._writeResult( [hexEncode(peerKey.toDER_PublicKey())] )
def _doRegisterListener( self, words ) :
if len(words) != 1 :
self._writeError( 'Malformed request' )
return
listenerName = words[0]
result = self.appletServer.registerListener( listenerName, self )
if not result :
self._writeError( 'Listener already registered' )
return
self.listenerName = listenerName
self.state = self.LISTENER
self._writeResult( ['Registered'] )
def _doSendListener( self, words ) :
if len(words) <= 1 :
self._writeError( 'Malformed request' )
return
listenerName = words[0]
listener = self.appletServer.getListener( listenerName )
if listener is None :
self._writeError( 'No such listener' )
return
listener.dispatchMessage( words[1:] )
self._writeResult( ['Sent'] )
def _onInput( self, line ) :
assert self.state in (self.DEFAULT,self.CONNECTING,self.LISTENER)
if self.state in (self.CONNECTING,self.LISTENER) :
self._writeError( 'Junk received' )
self.shutdown( deferred=True )
return
words = line.strip().split()
if len(words) == 0 : return
try :
words = [wordDecode(w) for w in words]
except WordDecodeError :
self._writeError( 'Malformed request' )
return
cmd = words[0].lower()
handler = self.requestTable.get( cmd, None )
if not handler :
self._writeError( 'Unknown request' )
return
handler( words[1:] )
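# Tracks pending incoming SSL connections keyed by a random id; connections that
# are not accepted within 30 seconds are aborted.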
class IncomingConnections( object ) :
def __init__( self, reactor ) :
self.reactor = reactor
self.connections = {}
def clearConnections( self ) :
for sslConn,peerKey,timerOp in self.connections.values() :
sslAbort( sslConn )
timerOp.cancel()
self.connections.clear()
def addIncoming( self, sslConn, peerKey ) :
while True :
connectionId = hexEncode( rand_bytes(8) )
if connectionId not in self.connections : break
def onTimeout() : self._onTimeout( connectionId )
timerOp = self.reactor.callLater( 30, onTimeout )
self.connections[connectionId] = (sslConn,peerKey,timerOp)
return connectionId
def acceptIncoming( self, connectionId ) :
info = self.connections.pop( connectionId, None )
if info is None :
return None
sslConn,peerKey,timerOp = info
timerOp.cancel()
return sslConn
def getPeerKey( self, connectionId ) :
info = self.connections.get( connectionId )
if info is None :
return None
return info[1]
def _onTimeout( self, connectionId ) :
sslConn,peerKey,timerOp = self.connections.pop( connectionId )
sslAbort( sslConn )
class AppletServer( object ) :
def __init__( self, session, actionManager, reactor ) :
self.session = session
self.actionManager = actionManager
self.reactor = reactor
self.listener = tcpListen( ('127.0.0.1',0), reactor, self._onNewConnection )
self.listenPort = self.listener.getSock().getsockname()[1]
print 'listenport = %d' % self.listenPort
self.serviceConfig = ServiceConfig()
self.actionConfig = ActionConfig()
self.listeners = {}
self.incoming = IncomingConnections( self.reactor )
self.services = []
self.appletConnections = {}
for (service,command) in self.serviceConfig.listActiveServices() :
def doRegisterService( service, command ) :
def onService( sslConn, peerKey, contactName, incomingName ) :
self._onService( service, command, sslConn,
peerKey, contactName, incomingName )
self.session.registerService( service, onService )
doRegisterService( service, command )
self.services.append( service )
self.actions = []
for (actionDir,action,command,order) in self.actionConfig.listActiveActions() :
def doRegisterAction( actionDir, action, command, order ) :
def onAction( contactName ) :
self._onAction( actionDir, action, command, contactName )
return self.actionManager.registerAction( action, onAction, order )
actionId = doRegisterAction( actionDir, action, command, order )
self.actions.append( actionId )
if actionDir == 'TextChat' :
self.actionManager.setDefaultAction( actionId )
self.bridgeThread = BridgeThread()
def shutdown( self ) :
self.incoming.clearConnections()
appletConns = self.appletConnections.keys()
for conn in appletConns :
conn.shutdown()
self.bridgeThread.threadQueue.postMessage( ('stop',) )
self.bridgeThread.join()
self.listener.close()
def clearConnections( self ) :
self.incoming.clearConnections()
appletConns = self.appletConnections.keys()
for conn in appletConns :
conn.shutdown()
self.bridgeThread.threadQueue.postMessage( ('clear',) )
def getListenPort( self ) : return self.listenPort
def registerListener( self, name, connection ) :
conn = self.listeners.setdefault( name, connection )
return conn is connection
def unregisterListener( self, name ) :
del self.listeners[name]
def getListener( self, name ) :
return self.listeners.get( name, None )
def _onNewConnection( self, sock ) :
AppletConnection( sock, self.reactor, self )
def _findProgram( self, relPath ) :
dirList = os.environ.get( 'PATH', '' ).split( ';' )
for d in dirList :
p = os.path.join( d, relPath )
if os.path.isfile(p) :
return p
return relPath
def _runCommand( self, command, envNew ) :
env = dict( os.environ.items() )
env.update( envNew )
cmdLine = [_substituteMetaVars(x) for x in command]
p = os.path.join( getAppDir(), cmdLine[0] )
if not os.path.isfile(p) :
p = self._findProgram( cmdLine[0] )
args = [p] + cmdLine[1:]
startingDir = os.getcwd()
result = spawnProcess( p, args, env, startingDir, 0 )
if not result :
print 'error starting command (%s)' % p
def _onService( self, service, command, sslConn, peerKey,
contactName, incomingName ) :
print '_onService( service=%s command=%s from=(%s,%s) )' % (
service, command, contactName, incomingName )
connectionId = self.incoming.addIncoming( sslConn, peerKey )
env = {}
env['CSPACE_PORT'] = str(self.listenPort)
env['CSPACE_USER'] = self.session.getProfile().name
env['CSPACE_EVENT'] = 'INCOMING'
env['CSPACE_SERVICE'] = service
env['CSPACE_CONNECTIONID'] = connectionId
env['CSPACE_CONTACTNAME'] = contactName
env['CSPACE_INCOMINGNAME'] = incomingName
self._runCommand( command, env )
def _onAction( self, actionDir, action, command, contactName ) :
print '_onAction( actionDir=%s, action=%s, command=%s, contact=%s )' % (
actionDir, action, command, contactName )
env = {}
env['CSPACE_PORT'] = str(self.listenPort)
env['CSPACE_USER'] = self.session.getProfile().name
env['CSPACE_EVENT'] = 'CONTACTACTION'
env['CSPACE_CONTACTNAME'] = contactName
env['CSPACE_ACTIONDIR'] = actionDir
env['CSPACE_ACTION'] = action
self._runCommand( command, env )
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
src/main/java/com/artipie/rpm/benchmarks/RpmBench.java
|
/*
* The MIT License (MIT) Copyright (c) 2020-2021 artipie.com
* https://github.com/artipie/rpm-adapter/LICENSE.txt
*/
package com.artipie.rpm.benchmarks;
import com.artipie.asto.Key;
import com.artipie.asto.Storage;
import com.artipie.asto.fs.FileStorage;
import com.artipie.asto.memory.InMemoryStorage;
import com.artipie.asto.rx.RxStorageWrapper;
import com.artipie.rpm.Rpm;
import hu.akarnokd.rxjava2.interop.CompletableInterop;
import hu.akarnokd.rxjava2.interop.SingleInterop;
import io.reactivex.Observable;
import io.reactivex.Single;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.BenchmarkMode;
import org.openjdk.jmh.annotations.Level;
import org.openjdk.jmh.annotations.Measurement;
import org.openjdk.jmh.annotations.Mode;
import org.openjdk.jmh.annotations.OutputTimeUnit;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;
import org.openjdk.jmh.annotations.Warmup;
import org.openjdk.jmh.infra.Blackhole;
import org.openjdk.jmh.runner.Runner;
import org.openjdk.jmh.runner.RunnerException;
import org.openjdk.jmh.runner.options.OptionsBuilder;
/**
 * Benchmark for {@link Rpm}.
* @since 1.4
* @checkstyle MagicNumberCheck (500 lines)
* @checkstyle DesignForExtensionCheck (500 lines)
* @checkstyle JavadocMethodCheck (500 lines)
*/
@BenchmarkMode(Mode.AverageTime)
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@State(Scope.Benchmark)
@Warmup(iterations = 5)
@Measurement(iterations = 20)
public class RpmBench {
/**
* Benchmark directory.
*/
private static final String BENCH_DIR = System.getenv("BENCH_DIR");
/**
* Repository storage.
*/
private Storage storage;
@Setup
public void setup() {
if (RpmBench.BENCH_DIR == null) {
throw new IllegalStateException("BENCH_DIR environment variable must be set");
}
this.storage = new InMemoryStorage();
final Storage src = new FileStorage(Paths.get(RpmBench.BENCH_DIR));
RpmBench.sync(src, this.storage);
}
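    /**
     * Clears generated repodata between iterations so each benchmark run
     * rebuilds the repository metadata from scratch.
     */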
@Setup(Level.Iteration)
public void setupIter() {
final RxStorageWrapper rxst = new RxStorageWrapper(this.storage);
rxst.list(new Key.From("repodata"))
.flatMapObservable(Observable::fromIterable)
.flatMapCompletable(key -> rxst.delete(key))
.to(CompletableInterop.await()).toCompletableFuture().join();
}
@Benchmark
public void run(final Blackhole bhl) {
new Rpm(this.storage).batchUpdateIncrementally(Key.ROOT)
.to(CompletableInterop.await())
.toCompletableFuture().join();
}
/**
* Main.
* @param args CLI args
* @throws RunnerException On benchmark failure
*/
public static void main(final String... args) throws RunnerException {
new Runner(
new OptionsBuilder()
.include(RpmBench.class.getSimpleName())
.forks(1)
.build()
).run();
}
/**
* Sync storages.
* @param src Source storage
* @param dst Destination storage
*/
private static void sync(final Storage src, final Storage dst) {
Single.fromFuture(src.list(Key.ROOT))
.flatMapObservable(Observable::fromIterable)
.flatMapSingle(
key -> Single.fromFuture(
src.value(key)
.thenCompose(content -> dst.save(key, content))
.thenApply(none -> true)
)
).toList().map(ignore -> true).to(SingleInterop.get())
.toCompletableFuture().join();
}
}
|
[
"\"BENCH_DIR\""
] |
[] |
[
"BENCH_DIR"
] |
[]
|
["BENCH_DIR"]
|
java
| 1 | 0 | |
core/router/router_test.go
|
package router_test
import (
"context"
"github.com/go-chassis/go-chassis/v2/core/marker"
"net/http"
"os"
"path/filepath"
"testing"
"github.com/go-chassis/go-chassis/v2/core/lager"
"github.com/go-chassis/go-chassis/v2/core/common"
"github.com/go-chassis/go-chassis/v2/core/config"
"github.com/go-chassis/go-chassis/v2/core/invocation"
"github.com/go-chassis/go-chassis/v2/core/registry"
"github.com/go-chassis/go-chassis/v2/core/router"
_ "github.com/go-chassis/go-chassis/v2/core/router/servicecomb"
"github.com/stretchr/testify/assert"
)
func init() {
lager.Init(&lager.Options{
LoggerLevel: "INFO",
})
}
func TestBuildRouter(t *testing.T) {
path := os.Getenv("GOPATH")
os.Setenv("CHASSIS_HOME", filepath.Join(path, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "server"))
config.Init()
router.BuildRouter("cse")
err := router.BuildRouter("fake")
assert.Error(t, err)
err = router.BuildRouter("cse")
assert.NoError(t, err)
assert.NotNil(t, router.DefaultRouter)
}
func TestRPCRoute(t *testing.T) {
	si := &registry.SourceInfo{
Tags: map[string]string{},
}
si.Tags[common.BuildinTagVersion] = "v2"
d := map[string][]*config.RouteRule{
"RPCServer": {
{
Precedence: 2,
Match: config.Match{
Headers: map[string]map[string]string{
"test": {"exact": "user"},
},
},
Routes: []*config.RouteTag{
{Weight: 100, Tags: map[string]string{"version": "v2"}},
},
},
{
Precedence: 1,
Routes: []*config.RouteTag{
{Weight: 100, Tags: map[string]string{"version": "v3"}},
},
},
},
}
router.BuildRouter("cse")
router.DefaultRouter.SetRouteRule(d)
header := map[string]string{
"cookie": "user=jason",
"X-Age": "18",
"test": "user",
}
inv := new(invocation.Invocation)
inv.MicroServiceName = "RPCServer"
err := router.Route(header, si, inv)
assert.Nil(t, err, "")
assert.Equal(t, "v2", inv.RouteTags.Version())
assert.Equal(t, "RPCServer", inv.MicroServiceName)
}
func TestRoute(t *testing.T) {
	si := &registry.SourceInfo{
Tags: map[string]string{},
}
si.Name = "vmall"
si.Tags[common.BuildinTagApp] = "HelloWorld"
si.Tags[common.BuildinTagVersion] = "v2"
d := map[string][]*config.RouteRule{
"server": {
{
Precedence: 2,
Match: config.Match{
Headers: map[string]map[string]string{
"test": {"regex": "user"},
},
},
Routes: []*config.RouteTag{
{Weight: 80, Tags: map[string]string{"version": "1.2", "app": "HelloWorld"}},
{Weight: 20, Tags: map[string]string{"version": "2.0", "app": "HelloWorld"}},
},
},
},
"ShoppingCart": {
{
Precedence: 2,
Match: config.Match{
Headers: map[string]map[string]string{
"cookie": {"regex": "^(.*?;)?(user=jason)(;.*)?$"},
},
},
Routes: []*config.RouteTag{
{Weight: 80, Tags: map[string]string{"version": "1.2", "app": "HelloWorld"}},
{Weight: 20, Tags: map[string]string{"version": "2.0", "app": "HelloWorld"}},
},
}, {
Precedence: 1,
Routes: []*config.RouteTag{
{Weight: 100, Tags: map[string]string{"version": "v3", "app": "HelloWorld"}},
},
},
},
}
router.BuildRouter("cse")
router.DefaultRouter.SetRouteRule(d)
header := map[string]string{
"cookie": "user=jason",
"X-Age": "18",
}
inv := new(invocation.Invocation)
inv.MicroServiceName = "ShoppingCart"
err := router.Route(header, si, inv)
assert.Nil(t, err, "")
assert.Equal(t, "HelloWorld", inv.RouteTags.AppID())
assert.Equal(t, "1.2", inv.RouteTags.Version())
assert.Equal(t, "ShoppingCart", inv.MicroServiceName)
inv.MicroServiceName = "server"
header["test"] = "test"
si.Name = "reviews.default.svc.cluster.local"
err = router.Route(header, si, inv)
assert.Nil(t, err, "")
inv.MicroServiceName = "notexist"
err = router.Route(header, nil, inv)
assert.Nil(t, err, "")
}
func TestRoute2(t *testing.T) {
d := map[string][]*config.RouteRule{
"catalogue": {
{
Precedence: 2,
Routes: []*config.RouteTag{
{Weight: 100, Tags: map[string]string{"version": "0.0.1", "app": "sockshop"}},
},
},
},
"orders": {
{
Precedence: 2,
Routes: []*config.RouteTag{
{Weight: 100, Tags: map[string]string{"version": "0.0.1", "app": "sockshop"}},
},
},
},
"carts": {
{
Precedence: 2,
Routes: []*config.RouteTag{
{Weight: 100, Tags: map[string]string{"version": "0.0.1", "app": "sockshop"}},
},
},
},
}
router.BuildRouter("cse")
router.DefaultRouter.SetRouteRule(d)
header := map[string]string{}
inv := new(invocation.Invocation)
inv.MicroServiceName = "carts"
err := router.Route(header, nil, inv)
assert.Nil(t, err, "")
t.Log(inv.RouteTags.AppID())
t.Log(inv.RouteTags.Version())
assert.Equal(t, "sockshop", inv.RouteTags.AppID())
assert.Equal(t, "0.0.1", inv.RouteTags.Version())
}
func TestMatch(t *testing.T) {
	si := &registry.SourceInfo{
Tags: map[string]string{},
}
si.Name = "service"
si.Tags[common.BuildinTagApp] = "app"
si.Tags[common.BuildinTagVersion] = "0.1"
matchConf := initMatch("service", "((abc.)def.)ghi", map[string]string{"tag1": "v1"})
headers := map[string]string{}
headers["cookie"] = "abc-def-ghi"
assert.Equal(t, false, router.Match(nil, matchConf, headers, si))
si.Tags["tag1"] = "v1"
assert.Equal(t, false, router.Match(nil, matchConf, headers, si))
headers["age"] = "15"
assert.Equal(t, true, router.Match(nil, matchConf, headers, si))
matchConf.HTTPHeaders["noEqual"] = map[string]string{"noEqu": "e"}
assert.Equal(t, true, router.Match(nil, matchConf, headers, si))
headers["noEqual"] = "noe"
assert.Equal(t, true, router.Match(nil, matchConf, headers, si))
matchConf.HTTPHeaders["noLess"] = map[string]string{"noLess": "100"}
headers["noLess"] = "120"
assert.Equal(t, true, router.Match(nil, matchConf, headers, si))
matchConf.HTTPHeaders["noGreater"] = map[string]string{"noGreater": "100"}
headers["noGreater"] = "120"
assert.Equal(t, false, router.Match(nil, matchConf, headers, si))
si.Name = "error"
assert.Equal(t, false, router.Match(nil, matchConf, headers, si))
headers["cookie"] = "7gh"
si.Name = "service"
assert.Equal(t, false, router.Match(nil, matchConf, headers, si))
}
func TestMatchRefer(t *testing.T) {
m := config.Match{}
m.Refer = "testMarker"
inv := invocation.New(context.TODO())
b := router.Match(inv, m, nil, nil)
assert.False(t, b)
inv.Args, _ = http.NewRequest("GET", "some/api", nil)
inv.Metadata = make(map[string]interface{})
testMatchPolicy := `
apiPath:
contains: "some/api"
method: GET
`
marker.SaveMatchPolicy(testMatchPolicy, "servicecomb.marker."+m.Refer, m.Refer)
b = router.Match(inv, m, nil, nil)
assert.True(t, b)
}
func TestFitRate(t *testing.T) {
tags := InitTags("0.1", "0.2")
tag := router.FitRate(tags, "service") //0,0
assert.Equal(t, "0.1", tag.Tags["version"])
tag = router.FitRate(tags, "service") //100%, 0
assert.Equal(t, "0.2", tag.Tags["version"])
tag = router.FitRate(tags, "service") //50%, 50%
assert.Equal(t, "0.1", tag.Tags["version"])
count := 100
for count > 0 {
go fit()
count--
}
}
func fit() {
tags := InitTags("0.1", "0.2")
router.FitRate(tags, "service")
}
func TestSortRules(t *testing.T) {
router.BuildRouter("cse")
router.DefaultRouter.SetRouteRule(InitDests())
assert.Equal(t, 20, router.SortRules("test")[3].Precedence)
}
func InitDests() map[string][]*config.RouteRule {
r1 := &config.RouteRule{}
r2 := &config.RouteRule{}
r3 := &config.RouteRule{}
r4 := &config.RouteRule{}
r5 := &config.RouteRule{}
r1.Precedence = 20
r2.Precedence = 30
r3.Precedence = 50
r4.Precedence = 40
r5.Precedence = 10
r1.Routes = InitTags("0.11", "0.2")
r2.Routes = InitTags("1.1", "1.2")
r3.Routes = InitTags("2.1", "2.2")
match1 := initMatch("source", "((abc.)def.)ghi", map[string]string{"tag1": "v1"})
match2 := initMatch("source", "notmatch", map[string]string{"tag1": "v1"})
match3 := initMatch("source1", "((abc.)def.)ghi", map[string]string{"tag1": "v1"})
r2.Match = match2
r1.Match = match1
r3.Match = match3
rules := []*config.RouteRule{r1, r2, r3, r4, r5}
dests := map[string][]*config.RouteRule{"test": rules}
return dests
}
func InitTags(v1 string, v2 string) []*config.RouteTag {
tag1 := new(config.RouteTag)
tag2 := new(config.RouteTag)
tag1.Weight = 50
tag2.Weight = 50
tag1.Tags = map[string]string{"version": v1}
tag2.Tags = map[string]string{"version": v2}
tags := []*config.RouteTag{tag1, tag2}
return tags
}
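// initMatch builds a Match rule that requires the given source service and tags,
// a cookie header matching pat, and an "age" header greater than 10.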
func initMatch(source string, pat string, tags map[string]string) config.Match {
match := config.Match{}
match.Source = source
match.SourceTags = tags
regex := map[string]string{"regex": pat}
greater := map[string]string{"greater": "10"}
match.HTTPHeaders = map[string]map[string]string{"cookie": regex, "age": greater}
return match
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
conflicts/tests/conflictBP_processConflicts_test.py
|
from unittest.mock import MagicMock, patch
from scheduleServer import app
import unittest
from helperFunctions.helperFunctions import stdRet, AuthenticatedUser
class TestConflictBP_getRAConflicts(unittest.TestCase):
def setUp(self):
# Set up a number of items that will be used for these tests.
# -- Mock the os.environ method so that we can create the server. --
# Helper Dict for holding the os.environ configuration
self.helper_osEnviron = {
"CLIENT_ID": "TEST CLIENT_ID",
"PROJECT_ID": "TEST PROJECT_ID",
"AUTH_URI": "TEST AUTH_URI",
"TOKEN_URI": "TEST TOKEN_URI",
"AUTH_PROVIDER_X509_CERT_URL": "TEST AUTH_PROVIDER_X509_CERT_URL",
"CLIENT_SECRET": "TEST CLIENT_SECRET",
"REDIRECT_URIS": "TEST1,TEST2,TEST3,TEST4",
"JAVASCRIPT_ORIGINS": "TEST5,TEST6",
"EXPLAIN_TEMPLATE_LOADING": "FALSE",
"LOG_LEVEL": "WARNING",
"USE_ADHOC": "FALSE",
"SECRET_KEY": "TEST SECRET KEY",
"OAUTHLIB_RELAX_TOKEN_SCOPE": "1",
"OAUTHLIB_INSECURE_TRANSPORT": "1",
"HOST_URL": "https://localhost:5000",
"DATABASE_URL": "postgres://ra_sched"
}
# Create a dictionary patcher for the os.environ method
self.patcher_osEnviron = patch.dict("os.environ",
self.helper_osEnviron)
# Start the os patchers (No mock object is returned since we used patch.dict())
self.patcher_osEnviron.start()
# -- Create an instance of ScheduleServer that we may test against. --
# Mark the application as being tested
app.config["TESTING"] = True
# Disable the login_required decorator
app.config["LOGIN_DISABLED"] = True
# Reinitialize the Login Manager to accept the new configuration
app.login_manager.init_app(app)
# Create the test server
self.server = app.test_client()
# -- Create a patcher for the getAuth() method from helperFunctions --
# since we have disabled the login manager for testing
# First we must create an object for the auth_level that we can manipulate
# as needed for the tests. By default, the auth_level is set to 1.
self.mocked_authLevel = MagicMock(return_value=1)
# In order for the authLevel to respond to __lt__, __gt__, and __eq__ calls,
# we need to create lambda functions that can effectively implement the
# respective magic methods.
self.mocked_authLevel_ltMock = lambda me, other: me.return_value < other
self.mocked_authLevel_gtMock = lambda me, other: me.return_value > other
self.mocked_authLevel_eqMock = lambda me, other: me.return_value == other
# We then set the auth_level mock to return the __lt__ Mock
self.mocked_authLevel.__lt__ = self.mocked_authLevel_ltMock
# We then set the auth_level mock to return the __gt__ Mock
        self.mocked_authLevel.__gt__ = self.mocked_authLevel_gtMock
# We then set the auth_level mock to return the __eq__ Mock
        self.mocked_authLevel.__eq__ = self.mocked_authLevel_eqMock
# Set the ra_id and hall_id to values that can be used throughout
self.user_ra_id = 1
self.user_hall_id = 1
self.associatedResHalls = [
{
"id": self.user_hall_id,
"auth_level": self.mocked_authLevel,
"name": "Test Hall"
}
]
# Assemble all of the desired values into an Authenticated User Object
self.helper_getAuth = AuthenticatedUser(
"[email protected]",
self.user_ra_id,
"Test",
"User",
self.associatedResHalls
)
# Create the patcher for the getAuth() method
self.patcher_getAuth = patch("conflicts.conflicts.getAuth", autospec=True)
# Start the patcher - mock returned
self.mocked_getAuth = self.patcher_getAuth.start()
# Configure the mocked_getAuth to return the helper_getAuth dictionary
self.mocked_getAuth.return_value = self.helper_getAuth
# -- Create a patcher for the appGlobals file --
self.patcher_appGlobals = patch("conflicts.conflicts.ag", autospec=True)
# Start the patcher - mock returned
self.mocked_appGlobals = self.patcher_appGlobals.start()
# Configure the mocked appGlobals as desired
self.mocked_appGlobals.baseOpts = {"HOST_URL": "https://localhost:5000"}
self.mocked_appGlobals.conn = MagicMock()
self.mocked_appGlobals.UPLOAD_FOLDER = "./static"
self.mocked_appGlobals.ALLOWED_EXTENSIONS = {"txt", "csv"}
# -- Create a patchers for the logging --
self.patcher_loggingDEBUG = patch("logging.debug", autospec=True)
self.patcher_loggingINFO = patch("logging.info", autospec=True)
self.patcher_loggingWARNING = patch("logging.warning", autospec=True)
self.patcher_loggingCRITICAL = patch("logging.critical", autospec=True)
self.patcher_loggingERROR = patch("logging.error", autospec=True)
# Start the patcher - mock returned
self.mocked_loggingDEBUG = self.patcher_loggingDEBUG.start()
self.mocked_loggingINFO = self.patcher_loggingINFO.start()
self.mocked_loggingWARNING = self.patcher_loggingWARNING.start()
self.mocked_loggingCRITICAL = self.patcher_loggingCRITICAL.start()
self.mocked_loggingERROR = self.patcher_loggingERROR.start()
def tearDown(self):
# Stop all of the patchers
self.patcher_getAuth.stop()
self.patcher_appGlobals.stop()
self.patcher_osEnviron.stop()
self.patcher_loggingDEBUG.stop()
self.patcher_loggingINFO.stop()
self.patcher_loggingWARNING.stop()
self.patcher_loggingCRITICAL.stop()
self.patcher_loggingERROR.stop()
def resetAuthLevel(self):
# This function serves to reset the auth_level of the session
# to the default value which is 1.
self.mocked_authLevel.return_value = 1
def test_whenGivenNewConflicts_addsNewConflictsToDB(self):
# When a user calls the API and provides a set of conflicts
# that are not already registered in the DB, the method
# will add the new conflicts into the DB.
#
# monthNum <int> - an integer representing the numeric month number for
# the desired month using the standard gregorian
# calendar convention.
# year <int> - an integer denoting the year for the desired time period
# using the standard gregorian calendar convention.
# conflicts <lst<str>> - a list containing strings representing dates that the
# user has a duty conflict with.
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
desiredMonthNum = 3
desiredYear = 2021
desiredNewConflicts = []
for i in range(13):
desiredNewConflicts.append("2021-01-{:02}".format(i))
expectedPrevConflicts = []
# Configure the appGlobals.conn.cursor.execute mock to return different values
# after subsequent calls.
# Fetchall() config
self.mocked_appGlobals.conn.cursor().fetchall.side_effect = [
tuple(expectedPrevConflicts) # First call returns the Previous conflicts
]
# -- Act --
# Make a request to the desired API endpoint
resp = self.server.post("/conflicts/api/enterConflicts/",
json=dict(
monthNum=desiredMonthNum,
year=desiredYear,
conflicts=desiredNewConflicts
),
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# -- Assert --
        # Assert that when appGlobals.conn.cursor().execute was last called,
# it was an INSERT statement. Since this line is using triple-quote strings,
# the whitespace must match exactly.
self.mocked_appGlobals.conn.cursor().execute.assert_called_with(
"""INSERT INTO conflicts (ra_id, day_id)
SELECT %s, day.id FROM day
WHERE TO_CHAR(day.date, 'YYYY-MM-DD') IN %s
""", (self.user_ra_id, tuple(set(desiredNewConflicts))))
        # Assert that appGlobals.conn.commit was called exactly once
self.mocked_appGlobals.conn.commit.assert_called_once()
# Assert that appGlobals.conn.cursor().close was called
self.mocked_appGlobals.conn.cursor().close.assert_called_once()
# Assert that we received a json response
self.assertTrue(resp.is_json)
# Assert that we received our expected result
self.assertEqual(stdRet(1, "successful"), resp.json)
def test_whenGivenConflictsAlreadyInDB_returnsSuccessResponse(self):
# When a user calls the API and provides a set of conflicts that ARE
# already registered in the DB, the method will not modify the conflicts.
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
desiredMonthNum = 3
desiredYear = 2021
desiredNewConflicts = ["2021-01-{:02}".format(i) for i in range(10)]
# Create the expected previous conflicts that should be returned
# from the DB.
expectedPrevConflicts = []
for i in desiredNewConflicts:
expectedPrevConflicts.append((i,))
# Configure the appGlobals.conn.cursor.execute mock to return different values
# after subsequent calls.
# Fetchall() config
self.mocked_appGlobals.conn.cursor().fetchall.side_effect = [
tuple(expectedPrevConflicts) # First call returns the Previous conflicts
]
# -- Act --
# Make a request to the desired API endpoint
resp = self.server.post("/conflicts/api/enterConflicts/",
json=dict(
monthNum=desiredMonthNum,
year=desiredYear,
conflicts=desiredNewConflicts
),
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# -- Assert --
        # Assert that appGlobals.conn.cursor().execute was only called once
        # (the initial SELECT of previously entered conflicts).
self.mocked_appGlobals.conn.cursor().execute.assert_called_once()
# Assert that appGlobals.conn.commit was never called
self.mocked_appGlobals.conn.commit.assert_not_called()
        # Assert that appGlobals.conn.cursor().close was called exactly once
self.mocked_appGlobals.conn.cursor().close.assert_called_once()
# Assert that we received a json response
self.assertTrue(resp.is_json)
# Assert that we received our expected result
self.assertEqual(stdRet(1, "successful"), resp.json)
def test_whenNotGivenPreviouslyEnteredConflicts_removesPreviouslyEnteredConflictsFromDB(self):
# When a user calls the API and excludes a set of conflicts that are
# already registered in the DB, the method will remove the excluded
# conflicts.
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
desiredMonthNum = 3
desiredYear = 2021
desiredNewConflicts = ["2021-01-{:02}".format(i) for i in range(20)]
# Create the expected previous conflicts that should be returned
# from the DB.
expectedPrevConflicts = []
for i in desiredNewConflicts:
expectedPrevConflicts.append((i,))
# Configure the appGlobals.conn.cursor.execute mock to return different values
# after subsequent calls.
# Fetchall() config
self.mocked_appGlobals.conn.cursor().fetchall.side_effect = [
tuple(expectedPrevConflicts) # First call returns the Previous conflicts
]
# -- Act --
# Make a request to the desired API endpoint
resp = self.server.post("/conflicts/api/enterConflicts/",
json=dict(
monthNum=desiredMonthNum,
year=desiredYear,
# In this test, we will exclude several of the
# conflicts that we have in the expectedPrevConflicts
# so that these are removed from the DB.
conflicts=desiredNewConflicts[:15]
),
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# -- Assert --
        # Assert that when appGlobals.conn.cursor().execute was last called,
        # it was a DELETE statement. Since this line is using triple-quote strings,
# the whitespace must match exactly.
self.mocked_appGlobals.conn.cursor().execute.assert_called_with("""
DELETE FROM conflicts
WHERE conflicts.day_id IN (
SELECT conflicts.day_id
FROM conflicts JOIN day ON (conflicts.day_id = day.id)
WHERE TO_CHAR(day.date, 'YYYY-MM-DD') IN %s
AND conflicts.ra_id = %s
);""", (tuple(set(desiredNewConflicts[15:])), self.user_ra_id))
        # Assert that appGlobals.conn.commit was called exactly once
self.mocked_appGlobals.conn.commit.assert_called_once()
        # Assert that appGlobals.conn.cursor().close was called exactly once
self.mocked_appGlobals.conn.cursor().close.assert_called_once()
# Assert that we received a json response
self.assertTrue(resp.is_json)
# Assert that we received our expected result
self.assertEqual(stdRet(1, "successful"), resp.json)
def test_ableToAddAndRemoveConflictsInSingleCall(self):
# Essentially, is the method able to add new conflicts and remove no longer needed
# conflicts at the same time.
# -- Arrange --
# Reset all of the mocked objects that will be used in this test
self.mocked_authLevel.reset_mock()
self.mocked_appGlobals.conn.reset_mock()
desiredMonthNum = 3
desiredYear = 2021
desiredNewConflicts = ["2021-01-{:02}".format(i) for i in range(5)]
expectedPrevConflicts = ["2021-01-{:02}".format(i) for i in range(10, 20)]
        # Create a list of conflict dates to be sent to the server. It consists of
# "new" dates as well as some of the dates that were already in the DB.
sentConflicts = desiredNewConflicts + expectedPrevConflicts[:5]
# Create the expected previous conflicts that should be returned
# from the DB.
for index, date in enumerate(expectedPrevConflicts):
expectedPrevConflicts[index] = tuple(date)
# Configure the appGlobals.conn.cursor.execute mock to return different values
# after subsequent calls.
# Fetchall() config
self.mocked_appGlobals.conn.cursor().fetchall.side_effect = [
tuple(expectedPrevConflicts) # First call returns the Previous conflicts
]
# Create the sets that the API will use to determine what needs to be added
# and removed.
prevSet = set([i[0] for i in expectedPrevConflicts])
newSet = set(sentConflicts)
deleteSet = prevSet.difference(newSet)
addSet = newSet.difference(prevSet)
# -- Act --
# Make a request to the desired API endpoint
resp = self.server.post("/conflicts/api/enterConflicts/",
json=dict(
monthNum=desiredMonthNum,
year=desiredYear,
conflicts=sentConflicts
),
base_url=self.mocked_appGlobals.baseOpts["HOST_URL"])
# -- Assert --
        # Assert that when appGlobals.conn.cursor().execute was called,
# one of the calls was the following. Since this line is using triple-
# quote strings, the whitespace must match exactly.
self.mocked_appGlobals.conn.cursor().execute.assert_any_call("""
DELETE FROM conflicts
WHERE conflicts.day_id IN (
SELECT conflicts.day_id
FROM conflicts JOIN day ON (conflicts.day_id = day.id)
WHERE TO_CHAR(day.date, 'YYYY-MM-DD') IN %s
AND conflicts.ra_id = %s
);""", (tuple(deleteSet), self.user_ra_id)
)
        # Assert that when appGlobals.conn.cursor().execute was called,
# one of the calls was the following. Since this line is using triple-
# quote strings, the whitespace must match exactly.
self.mocked_appGlobals.conn.cursor().execute.assert_any_call(
"""INSERT INTO conflicts (ra_id, day_id)
SELECT %s, day.id FROM day
WHERE TO_CHAR(day.date, 'YYYY-MM-DD') IN %s
""", (self.user_ra_id, tuple(addSet))
)
# Assert that the execute() method was called 3 times.
self.assertEqual(self.mocked_appGlobals.conn.cursor().execute.call_count, 3)
        # Assert that appGlobals.conn.commit was called exactly once
self.mocked_appGlobals.conn.commit.assert_called_once()
        # Assert that appGlobals.conn.cursor().close was called exactly once
self.mocked_appGlobals.conn.cursor().close.assert_called_once()
# Assert that we received a json response
self.assertTrue(resp.is_json)
# Assert that we received our expected result
self.assertEqual(stdRet(1, "successful"), resp.json)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
tfx/orchestration/kubeflow/test_utils.py
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility for testing Kubeflow-based orchestrator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import re
import shutil
import subprocess
import tarfile
import tempfile
import time
from typing import Any, Dict, List, Text
from absl import logging
import kfp
from kfp_server_api import rest
import tensorflow as tf
import tensorflow_model_analysis as tfma
from tfx.components import CsvExampleGen
from tfx.components import Evaluator
from tfx.components import ExampleValidator
from tfx.components import InfraValidator
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.base import executor_spec
from tfx.components.base.base_component import BaseComponent
from tfx.dsl.experimental import latest_artifacts_resolver
from tfx.orchestration import pipeline as tfx_pipeline
from tfx.orchestration import test_utils
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.proto import infra_validator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.types import Channel
from tfx.types import channel_utils
from tfx.types import component_spec
from tfx.types import standard_artifacts
from tfx.types.standard_artifacts import Model
from tfx.utils import kube_utils
# TODO(jiyongjung): Merge with kube_utils.PodStatus
# Various execution statuses of a KFP pipeline.
KFP_RUNNING_STATUS = 'running'
KFP_SUCCESS_STATUS = 'succeeded'
KFP_FAIL_STATUS = 'failed'
KFP_SKIPPED_STATUS = 'skipped'
KFP_ERROR_STATUS = 'error'
KFP_FINAL_STATUS = frozenset(
(KFP_SUCCESS_STATUS, KFP_FAIL_STATUS, KFP_SKIPPED_STATUS, KFP_ERROR_STATUS))
def poll_kfp_with_retry(host: Text, run_id: Text, retry_limit: int,
timeout: datetime.timedelta,
polling_interval: int) -> Text:
"""Gets the pipeline execution status by polling KFP at the specified host.
Args:
host: address of the KFP deployment.
run_id: id of the execution of the pipeline.
    retry_limit: number of retries that will be performed before raising an error.
timeout: timeout of this long-running operation, in timedelta.
polling_interval: interval between two consecutive polls, in seconds.
Returns:
    The final status of the execution. Possible values can be found at
https://github.com/kubeflow/pipelines/blob/master/backend/api/run.proto#L254
Raises:
RuntimeError: if polling failed for retry_limit times consecutively.
"""
start_time = datetime.datetime.now()
retry_count = 0
while True:
# TODO(jxzheng): workaround for 1hr timeout limit in kfp.Client().
# This should be changed after
# https://github.com/kubeflow/pipelines/issues/3630 is fixed.
# Currently gcloud authentication token has a 1-hour expiration by default
# but kfp.Client() does not have a refreshing mechanism in place. This
# causes failure when attempting to get running status for a long pipeline
# execution (> 1 hour).
# Instead of implementing a whole authentication refreshing mechanism
# here, we chose re-creating kfp.Client() frequently to make sure the
# authentication does not expire. This is based on the fact that
# kfp.Client() is very light-weight.
# See more details at
# https://github.com/kubeflow/pipelines/issues/3630
client = kfp.Client(host=host)
# TODO(b/156784019): workaround the known issue at b/156784019 and
# https://github.com/kubeflow/pipelines/issues/3669
# by wait-and-retry when ApiException is hit.
try:
get_run_response = client.get_run(run_id=run_id)
except rest.ApiException as api_err:
      # If get_run failed with ApiException, wait polling_interval seconds and retry.
if retry_count < retry_limit:
retry_count += 1
logging.info('API error %s was hit. Retrying: %s / %s.', api_err,
retry_count, retry_limit)
time.sleep(polling_interval)
continue
raise RuntimeError('Still hit remote error after %s retries: %s' %
(retry_limit, api_err))
else:
# If get_run succeeded, reset retry_count.
retry_count = 0
if (get_run_response and get_run_response.run and
get_run_response.run.status and
get_run_response.run.status.lower() in KFP_FINAL_STATUS):
# Return because final status is reached.
return get_run_response.run.status
if datetime.datetime.now() - start_time > timeout:
# Timeout.
raise RuntimeError('Waiting for run timeout at %s' %
datetime.datetime.now().strftime('%H:%M:%S'))
logging.info('Waiting for the job to complete...')
time.sleep(polling_interval)
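# A minimal usage sketch (the host and run id below are hypothetical
# placeholders, not values from this module): the call blocks until the run
# reaches one of the KFP_FINAL_STATUS values or the timeout elapses.
#
#   final_status = poll_kfp_with_retry(
#       host='https://my-kfp-endpoint.example.com',
#       run_id='0123-abcd-example-run',
#       retry_limit=3,
#       timeout=datetime.timedelta(hours=2),
#       polling_interval=60)
#   assert final_status.lower() in KFP_FINAL_STATUS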
def print_failure_log_for_run(host: Text, run_id: Text, namespace: Text):
"""Prints logs of failed components of a run.
  Prints execution logs for failed components using `logging.info`.
  This resembles the behavior of `argo logs` but uses the K8s API directly.
  Does not print anything if the run was successful.
Args:
host: address of the KFP deployment.
run_id: id of the execution of the pipeline.
namespace: namespace of K8s cluster.
"""
client = kfp.Client(host=host)
run = client.get_run(run_id=run_id)
workflow_manifest = json.loads(run.pipeline_runtime.workflow_manifest)
if kube_utils.PodPhase(
workflow_manifest['status']['phase']) != kube_utils.PodPhase.FAILED:
return
k8s_client = kube_utils.make_core_v1_api()
pods = [i for i in workflow_manifest['status']['nodes'] if i['type'] == 'Pod']
for pod in pods:
if kube_utils.PodPhase(pod['phase']) != kube_utils.PodPhase.FAILED:
continue
display_name = pod['displayName']
pod_id = pod['id']
log = k8s_client.read_namespaced_pod_log(
pod_id, namespace=namespace, container='main')
for line in log.splitlines():
logging.info('%s:%s', display_name, line)
# Custom component definitions for testing purpose.
class _HelloWorldSpec(component_spec.ComponentSpec):
INPUTS = {}
OUTPUTS = {
'greeting':
component_spec.ChannelParameter(type=standard_artifacts.String)
}
PARAMETERS = {
'word': component_spec.ExecutionParameter(type=str),
}
class _ByeWorldSpec(component_spec.ComponentSpec):
INPUTS = {
'hearing': component_spec.ChannelParameter(type=standard_artifacts.String)
}
OUTPUTS = {}
PARAMETERS = {}
class HelloWorldComponent(BaseComponent):
"""Producer component."""
SPEC_CLASS = _HelloWorldSpec
EXECUTOR_SPEC = executor_spec.ExecutorContainerSpec(
# TODO(b/143965964): move the image to private repo if the test is flaky
# due to docker hub.
image='google/cloud-sdk:latest',
command=['sh', '-c'],
args=[
'echo "hello {{exec_properties.word}}" | gsutil cp - {{output_dict["greeting"][0].uri}}'
])
def __init__(self, word, greeting=None):
if not greeting:
artifact = standard_artifacts.String()
greeting = channel_utils.as_channel([artifact])
super(HelloWorldComponent,
self).__init__(_HelloWorldSpec(word=word, greeting=greeting))
class ByeWorldComponent(BaseComponent):
"""Consumer component."""
SPEC_CLASS = _ByeWorldSpec
EXECUTOR_SPEC = executor_spec.ExecutorContainerSpec(
image='bash:latest',
command=['echo'],
args=['received {{input_dict["hearing"][0].value}}'])
def __init__(self, hearing):
super(ByeWorldComponent, self).__init__(_ByeWorldSpec(hearing=hearing))
def create_primitive_type_components(
pipeline_name: Text) -> List[BaseComponent]:
"""Creates components for testing primitive type artifact passing.
Args:
pipeline_name: Name of this pipeline.
Returns:
A list of TFX custom container components.
"""
hello_world = HelloWorldComponent(word=pipeline_name)
bye_world = ByeWorldComponent(hearing=hello_world.outputs['greeting'])
return [hello_world, bye_world]
def create_e2e_components(
pipeline_root: Text,
csv_input_location: Text,
transform_module: Text,
trainer_module: Text,
) -> List[BaseComponent]:
"""Creates components for a simple Chicago Taxi TFX pipeline for testing.
Args:
pipeline_root: The root of the pipeline output.
csv_input_location: The location of the input data directory.
transform_module: The location of the transform module file.
trainer_module: The location of the trainer module file.
Returns:
A list of TFX components that constitutes an end-to-end test pipeline.
"""
example_gen = CsvExampleGen(input_base=csv_input_location)
statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'],
infer_feature_shape=False)
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=transform_module)
latest_model_resolver = ResolverNode(
instance_name='latest_model_resolver',
resolver_class=latest_artifacts_resolver.LatestArtifactsResolver,
latest_model=Channel(type=Model))
trainer = Trainer(
transformed_examples=transform.outputs['transformed_examples'],
schema=schema_gen.outputs['schema'],
base_model=latest_model_resolver.outputs['latest_model'],
transform_graph=transform.outputs['transform_graph'],
train_args=trainer_pb2.TrainArgs(num_steps=10),
eval_args=trainer_pb2.EvalArgs(num_steps=5),
module_file=trainer_module,
)
# Set the TFMA config for Model Evaluation and Validation.
eval_config = tfma.EvalConfig(
model_specs=[tfma.ModelSpec(signature_name='eval')],
metrics_specs=[
tfma.MetricsSpec(
metrics=[tfma.MetricConfig(class_name='ExampleCount')],
thresholds={
'accuracy':
tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.5}),
change_threshold=tfma.GenericChangeThreshold(
direction=tfma.MetricDirection.HIGHER_IS_BETTER,
absolute={'value': -1e-10}))
})
],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['trip_start_hour'])
])
evaluator = Evaluator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'],
eval_config=eval_config)
infra_validator = InfraValidator(
model=trainer.outputs['model'],
examples=example_gen.outputs['examples'],
serving_spec=infra_validator_pb2.ServingSpec(
tensorflow_serving=infra_validator_pb2.TensorFlowServing(
tags=['latest']),
kubernetes=infra_validator_pb2.KubernetesConfig()),
request_spec=infra_validator_pb2.RequestSpec(
tensorflow_serving=infra_validator_pb2.TensorFlowServingRequestSpec())
)
pusher = Pusher(
model=trainer.outputs['model'],
model_blessing=evaluator.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=os.path.join(pipeline_root, 'model_serving'))))
return [
example_gen,
statistics_gen,
schema_gen,
example_validator,
transform,
latest_model_resolver,
trainer,
evaluator,
infra_validator,
pusher,
]
def delete_ai_platform_model(model_name):
"""Delete pushed model with the given name in AI Platform."""
# In order to delete model, all versions in the model must be deleted first.
versions_command = ('gcloud', 'ai-platform', 'versions', 'list',
'--model={}'.format(model_name))
# The return code of the following subprocess call will be explicitly checked
# using the logic below, so we don't need to call check_output().
versions = subprocess.run(versions_command, stdout=subprocess.PIPE) # pylint: disable=subprocess-run-check
if versions.returncode == 0:
logging.info('Model %s has versions %s', model_name, versions.stdout)
# The first stdout line is headers, ignore. The columns are
# [NAME] [DEPLOYMENT_URI] [STATE]
#
# By specification of test case, the last version in the output list is the
# default version, which will be deleted last in the for loop, so there's no
    # special handling needed here.
# The operation setting default version is at
# https://github.com/tensorflow/tfx/blob/65633c772f6446189e8be7c6332d32ea221ff836/tfx/extensions/google_cloud_ai_platform/runner.py#L309
for version in versions.stdout.decode('utf-8').strip('\n').split('\n')[1:]:
version = version.split()[0]
logging.info('Deleting version %s of model %s', version, model_name)
version_delete_command = ('gcloud', '--quiet', 'ai-platform', 'versions',
'delete', version,
'--model={}'.format(model_name))
subprocess.run(version_delete_command, check=True)
logging.info('Deleting model %s', model_name)
subprocess.run(
('gcloud', '--quiet', 'ai-platform', 'models', 'delete', model_name),
check=True)
class BaseKubeflowTest(tf.test.TestCase):
"""Base class that defines testing harness for pipeline on KubeflowRunner."""
_POLLING_INTERVAL_IN_SECONDS = 10
# The following environment variables need to be set prior to calling the test
# in this file. All variables are required and do not have a default.
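  # A hypothetical shell setup for these variables (all values below are
  # placeholders, not part of this module):
  #   export KFP_E2E_BASE_CONTAINER_IMAGE=gcr.io/my-project/tfx-test-base
  #   export KFP_E2E_SRC=/path/to/tfx/checkout
  #   export KFP_E2E_GCP_PROJECT_ID=my-gcp-project
  #   export KFP_E2E_GCP_REGION=us-central1
  #   export KFP_E2E_BUCKET_NAME=my-test-bucket
  #   export KFP_E2E_TEST_DATA_ROOT=gs://my-test-bucket/testdata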
# The base container image name to use when building the image used in tests.
_BASE_CONTAINER_IMAGE = os.environ['KFP_E2E_BASE_CONTAINER_IMAGE']
# The src path to use to build docker image
_REPO_BASE = os.environ['KFP_E2E_SRC']
# The project id to use to run tests.
_GCP_PROJECT_ID = os.environ['KFP_E2E_GCP_PROJECT_ID']
# The GCP region in which the end-to-end test is run.
_GCP_REGION = os.environ['KFP_E2E_GCP_REGION']
# The GCP bucket to use to write output artifacts.
_BUCKET_NAME = os.environ['KFP_E2E_BUCKET_NAME']
# The location of test data. The input files are copied to a test-local
# location for each invocation, and cleaned up at the end of test.
_TEST_DATA_ROOT = os.environ['KFP_E2E_TEST_DATA_ROOT']
  # The location of the test user module.
  # It is retrieved from inside the container under test.
_MODULE_ROOT = '/tfx-src/tfx/components/testdata/module_file'
_CONTAINER_IMAGE = '{}:{}'.format(_BASE_CONTAINER_IMAGE,
test_utils.random_id())
@classmethod
def setUpClass(cls):
super(BaseKubeflowTest, cls).setUpClass()
# Create a container image for use by test pipelines.
test_utils.build_and_push_docker_image(cls._CONTAINER_IMAGE, cls._REPO_BASE)
@classmethod
def tearDownClass(cls):
super(BaseKubeflowTest, cls).tearDownClass()
# Delete container image used in tests.
logging.info('Deleting image %s', cls._CONTAINER_IMAGE)
subprocess.run(
['gcloud', 'container', 'images', 'delete', cls._CONTAINER_IMAGE],
check=True)
@classmethod
def _get_mysql_pod_name(cls):
"""Returns MySQL pod name in the cluster."""
pod_name = subprocess.check_output([
'kubectl',
'-n',
'kubeflow',
'get',
'pods',
'-l',
'app=mysql',
'--no-headers',
'-o',
'custom-columns=:metadata.name',
]).decode('utf-8').strip('\n')
logging.info('MySQL pod name is: %s', pod_name)
return pod_name
@classmethod
def _get_mlmd_db_name(cls, pipeline_name: Text):
# MySQL DB names must not contain '-' while k8s names must not contain '_'.
# So we replace the dashes here for the DB name.
valid_mysql_name = pipeline_name.replace('-', '_')
# MySQL database name cannot exceed 64 characters.
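    # Illustrative example (hypothetical pipeline name): 'my-kubeflow-e2e-test'
    # maps to the MLMD database name 'mlmd_my_kubeflow_e2e_test'.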
return 'mlmd_{}'.format(valid_mysql_name[-59:])
def setUp(self):
super(BaseKubeflowTest, self).setUp()
self._old_cwd = os.getcwd()
self._test_dir = tempfile.mkdtemp()
os.chdir(self._test_dir)
self._test_output_dir = 'gs://{}/test_output'.format(self._BUCKET_NAME)
test_id = test_utils.random_id()
self._testdata_root = 'gs://{}/test_data/{}'.format(self._BUCKET_NAME,
test_id)
subprocess.run(
['gsutil', 'cp', '-r', self._TEST_DATA_ROOT, self._testdata_root],
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
self._data_root = os.path.join(self._testdata_root, 'external', 'csv')
self._transform_module = os.path.join(self._MODULE_ROOT,
'transform_module.py')
self._trainer_module = os.path.join(self._MODULE_ROOT, 'trainer_module.py')
self.addCleanup(self._delete_test_dir, test_id)
def tearDown(self):
super(BaseKubeflowTest, self).tearDown()
os.chdir(self._old_cwd)
shutil.rmtree(self._test_dir)
def _delete_test_dir(self, test_id: Text):
"""Deletes files for this test including the module file and data files.
Args:
test_id: Randomly generated id of the test.
"""
test_utils.delete_gcs_files(self._GCP_PROJECT_ID, self._BUCKET_NAME,
'test_data/{}'.format(test_id))
def _delete_workflow(self, workflow_name: Text):
"""Deletes the specified Argo workflow."""
logging.info('Deleting workflow %s', workflow_name)
subprocess.run(['argo', '--namespace', 'kubeflow', 'delete', workflow_name],
check=True)
def _run_workflow(self,
workflow_file: Text,
workflow_name: Text,
parameter: Dict[Text, Text] = None):
"""Runs the specified workflow with Argo.
Blocks until the workflow has run (successfully or not) to completion.
Args:
workflow_file: YAML file with Argo workflow spec for the pipeline.
workflow_name: Name to use for the workflow.
parameter: mapping from pipeline parameter name to its runtime value.
"""
# TODO(ajaygopinathan): Consider using KFP cli instead.
def _format_parameter(parameter: Dict[Text, Any]) -> List[Text]:
"""Format the pipeline parameter section of argo workflow."""
if parameter:
result = []
for k, v in parameter.items():
result.append('-p')
result.append('{}={}'.format(k, v))
return result
else:
return []
run_command = [
'argo',
'submit',
'--name',
workflow_name,
'--namespace',
'kubeflow',
'--serviceaccount',
'pipeline-runner',
workflow_file,
]
run_command += _format_parameter(parameter)
logging.info('Launching workflow %s with parameter %s', workflow_name,
_format_parameter(parameter))
with test_utils.Timer('RunningPipelineToCompletion'):
subprocess.run(run_command, check=True)
# Wait in the loop while pipeline is running.
status = 'Running'
while status == 'Running':
time.sleep(self._POLLING_INTERVAL_IN_SECONDS)
status = self._get_argo_pipeline_status(workflow_name)
def _delete_pipeline_output(self, pipeline_name: Text):
"""Deletes output produced by the named pipeline.
Args:
pipeline_name: The name of the pipeline.
"""
test_utils.delete_gcs_files(self._GCP_PROJECT_ID, self._BUCKET_NAME,
'test_output/{}'.format(pipeline_name))
def _delete_pipeline_metadata(self, pipeline_name: Text):
"""Drops the database containing metadata produced by the pipeline.
Args:
pipeline_name: The name of the pipeline owning the database.
"""
pod_name = self._get_mysql_pod_name()
db_name = self._get_mlmd_db_name(pipeline_name)
command = [
'kubectl',
'-n',
'kubeflow',
'exec',
'-it',
pod_name,
'--',
'mysql',
'--user',
'root',
'--execute',
'drop database if exists {};'.format(db_name),
]
logging.info('Dropping MLMD DB with name: %s', db_name)
with test_utils.Timer('DeletingMLMDDatabase'):
subprocess.run(command, check=True)
def _pipeline_root(self, pipeline_name: Text):
return os.path.join(self._test_output_dir, pipeline_name)
def _create_pipeline(self, pipeline_name: Text,
components: List[BaseComponent]):
"""Creates a pipeline given name and list of components."""
return tfx_pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=self._pipeline_root(pipeline_name),
components=components,
enable_cache=True,
)
def _create_dataflow_pipeline(self,
pipeline_name: Text,
components: List[BaseComponent],
wait_until_finish_ms: int = 1000 * 60 * 20):
"""Creates a pipeline with Beam DataflowRunner."""
pipeline = self._create_pipeline(pipeline_name, components)
pipeline.beam_pipeline_args = [
'--runner=TestDataflowRunner',
'--wait_until_finish_duration=%d' % wait_until_finish_ms,
'--project=' + self._GCP_PROJECT_ID,
'--temp_location=' +
os.path.join(self._pipeline_root(pipeline_name), 'tmp'),
'--region=' + self._GCP_REGION,
]
return pipeline
def _get_kubeflow_metadata_config(
self) -> kubeflow_pb2.KubeflowMetadataConfig:
config = kubeflow_dag_runner.get_default_kubeflow_metadata_config()
return config
def _get_argo_pipeline_status(self, workflow_name: Text) -> Text:
"""Get Pipeline status.
Args:
workflow_name: The name of the workflow.
Returns:
Simple status string which is returned from `argo get` command.
"""
get_workflow_command = [
'argo', '--namespace', 'kubeflow', 'get', workflow_name
]
output = subprocess.check_output(get_workflow_command).decode('utf-8')
logging.info('Argo output ----\n%s', output)
match = re.search(r'^Status:\s+(.+)$', output, flags=re.MULTILINE)
self.assertIsNotNone(match)
return match.group(1)
def _compile_and_run_pipeline(self,
pipeline: tfx_pipeline.Pipeline,
workflow_name: Text = None,
parameters: Dict[Text, Any] = None):
"""Compiles and runs a KFP pipeline.
Args:
pipeline: The logical pipeline to run.
workflow_name: The argo workflow name, default to pipeline name.
      parameters: Values of the runtime parameters of the pipeline.
"""
pipeline_name = pipeline.pipeline_info.pipeline_name
config = kubeflow_dag_runner.KubeflowDagRunnerConfig(
kubeflow_metadata_config=self._get_kubeflow_metadata_config(),
tfx_image=self._CONTAINER_IMAGE)
kubeflow_dag_runner.KubeflowDagRunner(config=config).run(pipeline)
file_path = os.path.join(self._test_dir, '{}.tar.gz'.format(pipeline_name))
self.assertTrue(tf.io.gfile.exists(file_path))
tarfile.TarFile.open(file_path).extract('pipeline.yaml')
pipeline_file = os.path.join(self._test_dir, 'pipeline.yaml')
self.assertIsNotNone(pipeline_file)
workflow_name = workflow_name or pipeline_name
# Ensure cleanup regardless of whether pipeline succeeds or fails.
self.addCleanup(self._delete_workflow, workflow_name)
self.addCleanup(self._delete_pipeline_metadata, pipeline_name)
self.addCleanup(self._delete_pipeline_output, pipeline_name)
# Run the pipeline to completion.
self._run_workflow(pipeline_file, workflow_name, parameters)
# Obtain workflow logs.
get_logs_command = [
'argo', '--namespace', 'kubeflow', 'logs', '-w', workflow_name
]
logs_output = subprocess.check_output(get_logs_command).decode('utf-8')
# Check if pipeline completed successfully.
status = self._get_argo_pipeline_status(workflow_name)
self.assertEqual(
'Succeeded', status, 'Pipeline {} failed to complete successfully: {}'
'\nFailed workflow logs:\n{}'.format(pipeline_name, status,
logs_output))
|
[] |
[] |
[
"KFP_E2E_BASE_CONTAINER_IMAGE",
"KFP_E2E_GCP_REGION",
"KFP_E2E_TEST_DATA_ROOT",
"KFP_E2E_BUCKET_NAME",
"KFP_E2E_SRC",
"KFP_E2E_GCP_PROJECT_ID"
] |
[]
|
["KFP_E2E_BASE_CONTAINER_IMAGE", "KFP_E2E_GCP_REGION", "KFP_E2E_TEST_DATA_ROOT", "KFP_E2E_BUCKET_NAME", "KFP_E2E_SRC", "KFP_E2E_GCP_PROJECT_ID"]
|
python
| 6 | 0 | |
cmd/krel/cmd/anago/push.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package anago
import (
"os"
"path/filepath"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/release/pkg/build"
"k8s.io/release/pkg/release"
)
// pushCmd represents the subcommand for `krel anago push`
var pushCmd = &cobra.Command{
Use: "push",
Short: "Push release artifacts into the Google Cloud",
Long: `krel anago push
This subcommand can be used to push the release artifacts to the Google Cloud.
It's only intended to be used from anago, which means the command might be
removed again in future releases when anago goes end of life.
`,
SilenceUsage: true,
SilenceErrors: true,
RunE: func(cmd *cobra.Command, args []string) error {
return errors.Wrap(runPush(pushOpts), "run krel anago push")
},
}
var (
pushOpts = &build.Options{}
runStage bool
runRelease bool
buildVersion string
)
func init() {
pushCmd.PersistentFlags().BoolVar(
&runStage,
"stage",
false,
"run in stage mode",
)
pushCmd.PersistentFlags().BoolVar(
&runRelease,
"release",
false,
"run in release mode",
)
pushCmd.PersistentFlags().StringVar(
&pushOpts.Version,
"version",
"",
"version to be used",
)
pushCmd.PersistentFlags().StringVar(
&pushOpts.BuildDir,
"build-dir",
"",
"build artifact directory of the release",
)
pushCmd.PersistentFlags().StringVar(
&pushOpts.Bucket,
"bucket",
"",
"GCS bucket to be used",
)
pushCmd.PersistentFlags().StringVar(
&pushOpts.Registry,
"container-registry",
"",
"Container image registry to be used",
)
pushCmd.PersistentFlags().StringVar(
&buildVersion,
"build-version",
"",
"Build version from Jenkins (only used when --release specified)",
)
pushOpts.AllowDup = true
pushOpts.ValidateRemoteImageDigests = true
AnagoCmd.AddCommand(pushCmd)
}
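// A hypothetical invocation when called from anago in stage mode (the version,
// directory and bucket below are placeholders, not defaults of this command):
//   krel anago push --stage --version=v1.20.0-beta.1 --build-dir=_output --bucket=my-release-bucket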
func runPush(opts *build.Options) error {
buildInstance := build.NewInstance(opts)
if err := buildInstance.CheckReleaseBucket(); err != nil {
return errors.Wrap(err, "check release bucket access")
}
if runStage {
return runPushStage(buildInstance, opts)
} else if runRelease {
return runPushRelease(buildInstance, opts)
}
return errors.New("neither --stage nor --release provided")
}
func runPushStage(
buildInstance *build.Instance,
opts *build.Options,
) error {
workDir := os.Getenv("GOPATH")
if workDir == "" {
return errors.New("GOPATH is not set")
}
// Stage the local source tree
if err := buildInstance.StageLocalSourceTree(workDir, buildVersion); err != nil {
return errors.Wrap(err, "staging local source tree")
}
// Stage local artifacts and write checksums
if err := buildInstance.StageLocalArtifacts(); err != nil {
return errors.Wrap(err, "staging local artifacts")
}
gcsPath := filepath.Join(opts.Bucket, "stage", buildVersion, opts.Version)
// Push gcs-stage to GCS
if err := buildInstance.PushReleaseArtifacts(
filepath.Join(opts.BuildDir, release.GCSStagePath, opts.Version),
filepath.Join(gcsPath, release.GCSStagePath, opts.Version),
); err != nil {
return errors.Wrap(err, "pushing release artifacts")
}
// Push container release-images to GCS
if err := buildInstance.PushReleaseArtifacts(
filepath.Join(opts.BuildDir, release.ImagesPath),
filepath.Join(gcsPath, release.ImagesPath),
); err != nil {
return errors.Wrap(err, "pushing release artifacts")
}
// Push container images into registry
if err := buildInstance.PushContainerImages(); err != nil {
return errors.Wrap(err, "pushing container images")
}
return nil
}
func runPushRelease(
buildInstance *build.Instance,
opts *build.Options,
) error {
if err := buildInstance.CopyStagedFromGCS(opts.Bucket, buildVersion); err != nil {
return errors.Wrap(err, "copy staged from GCS")
}
// In an official nomock release, we want to ensure that container images
// have been promoted from staging to production, so we do the image
// manifest validation against production instead of staging.
targetRegistry := opts.Registry
if targetRegistry == release.GCRIOPathStaging {
targetRegistry = release.GCRIOPathProd
}
// Image promotion has been done on nomock stage, verify that the images
// are available.
if err := release.NewImages().Validate(
targetRegistry, opts.Version, opts.BuildDir,
); err != nil {
return errors.Wrap(err, "validate container images")
}
if err := release.NewPublisher().PublishVersion(
"release", opts.Version, opts.BuildDir, opts.Bucket, "release", nil, false, false,
); err != nil {
return errors.Wrap(err, "publish release")
}
return nil
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
mnist/mnist_with_summaries_bf16.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple MNIST classifier which displays summaries in TensorBoard.
This is an unimpressive MNIST model, but it is a good example of using
tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of
naming summary tags so that they are grouped meaningfully in TensorBoard.
It demonstrates the functionality of every TensorBoard dashboard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
#from tensorflow.python.lib.io import file_io
from tensorflow.examples.tutorials.mnist import input_data
from pgrad import *
FLAGS = None
def train():
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir,
fake_data=FLAGS.fake_data)
#sess = tf.InteractiveSession()
sess = tf.InteractiveSession()
# Create a multilayer model.
# Input placeholders
with tf.name_scope('input'):
x = tf.placeholder(tf.float32, [None, 784], name='x-input')
y_ = tf.placeholder(tf.int64, [None], name='y-input')
with tf.name_scope('input_reshape'):
image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
tf.summary.image('input', image_shaped_input, 10)
# We can't initialize these variables to 0 - the network will get stuck.
def weight_variable(shape):
"""Create a weight variable with appropriate initialization."""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"""Create a bias variable with appropriate initialization."""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def variable_summaries(var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(input_tensor=var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(input_tensor=tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(input_tensor=var))
tf.summary.scalar('min', tf.reduce_min(input_tensor=var))
tf.summary.histogram('histogram', var)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
"""Reusable code for making a simple neural net layer.
It does a matrix multiply, bias add, and then uses ReLU to nonlinearize.
It also sets up name scoping so that the resultant graph is easy to read,
and adds a number of summary ops.
"""
# Adding a name scope ensures logical grouping of the layers in the graph.
with tf.name_scope(layer_name):
# This Variable will hold the state of the weights for the layer
with tf.name_scope('weights'):
weights = weight_variable([input_dim, output_dim])
variable_summaries(weights)
with tf.name_scope('biases'):
biases = bias_variable([output_dim])
variable_summaries(biases)
with tf.name_scope('Wx_plus_b'):
# SSY /usr/local/lib/python3.5/dist-packages/tensorflow/python/ops/math_ops.py
input_tensor = id_bf16cut_fp(input_tensor)
weights = id_bf16cut_fp(weights)
preactivate = tf.matmul(input_tensor, weights) + biases
preactivate=id_bf16cut_bp(preactivate)
tf.summary.histogram('pre_activations', preactivate)
activations = act(preactivate, name='activation')
tf.summary.histogram('activations', activations)
return activations
hidden1 = nn_layer(x, 784, 500, 'layer1')
with tf.name_scope('dropout'):
keep_prob = tf.placeholder(tf.float32)
tf.summary.scalar('dropout_keep_probability', keep_prob)
#dropped = tf.nn.dropout(hidden1, rate=(1 - keep_prob))
dropped = tf.nn.dropout(hidden1, keep_prob=keep_prob)
# Do not apply softmax activation yet, see below.
y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)
with tf.name_scope('cross_entropy'):
# The raw formulation of cross-entropy,
#
# tf.reduce_mean(-tf.reduce_sum(y_ * tf.math.log(tf.softmax(y)),
# reduction_indices=[1]))
#
# can be numerically unstable.
#
# So here we use tf.losses.sparse_softmax_cross_entropy on the
# raw logit outputs of the nn_layer above, and then average across
# the batch.
with tf.name_scope('total'):
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
labels=y_, logits=y)
tf.summary.scalar('cross_entropy', cross_entropy)
with tf.name_scope('train'):
train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
cross_entropy)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(input=y, axis=1), y_)
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_prediction,
tf.float32))
tf.summary.scalar('accuracy', accuracy)
# Merge all the summaries and write them out to
# /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train',
sess.graph)
test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
tf.global_variables_initializer().run()
# Train the model, and also write summaries.
# Every 10th step, measure test-set accuracy, and write test summaries
# All other steps, run train_step on training data, & add training summaries
def feed_dict(train):
"""Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
if train or FLAGS.fake_data:
xs, ys = mnist.train.next_batch(100, fake_data=FLAGS.fake_data)
k = FLAGS.dropout
else:
xs, ys = mnist.test.images, mnist.test.labels
k = 1.0
return {x: xs, y_: ys, keep_prob: k}
for i in range(FLAGS.max_steps):
if i % 10 == 0: # Record summaries and test-set accuracy
summary, acc = sess.run([merged, accuracy], feed_dict=feed_dict(False))
test_writer.add_summary(summary, i)
print('Accuracy at step %s: %s' % (i, acc))
else: # Record train set summaries, and train
if i % 100 == 99: # Record execution stats
run_options = tf.RunOptions(
trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, _ = sess.run([merged, train_step],
feed_dict=feed_dict(True),
options=run_options,
run_metadata=run_metadata)
train_writer.add_run_metadata(run_metadata, 'step%03d' % i)
train_writer.add_summary(summary, i)
print('Adding run metadata for', i)
else: # Record a summary
summary, _ = sess.run([merged, train_step], feed_dict=feed_dict(True))
train_writer.add_summary(summary, i)
train_writer.close()
test_writer.close()
def main(_):
if tf.gfile.Exists(FLAGS.log_dir):
tf.gfile.DeleteRecursively(FLAGS.log_dir)
tf.gfile.MakeDirs(FLAGS.log_dir)
with tf.Graph().as_default():
train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--fake_data', nargs='?', const=True, type=bool,
default=False,
help='If true, uses fake data for unit testing.')
parser.add_argument('--max_steps', type=int, default=1000,
help='Number of steps to run trainer.')
parser.add_argument('--learning_rate', type=float, default=0.001,
help='Initial learning rate')
parser.add_argument('--dropout', type=float, default=0.9,
help='Keep probability for training dropout.')
parser.add_argument(
'--data_dir',
type=str,
default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
'tensorflow/mnist/input_data'),
help='Directory for storing input data')
parser.add_argument(
'--log_dir',
type=str,
default=os.path.join(os.getenv('TEST_TMPDIR', '/tmp'),
'tensorflow/mnist/logs/mnist_with_summaries'),
help='Summaries log directory')
FLAGS, unparsed = parser.parse_known_args()
    # I am using tf 1.18 so I don't need compat.v1
#tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
[] |
[] |
[
"TEST_TMPDIR"
] |
[]
|
["TEST_TMPDIR"]
|
python
| 1 | 0 | |
tool/demo.py
|
import os
import logging
import argparse
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
from util import config
from util.util import colorize
cv2.ocl.setUseOpenCL(False)
def get_parser():
parser = argparse.ArgumentParser(description='PyTorch Semantic Segmentation')
parser.add_argument('--config', type=str, default='config/ade20k/ade20k_pspnet50.yaml', help='config file')
parser.add_argument('--image', type=str, default='figure/demo/ADE_val_00001515.jpg', help='input image')
parser.add_argument('opts', help='see config/ade20k/ade20k_pspnet50.yaml for all options', default=None, nargs=argparse.REMAINDER)
args = parser.parse_args()
assert args.config is not None
cfg = config.load_cfg_from_cfg_file(args.config)
cfg.image = args.image
if args.opts is not None:
cfg = config.merge_cfg_from_list(cfg, args.opts)
return cfg
def get_logger():
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
def check(args):
assert args.classes > 1
assert args.zoom_factor in [1, 2, 4, 8]
assert args.split in ['train', 'val', 'test']
if args.arch == 'psp':
assert (args.train_h - 1) % 8 == 0 and (args.train_w - 1) % 8 == 0
elif args.arch == 'psa':
if args.compact:
args.mask_h = (args.train_h - 1) // (8 * args.shrink_factor) + 1
args.mask_w = (args.train_w - 1) // (8 * args.shrink_factor) + 1
else:
assert (args.mask_h is None and args.mask_w is None) or (args.mask_h is not None and args.mask_w is not None)
if args.mask_h is None and args.mask_w is None:
args.mask_h = 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1
args.mask_w = 2 * ((args.train_w - 1) // (8 * args.shrink_factor) + 1) - 1
else:
assert (args.mask_h % 2 == 1) and (args.mask_h >= 3) and (
args.mask_h <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)
assert (args.mask_w % 2 == 1) and (args.mask_w >= 3) and (
args.mask_w <= 2 * ((args.train_h - 1) // (8 * args.shrink_factor) + 1) - 1)
else:
        raise Exception('architecture {} not supported yet'.format(args.arch))
def main():
global args, logger
args = get_parser()
check(args)
logger = get_logger()
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(x) for x in args.test_gpu)
logger.info(args)
logger.info("=> creating model ...")
logger.info("Classes: {}".format(args.classes))
value_scale = 255
mean = [0.485, 0.456, 0.406]
mean = [item * value_scale for item in mean]
std = [0.229, 0.224, 0.225]
std = [item * value_scale for item in std]
colors = np.loadtxt(args.colors_path).astype('uint8')
if args.arch == 'psp':
from model.pspnet import PSPNet
model = PSPNet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, pretrained=False)
elif args.arch == 'psa':
from model.psanet import PSANet
model = PSANet(layers=args.layers, classes=args.classes, zoom_factor=args.zoom_factor, compact=args.compact,
shrink_factor=args.shrink_factor, mask_h=args.mask_h, mask_w=args.mask_w,
normalization_factor=args.normalization_factor, psa_softmax=args.psa_softmax, pretrained=False)
logger.info(model)
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
if os.path.isfile(args.model_path):
logger.info("=> loading checkpoint '{}'".format(args.model_path))
checkpoint = torch.load(args.model_path)
model.load_state_dict(checkpoint['state_dict'], strict=False)
logger.info("=> loaded checkpoint '{}'".format(args.model_path))
else:
raise RuntimeError("=> no checkpoint found at '{}'".format(args.model_path))
test(model.eval(), args.image, args.classes, mean, std, args.base_size, args.test_h, args.test_w, args.scales, colors)
def net_process(model, image, mean, std=None, flip=True):
input = torch.from_numpy(image.transpose((2, 0, 1))).float()
if std is None:
for t, m in zip(input, mean):
t.sub_(m)
else:
for t, m, s in zip(input, mean, std):
t.sub_(m).div_(s)
input = input.unsqueeze(0).cuda()
if flip:
input = torch.cat([input, input.flip(3)], 0)
with torch.no_grad():
output = model(input)
_, _, h_i, w_i = input.shape
_, _, h_o, w_o = output.shape
if (h_o != h_i) or (w_o != w_i):
output = F.interpolate(output, (h_i, w_i), mode='bilinear', align_corners=True)
output = F.softmax(output, dim=1)
if flip:
output = (output[0] + output[1].flip(2)) / 2
else:
output = output[0]
output = output.data.cpu().numpy()
output = output.transpose(1, 2, 0)
return output
def scale_process(model, image, classes, crop_h, crop_w, h, w, mean, std=None, stride_rate=2/3):
ori_h, ori_w, _ = image.shape
pad_h = max(crop_h - ori_h, 0)
pad_w = max(crop_w - ori_w, 0)
pad_h_half = int(pad_h / 2)
pad_w_half = int(pad_w / 2)
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=mean)
new_h, new_w, _ = image.shape
stride_h = int(np.ceil(crop_h*stride_rate))
stride_w = int(np.ceil(crop_w*stride_rate))
grid_h = int(np.ceil(float(new_h-crop_h)/stride_h) + 1)
grid_w = int(np.ceil(float(new_w-crop_w)/stride_w) + 1)
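    # Illustrative numbers (hypothetical crop size, not a default of this
    # script): with crop_h = crop_w = 473 and stride_rate = 2/3,
    # stride_h = stride_w = ceil(473 * 2 / 3) = 316, so a 1000x1000 padded
    # image is covered by grid_h = grid_w = ceil((1000 - 473) / 316) + 1 = 3
    # overlapping crops per axis.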
prediction_crop = np.zeros((new_h, new_w, classes), dtype=float)
count_crop = np.zeros((new_h, new_w), dtype=float)
for index_h in range(0, grid_h):
for index_w in range(0, grid_w):
s_h = index_h * stride_h
e_h = min(s_h + crop_h, new_h)
s_h = e_h - crop_h
s_w = index_w * stride_w
e_w = min(s_w + crop_w, new_w)
s_w = e_w - crop_w
image_crop = image[s_h:e_h, s_w:e_w].copy()
count_crop[s_h:e_h, s_w:e_w] += 1
prediction_crop[s_h:e_h, s_w:e_w, :] += net_process(model, image_crop, mean, std)
prediction_crop /= np.expand_dims(count_crop, 2)
prediction_crop = prediction_crop[pad_h_half:pad_h_half+ori_h, pad_w_half:pad_w_half+ori_w]
prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)
return prediction
def test(model, image_path, classes, mean, std, base_size, crop_h, crop_w, scales, colors):
    image = cv2.imread(image_path, cv2.IMREAD_COLOR) # BGR 3 channel ndarray with shape H * W * 3
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert cv2 read image from BGR order to RGB order
h, w, _ = image.shape
prediction = np.zeros((h, w, classes), dtype=float)
for scale in scales:
long_size = round(scale * base_size)
new_h = long_size
new_w = long_size
if h > w:
new_w = round(long_size/float(h)*w)
else:
new_h = round(long_size/float(w)*h)
image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
prediction = np.argmax(prediction, axis=2)
gray = np.uint8(prediction)
color = colorize(gray, colors)
image_name = image_path.split('/')[-1].split('.')[0]
gray_path = os.path.join('./figure/demo/', image_name + '_gray.png')
color_path = os.path.join('./figure/demo/', image_name + '_color.png')
cv2.imwrite(gray_path, gray)
color.save(color_path)
logger.info("=> Prediction saved in {}".format(color_path))
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
internal/gitlab.go
|
package internal
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"time"
)
const gitlabURL = "http://gitlab.alx/api/v4/projects/%d/merge_requests/%d/commits"
type commit struct {
ID string `json:"id"`
Title string `json:"title"`
AuthorName string `json:"author_name"`
}
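// getCommits fetches the commits of a merge request from the internal GitLab
// API. The personal access token is read from the TOKEN environment variable,
// so it must be exported before use, e.g. (placeholder value):
//   export TOKEN=<personal-access-token>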
func getCommits(id, iid uint) ([]commit, error) {
url := fmt.Sprintf(gitlabURL, id, iid)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, err
}
token := os.Getenv("TOKEN")
if token == "" {
return nil, errors.New("$TOKEN must be set")
}
req.Header.Add("Private-Token", token)
tr := &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
}
client := &http.Client{Transport: tr}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
commits := []commit{}
	err = json.Unmarshal(body, &commits)
if err != nil {
return nil, err
}
return commits, nil
}
func getTaskNumbers(commits []commit) ([]uint, error) {
uniq := make(map[uint]bool)
tasks := make([]uint, 0, len(commits))
for _, c := range commits {
id, err := parseTitle(c.Title)
if err != nil {
return nil, err
}
if id == 0 {
continue
}
if !uniq[id] {
tasks = append(tasks, id)
}
uniq[id] = true
}
return tasks, nil
}
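// parseTitle extracts the numeric task id from a commit title that starts
// with '#'. Illustrative examples (hypothetical titles):
//   parseTitle("#42 fix login flow")  -> 42, nil
//   parseTitle("refactor: tidy code") -> 0, nil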
func parseTitle(title string) (uint, error) {
if !strings.HasPrefix(title, "#") {
return 0, nil
}
var num string
for _, char := range title {
if isNumber(char) {
num += string(char)
}
}
res, err := strconv.ParseUint(num, 10, 0)
if err != nil {
return 0, err
}
return uint(res), nil
}
func isNumber(char rune) bool {
chars := [...]rune{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
for _, c := range chars {
if char == c {
return true
}
}
return false
}
|
[
"\"TOKEN\""
] |
[] |
[
"TOKEN"
] |
[]
|
["TOKEN"]
|
go
| 1 | 0 | |
pkg/cli/client_test.go
|
//go:build extended
// +build extended
package cli //nolint:testpackage
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
. "github.com/smartystreets/goconvey/convey"
"gopkg.in/resty.v1"
"zotregistry.io/zot/pkg/api"
"zotregistry.io/zot/pkg/api/config"
"zotregistry.io/zot/pkg/test"
)
const (
BaseURL1 = "http://127.0.0.1:8088"
BaseSecureURL1 = "https://127.0.0.1:8088"
HOST1 = "127.0.0.1:8088"
SecurePort1 = "8088"
BaseURL2 = "http://127.0.0.1:8089"
BaseSecureURL2 = "https://127.0.0.1:8089"
SecurePort2 = "8089"
BaseURL3 = "http://127.0.0.1:8090"
BaseSecureURL3 = "https://127.0.0.1:8090"
SecurePort3 = "8090"
username = "test"
passphrase = "test"
ServerCert = "../../test/data/server.cert"
ServerKey = "../../test/data/server.key"
CACert = "../../test/data/ca.crt"
sourceCertsDir = "../../test/data"
certsDir1 = "/.config/containers/certs.d/127.0.0.1:8088/"
)
func TestTLSWithAuth(t *testing.T) {
Convey("Make a new controller", t, func() {
caCert, err := ioutil.ReadFile(CACert)
So(err, ShouldBeNil)
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
defer func() { resty.SetTLSClientConfig(nil) }()
conf := config.New()
conf.HTTP.Port = SecurePort1
htpasswdPath := test.MakeHtpasswdFile()
defer os.Remove(htpasswdPath)
conf.HTTP.Auth = &config.AuthConfig{
HTPasswd: config.AuthHTPasswd{
Path: htpasswdPath,
},
}
conf.HTTP.TLS = &config.TLSConfig{
Cert: ServerCert,
Key: ServerKey,
CACert: CACert,
}
ctlr := api.NewController(conf)
ctlr.Config.Storage.RootDirectory = t.TempDir()
go func() {
// this blocks
if err := ctlr.Run(); err != nil {
return
}
}()
// wait till ready
for {
_, err := resty.R().Get(BaseSecureURL1)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
defer func() {
ctx := context.Background()
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Test with htpassw auth", func() {
configPath := makeConfigFile(`{"configs":[{"_name":"imagetest","showspinner":false}]}`)
defer os.Remove(configPath)
home := os.Getenv("HOME")
destCertsDir := filepath.Join(home, certsDir1)
if err = test.CopyFiles(sourceCertsDir, destCertsDir); err != nil {
panic(err)
}
defer os.RemoveAll(destCertsDir)
args := []string{"imagetest", "--name", "dummyImageName", "--url", HOST1}
imageCmd := NewImageCommand(new(searchService))
imageBuff := bytes.NewBufferString("")
imageCmd.SetOut(imageBuff)
imageCmd.SetErr(imageBuff)
imageCmd.SetArgs(args)
err := imageCmd.Execute()
So(err, ShouldNotBeNil)
So(imageBuff.String(), ShouldContainSubstring, "invalid URL format")
args = []string{"imagetest"}
configPath = makeConfigFile(
fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s/v2/_catalog","showspinner":false}]}`,
BaseSecureURL1))
defer os.Remove(configPath)
imageCmd = NewImageCommand(new(searchService))
imageBuff = bytes.NewBufferString("")
imageCmd.SetOut(imageBuff)
imageCmd.SetErr(imageBuff)
imageCmd.SetArgs(args)
err = imageCmd.Execute()
So(err, ShouldNotBeNil)
So(imageBuff.String(), ShouldContainSubstring, "check credentials")
user := fmt.Sprintf("%s:%s", username, passphrase)
args = []string{"imagetest", "-u", user}
configPath = makeConfigFile(
fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s/v2/_catalog","showspinner":false}]}`,
BaseSecureURL1))
defer os.Remove(configPath)
imageCmd = NewImageCommand(new(searchService))
imageBuff = bytes.NewBufferString("")
imageCmd.SetOut(imageBuff)
imageCmd.SetErr(imageBuff)
imageCmd.SetArgs(args)
err = imageCmd.Execute()
So(err, ShouldBeNil)
})
})
}
func TestTLSWithoutAuth(t *testing.T) {
Convey("Home certs - Make a new controller", t, func() {
caCert, err := ioutil.ReadFile(CACert)
So(err, ShouldBeNil)
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
defer func() { resty.SetTLSClientConfig(nil) }()
conf := config.New()
conf.HTTP.Port = SecurePort1
conf.HTTP.TLS = &config.TLSConfig{
Cert: ServerCert,
Key: ServerKey,
CACert: CACert,
}
ctlr := api.NewController(conf)
ctlr.Config.Storage.RootDirectory = t.TempDir()
go func() {
// this blocks
if err := ctlr.Run(); err != nil {
return
}
}()
// wait till ready
for {
_, err := resty.R().Get(BaseURL1)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
defer func() {
ctx := context.Background()
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Certs in user's home", func() {
configPath := makeConfigFile(
fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s/v2/_catalog","showspinner":false}]}`,
BaseSecureURL1))
defer os.Remove(configPath)
home := os.Getenv("HOME")
destCertsDir := filepath.Join(home, certsDir1)
if err = test.CopyFiles(sourceCertsDir, destCertsDir); err != nil {
panic(err)
}
defer os.RemoveAll(destCertsDir)
args := []string{"imagetest"}
imageCmd := NewImageCommand(new(searchService))
imageBuff := bytes.NewBufferString("")
imageCmd.SetOut(imageBuff)
imageCmd.SetErr(imageBuff)
imageCmd.SetArgs(args)
err := imageCmd.Execute()
So(err, ShouldBeNil)
})
})
Convey("Privileged certs - Make a new controller", t, func() {
caCert, err := ioutil.ReadFile(CACert)
So(err, ShouldBeNil)
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
defer func() { resty.SetTLSClientConfig(nil) }()
conf := config.New()
conf.HTTP.Port = SecurePort2
conf.HTTP.TLS = &config.TLSConfig{
Cert: ServerCert,
Key: ServerKey,
CACert: CACert,
}
ctlr := api.NewController(conf)
ctlr.Config.Storage.RootDirectory = t.TempDir()
go func() {
// this blocks
if err := ctlr.Run(); err != nil {
return
}
}()
// wait till ready
for {
_, err := resty.R().Get(BaseURL2)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
defer func() {
ctx := context.Background()
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Certs in privileged path", func() {
configPath := makeConfigFile(
fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s/v2/_catalog","showspinner":false}]}`,
BaseSecureURL2))
defer os.Remove(configPath)
args := []string{"imagetest"}
imageCmd := NewImageCommand(new(searchService))
imageBuff := bytes.NewBufferString("")
imageCmd.SetOut(imageBuff)
imageCmd.SetErr(imageBuff)
imageCmd.SetArgs(args)
err := imageCmd.Execute()
So(err, ShouldBeNil)
})
})
}
func TestTLSBadCerts(t *testing.T) {
Convey("Make a new controller", t, func() {
caCert, err := ioutil.ReadFile(CACert)
So(err, ShouldBeNil)
caCertPool := x509.NewCertPool()
caCertPool.AppendCertsFromPEM(caCert)
resty.SetTLSClientConfig(&tls.Config{RootCAs: caCertPool, MinVersion: tls.VersionTLS12})
defer func() { resty.SetTLSClientConfig(nil) }()
conf := config.New()
conf.HTTP.Port = SecurePort3
conf.HTTP.TLS = &config.TLSConfig{
Cert: ServerCert,
Key: ServerKey,
CACert: CACert,
}
ctlr := api.NewController(conf)
ctlr.Config.Storage.RootDirectory = t.TempDir()
go func() {
// this blocks
if err := ctlr.Run(); err != nil {
return
}
}()
// wait till ready
for {
_, err := resty.R().Get(BaseURL3)
if err == nil {
break
}
time.Sleep(100 * time.Millisecond)
}
defer func() {
ctx := context.Background()
_ = ctlr.Server.Shutdown(ctx)
}()
Convey("Test with system certs", func() {
configPath := makeConfigFile(
fmt.Sprintf(`{"configs":[{"_name":"imagetest","url":"%s/v2/_catalog","showspinner":false}]}`,
BaseSecureURL3))
defer os.Remove(configPath)
args := []string{"imagetest"}
imageCmd := NewImageCommand(new(searchService))
imageBuff := bytes.NewBufferString("")
imageCmd.SetOut(imageBuff)
imageCmd.SetErr(imageBuff)
imageCmd.SetArgs(args)
err := imageCmd.Execute()
So(err, ShouldNotBeNil)
So(imageBuff.String(), ShouldContainSubstring, "certificate signed by unknown authority")
})
})
}
|
[
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
junos/provider_test.go
|
package junos_test
import (
"os"
"testing"
"terraform-provider-junos/junos"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
)
var (
testAccProviders = map[string]terraform.ResourceProvider{
"junos": testAccProvider,
}
testAccProvider = junos.Provider().(*schema.Provider)
)
func TestProvider(t *testing.T) {
if err := junos.Provider().(*schema.Provider).InternalValidate(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProvider_impl(t *testing.T) {
var _ terraform.ResourceProvider = junos.Provider()
}
// Export TESTACC_SWITCH with a non-empty value to test the switch options (interface mode trunk, vlan native/members)
// on a switch Junos device; otherwise the tests cover all other parameters
// (interface inet, 802.3ad, routing instance, security zone/nat/ike/ipsec, etc.).
// Some resources and parameters work on both device types, but most are tested without TESTACC_SWITCH.
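// A hypothetical invocation for a non-switch device (values are placeholders;
// TF_ACC=1 is the standard flag that enables Terraform acceptance tests):
//   JUNOS_HOST=192.0.2.1 JUNOS_KEYFILE=~/.ssh/id_rsa TF_ACC=1 go test -v ./junos/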
func testAccPreCheck(t *testing.T) {
if os.Getenv("JUNOS_HOST") == "" && os.Getenv("JUNOS_KEYFILE") == "" {
t.Fatal("JUNOS_HOST or JUNOS_KEYFILE must be set for acceptance tests")
}
err := testAccProvider.Configure(terraform.NewResourceConfigRaw(nil))
if err != nil {
t.Fatal(err)
}
}
|
[
"\"JUNOS_HOST\"",
"\"JUNOS_KEYFILE\""
] |
[] |
[
"JUNOS_HOST",
"JUNOS_KEYFILE"
] |
[]
|
["JUNOS_HOST", "JUNOS_KEYFILE"]
|
go
| 2 | 0 | |
swaggertosdk/restapi/sdkbot.py
|
import logging
import os
from pathlib import Path
import tempfile
from git import Repo
from swaggertosdk.SwaggerToSdkCore import (
CONFIG_FILE,
read_config_from_github,
build_swaggertosdk_conf_from_json_readme,
)
from azure_devtools.ci_tools.git_tools import (
do_commit,
)
from azure_devtools.ci_tools.github_tools import (
configure_user,
manage_git_folder,
GithubLink
)
from swaggertosdk.python_sdk_tools import build_installation_message
from azure_devtools.ci_tools.bot_framework import (
order
)
_LOGGER = logging.getLogger("swaggertosdk.restapi.sdkbot")
class GithubHandler:
def __init__(self, gh_token=None):
# I need a token to do PR. Nothing to do with the bot.
self.gh_token = os.environ["GH_TOKEN"] if not gh_token else gh_token
@order
def install(self, issue):
if not issue.pull_request:
return "No installation instruction possible for issue. You need a PR for this command."
sdk_pr = issue.repository.get_pull(issue.number)
return build_installation_message(sdk_pr)
@order
def rebase(self, issue, branch=None):
if not issue.pull_request:
return "Rebase is just supported in PR for now"
pr = issue.repository.get_pull(issue.number)
branch_name = pr.head.ref
branched_sdk_id = pr.head.repo.full_name+'@'+branch_name
upstream_url = 'https://github.com/{}.git'.format(pr.base.repo.full_name)
upstream_base = pr.base.ref if not branch else branch
with tempfile.TemporaryDirectory() as temp_dir, \
manage_git_folder(self.gh_token, Path(temp_dir) / Path("sdk"), branched_sdk_id) as sdk_folder:
sdk_repo = Repo(str(sdk_folder))
configure_user(self.gh_token, sdk_repo)
upstream = sdk_repo.create_remote('upstream', url=upstream_url)
upstream.fetch()
msg = sdk_repo.git.rebase('upstream/{}'.format(upstream_base))
_LOGGER.debug(msg)
msg = sdk_repo.git.push(force=True)
_LOGGER.debug(msg)
return "Rebase done and pushed to the branch"
#@order
def git(self, issue, *git_parameters):
if not issue.pull_request:
return "Rebase is just supported in PR for now"
pr_obj = issue.repository.get_pull(issue.number)
branch_name = pr_obj.head.ref
branched_sdk_id = pr_obj.head.repo.full_name+'@'+branch_name
with tempfile.TemporaryDirectory() as temp_dir, \
manage_git_folder(self.gh_token, Path(temp_dir) / Path("sdk"), branched_sdk_id) as sdk_folder:
sdk_repo = Repo(str(sdk_folder))
configure_user(self.gh_token, sdk_repo)
command = getattr(sdk_repo.git, git_parameters[0])
initial_answer = command(*git_parameters[1:])
_LOGGER.debug(initial_answer)
msg = sdk_repo.git.push(force=True)
_LOGGER.debug(msg)
return "```shell\n"+initial_answer+"\n```"
@order
def rebuild(self, issue, project_pattern):
if not issue.pull_request:
return "Rebuild is just supported in PR for now"
sdkid = issue.repository.full_name
pr = issue.repository.get_pull(issue.number)
new_comment = issue.create_comment("Working on generating {} for you!!!".format(project_pattern))
config_path = CONFIG_FILE
message = "Rebuild by "+issue.html_url
autorest_bin = None
branch_name = pr.head.ref
branched_sdk_id = pr.head.repo.full_name+'@'+branch_name
if project_pattern.startswith("https://"):
link = GithubLink.from_string(project_pattern)
link = link.as_raw_link() # Ensure this is a raw link.
rest_api_id = link.gitid
rest_api_branch = link.branch_or_commit
token = link.token if link.token else self.gh_token
path = link.path
else:
rest_api_id = "Azure/azure-rest-api-specs"
rest_api_branch = "master"
token = self.gh_token
path = None # Not such notion of path here, since it's inside SwaggerToSdk conf
branched_rest_api_id = rest_api_id + "@" + rest_api_branch
config = read_config_from_github(pr.head.repo.full_name, branch_name, token)
with tempfile.TemporaryDirectory() as temp_dir, \
manage_git_folder(token, Path(temp_dir) / Path("rest"), branched_rest_api_id) as restapi_git_folder, \
manage_git_folder(self.gh_token, Path(temp_dir) / Path("sdk"), branched_sdk_id) as sdk_folder:
sdk_repo = Repo(str(sdk_folder))
configure_user(self.gh_token, sdk_repo)
if path: # Assume this is a Readme path
config["projects"] = {} # Wipe out everything
build_swaggertosdk_conf_from_json_readme(path, sdkid, config, base_folder=restapi_git_folder)
skip_callback = lambda x, y: False
else:
def skip_callback(project, local_conf):
del local_conf # Unused
if not project.startswith(project_pattern):
return True
return False
from swaggertosdk import SwaggerToSdkNewCLI
SwaggerToSdkNewCLI.build_libraries(config, skip_callback, restapi_git_folder,
sdk_repo, temp_dir, autorest_bin)
new_comment.edit("End of generation, doing commit")
commit_sha = do_commit(sdk_repo, message, branch_name, "")
if commit_sha:
new_comment.edit("Pushing")
sdk_repo.git.push('origin', branch_name, set_upstream=True)
new_comment.delete()
else:
new_comment.delete()
return "Nothing to rebuild, this PR is up to date"
_LOGGER.info("Build SDK finished and cleaned")
return "Build SDK finished and cleaned"
|
[] |
[] |
[
"GH_TOKEN"
] |
[]
|
["GH_TOKEN"]
|
python
| 1 | 0 | |
guillotina/tests/test_catalog.py
|
from datetime import datetime
from guillotina import configure
from guillotina import task_vars
from guillotina.catalog import index
from guillotina.catalog.utils import get_index_fields
from guillotina.catalog.utils import get_metadata_fields
from guillotina.catalog.utils import parse_query
from guillotina.component import get_adapter
from guillotina.component import query_utility
from guillotina.content import Container
from guillotina.content import create_content
from guillotina.content import Resource
from guillotina.directives import index_field
from guillotina.event import notify
from guillotina.events import ObjectModifiedEvent
from guillotina.interfaces import ICatalogDataAdapter
from guillotina.interfaces import ICatalogUtility
from guillotina.interfaces import IResource
from guillotina.interfaces import ISecurityInfo
from guillotina.tests import mocks
from guillotina.tests import utils as test_utils
import json
import os
import pytest
pytestmark = pytest.mark.asyncio
NOT_POSTGRES = os.environ.get("DATABASE", "DUMMY") in ("cockroachdb", "DUMMY")
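# App settings enabling the PG catalog contrib app; applied below via pytest.mark.app_settings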
PG_CATALOG_SETTINGS = {
"applications": ["guillotina.contrib.catalog.pg"],
"load_utilities": {
"catalog": {
"provides": "guillotina.interfaces.ICatalogUtility",
"factory": "guillotina.contrib.catalog.pg.utility.PGSearchUtility",
}
},
}
class ICustomItem(IResource):
pass
@index_field.with_accessor(ICustomItem, "title", type="text", field="title")
def get_title(ob):
return f"The title is: {ob.title}"
@configure.contenttype(type_name="CustomItem", schema=ICustomItem)
class CustomItem(Resource):
"""
Basic item content type. Inherits from Resource
"""
async def test_indexed_fields(dummy_guillotina):
fields = get_index_fields("Item")
assert "uuid" in fields
assert "path" in fields
assert "title" in fields
assert "creation_date" in fields
metadata = get_metadata_fields("Example")
assert len(metadata) == 1
async def test_get_index_data(dummy_txn_root):
async with dummy_txn_root:
container = await create_content("Container", id="guillotina", title="Guillotina")
container.__name__ = "guillotina"
ob = await create_content("Item", id="foobar")
data = ICatalogDataAdapter(ob)
fields = await data()
assert "type_name" in fields
assert "uuid" in fields
assert "path" in fields
assert "title" in fields
async def test_get_index_data_with_accessors(dummy_txn_root):
async with dummy_txn_root:
container = await create_content("Container", id="guillotina", title="Guillotina")
container.__name__ = "guillotina"
ob = await create_content("Example", id="foobar", categories=[{"label": "foo", "number": 1}])
data = ICatalogDataAdapter(ob)
fields = await data()
for field_name in (
"categories_accessor",
"foobar_accessor",
"type_name",
"categories",
"uuid",
"path",
"title",
"tid",
):
assert field_name in fields
# now only with indexes specified
data = ICatalogDataAdapter(ob)
fields = await data(indexes=["categories"])
# but should also pull in `foobar_accessor` because it does not
# have a field specified for it.
for field_name in (
"categories_accessor",
"foobar_accessor",
"type_name",
"categories",
"uuid",
"tid",
):
assert field_name in fields
assert "title" not in fields
async def test_override_index_directive(dummy_txn_root):
container = await create_content("Container", id="guillotina", title="Guillotina")
container.__name__ = "guillotina"
ob = await create_content("CustomItem", id="foobar", title="Test")
data = ICatalogDataAdapter(ob)
fields = await data()
assert fields["title"] == "The title is: Test" # Good, uses the custom accessor
ob = await create_content("Item", id="foobar", title="Test")
data = ICatalogDataAdapter(ob)
fields = await data(indexes=["title"])
assert fields["title"] == "Test"
# E AssertionError: assert 'The title is: Test' == 'Test'
# E - The title is: Test
# E + Test
async def test_registered_base_utility(dummy_guillotina):
util = query_utility(ICatalogUtility)
assert util is not None
async def test_get_security_data(dummy_guillotina):
ob = test_utils.create_content()
adapter = get_adapter(ob, ISecurityInfo)
data = adapter()
assert "id" in data
assert "access_users" in data
assert "access_roles" in data
async def test_get_data_uses_indexes_param(dummy_txn_root):
async with dummy_txn_root:
util = query_utility(ICatalogUtility)
container = await create_content("Container", id="guillotina", title="Guillotina")
container.__name__ = "guillotina"
ob = await create_content("Item", id="foobar")
data = await util.get_data(ob, indexes=["title"])
assert len(data) == 8 # @uid, type_name, etc always returned
data = await util.get_data(ob, indexes=["title", "id"])
assert len(data) == 9
data = await util.get_data(ob)
assert len(data) > 10
async def test_modified_event_gathers_all_index_data(dummy_guillotina):
container = await create_content("Container", id="guillotina", title="Guillotina")
container.__name__ = "guillotina"
task_vars.container.set(container)
ob = await create_content("Item", id="foobar")
ob.__uuid__ = "foobar"
await notify(ObjectModifiedEvent(ob, payload={"title": "", "id": ""}))
fut = index.get_indexer()
assert len(fut.update["foobar"]) == 9
await notify(ObjectModifiedEvent(ob, payload={"creation_date": ""}))
assert "modification_date" in fut.update["foobar"]
assert len(fut.update["foobar"]) == 10
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_search_endpoint(container_requester):
async with container_requester as requester:
await requester("POST", "/db/guillotina", data=json.dumps({"@type": "Item"}))
response, status = await requester("GET", "/db/guillotina/@search")
assert status == 200
assert len(response["items"]) == 1
@pytest.mark.skipif(not NOT_POSTGRES, reason="Only not PG")
async def test_search_endpoint_no_pg(container_requester):
async with container_requester as requester:
response, status = await requester("GET", "/db/guillotina/@search")
assert status == 200
assert len(response["items"]) == 0
async def test_search_post_endpoint(container_requester):
async with container_requester as requester:
response, status = await requester("POST", "/db/guillotina/@search", data="{}")
assert status == 200
async def test_reindex_endpoint(container_requester):
async with container_requester as requester:
response, status = await requester("POST", "/db/guillotina/@catalog-reindex", data="{}")
assert status == 200
async def test_async_reindex_endpoint(container_requester):
async with container_requester as requester:
response, status = await requester("POST", "/db/guillotina/@async-catalog-reindex", data="{}")
assert status == 200
async def test_create_catalog(container_requester):
async with container_requester as requester:
response, status = await requester("POST", "/db/guillotina/@catalog", data="{}")
assert status == 200
response, status = await requester("DELETE", "/db/guillotina/@catalog")
assert status == 200
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_query_stored_json(container_requester):
async with container_requester as requester:
await requester(
"POST", "/db/guillotina/", data=json.dumps({"@type": "Item", "title": "Item1", "id": "item1"})
)
await requester(
"POST", "/db/guillotina/", data=json.dumps({"@type": "Item", "title": "Item2", "id": "item2"})
)
async with requester.db.storage.pool.acquire() as conn:
result = await conn.fetch(
"""
select json from {0}
where json->>'type_name' = 'Item' AND json->>'container_id' = 'guillotina'
order by json->>'id'
""".format(
requester.db.storage._objects_table_name
)
)
print(f"{result}")
assert len(result) == 2
assert json.loads(result[0]["json"])["id"] == "item1"
assert json.loads(result[1]["json"])["id"] == "item2"
result = await conn.fetch(
"""
select json from {0}
where json->>'id' = 'item1' AND json->>'container_id' = 'guillotina'
""".format(
requester.db.storage._objects_table_name
)
)
assert len(result) == 1
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_query_pg_catalog(container_requester):
from guillotina.contrib.catalog.pg.utility import PGSearchUtility
async with container_requester as requester:
await requester(
"POST", "/db/guillotina/", data=json.dumps({"@type": "Item", "title": "Item1", "id": "item1"})
)
await requester(
"POST", "/db/guillotina/", data=json.dumps({"@type": "Item", "title": "Item2", "id": "item2"})
)
async with requester.db.get_transaction_manager() as tm, await tm.begin():
test_utils.login()
root = await tm.get_root()
container = await root.async_get("guillotina")
util = PGSearchUtility()
await util.initialize()
results = await util.search(container, {"id": "item1"})
assert len(results["items"]) == 1
results = await util.search(container, {"_size": "1"})
assert len(results["items"]) == 1
results = await util.search(container, {"_size": "1", "_from": "1"})
assert len(results["items"]) == 1
results = await util.query_aggregation(container, {"_metadata": "title"})
assert len(results["items"]) == 2
assert results["items"][0][0] == "Item1"
results = await util.query_aggregation(container, {"_metadata": ["title", "creators"]})
assert len(results["items"]) == 2
assert results["items"][0][1][0] == "root"
results = await util.query_aggregation(container, {"_metadata": ["creators"]})
assert len(results["items"]) == 1
assert results["items"][0][0][0] == "root"
results = await util.query_aggregation(
container, {"_metadata": ["title", "creators"], "title__eq": "Item2"}
)
assert len(results["items"]) == 1
assert results["items"][0][1][0] == "root"
resp, status = await requester(
"GET", "/db/guillotina/@aggregation?title__eq=Item2&_metadata=title,creators",
)
assert status == 200
assert resp == {
"title": {"items": {"Item2": 1}, "total": 1},
"creators": {"items": {"root": 1}, "total": 1},
}
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_fulltext_query_pg_catalog(container_requester):
from guillotina.contrib.catalog.pg.utility import PGSearchUtility
async with container_requester as requester:
await requester(
"POST",
"/db/guillotina/",
data=json.dumps({"@type": "Item", "id": "item1", "title": "Something interesting about foobar"}),
)
await requester(
"POST",
"/db/guillotina/",
data=json.dumps({"@type": "Item", "title": "Something else", "id": "item2"}),
)
async with requester.db.get_transaction_manager() as tm, await tm.begin():
test_utils.login()
root = await tm.get_root()
container = await root.async_get("guillotina")
util = PGSearchUtility()
await util.initialize()
results = await util.search(container, {"title": "something"})
assert len(results["items"]) == 2
results = await util.search(container, {"title": "interesting"})
assert len(results["items"]) == 1
results = await util.search(container, {"title": "interest"})
assert len(results["items"]) == 1
results = await util.search(container, {"title": "someth"})
assert len(results["items"]) == 2
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_fulltext_query_pg_catalog_lang(container_requester):
from guillotina.contrib.catalog.pg.utility import PGSearchUtility
async with container_requester as requester:
await requester(
"POST",
"/db/guillotina/",
data=json.dumps({"@type": "Item", "id": "item1", "title": "Something Grande Casa foobar"}),
)
await requester(
"POST",
"/db/guillotina/",
data=json.dumps({"@type": "Item", "title": "Something Grande", "id": "item2"}),
)
async with requester.db.get_transaction_manager() as tm, await tm.begin():
test_utils.login()
root = await tm.get_root()
container = await root.async_get("guillotina")
util = PGSearchUtility()
await util.initialize()
results = await util.search(container, {"title": "Grande"})
assert len(results["items"]) == 2
results = await util.search(container, {"title": "Casa"})
assert len(results["items"]) == 1
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_fulltext_query_pg_catalog_order(container_requester):
from guillotina.contrib.catalog.pg.utility import PGSearchUtility
async with container_requester as requester:
await requester(
"POST",
"/db/guillotina/",
data=json.dumps({"@type": "Item", "id": "item1", "title": "Something interesting"}),
)
await requester(
"POST",
"/db/guillotina/",
data=json.dumps(
{"@type": "Item", "title": "Something else something foobar something", "id": "item2"}
),
)
async with requester.db.get_transaction_manager() as tm, await tm.begin():
test_utils.login()
root = await tm.get_root()
container = await root.async_get("guillotina")
util = PGSearchUtility()
await util.initialize()
results = await util.search(container, {"title": "something", "_sort_asc": "title"})
assert results["items"][0]["title"] == "Something interesting"
assert len(results["items"]) == 2
results = await util.search(container, {"_sort_asc": "title"})
assert results["items"][0]["title"] == "Something else something foobar something"
assert len(results["items"]) == 2
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_date_query_pg_catalog_sort(container_requester):
from guillotina.contrib.catalog.pg.utility import PGSearchUtility
async with container_requester as requester:
await requester(
"POST",
"/db/guillotina/",
data=json.dumps({"@type": "Item", "id": "item1", "title": "Something interesting"}),
)
await requester(
"POST",
"/db/guillotina/",
data=json.dumps(
{"@type": "Item", "title": "Something else something foobar something", "id": "item2"}
),
)
async with requester.db.get_transaction_manager() as tm, await tm.begin():
test_utils.login()
root = await tm.get_root()
container = await root.async_get("guillotina")
util = PGSearchUtility()
await util.initialize()
results = await util.search(
container, {"creation_date__gt": "1/1/1900", "_sort_asc": "creation_date"}
)
assert len(results["items"]) == 2
results = await util.search(
container, {"creation_date__gt": "1/1/3000", "_sort_asc": "creation_date"}
)
assert len(results["items"]) == 0
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_build_pg_query(dummy_guillotina):
from guillotina.contrib.catalog.pg.utility import PGSearchUtility
util = PGSearchUtility()
with mocks.MockTransaction():
content = test_utils.create_content(Container)
query = parse_query(content, {"uuid": content.uuid}, util)
assert content.uuid == query["wheres_arguments"][0]
assert "json->'uuid'" in query["wheres"][0]
async def test_parse_bbb_plone(dummy_guillotina):
from guillotina.catalog.parser import BaseParser
content = test_utils.create_content(Container)
parser = BaseParser(None, content)
result = parser(
{"portal_type": "Folder", "SearchableText": "foobar", "b_size": 45, "b_start": 50, "path.depth": 2}
)
assert "searchabletext__or" in result["params"]
assert "title__in" in result["params"]["searchabletext__or"]
assert "depth" in result["params"]
assert "type_name" in result["params"]
assert "portal_type" not in result["params"]
assert result["_from"] == 50
assert result["size"] == 45
async def test_parse_base():
from guillotina.catalog.parser import BaseParser
content = test_utils.create_content(Container)
parser = BaseParser(None, content)
result = parser({"_from": 5, "_sort_asc": "modification_date", "path__starts": "foo/bar"})
assert result["_from"] == 5
assert result["sort_on"] == "modification_date"
assert result["sort_dir"] == "ASC"
assert result["params"]["path__starts"] == "/foo/bar"
result = parser({"_sort_des": "modification_date"})
assert result["sort_on"] == "modification_date"
assert result["sort_dir"] == "DESC"
result = parser({"_metadata": "modification_date"})
result["metadata"] == ["modification_date"]
result = parser({"_metadata": "_all"})
result["metadata"] is None
result = parser({"_metadata_not": "modification_date"})
result["excluded_metadata"] == ["modification_date"]
async def test_basic_index_generator():
from guillotina.contrib.catalog.pg.indexes import BasicJsonIndex
index = BasicJsonIndex("foobar")
assert "json->'" in index.where("foobar", "?")
assert "json->>'" in index.where("foobar", "=")
async def test_pg_field_parser(dummy_guillotina):
from guillotina.contrib.catalog.pg.parser import Parser
content = test_utils.create_content(Container)
parser = Parser(None, content)
# test convert operators
for q1, q2 in (("gte", ">="), ("gt", ">"), ("eq", "="), ("lte", "<="), ("not", "!="), ("lt", "<")):
where, value, select, field = parser.process_queried_field(f"depth__{q1}", "2")
assert f" {q2} " in where
assert value == [2]
# bad int
assert parser.process_queried_field(f"depth__{q1}", "foobar") is None
# convert bool
where, value, select, field = parser.process_queried_field(f"boolean_field", "true")
assert value == [True]
where, value, select, field = parser.process_queried_field(f"boolean_field", "false")
assert value == [False]
# none for invalid
assert parser.process_queried_field(f"foobar", None) is None
# convert to list
where, value, select, field = parser.process_queried_field(f"tags__in", "foo,bar")
assert value == [["foo", "bar"]]
assert " ?| " in where
where, value, select, field = parser.process_queried_field(f"tags", "bar")
assert " ? " in where
where, value, select, field = parser.process_queried_field(f"tags", ["foo", "bar"])
assert " ?| " in where
# date parsing
where, value, select, field = parser.process_queried_field(
f"creation_date__gte", "2019-06-15T18:37:31.008359+00:00"
)
assert isinstance(value[0], datetime)
# path
where, value, select, field = parser.process_queried_field(f"path", "/foo/bar")
assert "substring(json->>" in where
# ft
where, value, select, field = parser.process_queried_field(f"title", "foobar")
assert "to_tsvector" in where
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_parse_metadata(dummy_guillotina):
from guillotina.contrib.catalog.pg.utility import PGSearchUtility
util = PGSearchUtility()
with mocks.MockTransaction():
content = test_utils.create_content(Container)
query = parse_query(content, {"_metadata": "foobar"})
result = util.load_meatdata(query, {"foobar": "foobar", "blah": "blah"})
assert result == {"foobar": "foobar"}
query = parse_query(content, {"_metadata_not": "foobar"})
result = util.load_meatdata(query, {"foobar": "foobar", "blah": "blah"})
assert result == {"blah": "blah"}
@pytest.mark.app_settings(PG_CATALOG_SETTINGS)
@pytest.mark.skipif(NOT_POSTGRES, reason="Only PG")
async def test_not_in(container_requester):
async with container_requester as requester:
await requester(
"POST",
"/db/guillotina/",
data=json.dumps({"@type": "Item", "id": "item1", "title": "Something interesting"}),
)
await requester(
"POST",
"/db/guillotina/",
data=json.dumps(
{"@type": "Item", "title": "Something else something foobar something", "id": "item2"}
),
)
await requester(
"POST",
"/db/guillotina/",
data=json.dumps(
{"@type": "Item", "title": "Something else something foobar something", "id": "item3"}
),
)
async with requester.db.get_transaction_manager() as tm, await tm.begin():
test_utils.login()
root = await tm.get_root()
container = await root.async_get("guillotina")
util = query_utility(ICatalogUtility)
results = await util.search(container, {"id__not": "item1"})
assert len(results["items"]) == 2
util = query_utility(ICatalogUtility)
results = await util.search(container, {"type_name__not": "Item"})
assert len(results["items"]) == 0
|
[] |
[] |
[
"DATABASE"
] |
[]
|
["DATABASE"]
|
python
| 1 | 0 | |
pgpool2_exporter.go
|
/*
Copyright (c) 2021 PgPool Global Development Group
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package main
import (
"database/sql"
"errors"
"fmt"
"math"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"sync"
"time"
"github.com/blang/semver"
"github.com/go-kit/kit/log/level"
_ "github.com/lib/pq"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/promlog"
"github.com/prometheus/common/promlog/flag"
"github.com/prometheus/common/version"
"gopkg.in/alecthomas/kingpin.v2"
)
var (
listenAddress = kingpin.Flag("web.listen-address", "Address on which to expose metrics and web interface.").Default(":9719").String()
metricsPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics.").Default("/metrics").String()
logger = promlog.New(&promlog.Config{})
)
const (
namespace = "pgpool2"
exporter = "exporter"
landingPage = `
<html>
<head>
<title>Pgpool-II Exporter</title>
</head>
<body>
<h1>Pgpool-II Exporter</h1>
<p>
<a href='%s'>Metrics</a>
</p>
</body>
</html>`
)
// columnUsage should be one of several enum values which describe how a
// queried row is to be converted to a Prometheus metric.
type columnUsage int
// Convert a string to the corresponding columnUsage
func stringTocolumnUsage(s string) (u columnUsage, err error) {
switch s {
case "DISCARD":
u = DISCARD
case "LABEL":
u = LABEL
case "COUNTER":
u = COUNTER
case "GAUGE":
u = GAUGE
case "MAPPEDMETRIC":
u = MAPPEDMETRIC
case "DURATION":
u = DURATION
default:
err = fmt.Errorf("wrong columnUsage given : %s", s)
}
return
}
// nolint: golint
const (
DISCARD columnUsage = iota // Ignore this column
LABEL columnUsage = iota // Use this column as a label
COUNTER columnUsage = iota // Use this column as a counter
GAUGE columnUsage = iota // Use this column as a gauge
MAPPEDMETRIC columnUsage = iota // Use this column with the supplied mapping of text values
DURATION columnUsage = iota // This column should be interpreted as a text duration (and converted to milliseconds)
)
// UnmarshalYAML implements the yaml.Unmarshaler interface
func (cu *columnUsage) UnmarshalYAML(unmarshal func(interface{}) error) error {
var value string
if err := unmarshal(&value); err != nil {
return err
}
columnUsage, err := stringTocolumnUsage(value)
if err != nil {
return err
}
*cu = columnUsage
return nil
}
// Groups metric maps under a shared set of labels
type MetricMapNamespace struct {
labels []string // Label names for this namespace
columnMappings map[string]MetricMap // Column mappings in this namespace
}
// Stores the prometheus metric description which a given column will be mapped
// to by the collector
type MetricMap struct {
discard bool // Should metric be discarded during mapping?
vtype prometheus.ValueType // Prometheus valuetype
namespace string
desc *prometheus.Desc // Prometheus descriptor
conversion func(interface{}) (float64, bool) // Conversion function to turn PG result into float64
}
// User-friendly representation of a prometheus descriptor map
type ColumnMapping struct {
usage columnUsage `yaml:"usage"`
description string `yaml:"description"`
}
// Exporter collects Pgpool-II stats from the given server and exports
// them using the prometheus metrics package.
type Exporter struct {
dsn string
namespace string
mutex sync.RWMutex
duration prometheus.Gauge
up prometheus.Gauge
error prometheus.Gauge
totalScrapes prometheus.Counter
metricMap map[string]MetricMapNamespace
db *sql.DB
}
var (
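	// metricMaps describes, for each "SHOW <namespace>" command, how the
	// returned columns map to Prometheus labels and metrics.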
metricMaps = map[string]map[string]ColumnMapping{
"pool_nodes": {
"hostname": {LABEL, "Backend hostname"},
"port": {LABEL, "Backend port"},
"role": {LABEL, "Role (primary or standby)"},
"status": {GAUGE, "Backend node Status (1 for up or waiting, 0 for down or unused)"},
"select_cnt": {COUNTER, "SELECT statement counts issued to each backend"},
"replication_delay": {GAUGE, "Replication delay"},
},
"pool_backend_stats": {
"hostname": {LABEL, "Backend hostname"},
"port": {LABEL, "Backend port"},
"role": {LABEL, "Role (primary or standby)"},
"status": {GAUGE, "Backend node Status (1 for up or waiting, 0 for down or unused)"},
"select_cnt": {COUNTER, "SELECT statement counts issued to each backend"},
"insert_cnt": {COUNTER, "INSERT statement counts issued to each backend"},
"update_cnt": {COUNTER, "UPDATE statement counts issued to each backend"},
"delete_cnt": {COUNTER, "DELETE statement counts issued to each backend"},
"ddl_cnt": {COUNTER, "DDL statement counts issued to each backend"},
"other_cnt": {COUNTER, "other statement counts issued to each backend"},
"panic_cnt": {COUNTER, "Panic message counts returned from backend"},
"fatal_cnt": {COUNTER, "Fatal message counts returned from backend)"},
"error_cnt": {COUNTER, "Error message counts returned from backend"},
},
"pool_health_check_stats": {
"hostname": {LABEL, "Backend hostname"},
"port": {LABEL, "Backend port"},
"role": {LABEL, "Role (primary or standby)"},
"status": {GAUGE, "Backend node Status (1 for up or waiting, 0 for down or unused)"},
"total_count": {GAUGE, "Number of health check count in total"},
"success_count": {GAUGE, "Number of successful health check count in total"},
"fail_count": {GAUGE, "Number of failed health check count in total"},
"skip_count": {GAUGE, "Number of skipped health check count in total"},
"retry_count": {GAUGE, "Number of retried health check count in total"},
"average_retry_count": {GAUGE, "Number of average retried health check count in a health check session"},
"max_retry_count": {GAUGE, "Number of maximum retried health check count in a health check session"},
"max_duration": {GAUGE, "Maximum health check duration in Millie seconds"},
"min_duration": {GAUGE, "Minimum health check duration in Millie seconds"},
"average_duration": {GAUGE, "Average health check duration in Millie seconds"},
},
"pool_processes": {
"pool_pid": {DISCARD, "PID of Pgpool-II child processes"},
"database": {DISCARD, "Database name of the currently active backend connection"},
},
"pool_cache": {
"cache_hit_ratio": {GAUGE, "Query cache hit ratio"},
"num_hash_entries": {GAUGE, "Number of total hash entries"},
"used_hash_entries": {GAUGE, "Number of used hash entries"},
"num_cache_entries": {GAUGE, "Number of used cache entries"},
"used_cache_entries_size": {GAUGE, "Total size of used cache size"},
"free_cache_entries_size": {GAUGE, "Total size of free cache size"},
},
}
)
// Pgpool-II version
var pgpoolVersionRegex = regexp.MustCompile(`^((\d+)(\.\d+)(\.\d+)?)`)
var version42 = semver.MustParse("4.2.0")
var pgpoolSemver semver.Version
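// NewExporter returns a new Exporter that connects to the Pgpool-II instance described by dsn.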
func NewExporter(dsn string, namespace string) *Exporter {
db, err := getDBConn(dsn)
if err != nil {
level.Error(logger).Log("err", err)
os.Exit(1)
}
return &Exporter{
dsn: dsn,
namespace: namespace,
up: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the Pgpool-II server is up (1 for yes, 0 for no).",
}),
duration: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "last_scrape_duration_seconds",
Help: "Duration of the last scrape of metrics from Pgpool-II.",
}),
totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Name: "scrapes_total",
Help: "Total number of times Pgpool-II has been scraped for metrics.",
}),
error: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from Pgpool-II resulted in an error (1 for error, 0 for success).",
}),
metricMap: makeDescMap(metricMaps, namespace),
db: db,
}
}
// Query within a namespace mapping and emit metrics. Returns fatal errors if
// the scrape fails, and a slice of errors if they were non-fatal.
func queryNamespaceMapping(ch chan<- prometheus.Metric, db *sql.DB, namespace string, mapping MetricMapNamespace) ([]error, error) {
query := fmt.Sprintf("SHOW %s;", namespace)
// Don't fail on a bad scrape of one metric
rows, err := db.Query(query)
if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error running query on database: ", namespace, err))
}
defer rows.Close()
var columnNames []string
columnNames, err = rows.Columns()
if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error retrieving column list for: ", namespace, err))
}
// Make a lookup map for the column indices
var columnIdx = make(map[string]int, len(columnNames))
for i, n := range columnNames {
columnIdx[n] = i
}
var columnData = make([]interface{}, len(columnNames))
var scanArgs = make([]interface{}, len(columnNames))
for i := range columnData {
scanArgs[i] = &columnData[i]
}
nonfatalErrors := []error{}
// Read from the result of "SHOW pool_processes"
if namespace == "pool_processes" {
frontendByUserDb := make(map[string]map[string]int)
var frontend_total float64
var frontend_used float64
for rows.Next() {
err = rows.Scan(scanArgs...)
if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
}
frontend_total++
// Loop over column names to find currently connected backend database
var valueDatabase string
var valueUsername string
for idx, columnName := range columnNames {
switch columnName {
case "database":
valueDatabase, _ = dbToString(columnData[idx])
case "username":
valueUsername, _ = dbToString(columnData[idx])
}
}
if len(valueDatabase) > 0 && len(valueUsername) > 0 {
frontend_used++
dbCount, ok := frontendByUserDb[valueUsername]
if !ok {
dbCount = map[string]int{valueDatabase: 0}
}
dbCount[valueDatabase]++
frontendByUserDb[valueUsername] = dbCount
}
}
variableLabels := []string{"username", "database"}
for userName, dbs := range frontendByUserDb {
for dbName, count := range dbs {
labels := []string{userName, dbName}
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(prometheus.BuildFQName("pgpool2", "", "frontend_used"), "Number of used child processes", variableLabels, nil),
prometheus.GaugeValue,
float64(count),
labels...,
)
}
}
// Generate the metric for "pool_processes"
ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc(prometheus.BuildFQName("pgpool2", "", "frontend_total"), "Number of total child processes", nil, nil),
prometheus.GaugeValue,
frontend_total,
)
ch <- prometheus.MustNewConstMetric(
prometheus.NewDesc(prometheus.BuildFQName("pgpool2", "", "frontend_used_ratio"), "Ratio of child processes to total processes", nil, nil),
prometheus.GaugeValue,
frontend_used/frontend_total,
)
return nonfatalErrors, nil
}
for rows.Next() {
err = rows.Scan(scanArgs...)
if err != nil {
return []error{}, errors.New(fmt.Sprintln("Error retrieving rows:", namespace, err))
}
// Get the label values for this row.
labels := make([]string, len(mapping.labels))
for idx, label := range mapping.labels {
labels[idx], _ = dbToString(columnData[columnIdx[label]])
}
// Loop over column names, and match to scan data.
for idx, columnName := range columnNames {
if metricMapping, ok := mapping.columnMappings[columnName]; ok {
// Is this a metricy metric?
if metricMapping.discard {
continue
}
// If status column, convert string to int.
if columnName == "status" {
valueString, ok := dbToString(columnData[idx])
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx])))
continue
}
value := parseStatusField(valueString)
// Generate the metric
ch <- prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
continue
}
value, ok := dbToFloat64(columnData[idx])
if !ok {
nonfatalErrors = append(nonfatalErrors, errors.New(fmt.Sprintln("Unexpected error parsing column: ", namespace, columnName, columnData[idx])))
continue
}
// Generate the metric
ch <- prometheus.MustNewConstMetric(metricMapping.desc, metricMapping.vtype, value, labels...)
}
}
}
return nonfatalErrors, nil
}
// Establish a new DB connection using dsn.
func getDBConn(dsn string) (*sql.DB, error) {
db, err := sql.Open("postgres", dsn)
if err != nil {
return nil, err
}
db.SetMaxOpenConns(1)
db.SetMaxIdleConns(1)
err = ping(db)
if err != nil {
return nil, err
}
return db, nil
}
// Connect to Pgpool-II and run "SHOW POOL_VERSION;" to check connection availability.
func ping(db *sql.DB) error {
rows, err := db.Query("SHOW POOL_VERSION;")
if err != nil {
return fmt.Errorf("error connecting to Pgpool-II: %s", err)
}
defer rows.Close()
return nil
}
// Convert database.sql types to float64s for Prometheus consumption. Null types are mapped to NaN. string and []byte
// types are mapped as NaN and !ok
func dbToFloat64(t interface{}) (float64, bool) {
switch v := t.(type) {
case int64:
return float64(v), true
case float64:
return v, true
case time.Time:
return float64(v.Unix()), true
case []byte:
// Try and convert to string and then parse to a float64
strV := string(v)
result, err := strconv.ParseFloat(strV, 64)
if err != nil {
return math.NaN(), false
}
return result, true
case string:
result, err := strconv.ParseFloat(v, 64)
if err != nil {
level.Error(logger).Log("msg", "Could not parse string", "err", err)
return math.NaN(), false
}
return result, true
case bool:
if v {
return 1.0, true
}
return 0.0, true
case nil:
return math.NaN(), true
default:
return math.NaN(), false
}
}
// Convert database.sql to string for Prometheus labels. Null types are mapped to empty strings.
func dbToString(t interface{}) (string, bool) {
switch v := t.(type) {
case int64:
return fmt.Sprintf("%v", v), true
case float64:
return fmt.Sprintf("%v", v), true
case time.Time:
return fmt.Sprintf("%v", v.Unix()), true
case nil:
return "", true
case []byte:
// Try and convert to string
return string(v), true
case string:
return v, true
case bool:
if v {
return "true", true
}
return "false", true
default:
return "", false
}
}
// Convert a Pgpool-II status string to a float (1 for up/waiting, 0 for down/unused).
func parseStatusField(value string) float64 {
switch value {
case "true", "up", "waiting":
return 1.0
case "false", "unused", "down":
return 0.0
}
return 0.0
}
// Mask user password in DSN
func maskPassword(dsn string) string {
pDSN, err := url.Parse(dsn)
if err != nil {
return "could not parse DATA_SOURCE_NAME"
}
// Mask user password in DSN
if pDSN.User != nil {
pDSN.User = url.UserPassword(pDSN.User.Username(), "MASKED_PASSWORD")
}
return pDSN.String()
}
// Retrieve Pgpool-II version.
func queryVersion(db *sql.DB) (semver.Version, error) {
level.Debug(logger).Log("msg", "Querying Pgpool-II version")
versionRows, err := db.Query("SHOW POOL_VERSION;")
if err != nil {
return semver.Version{}, errors.New(fmt.Sprintln("Error querying SHOW POOL_VERSION:", err))
}
defer versionRows.Close()
var columnNames []string
columnNames, err = versionRows.Columns()
if err != nil {
return semver.Version{}, errors.New(fmt.Sprintln("Error retrieving column name for version:", err))
}
if len(columnNames) != 1 || columnNames[0] != "pool_version" {
return semver.Version{}, errors.New(fmt.Sprintln("Error returning Pgpool-II version:", err))
}
var pgpoolVersion string
for versionRows.Next() {
err := versionRows.Scan(&pgpoolVersion)
if err != nil {
return semver.Version{}, errors.New(fmt.Sprintln("Error retrieving SHOW POOL_VERSION rows:", err))
}
}
v := pgpoolVersionRegex.FindStringSubmatch(pgpoolVersion)
if len(v) > 1 {
level.Debug(logger).Log("pgpool_version", v[1])
return semver.ParseTolerant(v[1])
}
return semver.Version{}, errors.New(fmt.Sprintln("Error retrieving Pgpool-II version:", err))
}
// Iterate through all the namespace mappings in the exporter and run their queries.
func queryNamespaceMappings(ch chan<- prometheus.Metric, db *sql.DB, metricMap map[string]MetricMapNamespace) map[string]error {
// Return a map of namespace -> errors
namespaceErrors := make(map[string]error)
for namespace, mapping := range metricMap {
		// pool_backend_stats and pool_health_check_stats cannot be used before Pgpool-II 4.2.
if namespace == "pool_backend_stats" || namespace == "pool_health_check_stats" {
if pgpoolSemver.LT(version42) {
continue
}
}
level.Debug(logger).Log("msg", "Querying namespace", "namespace", namespace)
nonFatalErrors, err := queryNamespaceMapping(ch, db, namespace, mapping)
		// Serious error - a namespace disappeared
		if err != nil {
			namespaceErrors[namespace] = err
			level.Info(logger).Log("msg", "namespace disappeared", "err", err)
}
// Non-serious errors - likely version or parsing problems.
if len(nonFatalErrors) > 0 {
for _, err := range nonFatalErrors {
level.Info(logger).Log("msg", "error parsing", "err", err.Error())
}
}
}
return namespaceErrors
}
// Describe implements prometheus.Collector.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
// We cannot know in advance what metrics the exporter will generate
// from Postgres. So we use the poor man's describe method: Run a collect
// and send the descriptors of all the collected metrics. The problem
// here is that we need to connect to the Postgres DB. If it is currently
// unavailable, the descriptors will be incomplete. Since this is a
// stand-alone exporter and not used as a library within other code
// implementing additional metrics, the worst that can happen is that we
// don't detect inconsistent metrics created by this exporter
// itself. Also, a change in the monitored Postgres instance may change the
// exported metrics during the runtime of the exporter.
metricCh := make(chan prometheus.Metric)
doneCh := make(chan struct{})
go func() {
for m := range metricCh {
ch <- m.Desc()
}
close(doneCh)
}()
e.Collect(metricCh)
close(metricCh)
<-doneCh
}
// Collect implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.scrape(ch)
ch <- e.duration
ch <- e.up
ch <- e.totalScrapes
ch <- e.error
}
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
e.totalScrapes.Inc()
var err error
defer func(begun time.Time) {
e.duration.Set(time.Since(begun).Seconds())
if err == nil {
e.error.Set(0)
} else {
e.error.Set(1)
}
}(time.Now())
// Check connection availability and close the connection if it fails.
if err = ping(e.db); err != nil {
level.Error(logger).Log("msg", "Error pinging Pgpool-II", "err", err)
if cerr := e.db.Close(); cerr != nil {
level.Error(logger).Log("msg", "Error while closing non-pinging connection", "err", err)
}
level.Info(logger).Log("msg", "Reconnecting to Pgpool-II")
e.db, err = sql.Open("postgres", e.dsn)
e.db.SetMaxOpenConns(1)
e.db.SetMaxIdleConns(1)
if err = ping(e.db); err != nil {
level.Error(logger).Log("msg", "Error pinging Pgpool-II", "err", err)
if cerr := e.db.Close(); cerr != nil {
level.Error(logger).Log("msg", "Error while closing non-pinging connection", "err", err)
}
e.up.Set(0)
return
}
}
e.up.Set(1)
e.error.Set(0)
e.mutex.RLock()
defer e.mutex.RUnlock()
errMap := queryNamespaceMappings(ch, e.db, e.metricMap)
if len(errMap) > 0 {
level.Error(logger).Log("err", errMap)
e.error.Set(1)
}
}
// Turn the MetricMap column mapping into a prometheus descriptor mapping.
func makeDescMap(metricMaps map[string]map[string]ColumnMapping, namespace string) map[string]MetricMapNamespace {
var metricMap = make(map[string]MetricMapNamespace)
for metricNamespace, mappings := range metricMaps {
thisMap := make(map[string]MetricMap)
// Get the constant labels
var variableLabels []string
for columnName, columnMapping := range mappings {
if columnMapping.usage == LABEL {
variableLabels = append(variableLabels, columnName)
}
}
for columnName, columnMapping := range mappings {
// Determine how to convert the column based on its usage.
switch columnMapping.usage {
case DISCARD, LABEL:
thisMap[columnName] = MetricMap{
discard: true,
conversion: func(_ interface{}) (float64, bool) {
return math.NaN(), true
},
}
case COUNTER:
thisMap[columnName] = MetricMap{
vtype: prometheus.CounterValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_%s", namespace, metricNamespace, columnName), columnMapping.description, variableLabels, nil),
conversion: func(in interface{}) (float64, bool) {
return dbToFloat64(in)
},
}
case GAUGE:
thisMap[columnName] = MetricMap{
vtype: prometheus.GaugeValue,
desc: prometheus.NewDesc(fmt.Sprintf("%s_%s_%s", namespace, metricNamespace, columnName), columnMapping.description, variableLabels, nil),
conversion: func(in interface{}) (float64, bool) {
return dbToFloat64(in)
},
}
}
}
metricMap[metricNamespace] = MetricMapNamespace{variableLabels, thisMap}
}
return metricMap
}
func main() {
promlogConfig := &promlog.Config{}
flag.AddFlags(kingpin.CommandLine, promlogConfig)
kingpin.Version(version.Print("pgpool2_exporter"))
kingpin.HelpFlag.Short('h')
kingpin.Parse()
dsn := os.Getenv("DATA_SOURCE_NAME")
exporter := NewExporter(dsn, namespace)
defer func() {
exporter.db.Close()
}()
prometheus.MustRegister(exporter)
// Retrieve Pgpool-II version
v, err := queryVersion(exporter.db)
if err != nil {
level.Error(logger).Log("err", err)
}
pgpoolSemver = v
level.Info(logger).Log("msg", "Starting pgpool2_exporter", "version", version.Info(), "dsn", maskPassword(dsn))
level.Info(logger).Log("msg", "Listening on address", "address", *listenAddress)
http.Handle(*metricsPath, promhttp.Handler())
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(fmt.Sprintf(landingPage, *metricsPath)))
})
if err := http.ListenAndServe(*listenAddress, nil); err != nil {
level.Error(logger).Log("err", err)
os.Exit(1)
}
}
|
[
"\"DATA_SOURCE_NAME\""
] |
[] |
[
"DATA_SOURCE_NAME"
] |
[]
|
["DATA_SOURCE_NAME"]
|
go
| 1 | 0 | |
lib/streamlit/bootstrap.py
|
# -*- coding: utf-8 -*-
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import sys
import click
import tornado.ioloop
from streamlit import config
from streamlit import net_util
from streamlit import url_util
from streamlit import env_util
from streamlit import util
from streamlit.Report import Report
from streamlit.logger import get_logger
from streamlit.server.Server import Server
LOGGER = get_logger(__name__)
# Wait for 1 second before opening a browser. This gives old tabs a chance to
# reconnect.
# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.
BROWSER_WAIT_TIMEOUT_SEC = 1
def _set_up_signal_handler():
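    """Install handlers that stop the server on SIGTERM/SIGINT (plus SIGBREAK on Windows, SIGQUIT elsewhere)."""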
LOGGER.debug("Setting up signal handler")
def signal_handler(signal_number, stack_frame):
# The server will shut down its threads and stop the ioloop
Server.get_current().stop()
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
if sys.platform == "win32":
signal.signal(signal.SIGBREAK, signal_handler)
else:
signal.signal(signal.SIGQUIT, signal_handler)
def _fix_sys_path(script_path):
"""Add the script's folder to the sys path.
Python normally does this automatically, but since we exec the script
ourselves we need to do it instead.
"""
sys.path.insert(0, os.path.dirname(script_path))
def _fix_matplotlib_crash():
"""Set Matplotlib backend to avoid a crash.
The default Matplotlib backend crashes Python on OSX when run on a thread
that's not the main thread, so here we set a safer backend as a fix.
Users can always disable this behavior by setting the config
runner.fixMatplotlib = false.
This fix is OS-independent. We didn't see a good reason to make this
Mac-only. Consistency within Streamlit seemed more important.
"""
if config.get_option("runner.fixMatplotlib"):
try:
# TODO: a better option may be to set
# os.environ["MPLBACKEND"] = "Agg". We'd need to do this towards
# the top of __init__.py, before importing anything that imports
# pandas (which imports matplotlib). Alternately, we could set
# this environment variable in a new entrypoint defined in
# setup.py. Both of these introduce additional trickiness: they
# need to run without consulting streamlit.config.get_option,
# because this would import streamlit, and therefore matplotlib.
import matplotlib
matplotlib.use("Agg")
except ImportError:
pass
def _fix_tornado_crash():
"""Set default asyncio policy to be compatible with Tornado 6.
Tornado 6 (at least) is not compatible with the default
asyncio implementation on Windows. So here we
pick the older SelectorEventLoopPolicy when the OS is Windows
if the known-incompatible default policy is in use.
This has to happen as early as possible to make it a low priority and
overrideable
See: https://github.com/tornadoweb/tornado/issues/2608
FIXME: if/when tornado supports the defaults in asyncio,
remove and bump tornado requirement for py38
"""
if env_util.IS_WINDOWS and sys.version_info >= (3, 8):
import asyncio
try:
from asyncio import (
WindowsProactorEventLoopPolicy,
WindowsSelectorEventLoopPolicy,
)
except ImportError:
pass
# Not affected
else:
if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                # WindowsProactorEventLoopPolicy is not compatible with
                # Tornado 6; fall back to the pre-3.8 default of Selector
asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
def _fix_sys_argv(script_path, args):
"""sys.argv needs to exclude streamlit arguments and parameters
and be set to what a user's script may expect.
"""
import sys
sys.argv = [script_path] + list(args)
def _on_server_start(server):
_print_url()
def maybe_open_browser():
if config.get_option("server.headless"):
# Don't open browser when in headless mode.
return
if server.browser_is_connected:
# Don't auto-open browser if there's already a browser connected.
# This can happen if there's an old tab repeatedly trying to
            # connect, and it happens to succeed before we launch the browser.
return
if config.is_manually_set("browser.serverAddress"):
addr = config.get_option("browser.serverAddress")
else:
addr = "localhost"
util.open_browser(Report.get_url(addr))
# Schedule the browser to open using the IO Loop on the main thread, but
# only if no other browser connects within 1s.
ioloop = tornado.ioloop.IOLoop.current()
ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)
def _print_url():
title_message = "You can now view your Streamlit app in your browser."
named_urls = []
if config.is_manually_set("browser.serverAddress"):
named_urls = [
("URL", Report.get_url(config.get_option("browser.serverAddress")))
]
elif config.get_option("server.headless"):
named_urls = [
("Network URL", Report.get_url(net_util.get_internal_ip())),
("External URL", Report.get_url(net_util.get_external_ip())),
]
else:
named_urls = [
("Local URL", Report.get_url("localhost")),
("Network URL", Report.get_url(net_util.get_internal_ip())),
]
click.secho("")
click.secho(" %s" % title_message, fg="blue", bold=True)
click.secho("")
for url_name, url in named_urls:
url_util.print_url(url_name, url)
click.secho("")
def run(script_path, command_line, args):
"""Run a script in a separate thread and start a server for the app.
This starts a blocking ioloop.
Parameters
----------
script_path : str
command_line : str
args : [str]
"""
_fix_sys_path(script_path)
_fix_matplotlib_crash()
_fix_tornado_crash()
_fix_sys_argv(script_path, args)
# Install a signal handler that will shut down the ioloop
# and close all our threads
_set_up_signal_handler()
ioloop = tornado.ioloop.IOLoop.current()
# Create and start the server.
server = Server(ioloop, script_path, command_line)
server.start(_on_server_start)
    # (Must come after start(), because this starts a new thread and start() may
    # call sys.exit(), which doesn't kill other threads.)
server.add_preheated_report_session()
# Start the ioloop. This function will not return until the
# server is shut down.
ioloop.start()
|
[] |
[] |
[
"MPLBACKEND"
] |
[]
|
["MPLBACKEND"]
|
python
| 1 | 0 | |
build/android/test_runner.py
|
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain. It also conveniently sets the CHECKOUT_SOURCE_ROOT
environment variable.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(__file__)
SRC_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, os.pardir,
os.pardir))
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(SRC_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner # pylint: disable=W0406
from pylib.gtest import gtest_test_instance
def main():
# Set our own paths to the .isolate files.
# pylint: disable=protected-access
gtest_test_instance._DEFAULT_ISOLATE_FILE_PATHS.update({
'audio_decoder_unittests':
'webrtc/modules/audio_decoder_unittests.isolate',
'common_audio_unittests':
'webrtc/common_audio/common_audio_unittests.isolate',
'common_video_unittests':
'webrtc/common_video/common_video_unittests.isolate',
'modules_tests': 'webrtc/modules/modules_tests.isolate',
'modules_unittests': 'webrtc/modules/modules_unittests.isolate',
'rtc_unittests': 'webrtc/rtc_unittests.isolate',
'system_wrappers_unittests':
'webrtc/system_wrappers/system_wrappers_unittests.isolate',
'test_support_unittests': 'webrtc/test/test_support_unittests.isolate',
'tools_unittests': 'webrtc/tools/tools_unittests.isolate',
'video_capture_tests':
'webrtc/modules/video_capture/video_capture_tests.isolate',
'video_engine_tests': 'webrtc/video_engine_tests.isolate',
'video_engine_core_unittests':
'webrtc/video_engine/video_engine_core_unittests.isolate',
'voice_engine_unittests':
'webrtc/voice_engine/voice_engine_unittests.isolate',
'webrtc_nonparallel_tests': 'webrtc/webrtc_nonparallel_tests.isolate',
'webrtc_perf_tests': 'webrtc/webrtc_perf_tests.isolate',
})
# Override environment variable to make it possible for the scripts to find
# the root directory (our symlinking of the Chromium build toolchain would
# otherwise make them fail to do so).
os.environ['CHECKOUT_SOURCE_ROOT'] = SRC_DIR
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
[] |
[] |
[
"CHECKOUT_SOURCE_ROOT"
] |
[]
|
["CHECKOUT_SOURCE_ROOT"]
|
python
| 1 | 0 | |
repos/system_upgrade/el7toel8/actors/selinux/selinuxcontentscanner/tests/component_test.py
|
import os
import pytest
from leapp.snactor.fixture import current_actor_context
from leapp.models import SELinuxModule, SELinuxModules, SELinuxCustom, SELinuxFacts, SELinuxRequestRPMs
from leapp.libraries.stdlib import api, run, CalledProcessError
from leapp.reporting import Report
TEST_MODULES = [
["400", "mock1"],
["99", "mock1"],
["200", "mock1"],
["400", "mock2"],
["999", "mock3"]
]
SEMANAGE_COMMANDS = [
['fcontext', '-t', 'httpd_sys_content_t', '"/web(/.*)?"'],
['fcontext', '-t', 'cgdcbxd_unit_file_t', '"cgdcbxd/(/.*)?"'],
['port', '-t', 'http_port_t', '-p', 'udp', '81'],
['permissive', 'abrt_t']
]
testmoduledir = "tests/mock_modules/"
def _run_cmd(cmd, logmsg="", split=False):
try:
return run(cmd, split=split).get("stdout", "")
except CalledProcessError as e:
# Only report issues when they are explicitly described.
# This way expected failures are not reported.
if logmsg:
api.current_logger().warning("%s: %s", logmsg, str(e.stderr))
@pytest.fixture(scope="function")
def destructive_selinux_env():
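    """Install the mock SELinux modules and semanage customizations, then clean them up after the test."""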
tests_dir = os.path.join(os.path.realpath(__file__).rsplit(os.path.sep, 2)[0], testmoduledir)
for priority, module in TEST_MODULES:
_run_cmd(["semodule", "-X", priority, "-i", os.path.join(tests_dir, module + ".cil")],
"Error installing mock module {} before test".format(module))
for command in SEMANAGE_COMMANDS:
_run_cmd(["semanage", command[0], "-a"] + command[1:],
"Error applying selinux customizations before test")
yield
for command in SEMANAGE_COMMANDS[:-1]:
_run_cmd(["semanage", command[0], "-d"] + command[1:],
"Error removing selinux customizations after testing")
for priority, module in reversed(TEST_MODULES + [["400", "permissive_abrt_t"]]):
_run_cmd(["semodule", "-X", priority, "-r", module],
"Error removing selinux module {} after testing".format(module))
def find_module(selinuxmodules, name, priority):
return next((module for module in selinuxmodules.modules
if (module.name == name and module.priority == int(priority))), None)
def find_semanage_rule(rules, rule):
return next((r for r in rules if all(word in r for word in rule)), None)
@pytest.mark.skipif(os.getenv("DESTRUCTIVE_TESTING", False) in [False, "0"],
reason='Test disabled by default because it would modify the system')
def test_SELinuxContentScanner(current_actor_context, destructive_selinux_env):
expected_data = {'policy': 'targeted',
'mls_enabled': True,
'enabled': True,
'runtime_mode': 'enforcing',
'static_mode': 'enforcing'}
current_actor_context.feed(SELinuxFacts(**expected_data))
current_actor_context.run()
modules = current_actor_context.consume(SELinuxModules)[0]
api.current_logger().warning("Modules: %s", str(modules))
assert modules
    # check that all modules installed during test setup were reported
for priority, name in TEST_MODULES:
if priority not in ('100', '200'):
assert find_module(modules, name, priority)
rpms = current_actor_context.consume(SELinuxRequestRPMs)[0]
assert rpms
# modules with priority 200 should only originate in "<module_name>-selinux" rpms
assert "mock1-selinux" in rpms.to_keep
# mock1 contains container related type
assert "container-selinux" in rpms.to_install
custom = current_actor_context.consume(SELinuxCustom)[0]
assert custom
# the second command contains removed type and should be discarded
assert find_semanage_rule(custom.removed, SEMANAGE_COMMANDS[1])
# the rest of the commands should be reported (except for the last which will show up in modules)
assert find_semanage_rule(custom.commands, SEMANAGE_COMMANDS[0])
assert find_semanage_rule(custom.commands, SEMANAGE_COMMANDS[2])
|
[] |
[] |
[
"DESTRUCTIVE_TESTING"
] |
[]
|
["DESTRUCTIVE_TESTING"]
|
python
| 1 | 0 | |
api/go-tonicpow/examples/goals/get/get.go
|
package main
import (
"log"
"os"
"github.com/tonicpow/go-tonicpow"
)
func main() {
// Load the api client
client, err := tonicpow.NewClient(
tonicpow.WithAPIKey(os.Getenv("TONICPOW_API_KEY")),
tonicpow.WithEnvironmentString(os.Getenv("TONICPOW_ENVIRONMENT")),
)
if err != nil {
log.Fatalf("error in NewClient: %s", err.Error())
}
// Get a goal
var goal *tonicpow.Goal
goal, _, err = client.GetGoal(13)
if err != nil {
log.Fatalf("error in GetGoal: %s", err.Error())
}
log.Printf("goal: %s", goal.Name)
}
|
[
"\"TONICPOW_API_KEY\"",
"\"TONICPOW_ENVIRONMENT\""
] |
[] |
[
"TONICPOW_API_KEY",
"TONICPOW_ENVIRONMENT"
] |
[]
|
["TONICPOW_API_KEY", "TONICPOW_ENVIRONMENT"]
|
go
| 2 | 0 | |
baseline/cnn/fashion_mnist_cnn_error_learning.py
|
# -*- coding:utf-8 -*-
from __future__ import print_function
import keras
import tensorflow as tf
from keras import backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping
import os
# Helper libraries
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from base_utils import plot_confusion_matrix, AdvancedLearnignRateScheduler, get_random_eraser
from networks import create_base_cnn_model_with_kernels
###################################################################
### Configure TensorFlow ###
###################################################################
# Seed value
# Apparently you may use different seed values at each stage
seed_value = 0
# 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
os.environ['PYTHONHASHSEED']=str(seed_value)
# 2. Set the `python` built-in pseudo-random generator at a fixed value
random.seed(seed_value)
# 3. Set the `numpy` pseudo-random generator at a fixed value
np.random.seed(seed_value)
# 4. Set the `tensorflow` pseudo-random generator at a fixed value
tf.set_random_seed(seed_value)
# 5. Configure a new global `tensorflow` session
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
###################################################################
### Load training and test data ###
###################################################################
num_classes = 10
# image dimensions
img_rows, img_cols = 28, 28
classes = ["Top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle Boot"]
def load_data_from_keras():
    # get data using keras.datasets; the train and test sets are split automatically
(x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
return (x_train, y_train), (x_test, y_test)
(x_train, y_train), (x_test, y_test) = load_data_from_keras()
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=0)
if K.image_data_format() == 'channels_first':
x_train_with_channels = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_val_with_channels = x_val.reshape(x_val.shape[0], 1, img_rows, img_cols)
x_test_with_channels = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train_with_channels = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_val_with_channels = x_val.reshape(x_val.shape[0], img_rows, img_cols, 1)
x_test_with_channels = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
print("train feature shape = ", x_train_with_channels.shape)
print("validation feature shape = ", x_val_with_channels.shape)
print("test feature shape = ", x_test_with_channels.shape)
x_train_with_channels = x_train_with_channels.astype("float32") / 255.0
x_val_with_channels = x_val_with_channels.astype("float32") / 255.0
x_test_with_channels = x_test_with_channels.astype("float32") / 255.0
y_train_categorical = keras.utils.to_categorical(y_train, num_classes)
y_val_categorical = keras.utils.to_categorical(y_val, num_classes)
y_test_categorical = keras.utils.to_categorical(y_test, num_classes)
###################################################################
### Build the model ###
###################################################################
kernels = [3,3]
model = create_base_cnn_model_with_kernels(input_shape, kernels=kernels, optimizer="adamax")
model.summary()
model_name = "base_cnn_error_learning"
loss_value = 'val_acc'
checkpoint_path = './weights/{}_weight.ckpt'.format(model_name)
checkpoint_dir = os.path.dirname(checkpoint_path)
callbacks = [
# Early stopping definition
EarlyStopping(monitor=loss_value, patience=10, verbose=1),
    # Decay the learning rate (decayRatio=0.9) when the monitored metric stalls
AdvancedLearnignRateScheduler(monitor=loss_value, patience=10, verbose=1, mode='auto', decayRatio=0.9),
# Saving best model
ModelCheckpoint(checkpoint_path, monitor=loss_value, save_best_only=True, verbose=1),
]
###################################################################
### Train the model ###
###################################################################
load = True
batch_size = 100
epochs = 50
data_augmentation = False
pixel_level = True
Training = False
Fine_tuning = True
if load:
model.load_weights(checkpoint_path)
if Training:
if not data_augmentation:
model_train_history = model.fit(x_train_with_channels, y_train_categorical,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_val_with_channels, y_val_categorical),
callbacks=callbacks)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False, # randomly flip images
preprocessing_function=get_random_eraser(probability = 0.33))
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(x_train_with_channels)
# Fit the model on the batches generated by datagen.flow().
model_train_history = model.fit_generator(datagen.flow(x_train_with_channels, y_train_categorical),
steps_per_epoch=x_train_with_channels.shape[0] // batch_size,
validation_data=(x_val_with_channels, y_val_categorical),
epochs=epochs,
verbose=1,
workers=4,
callbacks=callbacks)
###################################################################
### Save training history ###
###################################################################
if Training:
    # Only available when a new model was actually trained above.
    print(model_train_history.history['acc'])
    print(model_train_history.history['val_acc'])
    print(model_train_history.history['loss'])
    print(model_train_history.history['val_loss'])
    # Save
    filename = "{}_result.npz".format(model_name)
    save_dict = {
        "acc": model_train_history.history['acc'],
        "val_acc": model_train_history.history['val_acc'],
        "loss": model_train_history.history['loss'],
        "val_loss": model_train_history.history['val_loss']
    }
    output = os.path.join("./results/", filename)
    np.savez(output, **save_dict)
    # Plot training & validation accuracy values
    plt.plot(model_train_history.history['acc'])
    plt.plot(model_train_history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.grid(True)
    plt.savefig('./images/{}_acc.png'.format(model_name))
    plt.show()
    # Plot training & validation loss values
    plt.plot(model_train_history.history['loss'])
    plt.plot(model_train_history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper left')
    plt.grid(True)
    plt.savefig('./images/{}_loss.png'.format(model_name))
    plt.show()
epochs = 1
if Fine_tuning:
#for epoch in range(epochs):
for epoch in range(epochs):
prediction_classes = model.predict(x_test_with_channels)
y_pred = np.argmax(prediction_classes, axis=1)
y_pred_categorical = keras.utils.to_categorical(y_pred, num_classes)
print(classification_report(y_test, y_pred))
counter = 0
error = 0
pred_counter = 0
pred_error = 0
print("prediction_classes: ", prediction_classes.shape)
x_train_preds = []
y_train_preds = []
for ii in range(len(prediction_classes)):
if(prediction_classes[ii][y_pred[ii]] > 0.99):
x_train_preds.append(x_test_with_channels[ii])
y_train_preds.append(y_pred[ii])
x_train_preds = np.array(x_train_preds)
y_train_preds = np.array(y_train_preds)
y_train_preds = keras.utils.to_categorical(y_train_preds, num_classes)
for ii in range(len(prediction_classes)):
if(prediction_classes[ii][y_pred[ii]] <= 0.99):
if (y_pred[ii] == y_test[ii]):
counter = counter + 1
else:
error = error + 1
sim_all = np.array(prediction_classes[ii])
sorted_dela_errors = np.argsort(sim_all)
most_important_errors = sorted_dela_errors[-3:]
#print(most_important_errors)
print(y_pred[ii], " ", y_test[ii], " ", most_important_errors, sorted_dela_errors, prediction_classes[ii][sorted_dela_errors])
max_preds = {}
for kk in range(len(most_important_errors)):
if prediction_classes[ii][most_important_errors[kk]] < 0.1:
continue
x_test_append = []
y_test_append = []
#x_test_append_2 = []
#y_test_append_2 = []
for jj in range(batch_size):
x_test_append.append(x_test_with_channels[ii])
y_test_append.append(most_important_errors[kk])
#x_test_append_2.append(x_test_with_channels[ii])
#y_test_append_2.append(sorted_dela_errors[1])
#break
x_test_append = np.array(x_test_append)
y_test_append = np.array(y_test_append)
#x_test_append_2 = np.array(x_test_append_2)
#y_test_append_2 = np.array(y_test_append_2)
y_test_append_categorical = keras.utils.to_categorical(y_test_append, num_classes)
#y_test_append_2_categorical = keras.utils.to_categorical(y_test_append_2, num_classes)
model_1 = keras.models.clone_model(model)
model_1.set_weights(model.get_weights())
model_1.compile(loss=keras.losses.categorical_crossentropy,
optimizer="sgd",
metrics=['accuracy'])
# model_2 = keras.models.clone_model(model)
# model_2.set_weights(model.get_weights())
# model_2.compile(loss=keras.losses.categorical_crossentropy,
# optimizer="sgd",
# metrics=['accuracy'])
model_train_history_1 = model_1.fit(np.vstack([x_test_append, x_train_with_channels]), np.vstack([y_test_append_categorical, y_train_categorical]),
batch_size=batch_size,
epochs=5,
verbose=2,
validation_data=(x_train_preds, y_train_preds))
# model_train_history_2 = model_2.fit(np.vstack([x_test_append_2, x_train_with_channels]), np.vstack([y_test_append_2_categorical, y_train_categorical]),
# batch_size=batch_size,
# epochs=5,
# verbose=2,
# validation_data=(x_val_with_channels, y_val_categorical))
max_preds[most_important_errors[kk]] = max(model_train_history_1.history['val_acc'])
pred = y_pred[ii]
# if max(model_train_history_1.history['val_acc']) > max(model_train_history_2.history['val_acc']):
# pred = most_important_errors[0]
# else:
# pred = most_important_errors[1]
max_acc = -1000000
for (d, x) in max_preds.items():
if x >= max_acc:
pred = d
max_acc = x
if(pred == y_test[ii]):
pred_counter = pred_counter + 1
else:
pred_error = pred_error + 1
print(y_pred[ii], " ", y_test[ii], " ", pred)
print("counter: ", counter)
print("error: ", error)
print("pred_counter: ", pred_counter)
print("pred_error: ", pred_error)
###################################################################
### Model analysis ###
###################################################################
# prediction_classes = model.predict(x_test_with_channels)
# y_val_pred = np.argmax(prediction_classes, axis=1)
#
#
# prediction_threshold = 0.99
# val_pred_under_results = {}
# val_pred_upper_results = {}
#
# val_pred_under_counters = {}
# val_pred_upper_counters = {}
#
# val_pred_counters = {}
#
# for ii in range(len(x_test_with_channels)):
# if y_val_pred[ii] != y_test[ii]:
# if prediction_classes[ii][y_val_pred[ii]] > prediction_threshold:
# # if y_val_pred[ii] not in val_pred_upper_results.keys():
# # val_preds = {}
# # val_preds[y_val[ii]] = 1
# # val_pred_upper_results[y_val_pred[ii]] = val_preds
# # else:
# # val_preds = val_pred_upper_results[y_val_pred[ii]]
# # if y_val[ii] in val_preds.keys():
# # val_pred = val_preds[y_val[ii]]
# # val_pred = val_pred + 1
# # val_preds[y_val[ii]] = val_pred
# # else:
# # val_preds[y_val[ii]] = 1
# # val_pred_upper_results[y_val_pred[ii]] = val_preds
#
# if "wrong" in val_pred_upper_counters.keys():
# val_pred_upper_counters["wrong"] = val_pred_upper_counters["wrong"] + 1
# else:
# val_pred_upper_counters["wrong"] = 1
#
# else:
# # if y_val_pred[ii] not in val_pred_under_results.keys():
# # val_preds = {}
# # val_preds[y_val[ii]] = 1
# # val_pred_under_results[y_val_pred[ii]] = val_preds
# # else:
# # val_preds = val_pred_under_results[y_val_pred[ii]]
# # if y_val[ii] in val_preds.keys():
# # val_pred = val_preds[y_val[ii]]
# # val_pred = val_pred + 1
# # val_preds[y_val[ii]] = val_pred
# # else:
# # val_preds[y_val[ii]] = 1
# # val_pred_under_results[y_val_pred[ii]] = val_preds
#
# if "wrong" in val_pred_under_counters.keys():
# val_pred_under_counters["wrong"] = val_pred_under_counters["wrong"] + 1
# else:
# val_pred_under_counters["wrong"] = 1
#
#
# # Count the misclassified samples per class
# if y_test[ii] not in val_pred_counters.keys():
# val_counters = {}
# val_counters["wrong"] = 1
# val_pred_counters[y_test[ii]] = val_counters
# else:
# val_counters = val_pred_counters[y_test[ii]]
# if "wrong" in val_counters.keys():
# val_counter = val_counters["wrong"]
# val_counter = val_counter + 1
# val_counters["wrong"] = val_counter
# else:
# val_counters["wrong"] = 1
#
# else:
# # Count the correctly classified samples per class
# if y_test[ii] not in val_pred_counters.keys():
# val_counters = {}
# val_counters["right"] = 1
# val_pred_counters[y_test[ii]] = val_counters
# else:
# val_counters = val_pred_counters[y_test[ii]]
# if "right" in val_counters.keys():
# val_counter = val_counters["right"]
# val_counter = val_counter + 1
# val_counters["right"] = val_counter
# else:
# val_counters["right"] = 1
#
#
# if prediction_classes[ii][y_val_pred[ii]] > prediction_threshold:
# if "right" in val_pred_upper_counters.keys():
# val_pred_upper_counters["right"] = val_pred_upper_counters["right"] + 1
# else:
# val_pred_upper_counters["right"] = 1
# else:
# if "right" in val_pred_under_counters.keys():
# val_pred_under_counters["right"] = val_pred_under_counters["right"] + 1
# else:
# val_pred_under_counters["right"] = 1
#
# # if prediction_classes[ii][y_val_pred[ii]] > prediction_threshold:
# # if y_val_pred[ii] not in val_pred_upper_results.keys():
# # val_preds = {}
# # val_preds[y_val[ii]] = 1
# # val_pred_upper_results[y_val_pred[ii]] = val_preds
# # else:
# # val_preds = val_pred_upper_results[y_val_pred[ii]]
# # if y_val[ii] in val_preds.keys():
# # val_pred = val_preds[y_val[ii]]
# # val_pred = val_pred + 1
# # val_preds[y_val[ii]] = val_pred
# # else:
# # val_preds[y_val[ii]] = 1
# # val_pred_upper_results[y_val_pred[ii]] = val_preds
# #
# # else:
# if y_val_pred[ii] not in val_pred_under_results.keys():
# val_preds = {}
# val_preds[y_test[ii]] = 1
# val_pred_under_results[y_val_pred[ii]] = val_preds
# else:
# val_preds = val_pred_under_results[y_val_pred[ii]]
# if y_test[ii] in val_preds.keys():
# val_pred = val_preds[y_test[ii]]
# val_pred = val_pred + 1
# val_preds[y_test[ii]] = val_pred
# else:
# val_preds[y_test[ii]] = 1
# val_pred_under_results[y_val_pred[ii]] = val_preds
#
# # # Count the misclassified samples per class
# # if y_val[ii] not in val_pred_counters.keys():
# # val_counters = {}
# # val_counters["wrong"] = 1
# # val_pred_counters[y_val[ii]] = val_counters
# # else:
# # val_counters = val_pred_counters[y_val[ii]]
# # if "wrong" in val_counters.keys():
# # val_counter = val_counters["wrong"]
# # val_counter = val_counter + 1
# # val_counters["wrong"] = val_counter
# # else:
# # val_counters["wrong"] = 1
#
#
# print(val_pred_upper_results)
# print(val_pred_under_results)
#
# print("val_pred_upper_counters: ", val_pred_upper_counters)
# print("val_pred_under_counters: ", val_pred_under_counters)
#
# print(val_pred_counters)
#
# filename = './images/{}_confusion_matrix.png'.format(model_name)
# plot_confusion_matrix(y_test, y_val_pred, classes=classes, filename=filename, normalize=False,
# title='confusion matrix')
# print(classification_report(y_test, y_val_pred))
#
#
#
# prediction_classes = model.predict(x_test_with_channels)
# y_pred = np.argmax(prediction_classes, axis=1)
#
#
# # return ax
# filename = './images/{}_confusion_matrix.png'.format(model_name)
# # Plot confusion matrix
# plot_confusion_matrix(y_test, y_pred, classes=classes, filename=filename, normalize=False,
# title='confusion matrix')
#
# print(classification_report(y_test, y_pred))
#
#
#
#
# pred_right_counter = 0
# pred_change_right_counter = 0
#
# counter = 0
# for ii in range(len(x_test_with_channels)):
# if prediction_classes[ii][y_pred[ii]] <= prediction_threshold:
# counter += 1
# if y_pred[ii] in val_pred_under_results.keys():
#
# # Randomly select predictions above the error rate and modify them
# rnd = random.random()
# #print(rnd)
# #print(val_pred_under_counters["wrong"] / (val_pred_under_counters["wrong"] + val_pred_under_counters["right"]))
# #if rnd > (val_pred_under_counters["right"] / (val_pred_under_counters["wrong"] + val_pred_under_counters["right"])):
#
# # Number of misclassifications into the other classes
# val_pred_under = val_pred_under_results[y_pred[ii]]
#
# wrong_sum = 0.0
# for k, v in val_pred_under.items():
# wrong_sum = wrong_sum + val_pred_under[k]
#
# dict = sorted(val_pred_under.items(), key=lambda d: d[1], reverse=True)
# #print(wrong_sum, " ", dict)
#
# select_rnd = random.random()
#
# select_index = y_pred
#
# prob = 0.0
# jj = 0
# for v in dict:
#
# if prob < select_rnd < prob + (float(v[1]) / wrong_sum):
# select_index = int(v[0])
# #break
# #print(select_rnd, " ", prob, " ", prob + (float(v[1]) / wrong_sum))
# prob = prob + (float(v[1]) / wrong_sum)
#
# print(y_pred[ii], " ", y_test[ii], " ", select_index)
#
# if(y_pred[ii] == y_test[ii]):
# pred_right_counter += 1
#
# if (select_index == y_test[ii]):
# pred_change_right_counter += 1
# print("counter: ", counter)
# print("pred_right_counter ", pred_right_counter)
# print("pred_change_right_counter ", pred_change_right_counter)
#
#
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
tracker/yarn/src/main/java/org/apache/hadoop/yarn/dmlc/Client.java
|
package org.apache.hadoop.yarn.dmlc;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import sun.misc.Signal;
import sun.misc.SignalHandler;
public class Client {
// logger
private static final Log LOG = LogFactory.getLog(Client.class);
// permission for temp file
private static final FsPermission permTemp = new FsPermission("777");
// configuration
private YarnConfiguration conf = new YarnConfiguration();
// hdfs handler
private FileSystem dfs;
// cached maps
private Map<String, String> cacheFiles = new java.util.HashMap<String, String>();
    // environment variable to set up cache files
private String cacheFileArg = "";
// args to pass to application master
private String appArgs = "";
    // HDFS path to store temporary results
private String tempdir = "/tmp";
// user name
private String userName = "";
// user credentials
private Credentials credentials = null;
// job name
private String jobName = "";
// queue
private String queue = "default";
// ApplicationMaster classpath
private String appCp = null;
// ApplicationMaster env
private Map<String, String> env = new java.util.HashMap<String, String>();
/**
* constructor
* @throws IOException
*/
private Client() throws IOException {
conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName()); // add this
conf.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());// add this
conf.addResource(new Path(System.getenv("HADOOP_CONF_DIR") +"/core-site.xml"));
conf.addResource(new Path(System.getenv("HADOOP_CONF_DIR") +"/hdfs-site.xml"));
dfs = FileSystem.get(conf);
userName = UserGroupInformation.getCurrentUser().getShortUserName();
credentials = UserGroupInformation.getCurrentUser().getCredentials();
}
/**
* setup security token given current user
     * @return the ByteBuffer containing the security tokens
* @throws IOException
*/
private ByteBuffer setupTokens() throws IOException {
DataOutputBuffer buffer = new DataOutputBuffer();
String loc = System.getenv().get("HADOOP_TOKEN_FILE_LOCATION");
if ((loc != null && loc.trim().length() > 0)
|| (!UserGroupInformation.isSecurityEnabled())) {
this.credentials.writeTokenStorageToStream(buffer);
} else {
// Note: Credentials class is marked as LimitedPrivate for HDFS and MapReduce
Credentials credentials = new Credentials();
String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
if (tokenRenewer == null || tokenRenewer.length() == 0) {
throw new IOException(
"Can't get Master Kerberos principal for the RM to use as renewer");
}
// For now, only getting tokens for the default file-system.
final Token<?> tokens[] = dfs.addDelegationTokens(tokenRenewer, credentials);
if (tokens != null) {
for (Token<?> token : tokens) {
LOG.info("Got dt for " + dfs.getUri() + "; " + token);
}
}
credentials.writeTokenStorageToStream(buffer);
}
return ByteBuffer.wrap(buffer.getData(), 0, buffer.getLength());
}
/**
* setup all the cached files
*
     * @param appId
     *            the application id of the job being submitted
* @return the resource map
* @throws IOException
*/
private Map<String, LocalResource> setupCacheFiles(ApplicationId appId) throws IOException {
// create temporary dmlc directory
Path tmpPath = new Path(this.tempdir);
if (!dfs.exists(tmpPath)) {
dfs.mkdirs(tmpPath, permTemp);
LOG.info("HDFS temp directory do not exist, creating.. " + tmpPath);
}
tmpPath = new Path(tmpPath + "/temp-dmlc-yarn-" + appId);
if (dfs.exists(tmpPath)) {
dfs.delete(tmpPath, true);
}
// create temporary directory
FileSystem.mkdirs(dfs, tmpPath, permTemp);
StringBuilder cstr = new StringBuilder();
Map<String, LocalResource> rmap = new java.util.HashMap<String, LocalResource>();
for (Map.Entry<String, String> e : cacheFiles.entrySet()) {
LocalResource r = Records.newRecord(LocalResource.class);
Path path = new Path(e.getValue());
// copy local data to temporary folder in HDFS
if (!e.getValue().startsWith("hdfs://")) {
Path dst = new Path("hdfs://" + tmpPath + "/"+ path.getName());
dfs.copyFromLocalFile(false, true, path, dst);
dfs.setPermission(dst, permTemp);
dfs.deleteOnExit(dst);
path = dst;
}
FileStatus status = dfs.getFileStatus(path);
r.setResource(ConverterUtils.getYarnUrlFromPath(path));
r.setSize(status.getLen());
r.setTimestamp(status.getModificationTime());
r.setType(LocalResourceType.FILE);
r.setVisibility(LocalResourceVisibility.APPLICATION);
rmap.put(e.getKey(), r);
cstr.append(" -file \"");
cstr.append(path.toString());
cstr.append('#');
cstr.append(e.getKey());
cstr.append("\"");
}
dfs.deleteOnExit(tmpPath);
this.cacheFileArg = cstr.toString();
return rmap;
}
/**
     * get the environment variables for the container
     *
     * @return the environment variables for the child containers
*/
private Map<String, String> getEnvironment() {
// Setup environment variables
if (appCp != null) {
env.put("CLASSPATH", appCp);
} else {
StringBuilder cpath = new StringBuilder()
.append(Environment.CLASSPATH.$$())
.append(File.pathSeparatorChar)
.append("." + File.pathSeparator + "*");
for (String c : conf.getStrings(
YarnConfiguration.YARN_APPLICATION_CLASSPATH,
YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
cpath.append(File.pathSeparatorChar)
.append(c.trim());
}
env.put("CLASSPATH", cpath.toString());
}
for (Map.Entry<String, String> e : System.getenv().entrySet()) {
if (e.getKey().startsWith("DMLC_")) {
env.put(e.getKey(), e.getValue());
}
if (e.getKey().startsWith("AWS_")) {
env.put(e.getKey(), e.getValue());
}
if (e.getKey().startsWith("rabit_")) {
env.put(e.getKey(), e.getValue());
}
if (e.getKey() == "LIBHDFS_OPTS") {
env.put(e.getKey(), e.getValue());
}
if (e.getKey().equals("LD_LIBRARY_PATH")) {
env.put(e.getKey(), e.getValue());
}
}
LOG.debug(env);
return env;
}
/**
* initialize the settings
*
* @param args
*/
private void initArgs(String[] args) {
// directly pass all arguments except args0
StringBuilder sargs = new StringBuilder("");
for (int i = 0; i < args.length; ++i) {
if (args[i].equals("-file")) {
String[] arr = args[++i].split("#");
if (arr.length == 1) {
cacheFiles.put(new Path(arr[0]).getName(), arr[0]);
} else {
cacheFiles.put(arr[1], arr[0]);
}
} else if(args[i].equals("-jobname")) {
this.jobName = args[++i];
} else if(args[i].equals("-tempdir")) {
this.tempdir = args[++i];
} else if(args[i].equals("-queue")) {
this.queue = args[++i];
} else if(args[i].equals("-appcp")) {
this.appCp = args[++i];
} else if(args[i].equals("-env")) {
sargs.append(" ");
sargs.append(args[i]);
sargs.append(" ");
sargs.append(args[i+1]);
String[] pair = args[++i].split("=", 2);
env.put(pair[0], (pair.length == 1) ? "" : pair[1]);
} else {
sargs.append(" ");
sargs.append(args[i]);
}
}
this.appArgs = sargs.toString();
}
private void run(String[] args) throws Exception {
if (args.length == 0) {
System.out.println("Usage: [options] [commands..]");
System.out.println("options: [-file filename] [-appcp appClasspath]");
return;
}
this.initArgs(args);
// Create yarnClient
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(conf);
yarnClient.start();
// Create application via yarnClient
YarnClientApplication app = yarnClient.createApplication();
// Set up the container launch context for the application master
ContainerLaunchContext amContainer = Records
.newRecord(ContainerLaunchContext.class);
ApplicationSubmissionContext appContext = app
.getApplicationSubmissionContext();
// Submit application
ApplicationId appId = appContext.getApplicationId();
//add ctrl+c signal handler
CtrlCHandler handler = new CtrlCHandler(appId, yarnClient);
Signal intSignal = new Signal("INT");
Signal.handle(intSignal, handler);
// setup security token
amContainer.setTokens(this.setupTokens());
// setup cache-files and environment variables
amContainer.setLocalResources(this.setupCacheFiles(appId));
amContainer.setEnvironment(this.getEnvironment());
String cmd = Environment.JAVA_HOME.$$() + "/bin/java"
+ " -Xmx900m"
+ " org.apache.hadoop.yarn.dmlc.ApplicationMaster"
+ this.cacheFileArg + ' ' + this.appArgs + " 1>"
+ ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
+ " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr";
LOG.debug(cmd);
amContainer.setCommands(Collections.singletonList(cmd));
// Set up resource type requirements for ApplicationMaster
Resource capability = Records.newRecord(Resource.class);
capability.setMemory(1024);
capability.setVirtualCores(1);
LOG.info("jobname=" + this.jobName + ",username=" + this.userName);
appContext.setApplicationName(jobName + ":DMLC-YARN");
appContext.setAMContainerSpec(amContainer);
appContext.setResource(capability);
appContext.setQueue(queue);
//appContext.setUser(userName);
LOG.info("Submitting application " + appId);
yarnClient.submitApplication(appContext);
ApplicationReport appReport = yarnClient.getApplicationReport(appId);
YarnApplicationState appState = appReport.getYarnApplicationState();
while (appState != YarnApplicationState.FINISHED
&& appState != YarnApplicationState.KILLED
&& appState != YarnApplicationState.FAILED) {
Thread.sleep(100);
appReport = yarnClient.getApplicationReport(appId);
appState = appReport.getYarnApplicationState();
}
System.out.println("Application " + appId + " finished with"
+ " state " + appState + " at " + appReport.getFinishTime());
if (!appReport.getFinalApplicationStatus().equals(
FinalApplicationStatus.SUCCEEDED)) {
System.err.println(appReport.getDiagnostics());
System.out.println("Available queues:");
for (QueueInfo q : yarnClient.getAllQueues()) {
System.out.println(q.getQueueName());
}
yarnClient.killApplication(appId);
}
}
class CtrlCHandler implements SignalHandler{
private ApplicationId appId;
private YarnClient yarnClient;
public CtrlCHandler(ApplicationId appId, YarnClient yarnClient){
this.appId = appId;
this.yarnClient = yarnClient;
}
public void handle(Signal signal){
try{
yarnClient.killApplication(appId);
}catch (Exception e){
System.out.println("yarn client exception");
}
}
}
public static void main(String[] args) throws Exception {
new Client().run(args);
}
}
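// Invocation sketch (assumed file names; flags taken from initArgs above):
//
//     java org.apache.hadoop.yarn.dmlc.Client -jobname myjob -queue default \
//         -file ./launcher.sh ./launcher.sh extra-args
//
// -file ships ./launcher.sh as a cached resource, -jobname/-queue set the YARN
// application name and queue, and any remaining arguments are forwarded to the
// ApplicationMaster via appArgs.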
|
[
"\"HADOOP_CONF_DIR\"",
"\"HADOOP_CONF_DIR\""
] |
[] |
[
"HADOOP_CONF_DIR"
] |
[]
|
["HADOOP_CONF_DIR"]
|
java
| 1 | 0 | |
scrapy/settings/default_settings.py
|
"""
This module contains the default values for all settings used by Scrapy.
For more information about these settings you can read the settings
documentation in docs/topics/settings.rst
Scrapy developers, if you add a setting here remember to:
* add it in alphabetical order
* group similar settings without leaving blank lines
* add its documentation to the available settings documentation
(docs/topics/settings.rst)
"""
import os
import sys
from importlib import import_module
from os.path import join, abspath, dirname
AJAXCRAWL_ENABLED = False
BOT_NAME = 'scrapybot'
CLOSESPIDER_TIMEOUT = 0
CLOSESPIDER_PAGECOUNT = 0
CLOSESPIDER_ITEMCOUNT = 0
CLOSESPIDER_ERRORCOUNT = 0
COMMANDS_MODULE = ''
COMPRESSION_ENABLED = True
CONCURRENT_ITEMS = 100
CONCURRENT_REQUESTS = 16
CONCURRENT_REQUESTS_PER_DOMAIN = 8
CONCURRENT_REQUESTS_PER_IP = 0
COOKIES_ENABLED = True
COOKIES_DEBUG = False
DEFAULT_ITEM_CLASS = 'scrapy.item.Item'
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en',
}
DEPTH_LIMIT = 0
DEPTH_STATS = True
DEPTH_PRIORITY = 0
DNSCACHE_ENABLED = True
DOWNLOAD_DELAY = 0
DOWNLOAD_HANDLERS = {}
DOWNLOAD_HANDLERS_BASE = {
'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',
'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',
's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',
'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',
}
DOWNLOAD_TIMEOUT = 180 # 3mins
DOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m
DOWNLOAD_WARNSIZE = 32*1024*1024 # 32m
DOWNLOADER = 'scrapy.core.downloader.Downloader'
DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'
DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'
DOWNLOADER_MIDDLEWARES = {}
DOWNLOADER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
'scrapy.contrib.downloadermiddleware.ajaxcrawl.AjaxCrawlMiddleware': 560,
'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
# Downloader side
}
DOWNLOADER_STATS = True
DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
try:
EDITOR = os.environ['EDITOR']
except KeyError:
if sys.platform == 'win32':
EDITOR = '%s -m idlelib.idle'
else:
EDITOR = 'vi'
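# Editor's note (illustrative, not part of the upstream defaults): the EDITOR
# environment variable takes precedence over the platform fallbacks above, so
# an invocation along the lines of
#
#     EDITOR=vim scrapy edit myspider
#
# would open the spider in vim instead of the default editor. The spider name
# is a placeholder.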
EXTENSIONS = {}
EXTENSIONS_BASE = {
'scrapy.contrib.corestats.CoreStats': 0,
'scrapy.telnet.TelnetConsole': 0,
'scrapy.contrib.memusage.MemoryUsage': 0,
'scrapy.contrib.memdebug.MemoryDebugger': 0,
'scrapy.contrib.closespider.CloseSpider': 0,
'scrapy.contrib.feedexport.FeedExporter': 0,
'scrapy.contrib.logstats.LogStats': 0,
'scrapy.contrib.spiderstate.SpiderState': 0,
'scrapy.contrib.throttle.AutoThrottle': 0,
}
FEED_URI = None
FEED_URI_PARAMS = None # a function to extend uri arguments
FEED_FORMAT = 'jsonlines'
FEED_STORE_EMPTY = False
FEED_STORAGES = {}
FEED_STORAGES_BASE = {
'': 'scrapy.contrib.feedexport.FileFeedStorage',
'file': 'scrapy.contrib.feedexport.FileFeedStorage',
'stdout': 'scrapy.contrib.feedexport.StdoutFeedStorage',
's3': 'scrapy.contrib.feedexport.S3FeedStorage',
'ftp': 'scrapy.contrib.feedexport.FTPFeedStorage',
}
FEED_EXPORTERS = {}
FEED_EXPORTERS_BASE = {
'json': 'scrapy.contrib.exporter.JsonItemExporter',
'jsonlines': 'scrapy.contrib.exporter.JsonLinesItemExporter',
'jl': 'scrapy.contrib.exporter.JsonLinesItemExporter',
'csv': 'scrapy.contrib.exporter.CsvItemExporter',
'xml': 'scrapy.contrib.exporter.XmlItemExporter',
'marshal': 'scrapy.contrib.exporter.MarshalItemExporter',
'pickle': 'scrapy.contrib.exporter.PickleItemExporter',
}
HTTPCACHE_ENABLED = False
HTTPCACHE_DIR = 'httpcache'
HTTPCACHE_IGNORE_MISSING = False
HTTPCACHE_STORAGE = 'scrapy.contrib.httpcache.FilesystemCacheStorage'
HTTPCACHE_EXPIRATION_SECS = 0
HTTPCACHE_IGNORE_HTTP_CODES = []
HTTPCACHE_IGNORE_SCHEMES = ['file']
HTTPCACHE_DBM_MODULE = 'anydbm'
HTTPCACHE_POLICY = 'scrapy.contrib.httpcache.DummyPolicy'
HTTPCACHE_GZIP = False
ITEM_PROCESSOR = 'scrapy.contrib.pipeline.ItemPipelineManager'
ITEM_PIPELINES = {}
ITEM_PIPELINES_BASE = {}
LOG_ENABLED = True
LOG_ENCODING = 'utf-8'
LOG_FORMATTER = 'scrapy.logformatter.LogFormatter'
LOG_STDOUT = False
LOG_LEVEL = 'DEBUG'
LOG_FILE = None
LOG_UNSERIALIZABLE_REQUESTS = False
LOGSTATS_INTERVAL = 60.0
MAIL_HOST = 'localhost'
MAIL_PORT = 25
MAIL_FROM = 'scrapy@localhost'
MAIL_PASS = None
MAIL_USER = None
MEMDEBUG_ENABLED = False # enable memory debugging
MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown
MEMUSAGE_ENABLED = False
MEMUSAGE_LIMIT_MB = 0
MEMUSAGE_NOTIFY_MAIL = []
MEMUSAGE_REPORT = False
MEMUSAGE_WARNING_MB = 0
METAREFRESH_ENABLED = True
METAREFRESH_MAXDELAY = 100
NEWSPIDER_MODULE = ''
RANDOMIZE_DOWNLOAD_DELAY = True
REDIRECT_ENABLED = True
REDIRECT_MAX_TIMES = 20 # uses Firefox default setting
REDIRECT_PRIORITY_ADJUST = +2
REFERER_ENABLED = True
RETRY_ENABLED = True
RETRY_TIMES = 2 # initial response + 2 retries = 3 requests
RETRY_HTTP_CODES = [500, 502, 503, 504, 400, 408]
RETRY_PRIORITY_ADJUST = -1
ROBOTSTXT_OBEY = False
SCHEDULER = 'scrapy.core.scheduler.Scheduler'
SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'
SPIDER_MANAGER_CLASS = 'scrapy.spidermanager.SpiderManager'
SPIDER_MIDDLEWARES = {}
SPIDER_MIDDLEWARES_BASE = {
# Engine side
'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
# Spider side
}
SPIDER_MODULES = []
STATS_CLASS = 'scrapy.statscol.MemoryStatsCollector'
STATS_DUMP = True
STATSMAILER_RCPTS = []
TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))
URLLENGTH_LIMIT = 2083
USER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__
TELNETCONSOLE_ENABLED = 1
TELNETCONSOLE_PORT = [6023, 6073]
TELNETCONSOLE_HOST = '127.0.0.1'
SPIDER_CONTRACTS = {}
SPIDER_CONTRACTS_BASE = {
'scrapy.contracts.default.UrlContract': 1,
'scrapy.contracts.default.ReturnsContract': 2,
'scrapy.contracts.default.ScrapesContract': 3,
}
|
[] |
[] |
[
"EDITOR"
] |
[]
|
["EDITOR"]
|
python
| 1 | 0 | |
src/main/java/com/captech/embeddedServerExample/Launch.java
|
package com.captech.embeddedServerExample;
import java.io.File;
import org.apache.catalina.WebResourceRoot;
import org.apache.catalina.core.StandardContext;
import org.apache.catalina.startup.Tomcat;
import org.apache.catalina.webresources.DirResourceSet;
import org.apache.catalina.webresources.StandardRoot;
public class Launch {
public static void main(String[] args) throws Exception {
//String webappDirLocation = "src/main/webapp/";
String webappDirLocation = "./";
Tomcat tomcat = new Tomcat();
        //The port that we should run on can be set via an environment variable
//Look for that variable and default to 8080 if it isn't there.
String webPort = System.getenv("PORT");
if(webPort == null || webPort.isEmpty()) {
webPort = "8080";
}
tomcat.setPort(Integer.valueOf(webPort));
StandardContext ctx = (StandardContext) tomcat.addWebapp("", new File(webappDirLocation).getAbsolutePath());
System.out.println("configuring app with basedir: " + new File("./" + webappDirLocation).getAbsolutePath());
// If you want to declare an alternative location for your "WEB-INF/classes" dir
// (Servlet 3.0 annotation will also work)
/*
File additionWebInfClasses = new File("target/classes");
WebResourceRoot resources = new StandardRoot(ctx);
resources.addPreResources(new DirResourceSet(resources, "/WEB-INF/classes",
additionWebInfClasses.getAbsolutePath(), "/"));
ctx.setResources(resources);
*/
tomcat.start();
tomcat.getServer().await();
}
}
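// Usage sketch (assumed command line, not part of the original class): the server
// listens on the PORT environment variable and falls back to 8080, e.g.
//
//     PORT=9090 java -cp <your-classpath> com.captech.embeddedServerExample.Launch
//
// would serve the webapp on port 9090; omitting PORT serves it on 8080.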
|
[
"\"PORT\""
] |
[] |
[
"PORT"
] |
[]
|
["PORT"]
|
java
| 1 | 0 | |
flask/cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import ast
import inspect
import os
import re
import ssl
import sys
import traceback
from functools import update_wrapper
from operator import attrgetter
from threading import Lock, Thread
import click
from werkzeug.utils import import_string
from . import __version__
from ._compat import getargspec, itervalues, reraise, text_type
from .globals import current_app
from .helpers import get_debug_flag, get_env, get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ('app', 'application'):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [
v for v in itervalues(module.__dict__) if isinstance(v, Flask)
]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
'Detected multiple Flask applications in module "{module}". Use '
'"FLASK_APP={module}:name" to specify the correct '
'one.'.format(module=module.__name__)
)
# Search for app factory functions.
for attr_name in ('create_app', 'make_app'):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
'Detected factory "{factory}" in module "{module}", but '
'could not call it without arguments. Use '
'"FLASK_APP=\'{module}:{factory}(args)\'" to specify '
'arguments.'.format(
factory=attr_name, module=module.__name__
)
)
raise NoAppException(
'Failed to find Flask application or factory in module "{module}". '
'Use "FLASK_APP={module}:name to specify one.'.format(
module=module.__name__
)
)
def call_factory(script_info, app_factory, arguments=()):
"""Takes an app factory, a ``script_info` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
args_spec = getargspec(app_factory)
arg_names = args_spec.args
arg_defaults = args_spec.defaults
if 'script_info' in arg_names:
return app_factory(*arguments, script_info=script_info)
elif arguments:
return app_factory(*arguments)
elif not arguments and len(arg_names) == 1 and arg_defaults is None:
return app_factory(script_info)
return app_factory()
def _called_with_wrong_args(factory):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param factory: the factory function that was called
:return: true if the call failed
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is factory.__code__:
# in the factory, it was called successfully
return False
tb = tb.tb_next
# didn't reach the factory
return True
finally:
del tb
def find_app_by_string(script_info, module, app_name):
"""Checks if the given string is a variable name or a function. If it is a
function, it checks for specified arguments and whether it takes a
``script_info`` argument and calls the function with the appropriate
arguments.
"""
from flask import Flask
match = re.match(r'^ *([^ ()]+) *(?:\((.*?) *,? *\))? *$', app_name)
if not match:
raise NoAppException(
'"{name}" is not a valid variable name or function '
'expression.'.format(name=app_name)
)
name, args = match.groups()
try:
attr = getattr(module, name)
except AttributeError as e:
raise NoAppException(e.args[0])
if inspect.isfunction(attr):
if args:
try:
args = ast.literal_eval('({args},)'.format(args=args))
            except (ValueError, SyntaxError) as e:
raise NoAppException(
'Could not parse the arguments in '
'"{app_name}".'.format(e=e, app_name=app_name)
)
else:
args = ()
try:
app = call_factory(script_info, attr, args)
except TypeError as e:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
'{e}\nThe factory "{app_name}" in module "{module}" could not '
'be called with the specified arguments.'.format(
e=e, app_name=app_name, module=module.__name__
)
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
'A valid Flask application was not obtained from '
'"{module}:{app_name}".'.format(
module=module.__name__, app_name=app_name
)
)
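# Sketch of accepted app strings (illustrative, derived from the regex above and
# the module:name split performed in ScriptInfo.load_app; names are placeholders):
#   FLASK_APP="hello:app"                -> plain attribute lookup
#   FLASK_APP="hello:create_app"         -> factory called without arguments
#   FLASK_APP="hello:create_app('dev')"  -> factory called with literal arguments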
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == '.py':
path = fname
if os.path.basename(path) == '__init__':
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, '__init__.py')):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return '.'.join(module_name[::-1])
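# Worked example (hypothetical layout, editor's addition): given src/pkg/app.py
# where src/pkg/ contains an __init__.py but src/ does not,
# prepare_import('src/pkg/app.py') inserts the absolute path of src/ into
# sys.path and returns 'pkg.app'.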
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise NoAppException(
'While importing "{name}", an ImportError was raised:'
'\n\n{tb}'.format(name=module_name, tb=traceback.format_exc())
)
elif raise_if_not_found:
raise NoAppException(
'Could not import "{name}".'.format(name=module_name)
)
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
message = 'Flask %(version)s\nPython %(python_version)s'
click.echo(message % {
'version': __version__,
'python_version': sys.version,
}, color=ctx.color)
ctx.exit()
version_option = click.Option(
['--version'],
help='Show the flask version',
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True
)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None,
set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get('FLASK_APP')
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
app = None
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (self.app_import_path.split(':', 1) + [None])[:2]
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ('wsgi.py', 'app.py'):
import_name = prepare_import(path)
app = locate_app(self, import_name, None,
raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
'Could not locate a Flask application. You did not provide '
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
For information as of why this is useful see :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
        shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(self, add_default_commands=True, create_app=None,
add_version_option=True, load_dotenv=True,
set_debug_flag=True, **extra):
params = list(extra.pop('params', None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points('flask.commands'):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
# However, we will not do so silently because that would confuse
# users.
traceback.print_exc()
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ['FLASK_RUN_FROM_CLI'] = 'true'
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app,
set_debug_flag=self.set_debug_flag)
kwargs['obj'] = obj
kwargs.setdefault('auto_envvar_prefix', 'FLASK')
return super(FlaskGroup, self).main(*args, **kwargs)
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path):].lstrip(os.sep)) == other
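# Quick illustration (editor's addition): _path_is_ancestor('/base', '/base/sub')
# is True, while _path_is_ancestor('/base', '/other/sub') is False, because only
# in the first case does stripping and re-joining reproduce the original path.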
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
Changes the current working directory to the location of the first file
found, with the assumption that it is in the top level project directory
and will be where the Python path should import local packages from.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile('.env') or os.path.isfile('.flaskenv'):
click.secho(
' * Tip: There are .env or .flaskenv files present.'
' Do "pip install python-dotenv" to use them.',
fg='yellow')
return
if path is not None:
return dotenv.load_dotenv(path)
new_dir = None
for name in ('.env', '.flaskenv'):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path)
if new_dir and os.getcwd() != new_dir:
os.chdir(new_dir)
return new_dir is not None # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
return
if app_import_path is not None:
message = ' * Serving Flask app "{0}"'.format(app_import_path)
if not eager_loading:
message += ' (lazy loading)'
click.echo(message)
click.echo(' * Environment: {0}'.format(env))
if env == 'production':
click.secho(
' WARNING: Do not use the development server in a production'
' environment.', fg='red')
click.secho(' Use a production WSGI server instead.', dim=True)
if debug is not None:
click.echo(' * Debug mode: {0}'.format('on' if debug else 'off'))
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = 'path'
def __init__(self):
self.path_type = click.Path(
exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == 'adhoc':
try:
import OpenSSL
except ImportError:
raise click.BadParameter(
'Using ad-hoc certificates requires pyOpenSSL.',
ctx, param)
return value
obj = import_string(value, silent=True)
if sys.version_info < (2, 7, 9):
if obj:
return obj
else:
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get('cert')
is_adhoc = cert == 'adhoc'
if sys.version_info < (2, 7, 9):
is_context = cert and not isinstance(cert, (text_type, bytes))
else:
is_context = isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.',
ctx, param)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.',
ctx, param)
if not cert:
raise click.BadParameter(
'"--cert" must also be specified.',
ctx, param)
ctx.params['cert'] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter(
'Required when using "--cert".',
ctx, param)
return value
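# Summary of the combinations handled above (for reference only; error
# messages are paraphrased):
#   --cert cert.pem --key key.pem  -> ctx.params['cert'] = ('cert.pem', 'key.pem')
#   --cert adhoc --key key.pem     -> BadParameter: --key is not used with adhoc
#   --cert cert.pem (no --key)     -> BadParameter: --key is required
#   --key key.pem (no --cert)      -> BadParameter: --cert must also be specified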
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--cert', type=CertParamType(),
help='Specify a certificate file to use HTTPS.')
@click.option('--key',
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key, expose_value=False,
help='The key file to use when specifying a certificate.')
@click.option('--reload/--no-reload', default=None,
help='Enable or disable the reloader. By default the reloader '
'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
help='Enable or disable the debugger. By default the debugger '
'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
help='Enable or disable eager loading. By default eager '
'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=True,
help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
with_threads, cert):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
if eager_loading is None:
eager_loading = not reload
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(host, port, app, use_reloader=reload, use_debugger=debugger,
threaded=with_threads, ssl_context=cert)
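# Example invocations (hypothetical app module hello.py; the flags map to the
# options declared above):
#   $ FLASK_APP=hello.py flask run
#   $ FLASK_APP=hello.py flask run --host 0.0.0.0 --port 8000 --no-reload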
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nApp: %s [%s]\nInstance: %s' % (
sys.version,
sys.platform,
app.import_name,
app.env,
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
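# Illustrative session (app name and instance path are hypothetical):
#   $ FLASK_APP=hello.py flask shell
#   Python 3.x.x ... on linux
#   App: hello [production]
#   Instance: /path/to/instance
# The banner lines come from sys.version, sys.platform, app.import_name,
# app.env and app.instance_path as assembled above.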
@click.command('routes', short_help='Show the routes for the app.')
@click.option(
'--sort', '-s',
type=click.Choice(('endpoint', 'methods', 'rule', 'match')),
default='endpoint',
help=(
'Method to sort routes by. "match" is the order that Flask will match '
'routes when dispatching a request.'
)
)
@click.option(
'--all-methods',
is_flag=True,
help="Show HEAD and OPTIONS methods."
)
@with_appcontext
def routes_command(sort, all_methods):
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo('No routes were registered.')
return
ignored_methods = set(() if all_methods else ('HEAD', 'OPTIONS'))
if sort in ('endpoint', 'rule'):
rules = sorted(rules, key=attrgetter(sort))
elif sort == 'methods':
rules = sorted(rules, key=lambda rule: sorted(rule.methods))
rule_methods = [
', '.join(sorted(rule.methods - ignored_methods)) for rule in rules
]
headers = ('Endpoint', 'Methods', 'Rule')
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = '{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}'.format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*('-' * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
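# The output is a plain left-aligned table built from `row` above; for example
# (endpoints and rules are hypothetical, column widths adapt to the data):
#   Endpoint  Methods  Rule
#   --------  -------  -----------------------
#   index     GET      /
#   static    GET      /static/<path:filename>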
cli = FlaskGroup(help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd='export' if os.name == 'posix' else 'set',
prefix='$ ' if os.name == 'posix' else '> '
))
def main(as_module=False):
args = sys.argv[1:]
if as_module:
this_module = 'flask'
if sys.version_info < (2, 7):
this_module += '.cli'
name = 'python -m ' + this_module
# Python rewrites "python -m flask" to the path to the file in argv.
# Restore the original command so that the reloader works.
sys.argv = ['-m', this_module] + args
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
|
[] |
[] |
[
"WERKZEUG_RUN_MAIN",
"FLASK_RUN_FROM_CLI",
"PYTHONSTARTUP",
"FLASK_APP"
] |
[]
|
["WERKZEUG_RUN_MAIN", "FLASK_RUN_FROM_CLI", "PYTHONSTARTUP", "FLASK_APP"]
|
python
| 4 | 0 | |
test/test_main.py
|
import pytest
import os
@pytest.fixture
def app():
# Arrange
os.environ['PROJECT'] = 'mock_project'
os.environ['SUBSCRIPTION'] = 'mock_sub'
os.environ['DATASET'] = 'mock_dataset'
os.environ['TABLE'] = 'mock_table'
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'test/mock-credentials.json'
os.environ['GOOGLE_CLOUD_PROJECT'] = 'mock_project'
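    # Note: the mock environment variables above are set before importing
    # app.main so that any module-level os.environ lookups in that module see
    # these values rather than the real ones.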
import app.main
app.main.app.testing = True
return app.main.app.test_client()
def test_health(app):
# Arrange
# Act
r = app.get('/')
# Assert
assert r.status_code == 200
assert 'ok' in r.data.decode('utf-8')
|
[] |
[] |
[
"PROJECT",
"SUBSCRIPTION",
"GOOGLE_APPLICATION_CREDENTIALS",
"TABLE",
"DATASET",
"GOOGLE_CLOUD_PROJECT"
] |
[]
|
["PROJECT", "SUBSCRIPTION", "GOOGLE_APPLICATION_CREDENTIALS", "TABLE", "DATASET", "GOOGLE_CLOUD_PROJECT"]
|
python
| 6 | 0 | |
flaml/tune/result.py
|
'''
Copyright 2020 The Ray Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This source file is adapted here because ray does not fully support Windows.
Copyright (c) Microsoft Corporation.
'''
import os
# yapf: disable
# __sphinx_doc_begin__
# (Optional/Auto-filled) training is terminated. Filled only if not provided.
DONE = "done"
# (Optional) Enum for user controlled checkpoint
SHOULD_CHECKPOINT = "should_checkpoint"
# (Auto-filled) The hostname of the machine hosting the training process.
HOSTNAME = "hostname"
# (Auto-filled) The auto-assigned id of the trial.
TRIAL_ID = "trial_id"
# (Auto-filled) The auto-assigned experiment tag of the trial.
EXPERIMENT_TAG = "experiment_tag"
# (Auto-filled) The node ip of the machine hosting the training process.
NODE_IP = "node_ip"
# (Auto-filled) The pid of the training process.
PID = "pid"
# (Optional) Default (anonymous) metric when using tune.report(x)
DEFAULT_METRIC = "_metric"
# (Optional) Mean reward for current training iteration
EPISODE_REWARD_MEAN = "episode_reward_mean"
# (Optional) Mean loss for training iteration
MEAN_LOSS = "mean_loss"
# (Optional) Mean loss for training iteration
NEG_MEAN_LOSS = "neg_mean_loss"
# (Optional) Mean accuracy for training iteration
MEAN_ACCURACY = "mean_accuracy"
# Number of episodes in this iteration.
EPISODES_THIS_ITER = "episodes_this_iter"
# (Optional/Auto-filled) Accumulated number of episodes for this trial.
EPISODES_TOTAL = "episodes_total"
# Number of timesteps in this iteration.
TIMESTEPS_THIS_ITER = "timesteps_this_iter"
# (Auto-filled) Accumulated number of timesteps for this entire trial.
TIMESTEPS_TOTAL = "timesteps_total"
# (Auto-filled) Time in seconds this iteration took to run.
# This may be overridden to override the system-computed time difference.
TIME_THIS_ITER_S = "time_this_iter_s"
# (Auto-filled) Accumulated time in seconds for this entire trial.
TIME_TOTAL_S = "time_total_s"
# (Auto-filled) The index of this training iteration.
TRAINING_ITERATION = "training_iteration"
# __sphinx_doc_end__
# yapf: enable
DEFAULT_EXPERIMENT_INFO_KEYS = ("trainable_name", EXPERIMENT_TAG, TRIAL_ID)
DEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, TIMESTEPS_TOTAL,
MEAN_ACCURACY, MEAN_LOSS)
# Make sure this doesn't regress
AUTO_RESULT_KEYS = (
TRAINING_ITERATION,
TIME_TOTAL_S,
EPISODES_TOTAL,
TIMESTEPS_TOTAL,
NODE_IP,
HOSTNAME,
PID,
TIME_TOTAL_S,
TIME_THIS_ITER_S,
"timestamp",
"experiment_id",
"date",
"time_since_restore",
"iterations_since_restore",
"timesteps_since_restore",
"config",
)
# __duplicate__ is a magic keyword used internally to
# avoid double-logging results when using the Function API.
RESULT_DUPLICATE = "__duplicate__"
# __trial_info__ is a magic keyword used internally to pass trial_info
# to the Trainable via the constructor.
TRIAL_INFO = "__trial_info__"
# __stdout_file__/__stderr_file__ are magic keywords used internally
# to pass log file locations to the Trainable via the constructor.
STDOUT_FILE = "__stdout_file__"
STDERR_FILE = "__stderr_file__"
# Where Tune writes result files by default
DEFAULT_RESULTS_DIR = (os.environ.get("TEST_TMPDIR")
or os.environ.get("TUNE_RESULT_DIR")
or os.path.expanduser("~/ray_results"))
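# Example of how DEFAULT_RESULTS_DIR above resolves (illustrative values):
#   TEST_TMPDIR=/tmp/test                          -> /tmp/test
#   TUNE_RESULT_DIR=/data/tune (TEST_TMPDIR unset) -> /data/tune
#   neither variable set                           -> ~/ray_results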
# Meta file about status under each experiment directory; can be
# parsed by automlboard if it exists.
JOB_META_FILE = "job_status.json"
# Meta file about status under each trial directory; can be parsed
# by automlboard if it exists.
EXPR_META_FILE = "trial_status.json"
# File that stores parameters of the trial.
EXPR_PARAM_FILE = "params.json"
# Pickle File that stores parameters of the trial.
EXPR_PARAM_PICKLE_FILE = "params.pkl"
# File that stores the progress of the trial.
EXPR_PROGRESS_FILE = "progress.csv"
# File that stores results of the trial.
EXPR_RESULT_FILE = "result.json"
# Config prefix when using Analysis.
CONFIG_PREFIX = "config/"
|
[] |
[] |
[
"TEST_TMPDIR",
"TUNE_RESULT_DIR"
] |
[]
|
["TEST_TMPDIR", "TUNE_RESULT_DIR"]
|
python
| 2 | 0 | |
compute/common_instances.go
|
package compute
import (
"context"
"fmt"
"log"
"os"
"strings"
"sync"
"github.com/databrickslabs/databricks-terraform/common"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
)
var (
oncePool sync.Once
commonInstancePool *InstancePoolAndStats
)
// CommonRuntimeVersion returns the recommended Spark version
func CommonRuntimeVersion() string {
return "6.6.x-scala2.11"
}
// CommonInstanceType returns the smallest recommended instance type
func CommonInstanceType() string {
cloudEnv := os.Getenv("CLOUD_ENV")
if strings.ToLower(cloudEnv) == "azure" {
return "Standard_DS3_v2"
}
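// Illustration (not part of the original logic): CLOUD_ENV=azure, in any
// casing, selects "Standard_DS3_v2" above; any other or unset value falls
// through to the AWS default below.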
// TODO: create a method on ClustersAPI to give
// cloud specific delta-cache enabled instance by default.
return "m4.large"
}
// CommonInstancePoolID returns the ID of the common instance pool that is intended to be used for internal testing purposes
func CommonInstancePoolID() string {
if commonInstancePool != nil {
return commonInstancePool.InstancePoolID
}
client := common.CommonEnvironmentClient()
oncePool.Do(func() { // atomic
log.Printf("[INFO] Initializing common instance pool")
ctx := context.Background()
instancePools := NewInstancePoolsAPI(ctx, client)
clusters := NewClustersAPI(ctx, client)
currentUserPool := fmt.Sprintf("Terraform Integration Test by %s", os.Getenv("USER"))
pools, err := instancePools.List()
if err != nil {
log.Printf("[ERROR] Cannot list instance pools: %v", err)
panic(err)
}
for _, existingPool := range pools.InstancePools {
if existingPool.InstancePoolName == currentUserPool {
log.Printf(
"[INFO] Using existing instance pool: %s/#setting/clusters/instance-pools/view/%s",
client.Host, existingPool.InstancePoolID)
commonInstancePool = &existingPool
return
}
}
instancePool := InstancePool{
PreloadedSparkVersions: []string{CommonRuntimeVersion()},
NodeTypeID: clusters.GetSmallestNodeType(NodeTypeRequest{
LocalDisk: true,
}),
InstancePoolName: currentUserPool,
MaxCapacity: 10,
IdleInstanceAutoTerminationMinutes: 15,
}
if !client.IsAzure() {
instancePool.AwsAttributes = &InstancePoolAwsAttributes{
Availability: AwsAvailabilitySpot,
}
}
newPool, err := instancePools.Create(instancePool)
if err != nil {
log.Printf("[ERROR] Cannot create instance pool: %v", err)
panic(err)
}
log.Printf("[INFO] Created common instance pool: %s/#setting/clusters/instance-pools/view/%s",
client.Host, newPool.InstancePoolID)
commonInstancePool = &newPool
})
return commonInstancePool.InstancePoolID
}
// CommonEnvironmentClientWithRealCommandExecutor is good for internal tests
func CommonEnvironmentClientWithRealCommandExecutor() *common.DatabricksClient {
client := common.CommonEnvironmentClient()
client.WithCommandExecutor(func(ctx context.Context, _ *common.DatabricksClient) common.CommandExecutor {
return NewCommandsAPI(ctx, client)
})
return client
}
// NewTinyClusterInCommonPool creates new cluster for short-lived purposes
func NewTinyClusterInCommonPool() (c ClusterInfo, err error) {
randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
ctx := context.Background()
clusters := NewClustersAPI(ctx, CommonEnvironmentClientWithRealCommandExecutor())
c, err = clusters.Create(Cluster{
NumWorkers: 1,
ClusterName: "Terraform " + randomName,
SparkVersion: CommonRuntimeVersion(),
InstancePoolID: CommonInstancePoolID(),
IdempotencyToken: "tf-" + randomName,
AutoterminationMinutes: 20,
})
return
}
// NewTinyClusterInCommonPoolPossiblyReused is recommended to be used for testing only
func NewTinyClusterInCommonPoolPossiblyReused() (c ClusterInfo) {
randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
currentCluster := "TerraformIntegrationTest"
ctx := context.Background()
clusters := NewClustersAPI(ctx, CommonEnvironmentClientWithRealCommandExecutor())
c, err := clusters.GetOrCreateRunningCluster(currentCluster, Cluster{
NumWorkers: 1,
ClusterName: currentCluster,
SparkVersion: CommonRuntimeVersion(),
InstancePoolID: CommonInstancePoolID(),
IdempotencyToken: "tf-" + randomName,
AutoterminationMinutes: 20,
})
if err != nil {
panic(err)
}
return
}
|
[
"\"CLOUD_ENV\"",
"\"USER\""
] |
[] |
[
"USER",
"CLOUD_ENV"
] |
[]
|
["USER", "CLOUD_ENV"]
|
go
| 2 | 0 | |
docs/source/conf.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# simplesat documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 8 13:08:34 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.graphviz',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'simplesat'
copyright = '2016, Enthought, Inc.'
author = 'Enthought, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2.0'
# The full version, including alpha/beta/rc tags.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'simplesatdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'simplesat.tex', 'simplesat Documentation',
'Enthought, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'simplesat', 'simplesat Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'simplesat', 'simplesat Documentation',
author, 'simplesat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
fx_get_terminal.py
|
# Program to display the MXN:USD exchange rate
# for a range of dates.
# Program imports
######################
import os
import requests
import pandas as pd
# Dates for the calculation
########################
print("\n Busqueda de FX para Solventar Obligaciones: \n")
fecha_inicial = input("Fecha Inicial de Busqueda yyyy-mm-dd: ")
fecha_final = input("Fecha Final de Busqueda yyyy-mm-dd: ")
# Connection to Banxico
####################
token = os.environ.get("token_banxico")
# Banxico query token
obligaciones = "SF60653" # FX rate to settle obligations (Banxico series "FX Para Solventar Obligaciones")
# Banxico download series key
# Data download function
##############################
def descarga_bmx_serie(serie, fechainicio, fechafin, token):
# The query parameters are appended to the Banxico API URL
url = ("https://www.banxico.org.mx/SieAPIRest/service/v1/series/"
+ serie
+ "/datos/"
+ fechainicio
+ "/"
+ fechafin
)
# Headers must be passed with the request
headers = {"Bmx-Token": token}
# Sent as a request using the GET method
response = requests.get(url, headers=headers)
# The response status code is requested from the server.
status = response.status_code
if status == 200:
# If the status is OK, create the dataframe
raw_data = response.json()
# The response is saved as a variable.
data = raw_data["bmx"]["series"][0]["datos"]
# The JSON is filtered
# The dictionary containing the data is accessed
global df
# Make the df variable global so it can be accessed later
df = pd.DataFrame(data)
# Create a dataframe with the information
df["dato"] = df["dato"].apply(lambda x: float(x))
# Convert the values to floats instead of strings
df["fecha"] = pd.to_datetime(df["fecha"], format="%d/%m/%Y")
# Convert the dates to datetime format
df.columns = ['Fecha', 'Tipo de Cambio']
# Rename the "dato" column to "Tipo de Cambio" (exchange rate)
return(df)
else:
# If the status is bad, print the error to the terminal
print(status)
# Executing the download request
#####################################
dolares_bmx = descarga_bmx_serie(obligaciones,
str(fecha_inicial),
str(fecha_final),
token)
# Display the information without the index
########################################
print("\n")
print(df.to_string(index=False))
print("\n")
|
[] |
[] |
[
"token_banxico"
] |
[]
|
["token_banxico"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"os"
"regexp"
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/alecthomas/kingpin"
foundation "github.com/estafette/estafette-foundation"
"github.com/rs/zerolog/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/go-acme/lego/v3/certificate"
"github.com/go-acme/lego/v3/lego"
"github.com/go-acme/lego/v3/providers/dns/cloudflare"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8sruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
const annotationLetsEncryptCertificate string = "estafette.io/letsencrypt-certificate"
const annotationLetsEncryptCertificateHostnames string = "estafette.io/letsencrypt-certificate-hostnames"
const annotationLetsEncryptCertificateCopyToAllNamespaces string = "estafette.io/letsencrypt-certificate-copy-to-all-namespaces"
const annotationLetsEncryptCertificateLinkedSecret string = "estafette.io/letsencrypt-certificate-linked-secret"
const annotationLetsEncryptCertificateUploadToCloudflare string = "estafette.io/letsencrypt-certificate-upload-to-cloudflare"
const annotationLetsEncryptCertificateState string = "estafette.io/letsencrypt-certificate-state"
// LetsEncryptCertificateState represents the state of the secret with respect to Let's Encrypt certificates
type LetsEncryptCertificateState struct {
Enabled string `json:"enabled"`
Hostnames string `json:"hostnames"`
CopyToAllNamespaces bool `json:"copyToAllNamespaces"`
UploadToCloudflare bool `json:"uploadToCloudflare"`
LastRenewed string `json:"lastRenewed"`
LastAttempt string `json:"lastAttempt"`
}
var (
appgroup string
app string
version string
branch string
revision string
buildDate string
goVersion = runtime.Version()
)
var (
cfAPIKey = kingpin.Flag("cloudflare-api-key", "The API key to connect to cloudflare.").Envar("CF_API_KEY").Required().String()
cfAPIEmail = kingpin.Flag("cloudflare-api-email", "The API email address to connect to cloudflare.").Envar("CF_API_EMAIL").Required().String()
daysBeforeRenewal = kingpin.Flag("days-before-renewal", "Number of days after which to renew the certificate.").Default("60").OverrideDefaultFromEnvar("DAYS_BEFORE_RENEWAL").Int()
// seed random number
r = rand.New(rand.NewSource(time.Now().UnixNano()))
// define prometheus counter
certificateTotals = prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "estafette_letsencrypt_certificate_totals",
Help: "Number of generated certificates with LetsEncrypt.",
},
[]string{"namespace", "status", "initiator", "type"},
)
// set controller start time to watch only for newly created resources
controllerStartTime time.Time = time.Now().Local()
)
func init() {
// metrics have to be registered to be exposed
prometheus.MustRegister(certificateTotals)
}
func main() {
// parse command line parameters
kingpin.Parse()
// init log format from envvar ESTAFETTE_LOG_FORMAT
foundation.InitLoggingFromEnv(foundation.NewApplicationInfo(appgroup, app, version, branch, revision, buildDate))
// init /liveness endpoint
foundation.InitLiveness()
// create kubernetes api client
kubeClientConfig, err := rest.InClusterConfig()
if err != nil {
log.Fatal().Err(err)
}
// creates the clientset
kubeClientset, err := kubernetes.NewForConfig(kubeClientConfig)
if err != nil {
log.Fatal().Err(err)
}
// create the shared informer factory and use the client to connect to Kubernetes API
factory := informers.NewSharedInformerFactory(kubeClientset, 0)
// create a channel to stop the shared informers gracefully
stopper := make(chan struct{})
defer close(stopper)
// handle kubernetes API crashes
defer k8sruntime.HandleCrash()
foundation.InitMetrics()
gracefulShutdown, waitGroup := foundation.InitGracefulShutdownHandling()
// watch secrets for all namespaces
go watchSecrets(waitGroup, kubeClientset)
go listSecrets(waitGroup, kubeClientset)
// watch namespaces
watchNamespaces(waitGroup, kubeClientset, factory, stopper)
foundation.HandleGracefulShutdown(gracefulShutdown, waitGroup)
}
func watchSecrets(waitGroup *sync.WaitGroup, kubeClientset *kubernetes.Clientset) {
// loop indefinitely
for {
log.Info().Msg("Watching secrets for all namespaces...")
timeoutSeconds := int64(300)
watcher, err := kubeClientset.CoreV1().Secrets("").Watch(metav1.ListOptions{
TimeoutSeconds: &timeoutSeconds,
})
if err != nil {
log.Error().Err(err).Msg("WatchSecrets call failed")
} else {
// loop indefinitely, unless it errors
for {
event, ok := <-watcher.ResultChan()
if !ok {
log.Warn().Msg("Watcher for secrets is closed")
break
}
if event.Type == watch.Added || event.Type == watch.Modified {
secret, ok := event.Object.(*v1.Secret)
if !ok {
log.Warn().Msg("Watcher for secrets returns event object of incorrect type")
break
}
waitGroup.Add(1)
status, err := processSecret(kubeClientset, secret, fmt.Sprintf("watcher:%v", event.Type))
certificateTotals.With(prometheus.Labels{"namespace": secret.Namespace, "status": status, "initiator": "watcher", "type": "secret"}).Inc()
waitGroup.Done()
if err != nil {
log.Error().Err(err).Msgf("Processing secret %v.%v failed", secret.Name, secret.Namespace)
continue
}
}
}
}
// sleep a jittered random time between 23 and 36 seconds
sleepTime := applyJitter(30)
log.Info().Msgf("Sleeping for %v seconds...", sleepTime)
time.Sleep(time.Duration(sleepTime) * time.Second)
}
}
func listSecrets(waitGroup *sync.WaitGroup, kubeClientset *kubernetes.Clientset) {
// loop indefinitely
for {
// get secrets for all namespaces
log.Info().Msg("Listing secrets for all namespaces...")
secrets, err := kubeClientset.CoreV1().Secrets("").List(metav1.ListOptions{})
if err != nil {
log.Error().Err(err).Msg("ListSecrets call failed")
}
log.Info().Msgf("Cluster has %v secrets", len(secrets.Items))
// loop all secrets
for _, secret := range secrets.Items {
waitGroup.Add(1)
status, err := processSecret(kubeClientset, &secret, "poller")
certificateTotals.With(prometheus.Labels{"namespace": secret.Namespace, "status": status, "initiator": "poller", "type": "secret"}).Inc()
waitGroup.Done()
if err != nil {
log.Error().Err(err).Msgf("Processing secret %v.%v failed", secret.Name, secret.Namespace)
continue
}
}
// sleep random time around 900 seconds
sleepTime := applyJitter(900)
log.Info().Msgf("Sleeping for %v seconds...", sleepTime)
time.Sleep(time.Duration(sleepTime) * time.Second)
}
}
func watchNamespaces(waitGroup *sync.WaitGroup, kubeClientset *kubernetes.Clientset, factory informers.SharedInformerFactory, stopper chan struct{}) {
log.Info().Msg("Watching for new namespaces...")
namespacesInformer := factory.Core().V1().Namespaces().Informer()
namespacesInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
namespace, ok := obj.(*v1.Namespace)
if !ok {
log.Warn().Msg("Watcher for namespaces returns event object of incorrect type")
return
}
// compare CreationTimestamp and controllerStartTime and act only on latest events
isNewNamespace := namespace.CreationTimestamp.Sub(controllerStartTime).Seconds() > 0
if isNewNamespace {
log.Info().Msg("Listing secrets with 'copyToAllNamespaces' for all namespaces...")
secrets, err := kubeClientset.CoreV1().Secrets("").List(metav1.ListOptions{})
if err != nil {
log.Error().Err(err).Msgf("[%v] ListSecrets call failed", "ns-watcher:ADDED")
} else {
// loop all secrets
for _, secret := range secrets.Items {
copyToAllNamespacesValue, ok := secret.Annotations[annotationLetsEncryptCertificateCopyToAllNamespaces]
if ok {
shouldCopyToAllNamespaces, err := strconv.ParseBool(copyToAllNamespacesValue)
if err != nil {
log.Error().Err(err)
continue
}
if shouldCopyToAllNamespaces {
waitGroup.Add(1)
err = copySecretToNamespace(kubeClientset, &secret, namespace, "ns-watcher:ADDED")
waitGroup.Done()
if err != nil {
log.Error().Err(err)
continue
}
}
}
}
}
}
},
})
go namespacesInformer.Run(stopper)
}
func applyJitter(input int) (output int) {
deviation := int(0.25 * float64(input))
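// Illustrative note: with input 30 the deviation is 7 and the result falls in
// [23, 36]; with input 900 it falls roughly in [675, 1124].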
return input - deviation + r.Intn(2*deviation)
}
func getDesiredSecretState(secret *v1.Secret) (state LetsEncryptCertificateState) {
var ok bool
// get annotations or set default value
state.Enabled, ok = secret.Annotations[annotationLetsEncryptCertificate]
if !ok {
state.Enabled = "false"
}
state.Hostnames, ok = secret.Annotations[annotationLetsEncryptCertificateHostnames]
if !ok {
state.Hostnames = ""
}
copyToAllNamespacesValue, ok := secret.Annotations[annotationLetsEncryptCertificateCopyToAllNamespaces]
if ok {
b, err := strconv.ParseBool(copyToAllNamespacesValue)
if err == nil {
state.CopyToAllNamespaces = b
}
}
uploadToCloudflare, ok := secret.Annotations[annotationLetsEncryptCertificateUploadToCloudflare]
if ok {
b, err := strconv.ParseBool(uploadToCloudflare)
if err == nil {
state.UploadToCloudflare = b
}
}
return
}
func getCurrentSecretState(secret *v1.Secret) (state LetsEncryptCertificateState) {
// get state stored in annotations if present or set to empty struct
letsEncryptCertificateStateString, ok := secret.Annotations[annotationLetsEncryptCertificateState]
if !ok {
// couldn't find saved state, setting to default struct
state = LetsEncryptCertificateState{}
return
}
if err := json.Unmarshal([]byte(letsEncryptCertificateStateString), &state); err != nil {
// couldn't deserialize, setting to default struct
state = LetsEncryptCertificateState{}
return
}
// return deserialized state
return
}
func makeSecretChanges(kubeClientset *kubernetes.Clientset, secret *v1.Secret, initiator string, desiredState, currentState LetsEncryptCertificateState) (status string, err error) {
status = "failed"
// parse last renewed time from state
lastRenewed := time.Time{}
if currentState.LastRenewed != "" {
var err error
lastRenewed, err = time.Parse(time.RFC3339, currentState.LastRenewed)
if err != nil {
lastRenewed = time.Time{}
}
}
lastAttempt := time.Time{}
if currentState.LastAttempt != "" {
var err error
lastAttempt, err = time.Parse(time.RFC3339, currentState.LastAttempt)
if err != nil {
lastAttempt = time.Time{}
}
}
// check whether letsencrypt is enabled for this secret, hostnames are set, the last attempt was more than 15 minutes ago, and either the hostnames have changed or the certificate is older than the configured renewal age (default 60 days)
if desiredState.Enabled == "true" && len(desiredState.Hostnames) > 0 && time.Since(lastAttempt).Minutes() > 15 && (desiredState.Hostnames != currentState.Hostnames || time.Since(lastRenewed).Hours() > float64(*daysBeforeRenewal*24)) {
log.Info().Msgf("[%v] Secret %v.%v - Certificates are more than %v days old or hostnames have changed (%v), renewing them with Let's Encrypt...", initiator, secret.Name, secret.Namespace, *daysBeforeRenewal, desiredState.Hostnames)
// 'lock' the secret for 15 minutes by storing the last attempt timestamp to prevent hitting the rate limit if the Let's Encrypt call fails and to prevent the watcher and the fallback polling to operate on the secret at the same time
currentState.LastAttempt = time.Now().Format(time.RFC3339)
// serialize state and store it in the annotation
letsEncryptCertificateStateByteArray, err := json.Marshal(currentState)
if err != nil {
log.Error().Err(err)
return status, err
}
secret.Annotations[annotationLetsEncryptCertificateState] = string(letsEncryptCertificateStateByteArray)
// update secret, with last attempt; this will fire an event for the watcher, but this shouldn't lead to any action because storing the last attempt locks the secret for 15 minutes
_, err = kubeClientset.CoreV1().Secrets(secret.Namespace).Update(secret)
if err != nil {
log.Error().Err(err).Msgf("[%v] Secret %v.%v - Updating secret state has failed", initiator, secret.Name, secret.Namespace)
return status, err
}
// error if any of the hostnames is invalid (too long or containing disallowed characters)
hostnames := strings.Split(desiredState.Hostnames, ",")
for _, hostname := range hostnames {
if !validateHostname(hostname) {
err = fmt.Errorf("Hostname %v is invalid", hostname)
log.Error().Err(err)
return status, err
}
}
// load account.json
log.Info().Msgf("[%v] Secret %v.%v - Loading account.json...", initiator, secret.Name, secret.Namespace)
fileBytes, err := ioutil.ReadFile("/account/account.json")
if err != nil {
log.Error().Err(err)
return status, err
}
var letsEncryptUser LetsEncryptUser
err = json.Unmarshal(fileBytes, &letsEncryptUser)
if err != nil {
log.Error().Err(err)
return status, err
}
// load private key
log.Info().Msgf("[%v] Secret %v.%v - Loading account.key...", initiator, secret.Name, secret.Namespace)
privateKey, err := loadPrivateKey("/account/account.key")
if err != nil {
log.Error().Err(err)
return status, err
}
letsEncryptUser.key = privateKey
log.Info().Msgf("[%v] Secret %v.%v - Creating lego config...", initiator, secret.Name, secret.Namespace)
config := lego.NewConfig(&letsEncryptUser)
// create letsencrypt lego client
log.Info().Msgf("[%v] Secret %v.%v - Creating lego client...", initiator, secret.Name, secret.Namespace)
legoClient, err := lego.NewClient(config)
if err != nil {
log.Error().Err(err)
return status, err
}
// get dns challenge
log.Info().Msgf("[%v] Secret %v.%v - Creating cloudflare provider...", initiator, secret.Name, secret.Namespace)
cloudflareConfig := cloudflare.NewDefaultConfig()
cloudflareConfig.AuthEmail = *cfAPIEmail
cloudflareConfig.AuthKey = *cfAPIKey
cloudflareConfig.PropagationTimeout = 10 * time.Minute
cloudflareProvider, err := cloudflare.NewDNSProviderConfig(cloudflareConfig)
if err != nil {
log.Error().Err(err)
return status, err
}
// clean up acme challenge records in advance
// for _, hostname := range hostnames {
// log.Info().Msgf("[%v] Secret %v.%v - Cleaning up TXT record _acme-challenge.%v...", initiator, secret.Name, secret.Namespace, hostname)
// err = cloudflareProvider.CleanUp(hostname, "", "123d==")
// if err != nil {
// log.Info().Err(err).Msgf("[%v] Secret %v.%v - Cleaning up TXT record _acme-challenge.%v failed", initiator, secret.Name, secret.Namespace, hostname)
// }
// }
// set challenge provider
legoClient.Challenge.SetDNS01Provider(cloudflareProvider)
// get certificate
log.Info().Msgf("[%v] Secret %v.%v - Obtaining certificate...", initiator, secret.Name, secret.Namespace)
request := certificate.ObtainRequest{
Domains: hostnames,
Bundle: true,
}
certificates, err := legoClient.Certificate.Obtain(request)
// if obtaining the certificate failed, exit and retry after more than 15 minutes
if err != nil {
log.Error().Err(err).Msgf("Could not obtain certificates for domains %v due to error", hostnames)
return status, err
}
if certificates == nil {
log.Error().Msgf("Could not obtain certificates for domains %v, certificates are empty", hostnames)
return status, err
}
// clean up acme challenge records afterwards
// for _, hostname := range hostnames {
// log.Info().Msgf("[%v] Secret %v.%v - Cleaning up TXT record _acme-challenge.%v...", initiator, secret.Name, secret.Namespace, hostname)
// err = cloudflareProvider.CleanUp(hostname, "", "123d==")
// if err != nil {
// log.Info().Err(err).Msgf("[%v] Secret %v.%v - Cleaning up TXT record _acme-challenge.%v failed", initiator, secret.Name, secret.Namespace, hostname)
// }
// }
// reload secret to avoid object has been modified error
secret, err = kubeClientset.CoreV1().Secrets(secret.Namespace).Get(secret.Name, metav1.GetOptions{})
if err != nil {
log.Error().Err(err)
return status, err
}
// update the secret
currentState = desiredState
currentState.LastRenewed = time.Now().Format(time.RFC3339)
log.Info().Msgf("[%v] Secret %v.%v - Updating secret because new certificates have been obtained...", initiator, secret.Name, secret.Namespace)
// serialize state and store it in the annotation
letsEncryptCertificateStateByteArray, err = json.Marshal(currentState)
if err != nil {
log.Error().Err(err)
return status, err
}
secret.Annotations[annotationLetsEncryptCertificateState] = string(letsEncryptCertificateStateByteArray)
// store the certificates
if secret.Data == nil {
secret.Data = make(map[string][]byte)
}
log.Info().Msgf("[%v] Secret %v.%v - Secret has %v data items before writing the certificates...", initiator, secret.Name, secret.Namespace, len(secret.Data))
// ssl keys
secret.Data["ssl.crt"] = certificates.Certificate
secret.Data["ssl.key"] = certificates.PrivateKey
secret.Data["ssl.pem"] = bytes.Join([][]byte{certificates.Certificate, certificates.PrivateKey}, []byte{})
if certificates.IssuerCertificate != nil {
secret.Data["ssl.issuer.crt"] = certificates.IssuerCertificate
}
jsonBytes, err := json.MarshalIndent(certificates, "", "\t")
if err != nil {
log.Error().Msgf("[%v] Secret %v.%v - Unable to marshal CertResource for domain %s\n\t%s", initiator, secret.Name, secret.Namespace, certificates.Domain, err.Error())
return status, err
}
secret.Data["ssl.json"] = jsonBytes
// tls keys for ingress object
secret.Data["tls.crt"] = certificates.Certificate
secret.Data["tls.key"] = certificates.PrivateKey
secret.Data["tls.pem"] = bytes.Join([][]byte{certificates.Certificate, certificates.PrivateKey}, []byte{})
if certificates.IssuerCertificate != nil {
secret.Data["tls.issuer.crt"] = certificates.IssuerCertificate
}
secret.Data["tls.json"] = jsonBytes
log.Info().Msgf("[%v] Secret %v.%v - Secret has %v data items after writing the certificates...", initiator, secret.Name, secret.Namespace, len(secret.Data))
// update secret, because the data and state annotation have changed
_, err = kubeClientset.CoreV1().Secrets(secret.Namespace).Update(secret)
if err != nil {
log.Error().Err(err)
return status, err
}
status = "succeeded"
log.Info().Msgf("[%v] Secret %v.%v - Certificates have been stored in secret successfully...", initiator, secret.Name, secret.Namespace)
if desiredState.CopyToAllNamespaces {
// copy to other namespaces if annotation is set to true
err = copySecretToAllNamespaces(kubeClientset, secret, initiator)
if err != nil {
return status, err
}
}
if desiredState.UploadToCloudflare {
// upload certificate to cloudflare for each hostname
err = uploadToCloudflare(desiredState.Hostnames, certificates.Certificate, certificates.PrivateKey)
if err != nil {
return status, err
}
}
return status, nil
}
status = "skipped"
return status, nil
}
func copySecretToAllNamespaces(kubeClientset *kubernetes.Clientset, secret *v1.Secret, initiator string) (err error) {
// get all namespaces
namespaces, err := kubeClientset.CoreV1().Namespaces().List(metav1.ListOptions{})
// loop namespaces
for _, ns := range namespaces.Items {
err := copySecretToNamespace(kubeClientset, secret, &ns, initiator)
if err != nil {
return err
}
}
return nil
}
func copySecretToNamespace(kubeClientset *kubernetes.Clientset, secret *v1.Secret, namespace *v1.Namespace, initiator string) error {
if namespace.Name == secret.Namespace || namespace.Status.Phase != v1.NamespaceActive {
return nil
}
log.Info().Msgf("[%v] Secret %v.%v - Copying secret to namespace %v...", initiator, secret.Name, secret.Namespace, namespace.Name)
// check if secret with same name already exists
secretInNamespace, err := kubeClientset.CoreV1().Secrets(namespace.Name).Get(secret.Name, metav1.GetOptions{})
if errors.IsNotFound(err) {
// doesn't exist, create new secret
secretInNamespace = &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secret.Name,
Namespace: namespace.Name,
Labels: secret.Labels,
Annotations: map[string]string{
annotationLetsEncryptCertificateLinkedSecret: fmt.Sprintf("%v/%v", secret.Namespace, secret.Name),
annotationLetsEncryptCertificateState: secret.Annotations[annotationLetsEncryptCertificateState],
},
},
Data: secret.Data,
}
_, err = kubeClientset.CoreV1().Secrets(namespace.Name).Create(secretInNamespace)
if err != nil {
return err
}
return nil
}
if err != nil {
return err
}
// already exists
log.Info().Msgf("[%v] Secret %v.%v - Already exists in namespace %v, updating data...", initiator, secret.Name, secret.Namespace, namespace.Name)
// update data in secret
secretInNamespace.Data = secret.Data
secretInNamespace.Annotations[annotationLetsEncryptCertificateState] = secret.Annotations[annotationLetsEncryptCertificateState]
_, err = kubeClientset.CoreV1().Secrets(namespace.Name).Update(secretInNamespace)
if err != nil {
return err
}
return nil
}
func isEventExist(kubeClientset *kubernetes.Clientset, namespace string, name string) (*v1.Event, string, error) {
event, err := kubeClientset.CoreV1().Events(namespace).Get(name, metav1.GetOptions{})
if errors.IsNotFound(err) {
return nil, "not found", err
}
if err != nil {
log.Error().Msgf("Event %v.%v - Getting event has an error.\n\t%s", name, namespace, err.Error())
return nil, "error", err
}
return event, "found", nil
}
func postEventAboutStatus(kubeClientset *kubernetes.Clientset, secret *v1.Secret, eventType string, action string, reason string, message string, kind string, reportingController string, reportingInstance string) (err error) {
now := time.Now().UTC()
count := int32(1)
eventName := fmt.Sprintf("%v-%v", secret.Name, action)
eventSource := os.Getenv("HOSTNAME")
eventResp, exist, err := isEventExist(kubeClientset, secret.Namespace, eventName)
if exist == "error" {
return err
}
if exist == "found" {
count = eventResp.Count + 1
eventResp.Type = eventType
eventResp.Action = action
eventResp.Reason = reason
eventResp.Message = message
eventResp.Count = count
eventResp.LastTimestamp = metav1.NewTime(now)
_, err = kubeClientset.CoreV1().Events(secret.Namespace).Update(eventResp)
if err != nil {
log.Error().Msgf("Event %v.%v - Updating Event has an error.\n\t%s", eventResp.Name, eventResp.Namespace, err.Error())
return err
}
log.Info().Msgf("Event %v.%v - has been updated successfully...", eventResp.Name, eventResp.Namespace)
return
}
event := &v1.Event{
ObjectMeta: metav1.ObjectMeta{
Name: eventName,
Namespace: secret.Namespace,
CreationTimestamp: metav1.NewTime(now),
Labels: secret.Labels,
},
FirstTimestamp: metav1.NewTime(now),
LastTimestamp: metav1.NewTime(now),
Type: eventType,
Action: action,
Reason: reason,
Message: message,
Count: count,
Source: v1.EventSource{
Component: eventSource,
},
InvolvedObject: v1.ObjectReference{
APIVersion: secret.APIVersion,
Kind: kind,
Namespace: secret.Namespace,
Name: secret.Name,
ResourceVersion: secret.ResourceVersion,
UID: secret.UID,
},
EventTime: metav1.NewMicroTime(now),
ReportingController: reportingController,
ReportingInstance: reportingInstance,
}
_, err = kubeClientset.CoreV1().Events(event.Namespace).Create(event)
if err != nil {
log.Error().Msgf("Event %v.%v - Creating Event has an error. %s", event.Name, event.Namespace, err.Error())
return err
}
log.Info().Msgf("Event %v.%v - has been created successfully...", event.Name, event.Namespace)
return
}
func processSecret(kubeClientset *kubernetes.Clientset, secret *v1.Secret, initiator string) (status string, err error) {
status = "failed"
if secret != nil {
desiredState := getDesiredSecretState(secret)
currentState := getCurrentSecretState(secret)
status, err = makeSecretChanges(kubeClientset, secret, initiator, desiredState, currentState)
if err != nil {
log.Error().Err(err).Msgf("[%v] Secret %v.%v - Error occurred...", initiator, secret.Name, secret.Namespace)
}
if status == "failed" {
err = postEventAboutStatus(kubeClientset, secret, "Warning", strings.Title(status), "FailedObtain", fmt.Sprintf("Certificate for secret %v obtaining failed", secret.Name), "Secret", "estafette.io/letsencrypt-certificate", os.Getenv("HOSTNAME"))
return
}
if status == "succeeded" {
err = postEventAboutStatus(kubeClientset, secret, "Normal", strings.Title(status), "SuccessfulObtain", fmt.Sprintf("Certificate for secret %v has been obtained succesfully", secret.Name), "Secret", "estafette.io/letsencrypt-certificate", os.Getenv("HOSTNAME"))
return
}
}
status = "skipped"
return status, nil
}
func validateHostname(hostname string) bool {
if len(hostname) > 253 {
return false
}
dnsNameParts := strings.Split(hostname, ".")
// we need at least a subdomain within a zone
if len(dnsNameParts) < 2 {
return false
}
// each label must be at most 63 characters and contain only alphanumerics or hyphens; the first label may be a wildcard star
for index, label := range dnsNameParts {
if index != 0 || label != "*" {
matchesInvalidChars, _ := regexp.MatchString("[^a-zA-Z0-9-]", label)
if matchesInvalidChars {
return false
}
}
if len(label) > 63 {
return false
}
}
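// Examples (illustrative): "*.example.com" and "app.example.com" pass the
// checks above; "example" fails (fewer than two labels); a label longer than
// 63 characters or containing characters other than alphanumerics/hyphens fails.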
return true
}
func uploadToCloudflare(hostnames string, certificate, privateKey []byte) (err error) {
// init cf
authentication := APIAuthentication{Key: *cfAPIKey, Email: *cfAPIEmail}
cf := NewCloudflare(authentication)
// loop hostnames
hostnameList := strings.Split(hostnames, ",")
for _, hostname := range hostnameList {
_, err := cf.UpsertSSLConfigurationByDNSName(hostname, certificate, privateKey)
if err != nil {
return err
}
}
return nil
}
|
[
"\"HOSTNAME\"",
"\"HOSTNAME\"",
"\"HOSTNAME\""
] |
[] |
[
"HOSTNAME"
] |
[]
|
["HOSTNAME"]
|
go
| 1 | 0 | |
Godeps/_workspace/src/github.com/opencontainers/runc/libcontainer/init_linux.go
|
// +build linux
package libcontainer
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strconv"
"strings"
"syscall"
"github.com/Sirupsen/logrus"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/user"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/vishvananda/netlink"
)
type initType string
const (
initSetns initType = "setns"
initStandard initType = "standard"
)
type pid struct {
Pid int `json:"pid"`
}
// network is an internal struct used to setup container networks.
type network struct {
configs.Network
// TempVethPeerName is a unique temporary veth peer name that was placed into
// the container's namespace.
TempVethPeerName string `json:"temp_veth_peer_name"`
}
// initConfig is used for transferring parameters from Exec() to Init()
type initConfig struct {
Args []string `json:"args"`
Env []string `json:"env"`
Cwd string `json:"cwd"`
Capabilities []string `json:"capabilities"`
User string `json:"user"`
Config *configs.Config `json:"config"`
Console string `json:"console"`
Networks []*network `json:"network"`
PassedFilesCount int `json:"passed_files_count"`
}
type initer interface {
Init() error
}
func newContainerInit(t initType, pipe *os.File) (initer, error) {
var config *initConfig
if err := json.NewDecoder(pipe).Decode(&config); err != nil {
return nil, err
}
if err := populateProcessEnvironment(config.Env); err != nil {
return nil, err
}
switch t {
case initSetns:
return &linuxSetnsInit{
config: config,
}, nil
case initStandard:
return &linuxStandardInit{
pipe: pipe,
parentPid: syscall.Getppid(),
config: config,
}, nil
}
return nil, fmt.Errorf("unknown init type %q", t)
}
// populateProcessEnvironment loads the provided environment variables into the
// current process's environment.
func populateProcessEnvironment(env []string) error {
for _, pair := range env {
p := strings.SplitN(pair, "=", 2)
if len(p) < 2 {
return fmt.Errorf("invalid environment '%v'", pair)
}
if err := os.Setenv(p[0], p[1]); err != nil {
return err
}
}
return nil
}
// finalizeNamespace drops the caps, sets the correct user
// and working dir, and closes any leaked file descriptors
// before executing the command inside the namespace
func finalizeNamespace(config *initConfig) error {
// Ensure that all unwanted fds we may have accidentally
// inherited are marked close-on-exec so they stay out of the
// container
if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil {
return err
}
capabilities := config.Config.Capabilities
if config.Capabilities != nil {
capabilities = config.Capabilities
}
w, err := newCapWhitelist(capabilities)
if err != nil {
return err
}
// drop capabilities in bounding set before changing user
if err := w.dropBoundingSet(); err != nil {
return err
}
// preserve existing capabilities while we change users
if err := system.SetKeepCaps(); err != nil {
return err
}
if err := setupUser(config); err != nil {
return err
}
if err := system.ClearKeepCaps(); err != nil {
return err
}
// drop all other capabilities
if err := w.drop(); err != nil {
return err
}
if config.Cwd != "" {
if err := syscall.Chdir(config.Cwd); err != nil {
return err
}
}
return nil
}
// syncParentReady sends to the given pipe a JSON payload which indicates that
// the init is ready to Exec the child process. It then waits for the parent to
// indicate that it is cleared to Exec.
func syncParentReady(pipe io.ReadWriter) error {
// Tell parent.
if err := utils.WriteJSON(pipe, syncT{procReady}); err != nil {
return err
}
// Wait for parent to give the all-clear.
var procSync syncT
if err := json.NewDecoder(pipe).Decode(&procSync); err != nil {
if err == io.EOF {
return fmt.Errorf("parent closed synchronisation channel")
}
return err
}
if procSync.Type != procRun {
return fmt.Errorf("invalid synchronisation flag from parent")
}
return nil
}
// joinExistingNamespaces gets all the namespace paths specified for the container and
// does a setns on the namespace fd so that the current process joins the namespace.
func joinExistingNamespaces(namespaces []configs.Namespace) error {
for _, ns := range namespaces {
if ns.Path != "" {
f, err := os.OpenFile(ns.Path, os.O_RDONLY, 0)
if err != nil {
return err
}
err = system.Setns(f.Fd(), uintptr(ns.Syscall()))
f.Close()
if err != nil {
return err
}
}
}
return nil
}
// setupUser changes the groups, gid, and uid for the user inside the container
func setupUser(config *initConfig) error {
// Set up defaults.
defaultExecUser := user.ExecUser{
Uid: syscall.Getuid(),
Gid: syscall.Getgid(),
Home: "/",
}
passwdPath, err := user.GetPasswdPath()
if err != nil {
return err
}
groupPath, err := user.GetGroupPath()
if err != nil {
return err
}
execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath)
if err != nil {
return err
}
var addGroups []int
if len(config.Config.AdditionalGroups) > 0 {
addGroups, err = user.GetAdditionalGroupsPath(config.Config.AdditionalGroups, groupPath)
if err != nil {
return err
}
}
// Before we change to the container's user, make sure that the process's STDIO
// is correctly owned by the user that we are switching to.
if err := fixStdioPermissions(execUser); err != nil {
return err
}
suppGroups := append(execUser.Sgids, addGroups...)
if err := syscall.Setgroups(suppGroups); err != nil {
return err
}
if err := system.Setgid(execUser.Gid); err != nil {
return err
}
if err := system.Setuid(execUser.Uid); err != nil {
return err
}
// if we didn't get HOME already, set it based on the user's HOME
if envHome := os.Getenv("HOME"); envHome == "" {
if err := os.Setenv("HOME", execUser.Home); err != nil {
return err
}
}
return nil
}
// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user.
// The ownership needs to match because it is created outside of the container and needs to be
// localized.
func fixStdioPermissions(u *user.ExecUser) error {
var null syscall.Stat_t
if err := syscall.Stat("/dev/null", &null); err != nil {
return err
}
for _, fd := range []uintptr{
os.Stdin.Fd(),
os.Stderr.Fd(),
os.Stdout.Fd(),
} {
var s syscall.Stat_t
if err := syscall.Fstat(int(fd), &s); err != nil {
return err
}
// skip chown of /dev/null if it was used as one of the STDIO fds.
if s.Rdev == null.Rdev {
continue
}
if err := syscall.Fchown(int(fd), u.Uid, u.Gid); err != nil {
return err
}
}
return nil
}
// setupNetwork sets up and initializes any network interface inside the container.
func setupNetwork(config *initConfig) error {
for _, config := range config.Networks {
strategy, err := getStrategy(config.Type)
if err != nil {
return err
}
if err := strategy.initialize(config); err != nil {
return err
}
}
return nil
}
func setupRoute(config *configs.Config) error {
for _, config := range config.Routes {
_, dst, err := net.ParseCIDR(config.Destination)
if err != nil {
return err
}
src := net.ParseIP(config.Source)
if src == nil {
return fmt.Errorf("Invalid source for route: %s", config.Source)
}
gw := net.ParseIP(config.Gateway)
if gw == nil {
return fmt.Errorf("Invalid gateway for route: %s", config.Gateway)
}
l, err := netlink.LinkByName(config.InterfaceName)
if err != nil {
return err
}
route := &netlink.Route{
Scope: netlink.SCOPE_UNIVERSE,
Dst: dst,
Src: src,
Gw: gw,
LinkIndex: l.Attrs().Index,
}
if err := netlink.RouteAdd(route); err != nil {
return err
}
}
return nil
}
func setupRlimits(config *configs.Config) error {
for _, rlimit := range config.Rlimits {
l := &syscall.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft}
if err := syscall.Setrlimit(rlimit.Type, l); err != nil {
return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err)
}
}
return nil
}
func setOomScoreAdj(oomScoreAdj int) error {
path := "/proc/self/oom_score_adj"
return ioutil.WriteFile(path, []byte(strconv.Itoa(oomScoreAdj)), 0700)
}
// killCgroupProcesses freezes then iterates over all the processes inside the
// manager's cgroups sending a SIGKILL to each process then waiting for them to
// exit.
func killCgroupProcesses(m cgroups.Manager) error {
var procs []*os.Process
if err := m.Freeze(configs.Frozen); err != nil {
logrus.Warn(err)
}
pids, err := m.GetAllPids()
if err != nil {
m.Freeze(configs.Thawed)
return err
}
for _, pid := range pids {
if p, err := os.FindProcess(pid); err == nil {
procs = append(procs, p)
if err := p.Kill(); err != nil {
logrus.Warn(err)
}
}
}
if err := m.Freeze(configs.Thawed); err != nil {
logrus.Warn(err)
}
for _, p := range procs {
if _, err := p.Wait(); err != nil {
logrus.Warn(err)
}
}
return nil
}
|
["\"HOME\""] |
[] |
["HOME"] |
[] |
["HOME"] |
go
| 1 | 0 | |
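For readers skimming the Go above, the environment-handling step in populateProcessEnvironment amounts to splitting each "KEY=VALUE" entry once on '=' and exporting it into the current process. Below is a minimal Python sketch of the same idea; the function name and sample pairs are made up for illustration and are not part of the original sources.

import os


def populate_process_environment(env):
    """Export each "KEY=VALUE" pair into the current process's environment."""
    for pair in env:
        key, sep, value = pair.partition("=")
        if not sep:
            # Mirror the Go version: entries without an '=' separator are rejected.
            raise ValueError("invalid environment entry: %r" % pair)
        os.environ[key] = value


if __name__ == "__main__":
    populate_process_environment(["HOME=/root", "PATH=/usr/bin:/bin"])
    print(os.environ["HOME"])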
analysis/cliffsDelta.py
|
from __future__ import division
def cliffsDelta(lst1, lst2, **dull):
"""Returns delta and true if there are more than 'dull' differences"""
if not dull:
dull = {'small': 0.147, 'medium': 0.33, 'large': 0.474} # effect sizes from (Hess and Kromrey, 2004)
m, n = len(lst1), len(lst2)
lst2 = sorted(lst2)
j = more = less = 0
for repeats, x in runs(sorted(lst1)):
while j <= (n - 1) and lst2[j] < x:
j += 1
more += j*repeats
while j <= (n - 1) and lst2[j] == x:
j += 1
less += (n - j)*repeats
d = (more - less) / (m*n)
size = lookup_size(d, dull)
return d, size
def lookup_size(delta: float, dull: dict) -> str:
"""
:type delta: float
:type dull: dict, a dictionary of small, medium, large thresholds.
"""
delta = abs(delta)
if delta < dull['small']:
return 'negligible'
if dull['small'] <= delta < dull['medium']:
return 'small'
if dull['medium'] <= delta < dull['large']:
return 'medium'
if delta >= dull['large']:
return 'large'
def runs(lst):
"""Iterator, chunks repeated values"""
for j, two in enumerate(lst):
if j == 0:
one, i = two, 0
if one != two:
yield j - i, one
i = j
one = two
yield j - i + 1, two
|
[] |
[] |
[] |
[] |
[] |
python
| null | null | null |
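A quick usage sketch for cliffsDelta above, assuming the analysis directory is importable as a package; the two sample lists and the printed values are illustrative only. With no keyword arguments the Hess and Kromrey (2004) thresholds are used, and custom thresholds can be supplied as shown.

from analysis.cliffsDelta import cliffsDelta  # assumes analysis/ contains an __init__.py

treated = [12, 15, 14, 16, 18, 19]
control = [10, 11, 12, 12, 13, 14]

# Default thresholds (small=0.147, medium=0.33, large=0.474).
d, size = cliffsDelta(treated, control)
print(d, size)  # a positive delta labelled 'large' for these made-up samples

# All three thresholds should be passed together when overriding the defaults.
d, size = cliffsDelta(treated, control, small=0.2, medium=0.4, large=0.6)
print(d, size)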
luigi/contrib/webhdfs.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Provides a WebHdfsTarget and WebHdfsClient using the
python [hdfs](https://pypi.python.org/pypi/hdfs/) library.
"""
from __future__ import absolute_import
import logging
import os
import random
import tempfile
from luigi import configuration
from luigi.target import FileSystemTarget
logger = logging.getLogger("luigi-interface")
try:
import hdfs as webhdfs
except ImportError:
logger.warning("Loading webhdfs module without `hdfs` package installed. "
"Will crash at runtime if webhdfs functionality is used.")
class WebHdfsTarget(FileSystemTarget):
fs = None
def __init__(self, path, client=None):
super(WebHdfsTarget, self).__init__(path)
path = self.path
self.fs = client or WebHdfsClient()
def open(self, mode='r'):
if mode not in ('r', 'w'):
raise ValueError("Unsupported open mode '%s'" % mode)
if mode == 'r':
return ReadableWebHdfsFile(path=self.path, client=self.fs)
elif mode == 'w':
return AtomicWebHdfsFile(path=self.path, client=self.fs)
class ReadableWebHdfsFile(object):
def __init__(self, path, client):
self.path = path
self.client = client
self.generator = None
def read(self):
self.generator = self.client.read(self.path)
return list(self.generator)[0]
def readlines(self, char='\n'):
self.generator = self.client.read(self.path, buffer_char=char)
return self.generator
def __enter__(self):
return self
def __exit__(self, exc_type, exc, traceback):
self.close()
def __iter__(self):
self.generator = self.readlines('\n')
has_next = True
while has_next:
try:
chunk = self.generator.next()
yield chunk
except StopIteration:
has_next = False
self.close()
def close(self):
self.generator.close()
class AtomicWebHdfsFile(file):
"""
An HDFS file that writes to a local temp file and is uploaded to WebHDFS on close.
"""
def __init__(self, path, client):
unique_name = 'luigi-webhdfs-tmp-%09d' % random.randrange(0, 1e10)
self.tmp_path = os.path.join(tempfile.gettempdir(), unique_name)
self.path = path
self.client = client
super(AtomicWebHdfsFile, self).__init__(self.tmp_path, 'w')
def close(self):
super(AtomicWebHdfsFile, self).close()
if not self.client.exists(self.path):
self.client.upload(self.path, self.tmp_path)
def __enter__(self):
return self
def __exit__(self, exc_type, exc, traceback):
"""
Close/commit the file if there is no exception.
"""
if exc_type:
return
return file.__exit__(self, exc_type, exc, traceback)
def __del__(self):
"""
Remove the temporary file.
"""
if os.path.exists(self.tmp_path):
os.remove(self.tmp_path)
class WebHdfsClient(object):
def __init__(self, host=None, port=None, user=None):
host = self.get_config('namenode_host') if host is None else host
port = self.get_config('namenode_port') if port is None else port
user = self.get_config('user') if user is None else os.environ['USER']
url = 'http://' + host + ':' + port
self.webhdfs = webhdfs.InsecureClient(url=url, user=user)
def get_config(self, key):
config = configuration.get_config()
try:
return config.get('hdfs', key)
except:
raise RuntimeError("You must specify %s in the [hdfs] section of "
"the luigi client.cfg file" % key)
def walk(self, path, depth=1):
return self.webhdfs.walk(path, depth=depth)
def exists(self, path):
"""
Returns true if the path exists and false otherwise.
"""
try:
self.webhdfs.status(path)
return True
except webhdfs.util.HdfsError as e:
if str(e).startswith('File does not exist: '):
return False
else:
raise e
def upload(self, hdfs_path, local_path, overwrite=False):
return self.webhdfs.upload(hdfs_path, local_path, overwrite=overwrite)
def download(self, hdfs_path, local_path, overwrite=False, n_threads=-1):
return self.webhdfs.download(hdfs_path, local_path, overwrite=overwrite,
n_threads=n_threads)
def remove(self, hdfs_path, recursive=False):
return self.webhdfs.delete(hdfs_path, recursive=recursive)
def read(self, hdfs_path, offset=0, length=None, buffer_size=None,
chunk_size=1024, buffer_char=None):
return self.webhdfs.read(hdfs_path, offset=offset, length=length,
buffer_size=buffer_size, chunk_size=chunk_size,
buffer_char=buffer_char)
|
[] |
[] |
["USER"] |
[] |
["USER"] |
python
| 1 | 0 | |
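A minimal usage sketch for the target above. It assumes a Python 2 environment (AtomicWebHdfsFile subclasses the file builtin), a compatible version of the hdfs package, and a client.cfg with an [hdfs] section providing namenode_host, namenode_port and user; the HDFS path is hypothetical.

from luigi.contrib.webhdfs import WebHdfsTarget

target = WebHdfsTarget('/tmp/example/output.txt')  # hypothetical HDFS path

# Writes go to a local temp file and are uploaded to WebHDFS when the file is closed.
with target.open('w') as f:
    f.write('hello from luigi\n')

# Reads stream the file back through the WebHDFS client.
with target.open('r') as f:
    print(f.read())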
vertica_python/tests/base.py
|
from __future__ import print_function, division, absolute_import
import os
import unittest
from six import string_types
from .. import *
from ..compat import as_text, as_str, as_bytes
DEFAULT_VP_TEST_HOST = '127.0.0.1'
DEFAULT_VP_TEST_PORT = 5433
DEFAULT_VP_TEST_USER = 'dbadmin'
DEFAULT_VP_TEST_PASSWD = ''
DEFAULT_VP_TEST_DB = 'docker'
DEFAULT_VP_TEST_TABLE = 'vertica_python_unit_test'
class VerticaPythonTestCase(unittest.TestCase):
"""Base class for tests that query Vertica."""
@classmethod
def setUpClass(cls):
cls._host = os.getenv('VP_TEST_HOST', DEFAULT_VP_TEST_HOST)
cls._port = int(os.getenv('VP_TEST_PORT', DEFAULT_VP_TEST_PORT))
cls._user = os.getenv('VP_TEST_USER', DEFAULT_VP_TEST_USER)
cls._password = os.getenv('VP_TEST_PASSWD', DEFAULT_VP_TEST_PASSWD)
cls._database = os.getenv('VP_TEST_DB', DEFAULT_VP_TEST_DB)
cls._table = os.getenv('VP_TEST_TABLE', DEFAULT_VP_TEST_TABLE)
cls._conn_info = {
'host': cls._host,
'port': cls._port,
'database': cls._database,
'user': cls._user,
'password': cls._password,
}
@classmethod
def tearDownClass(cls):
with cls._connect() as conn:
cur = conn.cursor()
cur.execute("DROP TABLE IF EXISTS {0}".format(cls._table))
@classmethod
def _connect(cls):
"""Connects to vertica.
:return: a connection to vertica.
"""
return connect(**cls._conn_info)
def _query_and_fetchall(self, query):
"""Creates a new connection, executes a query and fetches all the results.
:param query: query to execute
:return: all fetched results as returned by cursor.fetchall()
"""
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
results = cur.fetchall()
return results
def _query_and_fetchone(self, query):
"""Creates a new connection, executes a query and fetches one result.
:param query: query to execute
:return: the first result fetched by cursor.fetchone()
"""
with self._connect() as conn:
cur = conn.cursor()
cur.execute(query)
result = cur.fetchone()
return result
def assertTextEqual(self, first, second, msg=None):
first_text = as_text(first)
second_text = as_text(second)
self.assertEqual(first=first_text, second=second_text, msg=msg)
def assertStrEqual(self, first, second, msg=None):
first_str = as_str(first)
second_str = as_str(second)
self.assertEqual(first=first_str, second=second_str, msg=msg)
def assertBytesEqual(self, first, second, msg=None):
first_bytes = as_bytes(first)
second_bytes = as_bytes(second)
self.assertEqual(first=first_bytes, second=second_bytes, msg=msg)
def assertResultEqual(self, value, result, msg=None):
if isinstance(value, string_types):
self.assertTextEqual(first=value, second=result, msg=msg)
else:
self.assertEqual(first=value, second=result, msg=msg)
def assertListOfListsEqual(self, list1, list2, msg=None):
self.assertEqual(len(list1), len(list2), msg=msg)
for l1, l2 in zip(list1, list2):
self.assertListEqual(l1, l2, msg=msg)
|
[] |
[] |
["VP_TEST_DB", "VP_TEST_TABLE", "VP_TEST_PORT", "VP_TEST_HOST", "VP_TEST_USER", "VP_TEST_PASSWD"] |
[] |
["VP_TEST_DB", "VP_TEST_TABLE", "VP_TEST_PORT", "VP_TEST_HOST", "VP_TEST_USER", "VP_TEST_PASSWD"] |
python
| 6 | 0 | |
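The base class above resolves its connection details from the VP_TEST_* environment variables, falling back to the DEFAULT_VP_TEST_* constants. A hedged sketch of a concrete test case built on it follows; the table contents and assertion are made up, and a reachable Vertica instance is assumed.

import unittest

from vertica_python.tests.base import VerticaPythonTestCase


class TableTests(VerticaPythonTestCase):
    def test_insert_and_count(self):
        # _connect() and _table come from the base class shown above.
        with self._connect() as conn:
            cur = conn.cursor()
            cur.execute("CREATE TABLE IF NOT EXISTS {0} (a INT)".format(self._table))
            cur.execute("INSERT INTO {0} VALUES (1)".format(self._table))
            cur.execute("SELECT COUNT(*) FROM {0}".format(self._table))
            self.assertEqual(cur.fetchone()[0], 1)


if __name__ == '__main__':
    # e.g. VP_TEST_HOST=127.0.0.1 VP_TEST_USER=dbadmin python this_module.py
    unittest.main()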
sifriah-java/Launch.java
|
package df.sifriah;
import df.sifriah.view.MainPanel;
public class Launch {
/**
* @param args
*/
public static void main(String[] args) {
System.out.println(">>>Start");
MainPanel mainPanel = new MainPanel();
String repXml = System.getenv("CONTENTDIR");
mainPanel.displayMainPanel(repXml+"/xml/tanach.xml");
System.out.println("<<<Stop");
}
}
|
["\"CONTENTDIR\""] |
[] |
["CONTENTDIR"] |
[] |
["CONTENTDIR"] |
java
| 1 | 0 |
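Launch.java expects CONTENTDIR to point at a directory containing xml/tanach.xml. A small sketch (kept in Python like the other examples here) of invoking it with that variable set; the content directory and classpath are assumptions.

import os
import subprocess

# CONTENTDIR must contain xml/tanach.xml for the launcher to find its data.
env = dict(os.environ, CONTENTDIR='/opt/sifriah/content')
subprocess.check_call(
    ['java', '-cp', 'build/classes', 'df.sifriah.Launch'],  # hypothetical classpath
    env=env,
)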